// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

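/* The M() macro below expands, via MBOX_UP_CGX_MESSAGES, into one
 * otx2_mbox_alloc_msg_<fn_name>() helper per AF-to-PF uplink CGX message.
 * Each helper allocates a request/response pair on the uplink mailbox and
 * fills in the message signature and ID before handing the request back.
 */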
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

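/* Check whether the CGX LMAC mapped to this PF supports the given MAC
 * feature (e.g. RVU_LMAC_FEAT_PTP or RVU_LMAC_FEAT_FC). PFs with no CGX
 * mapping support no features.
 */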
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return false;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;

	return find_first_bit(&pfmap, 16);
}

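/* Pack a CGX id and LMAC id into one byte: CGX id in the upper nibble,
 * LMAC id in the lower nibble. This is the format stored in
 * rvu->pf2cgxlmac_map[] and decoded by rvu_get_cgx_lmac_id().
 */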
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

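/* Build the PF <-> CGX LMAC mapping tables. Each LMAC on each CGX gets the
 * next RVU PF id starting from PF_CGXMAP_BASE, a free packet kind (pkind)
 * for NPC parsing, and a NIX block assignment based on P2X connectivity.
 */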
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
					   cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
					   GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			pf++;
		}
	}
	return 0;
}

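/* Read the current link state of one CGX LMAC and queue it on the event
 * queue so the worker can forward it to the mapped PF. Used to replay the
 * link status when a PF enables link-event notifications.
 */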
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

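/* Forward a link-change event to every PF mapped to the reporting LMAC.
 * PFs that have not enabled notifications only get the state logged; the
 * rest receive a cgx_link_event mbox message and are waited on for a
 * response.
 */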
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

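/* Worker that drains the CGX event queue: dequeue entries one at a time
 * under the queue lock, notify the mapped PFs outside the lock, and free
 * each entry once processed.
 */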
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

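/* Set up the event queue, the workqueue that drains it, and register
 * cgx_lmac_postevent() as the link-change callback on every LMAC of every
 * CGX instance.
 */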
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of a mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

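/* Enable or disable forwarding of received pause frames from the LMAC to
 * the NIX block by setting or clearing CTL_BCK on the PF's mapped LMAC.
 */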
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, enable);
}

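/* Start or stop Rx/Tx on the CGX LMAC mapped to this pcifunc. Only the
 * mapped PF itself may do this; requests from VFs or unmapped PFs are
 * rejected with -EPERM.
 */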
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

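/* Common stats handler for both CGX and RPM MACs. The response layout is
 * picked at runtime: if the MAC reports the RPM stats count, the buffer is
 * treated as struct rpm_stats_rsp, otherwise as struct cgx_stats_rsp.
 */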
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48-bit MAC address, most significant byte first,
	 * into rsp->mac_addr.
	 */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

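/* Enable or disable PTP timestamping on the PF's LMAC and keep the NPC
 * parser in sync: with PTP on, received packets carry an extra 8 bytes
 * before the data the parser expects, so the KPU action must shift
 * parsing accordingly.
 */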
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * reject it if received from any other PF/VF.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	cgx_lmac_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

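/* Enable or disable link-change notifications for a PF. On enable, record
 * the PF in pf_notify_bmap and immediately replay the current link state
 * so the PF starts from a known status.
 */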
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

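/* Return the MAC FIFO length reported by the first enabled CGX/RPM
 * instance's mac_ops, or 0 if no instance is enabled.
 */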
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

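/* Enable or disable internal loopback on the PF's LMAC via the MAC-type
 * specific mac_lmac_intl_lbk() operation.
 */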
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * reject it if received from any other PF/VF.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
					      req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
						  &rsp->tx_pause,
						  &rsp->rx_pause);
	return 0;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Find the cumulative status of a NIX Rx/Tx counter across the LF of a PF
 * and the LFs of its VFs, i.e. the NIX Rx/Tx counter at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LF of a PF and all of its VFs belong to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			    ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

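/* Reference-counted CGX start/stop. The parent PF keeps a cgx_users count
 * across itself and its VFs; LMAC Rx/Tx is actually enabled only when the
 * first user starts and disabled only when the last user stops.
 */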
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIX LFs is started.
	 * Stop CGX when the last of all NIX LFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return -ENXIO;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}