// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

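/* MBOX_UP_CGX_MESSAGES expands the M() macro below once for every CGX
 * mailbox message sent in the AF-to-PF (up) direction, generating a typed
 * otx2_mbox_alloc_msg_<name>() helper that allocates the message in the
 * up mailbox and fills in the request signature and message id.
 */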
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

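/* Pack a CGX id and LMAC id into the single byte format used by
 * pf2cgxlmac_map: CGX id in the upper nibble, LMAC id in the lower nibble.
 */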
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

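/* Build the PF <-> CGX LMAC mapping tables: pf2cgxlmac_map gives the
 * CGX/LMAC pair of a PF and cgxlmac2pf_map is the reverse map (a bitmap
 * of PFs per LMAC). Each mapped PF is also assigned a free NPC pkind.
 */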
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int cgx, lmac_cnt, lmac;
	int pf = PF_CGXMAP_BASE;
	int size, free_pkind;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu->cgx_mapped_pfs++;
		}
	}
	return 0;
}

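/* Read the current link state of a CGX LMAC and queue it on the CGX event
 * queue so the worker forwards it to the mapped PF. Used to push the
 * initial link status when a PF enables link event notifications.
 */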
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		/* Don't leak the queue entry if the link info read failed */
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

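/* Forward a CGX link change event to every PF mapped to the LMAC. PFs that
 * have not enabled notifications only get the state logged; the others are
 * sent a cgx_link_event message on the up mailbox and we wait for the ACK.
 */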
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

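/* Workqueue handler: drain the CGX event queue and notify the PFs mapped
 * to each LMAC about the link state change.
 */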
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

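/* Set up the CGX event queue and its worker, then register the link change
 * callback with every LMAC of every probed CGX device.
 */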
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed\n");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

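/* Probe-time CGX setup: cache per-CGX driver data, map CGX LMACs to RVU
 * PFs, register for link events and start link up on all CGX ports.
 */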
int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port IDs start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port ID value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed before
	 * we turn on the links.
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

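/* Teardown counterpart of rvu_cgx_init(): unregister the per-LMAC event
 * handlers and destroy the event workqueue.
 */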
int rvu_cgx_exit(struct rvu *rvu)
{
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

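/* Enable or disable forwarding of Rx pause frames from a PF's CGX LMAC to
 * NIX by setting or clearing CTL_BCK on that LMAC.
 */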
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

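/* Start or stop packet Rx/Tx on the CGX LMAC mapped to a PF. Returns
 * -ENODEV for VFs and for PFs that are not mapped to a CGX LMAC.
 */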
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK as there is nothing
	 * to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

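/* Mbox handler: collect all CGX Rx and Tx statistics of the LMAC mapped to
 * the requesting PF and return them in the response.
 */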
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);

	/* Rx stats */
	while (stat < CGX_RX_STATS_COUNT) {
		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		rsp->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < CGX_TX_STATS_COUNT) {
		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		rsp->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

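/* Mbox handler: program the MAC address requested by a PF into its CGX
 * LMAC.
 */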
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48 bit MAC address into rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

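/* Mbox handlers: enable/disable promiscuous mode on the CGX LMAC mapped to
 * the requesting PF.
 */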
int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK as there is nothing
	 * to do.
	 */
	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK as there is nothing
	 * to do.
	 */
	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

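/* Enable or disable CGX link event notifications for a PF. When enabling,
 * also push the current link status so the PF starts from a known state.
 */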
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK as there is nothing
	 * to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

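/* Mbox handler: return the current link state of the CGX LMAC mapped to
 * the requesting PF.
 */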
int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

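/* Enable or disable internal loopback on the CGX LMAC mapped to a PF. */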
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK as there is nothing
	 * to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

/* Find the cumulative NIX Rx/Tx counters across the NIX LFs of a PF and all
 * of its VFs, i.e. the NIX Rx/Tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LFs of a PF and all of its VFs belong to the same
	 * NIX block.
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

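/* Reference-counted CGX start/stop for a PF and its VFs: the LMAC is
 * started when the first NIXLF of the PF/VF group starts and stopped when
 * the last one stops, tracked via parent_pf->cgx_users.
 */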
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIXLFs is started.
	 * Stop CGX when the last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}