1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Physical Function ethernet driver
3 *
4 * Copyright (C) 2020 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/pci.h>
14 #include <linux/etherdevice.h>
15 #include <linux/of.h>
16 #include <linux/if_vlan.h>
17 #include <linux/iommu.h>
18 #include <net/ip.h>
19
20 #include "otx2_reg.h"
21 #include "otx2_common.h"
22 #include "otx2_txrx.h"
23 #include "otx2_struct.h"
24 #include "otx2_ptp.h"
25 #include "cn10k.h"
26 #include <rvu_trace.h>
27
28 #define DRV_NAME "rvu_nicpf"
29 #define DRV_STRING "Marvell RVU NIC Physical Function Driver"
30
31 /* Supported devices */
32 static const struct pci_device_id otx2_pf_id_table[] = {
33 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
34 { 0, } /* end of table */
35 };
36
37 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
38 MODULE_DESCRIPTION(DRV_STRING);
39 MODULE_LICENSE("GPL v2");
40 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
41
42 enum {
43 TYPE_PFAF,
44 TYPE_PFVF,
45 };
46
47 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
48 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
49
50 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
51 {
52 bool if_up = netif_running(netdev);
53 int err = 0;
54
55 if (if_up)
56 otx2_stop(netdev);
57
58 netdev_info(netdev, "Changing MTU from %d to %d\n",
59 netdev->mtu, new_mtu);
60 netdev->mtu = new_mtu;
61
62 if (if_up)
63 err = otx2_open(netdev);
64
65 return err;
66 }
67
68 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
69 {
70 int irq, vfs = pf->total_vfs;
71
72 /* Disable VFs ME interrupts */
73 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
74 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
75 free_irq(irq, pf);
76
77 /* Disable VFs FLR interrupts */
78 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
79 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
80 free_irq(irq, pf);
81
82 if (vfs <= 64)
83 return;
84
85 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
86 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
87 free_irq(irq, pf);
88
89 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
90 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
91 free_irq(irq, pf);
92 }
93
94 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
95 {
96 if (!pf->flr_wq)
97 return;
98 destroy_workqueue(pf->flr_wq);
99 pf->flr_wq = NULL;
100 devm_kfree(pf->dev, pf->flr_wrk);
101 }
102
103 static void otx2_flr_handler(struct work_struct *work)
104 {
105 struct flr_work *flrwork = container_of(work, struct flr_work, work);
106 struct otx2_nic *pf = flrwork->pf;
107 struct mbox *mbox = &pf->mbox;
108 struct msg_req *req;
109 int vf, reg = 0;
110
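/* One flr_work entry was allocated per VF, so the VF index is the
 * offset of this work item within the flr_wrk array.
 */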
111 vf = flrwork - pf->flr_wrk;
112
113 mutex_lock(&mbox->lock);
114 req = otx2_mbox_alloc_msg_vf_flr(mbox);
115 if (!req) {
116 mutex_unlock(&mbox->lock);
117 return;
118 }
119 req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
120 req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
121
122 if (!otx2_sync_mbox_msg(&pf->mbox)) {
123 if (vf >= 64) {
124 reg = 1;
125 vf = vf - 64;
126 }
127 /* clear transaction pending bit */
128 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
129 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
130 }
131
132 mutex_unlock(&mbox->lock);
133 }
134
135 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
136 {
137 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
138 int reg, dev, vf, start_vf, num_reg = 1;
139 u64 intr;
140
141 if (pf->total_vfs > 64)
142 num_reg = 2;
143
144 for (reg = 0; reg < num_reg; reg++) {
145 intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
146 if (!intr)
147 continue;
148 start_vf = 64 * reg;
149 for (vf = 0; vf < 64; vf++) {
150 if (!(intr & BIT_ULL(vf)))
151 continue;
152 dev = vf + start_vf;
153 queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
154 /* Clear interrupt */
155 otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
156 /* Disable the interrupt */
157 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
158 BIT_ULL(vf));
159 }
160 }
161 return IRQ_HANDLED;
162 }
163
164 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
165 {
166 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
167 int vf, reg, num_reg = 1;
168 u64 intr;
169
170 if (pf->total_vfs > 64)
171 num_reg = 2;
172
173 for (reg = 0; reg < num_reg; reg++) {
174 intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
175 if (!intr)
176 continue;
177 for (vf = 0; vf < 64; vf++) {
178 if (!(intr & BIT_ULL(vf)))
179 continue;
180 /* clear trpend bit */
181 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
182 /* clear interrupt */
183 otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
184 }
185 }
186 return IRQ_HANDLED;
187 }
188
189 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
190 {
191 struct otx2_hw *hw = &pf->hw;
192 char *irq_name;
193 int ret;
194
195 /* Register ME interrupt handler*/
196 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
197 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
198 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
199 otx2_pf_me_intr_handler, 0, irq_name, pf);
200 if (ret) {
201 dev_err(pf->dev,
202 "RVUPF: IRQ registration failed for ME0\n");
203 }
204
205 /* Register FLR interrupt handler */
206 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
207 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
208 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
209 otx2_pf_flr_intr_handler, 0, irq_name, pf);
210 if (ret) {
211 dev_err(pf->dev,
212 "RVUPF: IRQ registration failed for FLR0\n");
213 return ret;
214 }
215
216 if (numvfs > 64) {
217 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
218 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
219 rvu_get_pf(pf->pcifunc));
220 ret = request_irq(pci_irq_vector
221 (pf->pdev, RVU_PF_INT_VEC_VFME1),
222 otx2_pf_me_intr_handler, 0, irq_name, pf);
223 if (ret) {
224 dev_err(pf->dev,
225 "RVUPF: IRQ registration failed for ME1\n");
226 }
227 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
228 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
229 rvu_get_pf(pf->pcifunc));
230 ret = request_irq(pci_irq_vector
231 (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
232 otx2_pf_flr_intr_handler, 0, irq_name, pf);
233 if (ret) {
234 dev_err(pf->dev,
235 "RVUPF: IRQ registration failed for FLR1\n");
236 return ret;
237 }
238 }
239
240 /* Enable ME interrupt for all VFs*/
241 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
242 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
243
244 /* Enable FLR interrupt for all VFs*/
245 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
246 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
247
248 if (numvfs > 64) {
249 numvfs -= 64;
250
251 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
252 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
253 INTR_MASK(numvfs));
254
255 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
256 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
257 INTR_MASK(numvfs));
258 }
259 return 0;
260 }
261
262 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
263 {
264 int vf;
265
266 pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
267 WQ_UNBOUND | WQ_HIGHPRI, 1);
268 if (!pf->flr_wq)
269 return -ENOMEM;
270
271 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
272 sizeof(struct flr_work), GFP_KERNEL);
273 if (!pf->flr_wrk) {
274 destroy_workqueue(pf->flr_wq);
275 return -ENOMEM;
276 }
277
278 for (vf = 0; vf < num_vfs; vf++) {
279 pf->flr_wrk[vf].pf = pf;
280 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
281 }
282
283 return 0;
284 }
285
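/* Scan the interrupt bitmap and, for every mailbox device with pending
 * messages, latch the message count from the mailbox header and queue
 * the corresponding down/up work item on the given workqueue.
 */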
286 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
287 int first, int mdevs, u64 intr, int type)
288 {
289 struct otx2_mbox_dev *mdev;
290 struct otx2_mbox *mbox;
291 struct mbox_hdr *hdr;
292 int i;
293
294 for (i = first; i < mdevs; i++) {
295 /* Interrupt bit positions start from 0, relative to 'first' */
296 if (!(intr & BIT_ULL(i - first)))
297 continue;
298
299 mbox = &mw->mbox;
300 mdev = &mbox->dev[i];
301 if (type == TYPE_PFAF)
302 otx2_sync_mbox_bbuf(mbox, i);
303 hdr = mdev->mbase + mbox->rx_start;
304 /* The hdr->num_msgs is set to zero immediately in the interrupt
305 * handler to ensure that it holds a correct value next time
306 * when the interrupt handler is called.
307 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
308 * pf->mbox.up_num_msgs holds the data for use in
309 * pfaf_mbox_up_handler.
310 */
311 if (hdr->num_msgs) {
312 mw[i].num_msgs = hdr->num_msgs;
313 hdr->num_msgs = 0;
314 if (type == TYPE_PFAF)
315 memset(mbox->hwbase + mbox->rx_start, 0,
316 ALIGN(sizeof(struct mbox_hdr),
317 sizeof(u64)));
318
319 queue_work(mbox_wq, &mw[i].mbox_wrk);
320 }
321
322 mbox = &mw->mbox_up;
323 mdev = &mbox->dev[i];
324 if (type == TYPE_PFAF)
325 otx2_sync_mbox_bbuf(mbox, i);
326 hdr = mdev->mbase + mbox->rx_start;
327 if (hdr->num_msgs) {
328 mw[i].up_num_msgs = hdr->num_msgs;
329 hdr->num_msgs = 0;
330 if (type == TYPE_PFAF)
331 memset(mbox->hwbase + mbox->rx_start, 0,
332 ALIGN(sizeof(struct mbox_hdr),
333 sizeof(u64)));
334
335 queue_work(mbox_wq, &mw[i].mbox_up_wrk);
336 }
337 }
338 }
339
340 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
341 struct otx2_mbox *pfvf_mbox, void *bbuf_base,
342 int devid)
343 {
344 struct otx2_mbox_dev *src_mdev = mdev;
345 int offset;
346
347 /* Msgs are already copied, trigger VF's mbox irq */
348 smp_wmb();
349
350 offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
351 writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
352
353 /* Restore VF's mbox bounce buffer region address */
354 src_mdev->mbase = bbuf_base;
355 }
356
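/* Forward a batch of mailbox messages between a VF and the AF in either
 * direction, reusing the VF's mailbox memory as the PF's bounce buffer
 * so that explicit message copies are avoided.
 */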
357 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
358 struct otx2_mbox *src_mbox,
359 int dir, int vf, int num_msgs)
360 {
361 struct otx2_mbox_dev *src_mdev, *dst_mdev;
362 struct mbox_hdr *mbox_hdr;
363 struct mbox_hdr *req_hdr;
364 struct mbox *dst_mbox;
365 int dst_size, err;
366
367 if (dir == MBOX_DIR_PFAF) {
368 /* Set VF's mailbox memory as PF's bounce buffer memory, so
369 * that explicit copying of VF's msgs to PF=>AF mbox region
370 * and AF=>PF responses to VF's mbox region can be avoided.
371 */
372 src_mdev = &src_mbox->dev[vf];
373 mbox_hdr = src_mbox->hwbase +
374 src_mbox->rx_start + (vf * MBOX_SIZE);
375
376 dst_mbox = &pf->mbox;
377 dst_size = dst_mbox->mbox.tx_size -
378 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
379 /* Check if msgs fit into destination area and have a valid size */
380 if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
381 return -EINVAL;
382
383 dst_mdev = &dst_mbox->mbox.dev[0];
384
385 mutex_lock(&pf->mbox.lock);
386 dst_mdev->mbase = src_mdev->mbase;
387 dst_mdev->msg_size = mbox_hdr->msg_size;
388 dst_mdev->num_msgs = num_msgs;
389 err = otx2_sync_mbox_msg(dst_mbox);
390 if (err) {
391 dev_warn(pf->dev,
392 "AF not responding to VF%d messages\n", vf);
393 /* restore PF mbase and exit */
394 dst_mdev->mbase = pf->mbox.bbuf_base;
395 mutex_unlock(&pf->mbox.lock);
396 return err;
397 }
398 /* At this point, all the VF messages sent to AF are acked
399 * with proper responses and the responses are copied to the VF
400 * mailbox, hence raise an interrupt to the VF.
401 */
402 req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
403 dst_mbox->mbox.rx_start);
404 req_hdr->num_msgs = num_msgs;
405
406 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
407 pf->mbox.bbuf_base, vf);
408 mutex_unlock(&pf->mbox.lock);
409 } else if (dir == MBOX_DIR_PFVF_UP) {
410 src_mdev = &src_mbox->dev[0];
411 mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
412 req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
413 src_mbox->rx_start);
414 req_hdr->num_msgs = num_msgs;
415
416 dst_mbox = &pf->mbox_pfvf[0];
417 dst_size = dst_mbox->mbox_up.tx_size -
418 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
419 /* Check if msgs fit into destination area */
420 if (mbox_hdr->msg_size > dst_size)
421 return -EINVAL;
422
423 dst_mdev = &dst_mbox->mbox_up.dev[vf];
424 dst_mdev->mbase = src_mdev->mbase;
425 dst_mdev->msg_size = mbox_hdr->msg_size;
426 dst_mdev->num_msgs = mbox_hdr->num_msgs;
427 err = otx2_sync_mbox_up_msg(dst_mbox, vf);
428 if (err) {
429 dev_warn(pf->dev,
430 "VF%d is not responding to mailbox\n", vf);
431 return err;
432 }
433 } else if (dir == MBOX_DIR_VFPF_UP) {
434 req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
435 src_mbox->rx_start);
436 req_hdr->num_msgs = num_msgs;
437 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
438 &pf->mbox.mbox_up,
439 pf->mbox_pfvf[vf].bbuf_base,
440 0);
441 }
442
443 return 0;
444 }
445
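/* Work handler for 'down' mailbox requests received from a VF: stamp
 * the VF's function number into each message and forward the whole
 * batch to the AF.
 */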
446 static void otx2_pfvf_mbox_handler(struct work_struct *work)
447 {
448 struct mbox_msghdr *msg = NULL;
449 int offset, vf_idx, id, err;
450 struct otx2_mbox_dev *mdev;
451 struct mbox_hdr *req_hdr;
452 struct otx2_mbox *mbox;
453 struct mbox *vf_mbox;
454 struct otx2_nic *pf;
455
456 vf_mbox = container_of(work, struct mbox, mbox_wrk);
457 pf = vf_mbox->pfvf;
458 vf_idx = vf_mbox - pf->mbox_pfvf;
459
460 mbox = &pf->mbox_pfvf[0].mbox;
461 mdev = &mbox->dev[vf_idx];
462 req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
463
464 offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
465
466 for (id = 0; id < vf_mbox->num_msgs; id++) {
467 msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
468 offset);
469
470 if (msg->sig != OTX2_MBOX_REQ_SIG)
471 goto inval_msg;
472
473 /* Set VF's number in each of the msg */
474 msg->pcifunc &= RVU_PFVF_FUNC_MASK;
475 msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
476 offset = msg->next_msgoff;
477 }
478 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
479 vf_mbox->num_msgs);
480 if (err)
481 goto inval_msg;
482 return;
483
484 inval_msg:
485 otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
486 otx2_mbox_msg_send(mbox, vf_idx);
487 }
488
489 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
490 {
491 struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
492 struct otx2_nic *pf = vf_mbox->pfvf;
493 struct otx2_mbox_dev *mdev;
494 int offset, id, vf_idx = 0;
495 struct mbox_hdr *rsp_hdr;
496 struct mbox_msghdr *msg;
497 struct otx2_mbox *mbox;
498
499 vf_idx = vf_mbox - pf->mbox_pfvf;
500 mbox = &pf->mbox_pfvf[0].mbox_up;
501 mdev = &mbox->dev[vf_idx];
502
503 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
504 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
505
506 for (id = 0; id < vf_mbox->up_num_msgs; id++) {
507 msg = mdev->mbase + offset;
508
509 if (msg->id >= MBOX_MSG_MAX) {
510 dev_err(pf->dev,
511 "Mbox msg with unknown ID 0x%x\n", msg->id);
512 goto end;
513 }
514
515 if (msg->sig != OTX2_MBOX_RSP_SIG) {
516 dev_err(pf->dev,
517 "Mbox msg with wrong signature %x, ID 0x%x\n",
518 msg->sig, msg->id);
519 goto end;
520 }
521
522 switch (msg->id) {
523 case MBOX_MSG_CGX_LINK_EVENT:
524 break;
525 default:
526 if (msg->rc)
527 dev_err(pf->dev,
528 "Mbox msg response has err %d, ID 0x%x\n",
529 msg->rc, msg->id);
530 break;
531 }
532
533 end:
534 offset = mbox->rx_start + msg->next_msgoff;
535 if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
536 __otx2_mbox_reset(mbox, 0);
537 mdev->msgs_acked++;
538 }
539 }
540
541 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
542 {
543 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
544 int vfs = pf->total_vfs;
545 struct mbox *mbox;
546 u64 intr;
547
548 mbox = pf->mbox_pfvf;
549 /* Handle VF interrupts */
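/* VFs 64..127 are reported via the second interrupt register,
 * VFs 0..63 via the first one.
 */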
550 if (vfs > 64) {
551 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
552 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
553 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
554 TYPE_PFVF);
555 vfs -= 64;
556 }
557
558 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
559 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
560
561 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
562
563 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
564
565 return IRQ_HANDLED;
566 }
567
568 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
569 {
570 void __iomem *hwbase;
571 struct mbox *mbox;
572 int err, vf;
573 u64 base;
574
575 if (!numvfs)
576 return -EINVAL;
577
578 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
579 sizeof(struct mbox), GFP_KERNEL);
580 if (!pf->mbox_pfvf)
581 return -ENOMEM;
582
583 pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
584 WQ_UNBOUND | WQ_HIGHPRI |
585 WQ_MEM_RECLAIM, 1);
586 if (!pf->mbox_pfvf_wq)
587 return -ENOMEM;
588
589 /* On CN10K platform, PF <-> VF mailbox region follows after
590 * PF <-> AF mailbox region.
591 */
592 if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
593 base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
594 MBOX_SIZE;
595 else
596 base = readq((void __iomem *)((u64)pf->reg_base +
597 RVU_PF_VF_BAR4_ADDR));
598
599 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
600 if (!hwbase) {
601 err = -ENOMEM;
602 goto free_wq;
603 }
604
605 mbox = &pf->mbox_pfvf[0];
606 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
607 MBOX_DIR_PFVF, numvfs);
608 if (err)
609 goto free_iomem;
610
611 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
612 MBOX_DIR_PFVF_UP, numvfs);
613 if (err)
614 goto free_iomem;
615
616 for (vf = 0; vf < numvfs; vf++) {
617 mbox->pfvf = pf;
618 INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
619 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
620 mbox++;
621 }
622
623 return 0;
624
625 free_iomem:
626 if (hwbase)
627 iounmap(hwbase);
628 free_wq:
629 destroy_workqueue(pf->mbox_pfvf_wq);
630 return err;
631 }
632
633 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
634 {
635 struct mbox *mbox = &pf->mbox_pfvf[0];
636
637 if (!mbox)
638 return;
639
640 if (pf->mbox_pfvf_wq) {
641 destroy_workqueue(pf->mbox_pfvf_wq);
642 pf->mbox_pfvf_wq = NULL;
643 }
644
645 if (mbox->mbox.hwbase)
646 iounmap(mbox->mbox.hwbase);
647
648 otx2_mbox_destroy(&mbox->mbox);
649 }
650
651 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
652 {
653 /* Clear PF <=> VF mailbox IRQ */
654 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
655 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
656
657 /* Enable PF <=> VF mailbox IRQ */
658 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
659 if (numvfs > 64) {
660 numvfs -= 64;
661 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
662 INTR_MASK(numvfs));
663 }
664 }
665
666 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
667 {
668 int vector;
669
670 /* Disable PF <=> VF mailbox IRQ */
671 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
672 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
673
674 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
675 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
676 free_irq(vector, pf);
677
678 if (numvfs > 64) {
679 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
680 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
681 free_irq(vector, pf);
682 }
683 }
684
685 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
686 {
687 struct otx2_hw *hw = &pf->hw;
688 char *irq_name;
689 int err;
690
691 /* Register MBOX0 interrupt handler */
692 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
693 if (pf->pcifunc)
694 snprintf(irq_name, NAME_SIZE,
695 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
696 else
697 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
698 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
699 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
700 if (err) {
701 dev_err(pf->dev,
702 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
703 return err;
704 }
705
706 if (numvfs > 64) {
707 /* Register MBOX1 interrupt handler */
708 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
709 if (pf->pcifunc)
710 snprintf(irq_name, NAME_SIZE,
711 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
712 else
713 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
714 err = request_irq(pci_irq_vector(pf->pdev,
715 RVU_PF_INT_VEC_VFPF_MBOX1),
716 otx2_pfvf_mbox_intr_handler,
717 0, irq_name, pf);
718 if (err) {
719 dev_err(pf->dev,
720 "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
721 return err;
722 }
723 }
724
725 otx2_enable_pfvf_mbox_intr(pf, numvfs);
726
727 return 0;
728 }
729
730 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
731 struct mbox_msghdr *msg)
732 {
733 int devid;
734
735 if (msg->id >= MBOX_MSG_MAX) {
736 dev_err(pf->dev,
737 "Mbox msg with unknown ID 0x%x\n", msg->id);
738 return;
739 }
740
741 if (msg->sig != OTX2_MBOX_RSP_SIG) {
742 dev_err(pf->dev,
743 "Mbox msg with wrong signature %x, ID 0x%x\n",
744 msg->sig, msg->id);
745 return;
746 }
747
748 /* Message response is heading to a VF */
749 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
750 if (devid) {
751 struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
752 struct delayed_work *dwork;
753
754 switch (msg->id) {
755 case MBOX_MSG_NIX_LF_START_RX:
756 config->intf_down = false;
757 dwork = &config->link_event_work;
758 schedule_delayed_work(dwork, msecs_to_jiffies(100));
759 break;
760 case MBOX_MSG_NIX_LF_STOP_RX:
761 config->intf_down = true;
762 break;
763 }
764
765 return;
766 }
767
768 switch (msg->id) {
769 case MBOX_MSG_READY:
770 pf->pcifunc = msg->pcifunc;
771 break;
772 case MBOX_MSG_MSIX_OFFSET:
773 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
774 break;
775 case MBOX_MSG_NPA_LF_ALLOC:
776 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
777 break;
778 case MBOX_MSG_NIX_LF_ALLOC:
779 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
780 break;
781 case MBOX_MSG_NIX_TXSCH_ALLOC:
782 mbox_handler_nix_txsch_alloc(pf,
783 (struct nix_txsch_alloc_rsp *)msg);
784 break;
785 case MBOX_MSG_NIX_BP_ENABLE:
786 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
787 break;
788 case MBOX_MSG_CGX_STATS:
789 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
790 break;
791 case MBOX_MSG_CGX_FEC_STATS:
792 mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
793 break;
794 default:
795 if (msg->rc)
796 dev_err(pf->dev,
797 "Mbox msg response has err %d, ID 0x%x\n",
798 msg->rc, msg->id);
799 break;
800 }
801 }
802
803 static void otx2_pfaf_mbox_handler(struct work_struct *work)
804 {
805 struct otx2_mbox_dev *mdev;
806 struct mbox_hdr *rsp_hdr;
807 struct mbox_msghdr *msg;
808 struct otx2_mbox *mbox;
809 struct mbox *af_mbox;
810 struct otx2_nic *pf;
811 int offset, id;
812
813 af_mbox = container_of(work, struct mbox, mbox_wrk);
814 mbox = &af_mbox->mbox;
815 mdev = &mbox->dev[0];
816 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
817
818 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
819 pf = af_mbox->pfvf;
820
821 for (id = 0; id < af_mbox->num_msgs; id++) {
822 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
823 otx2_process_pfaf_mbox_msg(pf, msg);
824 offset = mbox->rx_start + msg->next_msgoff;
825 if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
826 __otx2_mbox_reset(mbox, 0);
827 mdev->msgs_acked++;
828 }
829
830 }
831
832 static void otx2_handle_link_event(struct otx2_nic *pf)
833 {
834 struct cgx_link_user_info *linfo = &pf->linfo;
835 struct net_device *netdev = pf->netdev;
836
837 pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
838 linfo->link_up ? "UP" : "DOWN", linfo->speed,
839 linfo->full_duplex ? "Full" : "Half");
840 if (linfo->link_up) {
841 netif_carrier_on(netdev);
842 netif_tx_start_all_queues(netdev);
843 } else {
844 netif_tx_stop_all_queues(netdev);
845 netif_carrier_off(netdev);
846 }
847 }
848
849 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
850 struct cgx_link_info_msg *msg,
851 struct msg_rsp *rsp)
852 {
853 int i;
854
855 /* Copy the link info sent by AF */
856 pf->linfo = msg->link_info;
857
858 /* notify VFs about link event */
859 for (i = 0; i < pci_num_vf(pf->pdev); i++) {
860 struct otx2_vf_config *config = &pf->vf_configs[i];
861 struct delayed_work *dwork = &config->link_event_work;
862
863 if (config->intf_down)
864 continue;
865
866 schedule_delayed_work(dwork, msecs_to_jiffies(100));
867 }
868
869 /* interface has not been fully configured yet */
870 if (pf->flags & OTX2_FLAG_INTF_DOWN)
871 return 0;
872
873 otx2_handle_link_event(pf);
874 return 0;
875 }
876
877 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
878 struct mbox_msghdr *req)
879 {
880 /* Check if valid, if not reply with an invalid msg */
881 if (req->sig != OTX2_MBOX_REQ_SIG) {
882 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
883 return -ENODEV;
884 }
885
886 switch (req->id) {
887 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
888 case _id: { \
889 struct _rsp_type *rsp; \
890 int err; \
891 \
892 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
893 &pf->mbox.mbox_up, 0, \
894 sizeof(struct _rsp_type)); \
895 if (!rsp) \
896 return -ENOMEM; \
897 \
898 rsp->hdr.id = _id; \
899 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
900 rsp->hdr.pcifunc = 0; \
901 rsp->hdr.rc = 0; \
902 \
903 err = otx2_mbox_up_handler_ ## _fn_name( \
904 pf, (struct _req_type *)req, rsp); \
905 return err; \
906 }
907 MBOX_UP_CGX_MESSAGES
908 #undef M
909 break;
910 default:
911 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
912 return -ENODEV;
913 }
914 return 0;
915 }
916
917 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
918 {
919 struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
920 struct otx2_mbox *mbox = &af_mbox->mbox_up;
921 struct otx2_mbox_dev *mdev = &mbox->dev[0];
922 struct otx2_nic *pf = af_mbox->pfvf;
923 int offset, id, devid = 0;
924 struct mbox_hdr *rsp_hdr;
925 struct mbox_msghdr *msg;
926
927 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
928
929 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
930
931 for (id = 0; id < af_mbox->up_num_msgs; id++) {
932 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
933
934 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
935 /* Skip processing VF's messages */
936 if (!devid)
937 otx2_process_mbox_msg_up(pf, msg);
938 offset = mbox->rx_start + msg->next_msgoff;
939 }
940 if (devid) {
941 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
942 MBOX_DIR_PFVF_UP, devid - 1,
943 af_mbox->up_num_msgs);
944 return;
945 }
946
947 otx2_mbox_msg_send(mbox, 0);
948 }
949
950 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
951 {
952 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
953 struct mbox *mbox;
954
955 /* Clear the IRQ */
956 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
957
958 mbox = &pf->mbox;
959
960 trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
961
962 otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
963
964 return IRQ_HANDLED;
965 }
966
967 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
968 {
969 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
970
971 /* Disable AF => PF mailbox IRQ */
972 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
973 free_irq(vector, pf);
974 }
975
976 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
977 {
978 struct otx2_hw *hw = &pf->hw;
979 struct msg_req *req;
980 char *irq_name;
981 int err;
982
983 /* Register mailbox interrupt handler */
984 irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
985 snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
986 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
987 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
988 if (err) {
989 dev_err(pf->dev,
990 "RVUPF: IRQ registration failed for PFAF mbox irq\n");
991 return err;
992 }
993
994 /* Enable mailbox interrupt for msgs coming from AF.
995 * First clear to avoid spurious interrupts, if any.
996 */
997 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
998 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
999
1000 if (!probe_af)
1001 return 0;
1002
1003 /* Check mailbox communication with AF */
1004 req = otx2_mbox_alloc_msg_ready(&pf->mbox);
1005 if (!req) {
1006 otx2_disable_mbox_intr(pf);
1007 return -ENOMEM;
1008 }
1009 err = otx2_sync_mbox_msg(&pf->mbox);
1010 if (err) {
1011 dev_warn(pf->dev,
1012 "AF not responding to mailbox, deferring probe\n");
1013 otx2_disable_mbox_intr(pf);
1014 return -EPROBE_DEFER;
1015 }
1016
1017 return 0;
1018 }
1019
1020 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
1021 {
1022 struct mbox *mbox = &pf->mbox;
1023
1024 if (pf->mbox_wq) {
1025 destroy_workqueue(pf->mbox_wq);
1026 pf->mbox_wq = NULL;
1027 }
1028
1029 if (mbox->mbox.hwbase)
1030 iounmap((void __iomem *)mbox->mbox.hwbase);
1031
1032 otx2_mbox_destroy(&mbox->mbox);
1033 otx2_mbox_destroy(&mbox->mbox_up);
1034 }
1035
1036 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1037 {
1038 struct mbox *mbox = &pf->mbox;
1039 void __iomem *hwbase;
1040 int err;
1041
1042 mbox->pfvf = pf;
1043 pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
1044 WQ_UNBOUND | WQ_HIGHPRI |
1045 WQ_MEM_RECLAIM, 1);
1046 if (!pf->mbox_wq)
1047 return -ENOMEM;
1048
1049 /* Mailbox is a reserved memory (in RAM) region shared between
1050 * admin function (i.e. AF) and this PF, shouldn't be mapped as
1051 * device memory to allow unaligned accesses.
1052 */
1053 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1054 MBOX_SIZE);
1055 if (!hwbase) {
1056 dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1057 err = -ENOMEM;
1058 goto exit;
1059 }
1060
1061 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1062 MBOX_DIR_PFAF, 1);
1063 if (err)
1064 goto exit;
1065
1066 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1067 MBOX_DIR_PFAF_UP, 1);
1068 if (err)
1069 goto exit;
1070
1071 err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1072 if (err)
1073 goto exit;
1074
1075 INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1076 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1077 mutex_init(&mbox->lock);
1078
1079 return 0;
1080 exit:
1081 otx2_pfaf_mbox_destroy(pf);
1082 return err;
1083 }
1084
1085 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1086 {
1087 struct msg_req *msg;
1088 int err;
1089
1090 mutex_lock(&pf->mbox.lock);
1091 if (enable)
1092 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1093 else
1094 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1095
1096 if (!msg) {
1097 mutex_unlock(&pf->mbox.lock);
1098 return -ENOMEM;
1099 }
1100
1101 err = otx2_sync_mbox_msg(&pf->mbox);
1102 mutex_unlock(&pf->mbox.lock);
1103 return err;
1104 }
1105
1106 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1107 {
1108 struct msg_req *msg;
1109 int err;
1110
1111 mutex_lock(&pf->mbox.lock);
1112 if (enable)
1113 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1114 else
1115 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1116
1117 if (!msg) {
1118 mutex_unlock(&pf->mbox.lock);
1119 return -ENOMEM;
1120 }
1121
1122 err = otx2_sync_mbox_msg(&pf->mbox);
1123 mutex_unlock(&pf->mbox.lock);
1124 return err;
1125 }
1126
1127 int otx2_set_real_num_queues(struct net_device *netdev,
1128 int tx_queues, int rx_queues)
1129 {
1130 int err;
1131
1132 err = netif_set_real_num_tx_queues(netdev, tx_queues);
1133 if (err) {
1134 netdev_err(netdev,
1135 "Failed to set no of Tx queues: %d\n", tx_queues);
1136 return err;
1137 }
1138
1139 err = netif_set_real_num_rx_queues(netdev, rx_queues);
1140 if (err)
1141 netdev_err(netdev,
1142 "Failed to set no of Rx queues: %d\n", rx_queues);
1143 return err;
1144 }
1145 EXPORT_SYMBOL(otx2_set_real_num_queues);
1146
1147 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1148 {
1149 struct otx2_nic *pf = data;
1150 u64 val, *ptr;
1151 u64 qidx = 0;
1152
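/* For both the CQ and SQ loops below, the queue index is encoded into
 * the upper bits of the value written to the *_OP_INT register; the
 * atomic add returns that queue's interrupt status, which is then
 * written back to clear it.
 */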
1153 /* CQ */
1154 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1155 ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1156 val = otx2_atomic64_add((qidx << 44), ptr);
1157
1158 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1159 (val & NIX_CQERRINT_BITS));
1160 if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1161 continue;
1162
1163 if (val & BIT_ULL(42)) {
1164 netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1165 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1166 } else {
1167 if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1168 netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1169 qidx);
1170 if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1171 netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1172 qidx);
1173 }
1174
1175 schedule_work(&pf->reset_task);
1176 }
1177
1178 /* SQ */
1179 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1180 ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1181 val = otx2_atomic64_add((qidx << 44), ptr);
1182 otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1183 (val & NIX_SQINT_BITS));
1184
1185 if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
1186 continue;
1187
1188 if (val & BIT_ULL(42)) {
1189 netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1190 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1191 } else {
1192 if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
1193 netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
1194 qidx,
1195 otx2_read64(pf,
1196 NIX_LF_SQ_OP_ERR_DBG));
1197 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
1198 BIT_ULL(44));
1199 }
1200 if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
1201 netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
1202 qidx,
1203 otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
1204 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
1205 BIT_ULL(44));
1206 }
1207 if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
1208 netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
1209 qidx,
1210 otx2_read64(pf,
1211 NIX_LF_SEND_ERR_DBG));
1212 otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
1213 BIT_ULL(44));
1214 }
1215 if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1216 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1217 qidx);
1218 }
1219
1220 schedule_work(&pf->reset_task);
1221 }
1222
1223 return IRQ_HANDLED;
1224 }
1225
1226 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1227 {
1228 struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1229 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1230 int qidx = cq_poll->cint_idx;
1231
1232 /* Disable interrupts.
1233 *
1234 * Completion interrupts behave in a level-triggered interrupt
1235 * fashion, and hence have to be cleared only after they are serviced.
1236 */
1237 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1238
1239 /* Schedule NAPI */
1240 napi_schedule_irqoff(&cq_poll->napi);
1241
1242 return IRQ_HANDLED;
1243 }
1244
1245 static void otx2_disable_napi(struct otx2_nic *pf)
1246 {
1247 struct otx2_qset *qset = &pf->qset;
1248 struct otx2_cq_poll *cq_poll;
1249 int qidx;
1250
1251 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1252 cq_poll = &qset->napi[qidx];
1253 napi_disable(&cq_poll->napi);
1254 netif_napi_del(&cq_poll->napi);
1255 }
1256 }
1257
1258 static void otx2_free_cq_res(struct otx2_nic *pf)
1259 {
1260 struct otx2_qset *qset = &pf->qset;
1261 struct otx2_cq_queue *cq;
1262 int qidx;
1263
1264 /* Disable CQs */
1265 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1266 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1267 cq = &qset->cq[qidx];
1268 qmem_free(pf->dev, cq->cqe);
1269 }
1270 }
1271
1272 static void otx2_free_sq_res(struct otx2_nic *pf)
1273 {
1274 struct otx2_qset *qset = &pf->qset;
1275 struct otx2_snd_queue *sq;
1276 int qidx;
1277
1278 /* Disable SQs */
1279 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1280 /* Free SQB pointers */
1281 otx2_sq_free_sqbs(pf);
1282 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1283 sq = &qset->sq[qidx];
1284 qmem_free(pf->dev, sq->sqe);
1285 qmem_free(pf->dev, sq->tso_hdrs);
1286 kfree(sq->sg);
1287 kfree(sq->sqb_ptrs);
1288 }
1289 }
1290
1291 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
1292 {
1293 int frame_size;
1294 int total_size;
1295 int rbuf_size;
1296
1297 /* The data transferred by NIX to memory consists of actual packet
1298 * plus additional data which has timestamp and/or EDSA/HIGIG2
1299 * headers if interface is configured in corresponding modes.
1300 * NIX transfers entire data using 6 segments/buffers and writes
1301 * a CQE_RX descriptor with those segment addresses. First segment
1302 * has additional data prepended to packet. Also software omits a
1303 * headroom of 128 bytes and sizeof(struct skb_shared_info) in
1304 * each segment. Hence the total size of memory needed
1305 * to receive a packet with 'mtu' is:
1306 * frame size = mtu + additional data;
1307 * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
1308 * each receive buffer size = memory / 6;
1309 */
1310 frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1311 total_size = frame_size + (OTX2_HEAD_ROOM +
1312 OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
1313 rbuf_size = total_size / 6;
1314
1315 return ALIGN(rbuf_size, 2048);
1316 }
1317
1318 static int otx2_init_hw_resources(struct otx2_nic *pf)
1319 {
1320 struct nix_lf_free_req *free_req;
1321 struct mbox *mbox = &pf->mbox;
1322 struct otx2_hw *hw = &pf->hw;
1323 struct msg_req *req;
1324 int err = 0, lvl;
1325
1326 /* Set required NPA LF's pool counts
1327 * Auras and Pools are used in a 1:1 mapping,
1328 * so, aura count = pool count.
1329 */
1330 hw->rqpool_cnt = hw->rx_queues;
1331 hw->sqpool_cnt = hw->tx_queues;
1332 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1333
1334 pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1335
1336 pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
1337
1338 mutex_lock(&mbox->lock);
1339 /* NPA init */
1340 err = otx2_config_npa(pf);
1341 if (err)
1342 goto exit;
1343
1344 /* NIX init */
1345 err = otx2_config_nix(pf);
1346 if (err)
1347 goto err_free_npa_lf;
1348
1349 /* Enable backpressure */
1350 otx2_nix_config_bp(pf, true);
1351
1352 /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
1353 err = otx2_rq_aura_pool_init(pf);
1354 if (err) {
1355 mutex_unlock(&mbox->lock);
1356 goto err_free_nix_lf;
1357 }
1358 /* Init Auras and pools used by NIX SQ, for queueing SQEs */
1359 err = otx2_sq_aura_pool_init(pf);
1360 if (err) {
1361 mutex_unlock(&mbox->lock);
1362 goto err_free_rq_ptrs;
1363 }
1364
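/* Allocate transmit scheduler queues for this NIX LF */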
1365 err = otx2_txsch_alloc(pf);
1366 if (err) {
1367 mutex_unlock(&mbox->lock);
1368 goto err_free_sq_ptrs;
1369 }
1370
1371 err = otx2_config_nix_queues(pf);
1372 if (err) {
1373 mutex_unlock(&mbox->lock);
1374 goto err_free_txsch;
1375 }
1376 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1377 err = otx2_txschq_config(pf, lvl);
1378 if (err) {
1379 mutex_unlock(&mbox->lock);
1380 goto err_free_nix_queues;
1381 }
1382 }
1383 mutex_unlock(&mbox->lock);
1384 return err;
1385
1386 err_free_nix_queues:
1387 otx2_free_sq_res(pf);
1388 otx2_free_cq_res(pf);
1389 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1390 err_free_txsch:
1391 if (otx2_txschq_stop(pf))
1392 dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
1393 err_free_sq_ptrs:
1394 otx2_sq_free_sqbs(pf);
1395 err_free_rq_ptrs:
1396 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1397 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1398 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1399 otx2_aura_pool_free(pf);
1400 err_free_nix_lf:
1401 mutex_lock(&mbox->lock);
1402 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1403 if (free_req) {
1404 free_req->flags = NIX_LF_DISABLE_FLOWS;
1405 if (otx2_sync_mbox_msg(mbox))
1406 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1407 }
1408 err_free_npa_lf:
1409 /* Reset NPA LF */
1410 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1411 if (req) {
1412 if (otx2_sync_mbox_msg(mbox))
1413 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1414 }
1415 exit:
1416 mutex_unlock(&mbox->lock);
1417 return err;
1418 }
1419
1420 static void otx2_free_hw_resources(struct otx2_nic *pf)
1421 {
1422 struct otx2_qset *qset = &pf->qset;
1423 struct nix_lf_free_req *free_req;
1424 struct mbox *mbox = &pf->mbox;
1425 struct otx2_cq_queue *cq;
1426 struct msg_req *req;
1427 int qidx, err;
1428
1429 /* Ensure all SQE are processed */
1430 otx2_sqb_flush(pf);
1431
1432 /* Stop transmission */
1433 err = otx2_txschq_stop(pf);
1434 if (err)
1435 dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
1436
1437 mutex_lock(&mbox->lock);
1438 /* Disable backpressure */
1439 if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1440 otx2_nix_config_bp(pf, false);
1441 mutex_unlock(&mbox->lock);
1442
1443 /* Disable RQs */
1444 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1445
1446 /* Dequeue all CQEs */
1447 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1448 cq = &qset->cq[qidx];
1449 if (cq->cq_type == CQ_RX)
1450 otx2_cleanup_rx_cqes(pf, cq);
1451 else
1452 otx2_cleanup_tx_cqes(pf, cq);
1453 }
1454
1455 otx2_free_sq_res(pf);
1456
1457 /* Free RQ buffer pointers*/
1458 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1459
1460 otx2_free_cq_res(pf);
1461
1462 mutex_lock(&mbox->lock);
1463 /* Reset NIX LF */
1464 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1465 if (free_req) {
1466 free_req->flags = NIX_LF_DISABLE_FLOWS;
1467 if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
1468 free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
1469 if (otx2_sync_mbox_msg(mbox))
1470 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1471 }
1472 mutex_unlock(&mbox->lock);
1473
1474 /* Disable NPA Pool and Aura hw context */
1475 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1476 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1477 otx2_aura_pool_free(pf);
1478
1479 mutex_lock(&mbox->lock);
1480 /* Reset NPA LF */
1481 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1482 if (req) {
1483 if (otx2_sync_mbox_msg(mbox))
1484 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1485 }
1486 mutex_unlock(&mbox->lock);
1487 }
1488
1489 int otx2_open(struct net_device *netdev)
1490 {
1491 struct otx2_nic *pf = netdev_priv(netdev);
1492 struct otx2_cq_poll *cq_poll = NULL;
1493 struct otx2_qset *qset = &pf->qset;
1494 int err = 0, qidx, vec;
1495 char *irq_name;
1496
1497 netif_carrier_off(netdev);
1498
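/* Each RX queue and each TX queue gets its own completion queue */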
1499 pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
1500 /* RQ and SQs are mapped to different CQs,
1501 * so find out max CQ IRQs (i.e CINTs) needed.
1502 */
1503 pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
1504 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1505 if (!qset->napi)
1506 return -ENOMEM;
1507
1508 /* CQ size of RQ */
1509 qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1510 /* CQ size of SQ */
1511 qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1512
1513 err = -ENOMEM;
1514 qset->cq = kcalloc(pf->qset.cq_cnt,
1515 sizeof(struct otx2_cq_queue), GFP_KERNEL);
1516 if (!qset->cq)
1517 goto err_free_mem;
1518
1519 qset->sq = kcalloc(pf->hw.tx_queues,
1520 sizeof(struct otx2_snd_queue), GFP_KERNEL);
1521 if (!qset->sq)
1522 goto err_free_mem;
1523
1524 qset->rq = kcalloc(pf->hw.rx_queues,
1525 sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1526 if (!qset->rq)
1527 goto err_free_mem;
1528
1529 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
1530 /* Reserve LMT lines for NPA AURA batch free */
1531 pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
1532 /* Reserve LMT lines for NIX TX */
1533 pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
1534 (NIX_LMTID_BASE * LMT_LINE_SIZE));
1535 }
1536
1537 err = otx2_init_hw_resources(pf);
1538 if (err)
1539 goto err_free_mem;
1540
1541 /* Register NAPI handler */
1542 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1543 cq_poll = &qset->napi[qidx];
1544 cq_poll->cint_idx = qidx;
1545 /* RQ0 & SQ0 are mapped to CINT0 and so on..
1546 * 'cq_ids[0]' points to RQ's CQ and
1547 * 'cq_ids[1]' points to SQ's CQ and
1548 */
1549 cq_poll->cq_ids[CQ_RX] =
1550 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1551 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1552 qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1553 cq_poll->dev = (void *)pf;
1554 netif_napi_add(netdev, &cq_poll->napi,
1555 otx2_napi_handler, NAPI_POLL_WEIGHT);
1556 napi_enable(&cq_poll->napi);
1557 }
1558
1559 /* Set maximum frame size allowed in HW */
1560 err = otx2_hw_set_mtu(pf, netdev->mtu);
1561 if (err)
1562 goto err_disable_napi;
1563
1564 /* Setup segmentation algorithms, if failed, clear offload capability */
1565 otx2_setup_segmentation(pf);
1566
1567 /* Initialize RSS */
1568 err = otx2_rss_init(pf);
1569 if (err)
1570 goto err_disable_napi;
1571
1572 /* Register Queue IRQ handlers */
1573 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1574 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1575
1576 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1577
1578 err = request_irq(pci_irq_vector(pf->pdev, vec),
1579 otx2_q_intr_handler, 0, irq_name, pf);
1580 if (err) {
1581 dev_err(pf->dev,
1582 "RVUPF%d: IRQ registration failed for QERR\n",
1583 rvu_get_pf(pf->pcifunc));
1584 goto err_disable_napi;
1585 }
1586
1587 /* Enable QINT IRQ */
1588 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1589
1590 /* Register CQ IRQ handlers */
1591 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1592 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1593 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1594
1595 snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
1596 qidx);
1597
1598 err = request_irq(pci_irq_vector(pf->pdev, vec),
1599 otx2_cq_intr_handler, 0, irq_name,
1600 &qset->napi[qidx]);
1601 if (err) {
1602 dev_err(pf->dev,
1603 "RVUPF%d: IRQ registration failed for CQ%d\n",
1604 rvu_get_pf(pf->pcifunc), qidx);
1605 goto err_free_cints;
1606 }
1607 vec++;
1608
1609 otx2_config_irq_coalescing(pf, qidx);
1610
1611 /* Enable CQ IRQ */
1612 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1613 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1614 }
1615
1616 otx2_set_cints_affinity(pf);
1617
1618 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
1619 otx2_enable_rxvlan(pf, true);
1620
1621 /* When reinitializing enable time stamping if it is enabled before */
1622 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
1623 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1624 otx2_config_hw_tx_tstamp(pf, true);
1625 }
1626 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
1627 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1628 otx2_config_hw_rx_tstamp(pf, true);
1629 }
1630
1631 pf->flags &= ~OTX2_FLAG_INTF_DOWN;
1632 /* 'intf_down' may be checked on any cpu */
1633 smp_wmb();
1634
1635 /* we have already received link status notification */
1636 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1637 otx2_handle_link_event(pf);
1638
1639 /* Restore pause frame settings */
1640 otx2_config_pause_frm(pf);
1641
1642 err = otx2_rxtx_enable(pf, true);
1643 if (err)
1644 goto err_tx_stop_queues;
1645
1646 return 0;
1647
1648 err_tx_stop_queues:
1649 netif_tx_stop_all_queues(netdev);
1650 netif_carrier_off(netdev);
1651 err_free_cints:
1652 otx2_free_cints(pf, qidx);
1653 vec = pci_irq_vector(pf->pdev,
1654 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1655 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1656 synchronize_irq(vec);
1657 free_irq(vec, pf);
1658 err_disable_napi:
1659 otx2_disable_napi(pf);
1660 otx2_free_hw_resources(pf);
1661 err_free_mem:
1662 kfree(qset->sq);
1663 kfree(qset->cq);
1664 kfree(qset->rq);
1665 kfree(qset->napi);
1666 return err;
1667 }
1668 EXPORT_SYMBOL(otx2_open);
1669
1670 int otx2_stop(struct net_device *netdev)
1671 {
1672 struct otx2_nic *pf = netdev_priv(netdev);
1673 struct otx2_cq_poll *cq_poll = NULL;
1674 struct otx2_qset *qset = &pf->qset;
1675 struct otx2_rss_info *rss;
1676 int qidx, vec, wrk;
1677
1678 netif_carrier_off(netdev);
1679 netif_tx_stop_all_queues(netdev);
1680
1681 pf->flags |= OTX2_FLAG_INTF_DOWN;
1682 /* 'intf_down' may be checked on any cpu */
1683 smp_wmb();
1684
1685 /* First stop packet Rx/Tx */
1686 otx2_rxtx_enable(pf, false);
1687
1688 /* Clear RSS enable flag */
1689 rss = &pf->hw.rss_info;
1690 rss->enable = false;
1691
1692 /* Cleanup Queue IRQ */
1693 vec = pci_irq_vector(pf->pdev,
1694 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1695 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1696 synchronize_irq(vec);
1697 free_irq(vec, pf);
1698
1699 /* Cleanup CQ NAPI and IRQ */
1700 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1701 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1702 /* Disable interrupt */
1703 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1704
1705 synchronize_irq(pci_irq_vector(pf->pdev, vec));
1706
1707 cq_poll = &qset->napi[qidx];
1708 napi_synchronize(&cq_poll->napi);
1709 vec++;
1710 }
1711
1712 netif_tx_disable(netdev);
1713
1714 otx2_free_hw_resources(pf);
1715 otx2_free_cints(pf, pf->hw.cint_cnt);
1716 otx2_disable_napi(pf);
1717
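/* Reset dynamic queue limit (BQL) state of all TX queues */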
1718 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1719 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1720
1721 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1722 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1723 devm_kfree(pf->dev, pf->refill_wrk);
1724
1725 kfree(qset->sq);
1726 kfree(qset->cq);
1727 kfree(qset->rq);
1728 kfree(qset->napi);
1729 /* Do not clear RQ/SQ ringsize settings */
1730 memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
1731 sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
1732 return 0;
1733 }
1734 EXPORT_SYMBOL(otx2_stop);
1735
1736 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
1737 {
1738 struct otx2_nic *pf = netdev_priv(netdev);
1739 int qidx = skb_get_queue_mapping(skb);
1740 struct otx2_snd_queue *sq;
1741 struct netdev_queue *txq;
1742
1743 /* Check for minimum and maximum packet length */
1744 if (skb->len <= ETH_HLEN ||
1745 (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
1746 dev_kfree_skb(skb);
1747 return NETDEV_TX_OK;
1748 }
1749
1750 sq = &pf->qset.sq[qidx];
1751 txq = netdev_get_tx_queue(netdev, qidx);
1752
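/* If the SQ has no room for this skb, stop the TX queue and let the
 * stack retry the transmit later.
 */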
1753 if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
1754 netif_tx_stop_queue(txq);
1755
1756 /* Check again, in case SQBs got freed up */
1757 smp_mb();
1758 if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
1759 > sq->sqe_thresh)
1760 netif_tx_wake_queue(txq);
1761
1762 return NETDEV_TX_BUSY;
1763 }
1764
1765 return NETDEV_TX_OK;
1766 }
1767
1768 static netdev_features_t otx2_fix_features(struct net_device *dev,
1769 netdev_features_t features)
1770 {
1771 /* check if n-tuple filters are ON */
1772 if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) {
1773 netdev_info(dev, "Disabling n-tuple filters\n");
1774 features &= ~NETIF_F_NTUPLE;
1775 }
1776
1777 /* check if tc hw offload is ON */
1778 if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) {
1779 netdev_info(dev, "Disabling TC hardware offload\n");
1780 features &= ~NETIF_F_HW_TC;
1781 }
1782
1783 return features;
1784 }
1785
1786 static void otx2_set_rx_mode(struct net_device *netdev)
1787 {
1788 struct otx2_nic *pf = netdev_priv(netdev);
1789
1790 queue_work(pf->otx2_wq, &pf->rx_mode_work);
1791 }
1792
1793 static void otx2_do_set_rx_mode(struct work_struct *work)
1794 {
1795 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
1796 struct net_device *netdev = pf->netdev;
1797 struct nix_rx_mode *req;
1798 bool promisc = false;
1799
1800 if (!(netdev->flags & IFF_UP))
1801 return;
1802
1803 if ((netdev->flags & IFF_PROMISC) ||
1804 (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
1805 promisc = true;
1806 }
1807
1808 /* Write unicast address to mcam entries or del from mcam */
1809 if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1810 __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1811
1812 mutex_lock(&pf->mbox.lock);
1813 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1814 if (!req) {
1815 mutex_unlock(&pf->mbox.lock);
1816 return;
1817 }
1818
1819 req->mode = NIX_RX_MODE_UCAST;
1820
1821 if (promisc)
1822 req->mode |= NIX_RX_MODE_PROMISC;
1823 else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1824 req->mode |= NIX_RX_MODE_ALLMULTI;
1825
1826 otx2_sync_mbox_msg(&pf->mbox);
1827 mutex_unlock(&pf->mbox.lock);
1828 }
1829
1830 static int otx2_set_features(struct net_device *netdev,
1831 netdev_features_t features)
1832 {
1833 netdev_features_t changed = features ^ netdev->features;
1834 bool ntuple = !!(features & NETIF_F_NTUPLE);
1835 struct otx2_nic *pf = netdev_priv(netdev);
1836
1837 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1838 return otx2_cgx_config_loopback(pf,
1839 features & NETIF_F_LOOPBACK);
1840
1841 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
1842 return otx2_enable_rxvlan(pf,
1843 features & NETIF_F_HW_VLAN_CTAG_RX);
1844
1845 if ((changed & NETIF_F_NTUPLE) && !ntuple)
1846 otx2_destroy_ntuple_flows(pf);
1847
1848 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
1849 pf->tc_info.num_entries) {
1850 netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
1851 return -EBUSY;
1852 }
1853
1854 return 0;
1855 }
1856
1857 static void otx2_reset_task(struct work_struct *work)
1858 {
1859 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1860
1861 if (!netif_running(pf->netdev))
1862 return;
1863
1864 rtnl_lock();
1865 otx2_stop(pf->netdev);
1866 pf->reset_count++;
1867 otx2_open(pf->netdev);
1868 netif_trans_update(pf->netdev);
1869 rtnl_unlock();
1870 }
1871
1872 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
1873 {
1874 struct msg_req *req;
1875 int err;
1876
1877 if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
1878 return 0;
1879
1880 mutex_lock(&pfvf->mbox.lock);
1881 if (enable)
1882 req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
1883 else
1884 req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
1885 if (!req) {
1886 mutex_unlock(&pfvf->mbox.lock);
1887 return -ENOMEM;
1888 }
1889
1890 err = otx2_sync_mbox_msg(&pfvf->mbox);
1891 if (err) {
1892 mutex_unlock(&pfvf->mbox.lock);
1893 return err;
1894 }
1895
1896 mutex_unlock(&pfvf->mbox.lock);
1897 if (enable)
1898 pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
1899 else
1900 pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1901 return 0;
1902 }
1903
static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
	return 0;
}

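/* SIOCSHWTSTAMP handler. Every supported PTP RX filter is reported back to
 * user space as HWTSTAMP_FILTER_ALL, since RX timestamping is enabled
 * globally rather than per PTP filter once cgx_ptp_rx_enable is issued.
 */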
static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config config;

	if (!pfvf->ptp)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		otx2_config_hw_tx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_TX_ON:
		otx2_config_hw_tx_tstamp(pfvf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		otx2_config_hw_rx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		otx2_config_hw_rx_tstamp(pfvf, true);
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	memcpy(&pfvf->tstamp, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config *cfg = &pfvf->tstamp;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return otx2_config_hwtstamp(netdev, req);
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, cfg,
				    sizeof(*cfg)) ? -EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
}

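/* Install an NPC MCAM flow entry that matches the given destination MAC on
 * the PF's RX channel and forwards matching packets to VF 'vf' (req->vf is
 * 1-based), appended to the VF's default rule.
 */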
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->default_rule = 1;
	req->append = 1;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}

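/* ndo_set_vf_mac handler. The new address is recorded in the per-VF config
 * and installed in the MCAM; as the message below notes, the VF driver has
 * to be reloaded for the change to take effect on the VF side.
 */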
static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;
	int ret;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ether_addr_copy(config->mac, mac);

	ret = otx2_do_set_vf_mac(pf, vf, mac);
	if (ret == 0)
		dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");

	return ret;
}

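/* Program VF VLAN (port VLAN) offload:
 *  - RX: an MCAM entry matching the VLAN ID steers traffic to the VF and
 *    points at the RX VTAG TYPE7 configuration for tag handling.
 *  - TX: a VTAG entry holding (proto << 16 | vlan) is allocated and an
 *    MCAM entry on the TX channel inserts it (VTAG_INSERT).
 * Passing vlan == 0 deletes the RX/TX entries and frees the old TX VTAG.
 */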
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
			       __be16 proto)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct nix_vtag_config_rsp *vtag_rsp;
	struct npc_delete_flow_req *del_req;
	struct nix_vtag_config *vtag_req;
	struct npc_install_flow_req *req;
	struct otx2_vf_config *config;
	int err = 0;
	u32 idx;

	config = &pf->vf_configs[vf];

	/* Nothing to do; don't jump to 'out' here since the mbox lock
	 * is not yet held at this point.
	 */
	if (!vlan && !config->vlan)
		return 0;

	mutex_lock(&pf->mbox.lock);

	/* free old tx vtag entry */
	if (config->vlan) {
		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
		if (!vtag_req) {
			err = -ENOMEM;
			goto out;
		}
		vtag_req->cfg_type = 0;
		vtag_req->tx.free_vtag0 = 1;
		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;

		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;
	}

	if (!vlan && config->vlan) {
		/* rx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
		del_req->entry =
			flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;

		/* tx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
		del_req->entry =
			flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

		goto out;
	}

	/* rx */
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
	req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
	req->packet.vlan_tci = htons(vlan);
	req->mask.vlan_tci = htons(VLAN_VID_MASK);
	/* AF fills the destination MAC address */
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	/* tx */
	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!vtag_req) {
		err = -ENOMEM;
		goto out;
	}

	/* configure tx vtag params */
	vtag_req->vtag_size = VTAGSIZE_T4;
	vtag_req->cfg_type = 0; /* tx vlan cfg */
	vtag_req->tx.cfg_vtag0 = 1;
	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
			(&pf->mbox.mbox, 0, &vtag_req->hdr);
	if (IS_ERR(vtag_rsp)) {
		err = PTR_ERR(vtag_rsp);
		goto out;
	}
	config->tx_vtag_idx = vtag_rsp->vtag0_idx;

	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	eth_zero_addr((u8 *)&req->mask.dmac);
	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
	req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.tx_chan_base;
	req->intf = NIX_INTF_TX;
	req->vf = vf + 1;
	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
	req->vtag0_def = vtag_rsp->vtag0_idx;
	req->vtag0_op = VTAG_INSERT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	config->vlan = vlan;
	mutex_unlock(&pf->mbox.lock);
	return err;
}

static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			    __be16 proto)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	/* qos is currently unsupported */
	if (vlan >= VLAN_N_VID || qos)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
		return -EOPNOTSUPP;

	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}

static int otx2_get_vf_config(struct net_device *netdev, int vf,
			      struct ifla_vf_info *ivi)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, config->mac);
	ivi->vlan = config->vlan;

	return 0;
}

static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open		= otx2_open,
	.ndo_stop		= otx2_stop,
	.ndo_start_xmit		= otx2_xmit,
	.ndo_fix_features	= otx2_fix_features,
	.ndo_set_mac_address	= otx2_set_mac_address,
	.ndo_change_mtu		= otx2_change_mtu,
	.ndo_set_rx_mode	= otx2_set_rx_mode,
	.ndo_set_features	= otx2_set_features,
	.ndo_tx_timeout		= otx2_tx_timeout,
	.ndo_get_stats64	= otx2_get_stats64,
	.ndo_do_ioctl		= otx2_ioctl,
	.ndo_set_vf_mac		= otx2_set_vf_mac,
	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
	.ndo_get_vf_config	= otx2_get_vf_config,
	.ndo_setup_tc		= otx2_setup_tc,
};

static int otx2_wq_init(struct otx2_nic *pf)
{
	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
	if (!pf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
	INIT_WORK(&pf->reset_task, otx2_reset_task);
	return 0;
}

static int otx2_check_pf_usable(struct otx2_nic *nic)
{
	u64 rev;

	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/* Check if AF has setup revision for RVUM block,
	 * otherwise this driver probe should be deferred
	 * until AF driver comes up.
	 */
	if (!rev) {
		dev_warn(nic->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

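/* The number of MSI-X vectors this PF actually needs is only known after
 * the AF has attached the NIX LF (it depends on the NIX vector offset and
 * the queue count), so the vectors allocated at probe time are released
 * and re-allocated here, and the mailbox interrupt is re-registered.
 */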
static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
	struct otx2_hw *hw = &pf->hw;
	int num_vec, err;

	/* NPA interrupts are not registered, so alloc only
	 * up to the NIX vector offset.
	 */
	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2_register_mbox_intr(pf, false);
}

static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *pf;
	struct otx2_hw *hw;
	int err, qcount;
	int num_vec;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* Set number of queues */
	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);

	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pf = netdev_priv(netdev);
	pf->netdev = netdev;
	pf->pdev = pdev;
	pf->dev = dev;
	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;

	hw = &pf->hw;
	hw->pdev = pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;

	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	/* Map CSRs */
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;

	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, RVU_PF_INT_VEC_CNT);
		goto err_free_netdev;
	}

	otx2_setup_dev_hw_settings(pf);

	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_pf_lmtst_init(pf);
	if (err)
		goto err_detach_rsrc;

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(pf);

	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointer from stack and uses it for DMA'ing
	 * ingress packet. In some scenarios HW can free back allocated buffer
	 * pointers to pool. This makes it impossible for SW to maintain a
	 * parallel list where physical addresses of buffer pointers (IOVAs)
	 * given to HW can be saved for later reference.
	 *
	 * So the only way to convert Rx packet's buffer address is to use
	 * IOMMU's iova_to_phys() handler which translates the address by
	 * walking through the translation tables.
	 */
	pf->iommu_domain = iommu_get_domain_for_dev(dev);

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			       NETIF_F_GSO_UDP_L4);
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	err = otx2_mcam_flow_init(pf);
	if (err)
		goto err_ptp_destroy;

	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_STAG_RX;
	netdev->features |= netdev->hw_features;

	/* HW supports tc offload but mutually exclusive with n-tuple filters */
	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;

	/* MTU range: 64 - 9190 */
	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(pf);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_del_mcam_entries;
	}

	err = otx2_wq_init(pf);
	if (err)
		goto err_unreg_netdev;

	otx2_set_ethtool_ops(netdev);

	err = otx2_init_tc(pf);
	if (err)
		goto err_mcam_flow_del;

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

	/* Enable pause frames by default */
	pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;

err_mcam_flow_del:
	otx2_mcam_flow_del(pf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_del_mcam_entries:
	otx2_mcam_flow_del(pf);
err_ptp_destroy:
	otx2_ptp_destroy(pf);
err_detach_rsrc:
	if (hw->lmt_base)
		iounmap(hw->lmt_base);
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

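/* Delayed work, one per VF: forward the PF's cached link state (pf->linfo)
 * to the VF as a CGX_LINK_EVENT message on the PF <=> VF "up" mailbox.
 */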
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		return;
	}

	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}

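/* sriov_configure(numvfs > 0) path: set up the PF <=> VF mailbox, FLR/ME
 * interrupt handling and per-VF state before enabling SR-IOV. Returns the
 * number of VFs enabled on success, as the sriov_configure API expects.
 */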
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret, i;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		return ret;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
				 GFP_KERNEL);
	if (!pf->vf_configs) {
		ret = -ENOMEM;
		goto free_intr;
	}

	for (i = 0; i < numvfs; i++) {
		pf->vf_configs[i].pf = pf;
		pf->vf_configs[i].intf_down = true;
		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
				  otx2_vf_link_event_task);
	}

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_configs;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_configs:
	kfree(pf->vf_configs);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return ret;
}

static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);
	int i;

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	/* Use the VF count sampled above; pci_num_vf() reads zero once
	 * SR-IOV has been disabled, which would skip cancelling the
	 * per-VF link event work before freeing vf_configs.
	 */
	for (i = 0; i < numvfs; i++)
		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
	kfree(pf->vf_configs);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}

static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs == 0)
		return otx2_sriov_disable(pdev);
	else
		return otx2_sriov_enable(pdev, numvfs);
}

static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	unregister_netdev(netdev);
	otx2_sriov_disable(pf->pdev);
	if (pf->otx2_wq)
		destroy_workqueue(pf->otx2_wq);

	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_shutdown_tc(pf);
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_base)
		iounmap(pf->hw.lmt_base);

	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};

static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);