1 /*
2 * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * LICENSE_BEGIN
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
27 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
28 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
29 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
30 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
32 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
33 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
34 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
36 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 *
39 * LICENSE_END
40 *
41 *
42 */
43
44 #include <linux/kernel.h>
45 #include <linux/errno.h>
46 #include <linux/types.h>
47 #include <linux/pci.h>
48 #include <linux/delay.h>
49 #include <linux/if_ether.h>
50
51 #include "kcompat.h"
52 #include "vnic_resource.h"
53 #include "vnic_devcmd.h"
54 #include "vnic_dev.h"
55 #include "vnic_stats.h"
56 #include "vnic_wq.h"
57
/* State for the devcmd2 (work-queue based) firmware command interface. */
struct devcmd2_controller {
	struct vnic_wq_ctrl *wq_ctrl;		/* WQ control registers */
	struct vnic_dev_ring results_ring;	/* DMA ring where fw posts results */
	struct vnic_wq wq;			/* command work queue */
	struct vnic_devcmd2 *cmd_ring;		/* WQ descriptors viewed as commands */
	struct devcmd2_result *result;		/* base of result entries */
	u16 next_result;			/* next result index to consume */
	u16 result_size;			/* number of result entries */
	int color;				/* expected color bit of a valid result */
	u32 posted;				/* last posted_index written to hw */
};
69
/* How devcmds are routed: directly, or proxied for another vNIC. */
enum vnic_proxy_type {
	PROXY_NONE,		/* issue commands for this vNIC directly */
	PROXY_BY_BDF,		/* proxy by PCI bus/device/function */
	PROXY_BY_INDEX,		/* proxy by vNIC index */
};
75
/* Location of one resource type as discovered from the BAR0 directory. */
struct vnic_res {
	void __iomem *vaddr;	/* mapped virtual address of the resource */
	dma_addr_t bus_addr;	/* bus address of the resource */
	unsigned int count;	/* instance count (or byte length, by type) */
	u8 bar_num;		/* BAR the resource lives in */
	u32 bar_offset;		/* byte offset within that BAR */
	unsigned long len;	/* total byte length of the region */
};
84
/* Conversion factors for the interrupt coalescing timer (usec <-> hw units). */
struct vnic_intr_coal_timer_info {
	u32 mul;	/* multiplier for usec-to-hw conversion */
	u32 div;	/* divisor for usec-to-hw conversion */
	u32 max_usec;	/* maximum programmable coalescing time */
};
90
/* Per-device state for a vNIC: discovered resources, devcmd channel(s),
 * and DMA-coherent buffers shared with firmware.
 */
struct vnic_dev {
	void *priv;				/* opaque caller context */
	struct pci_dev *pdev;			/* owning PCI device */
	struct vnic_res res[RES_TYPE_MAX];	/* per-type resource map */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* legacy devcmd registers */
	struct vnic_devcmd_notify *notify;	/* DMA notify area (fw-written) */
	struct vnic_devcmd_notify notify_copy;	/* stable snapshot of *notify */
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* DMA stats buffer, lazily alloc'd */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached fw info, lazily alloc'd */
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;		/* current devcmd routing mode */
	u32 proxy_index;			/* proxy target (index or bdf) */
	u64 args[VNIC_DEVCMD_NARGS];		/* devcmd argument staging area */
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	struct devcmd2_controller *devcmd2;	/* devcmd2 state, NULL until init */
	/* dispatch: _vnic_dev_cmd (devcmd1) or _vnic_dev_cmd2 (devcmd2) */
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait);
};
113
/* Worst-case size of the BAR0 resource header plus a full directory. */
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
/* Register stride between instances of WQ/RQ/CQ/INTR resources. */
#define VNIC_RES_STRIDE 128
118
/* Return the caller-private context registered with this vNIC device. */
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
123
/* Size of struct vnic_dev, for callers that allocate it themselves. */
int vnic_dev_get_size(void)
{
	return sizeof(struct vnic_dev);
}
128
/* Walk the resource directory at the base of BAR0 and record, for each
 * known resource type, its count and bus/virtual location. Accepts either
 * a normal vNIC header or a mgmt vNIC header (distinguished by magic and
 * version).
 *
 * Returns 0 on success; -EINVAL if no BARs were given, the header is too
 * short, unrecognized, or a stride-based resource lies outside its BAR.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	/* Both header layouts start at the base of BAR0. */
	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error "
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	/* Resource entries immediately follow whichever header matched. */
	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		/* Skip entries that refer to BARs we did not map. */
		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
		case RES_TYPE_GRPMBR_INTR:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/* bounds-check instance arrays against the BAR */
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_DPKT:
		case RES_TYPE_MEM:
		case RES_TYPE_INTR_PBA_LEGACY:
#ifdef CONFIG_MIPS
		case RES_TYPE_DEV:
#endif
		case RES_TYPE_DEVCMD2:
		case RES_TYPE_DEVCMD:
			/* byte-addressed regions: count is the byte length */
			len = count;
			break;
		default:
			/* unknown resource types are ignored */
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
		vdev->res[type].bar_num = bar_num;
		vdev->res[type].bar_offset = bar_offset;
		vdev->res[type].len = len;
	}

	return 0;
}
229
230 /*
231 * Assign virtual addresses to all resources whose bus address falls
232 * within the specified map.
233 * vnic_dev_discover_res assigns res vaddrs based on the assumption that
234 * the entire bar is mapped once. When memory regions on the bar
235 * are mapped seperately, the vnic res for those regions need to be updated
236 * with new virutal addresses.
237 * Notice that the mapping and virtual address update need to be done before
238 * other VNIC APIs that might use the old virtual address,
239 * such as vdev->devcmd
240 */
vnic_dev_upd_res_vaddr(struct vnic_dev * vdev,struct vnic_dev_iomap_info * map)241 void vnic_dev_upd_res_vaddr(struct vnic_dev *vdev,
242 struct vnic_dev_iomap_info *map)
243 {
244 int i;
245
246 for (i = RES_TYPE_EOL; i < RES_TYPE_MAX; i++) {
247 if (i == RES_TYPE_EOL)
248 continue;
249 if (vdev->res[i].bus_addr >= map->bus_addr &&
250 vdev->res[i].bus_addr < map->bus_addr + map->len)
251 vdev->res[i].vaddr = ((uint8_t *)map->vaddr) +
252 (vdev->res[i].bus_addr - map->bus_addr);
253 }
254 }
255 EXPORT_SYMBOL(vnic_dev_upd_res_vaddr);
256
/* Number of instances discovered for the given resource type. */
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);
263
vnic_dev_get_res(struct vnic_dev * vdev,enum vnic_res_type type,unsigned int index)264 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
265 unsigned int index)
266 {
267 if (!vdev->res[type].vaddr)
268 return NULL;
269
270 switch (type) {
271 case RES_TYPE_WQ:
272 case RES_TYPE_RQ:
273 case RES_TYPE_CQ:
274 case RES_TYPE_INTR_CTRL:
275 case RES_TYPE_GRPMBR_INTR:
276 return (char __iomem *)vdev->res[type].vaddr +
277 index * VNIC_RES_STRIDE;
278 default:
279 return (char __iomem *)vdev->res[type].vaddr;
280 }
281 }
282 EXPORT_SYMBOL(vnic_dev_get_res);
283
vnic_dev_get_res_bus_addr(struct vnic_dev * vdev,enum vnic_res_type type,unsigned int index)284 dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
285 enum vnic_res_type type, unsigned int index)
286 {
287 switch (type) {
288 case RES_TYPE_WQ:
289 case RES_TYPE_RQ:
290 case RES_TYPE_CQ:
291 case RES_TYPE_INTR_CTRL:
292 case RES_TYPE_GRPMBR_INTR:
293 return vdev->res[type].bus_addr +
294 index * VNIC_RES_STRIDE;
295 default:
296 return vdev->res[type].bus_addr;
297 }
298 }
299 EXPORT_SYMBOL(vnic_dev_get_res_bus_addr);
300
/* BAR number the given resource type was discovered in. */
uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].bar_num;
}
EXPORT_SYMBOL(vnic_dev_get_res_bar);
307
vnic_dev_get_res_offset(struct vnic_dev * vdev,enum vnic_res_type type,unsigned int index)308 uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev,
309 enum vnic_res_type type, unsigned int index)
310 {
311 switch (type) {
312 case RES_TYPE_WQ:
313 case RES_TYPE_RQ:
314 case RES_TYPE_CQ:
315 case RES_TYPE_INTR_CTRL:
316 case RES_TYPE_GRPMBR_INTR:
317 return vdev->res[type].bar_offset +
318 index * VNIC_RES_STRIDE;
319 default:
320 return vdev->res[type].bar_offset;
321 }
322 }
323 EXPORT_SYMBOL(vnic_dev_get_res_offset);
324
/*
 * Get the total byte length recorded for the given resource type.
 */
unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].len;
}
EXPORT_SYMBOL(vnic_dev_get_res_type_len);
334
vnic_dev_desc_ring_size(struct vnic_dev_ring * ring,unsigned int desc_count,unsigned int desc_size)335 unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
336 unsigned int desc_count, unsigned int desc_size)
337 {
338 /* The base address of the desc rings must be 512 byte aligned.
339 * Descriptor count is aligned to groups of 32 descriptors. A
340 * count of 0 means the maximum 4096 descriptors. Descriptor
341 * size is aligned to 16 bytes.
342 */
343
344 unsigned int count_align = 32;
345 unsigned int desc_align = 16;
346
347 ring->base_align = 512;
348
349 if (desc_count == 0)
350 desc_count = 4096;
351
352 ring->desc_count = ALIGN(desc_count, count_align);
353
354 ring->desc_size = ALIGN(desc_size, desc_align);
355
356 ring->size = ring->desc_count * ring->desc_size;
357 ring->size_unaligned = ring->size + ring->base_align;
358
359 return ring->size_unaligned;
360 }
361
/* Zero the (aligned) descriptor area of a ring. */
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
366
/* Allocate a DMA-coherent descriptor ring, align its base to
 * ring->base_align, zero it, and initialize the free-descriptor count.
 *
 * Returns 0 on success or -ENOMEM if the coherent allocation fails.
 * Pair with vnic_dev_free_desc_ring().
 */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	/* round the bus address up; shift the cpu address by the same amount */
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	/* one descriptor is kept unused to distinguish full from empty */
	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
393
vnic_dev_free_desc_ring(struct vnic_dev * vdev,struct vnic_dev_ring * ring)394 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
395 {
396 if (ring->descs) {
397 pci_free_consistent(vdev->pdev,
398 ring->size_unaligned,
399 ring->descs_unaligned,
400 ring->base_addr_unaligned);
401 ring->descs = NULL;
402 }
403 }
404
/* Issue one devcmd through the legacy (devcmd1) register window, then
 * poll for completion unless the command is flagged no-wait. Arguments
 * are passed in vdev->args[]; results are returned there for READ cmds.
 *
 * Returns 0 on success, -ENODEV if the device reads back all-ones
 * (surprise removal), -EBUSY if a prior command is still pending,
 * -ETIMEDOUT after "wait" polls of 100us, or the negated firmware error.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
#if defined(CONFIG_MIPS) || defined(MGMT_VNIC)
	return 0;
#else
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("%s: Busy devcmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		/* args must reach the device before the cmd doorbell below */
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	/* fire-and-forget commands report no status */
	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				/* firmware places its error code in args[0] */
				err = -(int)readq(&devcmd->args[0]);
				/* CMD_CAPABILITY probes fail routinely */
				if (cmd != CMD_CAPABILITY)
					pr_err("%s: Devcmd %d failed "
						"with error code %d\n",
						pci_name(vdev->pdev),
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				/* order status read before result reads */
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("%s: Timedout devcmd %d\n",
		pci_name(vdev->pdev), _CMD_N(cmd));
	return -ETIMEDOUT;
#endif
}
475
/* Issue one devcmd through the devcmd2 work-queue interface: post a
 * command descriptor, ring the doorbell, then poll the result ring for an
 * entry whose color bit matches (firmware flips the color each wrap).
 *
 * Returns 0 on success, -ENODEV on surprise removal, -EBUSY if the WQ is
 * full, -ETIMEDOUT after "wait" polls of 100us, or the negated firmware
 * error from the result entry.
 */
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
#if defined(CONFIG_MIPS) || defined(MGMT_VNIC)
	return 0;
#else
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay;
	int err;
	u32 fetch_index;
	u32 posted = dc2c->posted;
	u32 new_posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;

	}
	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	/* WQ is full when advancing posted would catch the fetch index */
	if (new_posted == fetch_index) {
		pr_err("%s: wq is full while issuing devcmd2 command %d, "
			"fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev),
			_CMD_N(cmd),
			fetch_index, posted);
		return -EBUSY;

	}
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	}

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	/* no-result commands complete as soon as they are posted */
	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	/* advance the result cursor; flip expected color on ring wrap */
	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		/* matching color means firmware has written this entry */
		if (result->color == color) {
			if (result->error) {
				err = -(int) result->error;
				if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY)
					pr_err("%s:Error %d devcmd %d\n",
						pci_name(vdev->pdev),
						err, _CMD_N(cmd));
				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}
			return 0;
		}
	}

	pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev),
		_CMD_N(cmd));

	return -ETIMEDOUT;
#endif
}
567
vnic_dev_init_devcmd1(struct vnic_dev * vdev)568 int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
569 {
570 #if !defined(CONFIG_MIPS) && !defined(MGMT_VNIC)
571 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
572 if (!vdev->devcmd)
573 return -ENODEV;
574
575 vdev->devcmd_rtn = &_vnic_dev_cmd;
576 return 0;
577 #else
578 return 0;
579 #endif
580 }
581
vnic_dev_init_devcmd2(struct vnic_dev * vdev)582 static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
583 {
584 #if !defined(CONFIG_MIPS) && !defined(MGMT_VNIC)
585 int err;
586 unsigned int fetch_index;
587
588 if (vdev->devcmd2)
589 return 0;
590
591 vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
592 if (!vdev->devcmd2)
593 return -ENOMEM;
594
595 vdev->devcmd2->color = 1;
596 vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
597 err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
598 DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
599 if (err)
600 goto err_free_devcmd2;
601
602 fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
603 if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
604 pr_err("Fatal error in devcmd2 init - hardware surprise removal");
605 return -ENODEV;
606 }
607
608 /*
609 * Don't change fetch_index ever and
610 * set posted_index same as fetch_index
611 * when setting up the WQ for devmcd2.
612 */
613 vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, 0);
614 vdev->devcmd2->posted = fetch_index;
615 vnic_wq_enable(&vdev->devcmd2->wq);
616
617 err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
618 DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
619 if (err)
620 goto err_free_wq;
621
622 vdev->devcmd2->result =
623 (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
624 vdev->devcmd2->cmd_ring =
625 (struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
626 vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
627 vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
628 VNIC_PADDR_TARGET;
629 vdev->args[1] = DEVCMD2_RING_SIZE;
630
631 err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
632 if (err)
633 goto err_free_desc_ring;
634
635 vdev->devcmd_rtn = &_vnic_dev_cmd2;
636
637 return 0;
638
639 err_free_desc_ring:
640 vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
641 err_free_wq:
642 vnic_wq_disable(&vdev->devcmd2->wq);
643 vnic_wq_free(&vdev->devcmd2->wq);
644 err_free_devcmd2:
645 kfree(vdev->devcmd2);
646 vdev->devcmd2 = NULL;
647
648 return err;
649 #else
650 return 0;
651 #endif
652 }
653
vnic_dev_deinit_devcmd2(struct vnic_dev * vdev)654 static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
655 {
656 #if !defined(CONFIG_MIPS) && !defined(MGMT_VNIC)
657 vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
658 vnic_wq_disable(&vdev->devcmd2->wq);
659 vnic_wq_free(&vdev->devcmd2->wq);
660 kfree(vdev->devcmd2);
661 #endif
662 }
663
/* Issue "cmd" wrapped in a proxy command (by index or by bdf). args[0..1]
 * of the wrapper carry the proxy target and the proxied command; the
 * caller's args follow from slot 2. On success the proxied results are
 * copied back into args[].
 *
 * Returns 0 on success, -EINVAL if nargs exceeds the space left by the
 * two wrapper slots, the transport error, or the proxied command's error.
 */
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * Proxy command consumes 2 arguments. One for proxy index,
	 * the other is for command to be proxied
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = (*vdev->devcmd_rtn)(vdev, proxy_cmd, wait);
	if (err)
		return err;

	/* args[0] carries the proxied command's status word */
	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		/* CMD_CAPABILITY probes fail routinely; stay quiet */
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	/* proxied results start at args[1] */
	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}
702
/* Issue "cmd" directly (no proxy): stage args into vdev->args, dispatch
 * through the active devcmd backend, and copy results back out.
 * Returns 0 on success, -EINVAL if nargs is too large, or the backend's
 * error code.
 */
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	/* results are copied back even on error, matching prior behavior */
	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}
721
/* Route subsequent devcmds through the vNIC with the given index. */
void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}
727
/* Route subsequent devcmds through the device with the given PCI bdf. */
void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
{
	vdev->proxy = PROXY_BY_BDF;
	vdev->proxy_index = bdf;
}
733
/* Stop proxying: subsequent devcmds are issued for this vNIC directly. */
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}
739
/* Issue a two-argument devcmd, honoring the current proxy mode. a0/a1
 * are in-out: they carry the arguments in and, on success, the results
 * back out. Returns 0 on success or a negative error.
 */
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u64 args[2] = { *a0, *a1 };
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev,
			(vdev->proxy == PROXY_BY_INDEX) ?
				CMD_PROXY_BY_INDEX : CMD_PROXY_BY_BDF,
			cmd, args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args,
			ARRAY_SIZE(args), wait);
		break;
	}

	if (!err) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}
772
/* Issue a devcmd with an arbitrary argument array, honoring the current
 * proxy mode. args is in-out. Returns 0 on success or a negative error.
 */
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	if (vdev->proxy == PROXY_BY_INDEX)
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);

	if (vdev->proxy == PROXY_BY_BDF)
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);

	/* PROXY_NONE and any unknown mode issue the command directly */
	return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
}
788
/* Probe whether firmware supports "cmd" via CMD_CAPABILITY.
 * Returns nonzero (capable) only when the probe succeeds and a0 is 0.
 */
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}
799
/* Return firmware version info, fetching it from the device on first call
 * and caching it (the DMA buffer lives for the device's lifetime).
 * On success *fw_info points at the cached structure; returns 0 or a
 * negative error (-ENOMEM if the DMA buffer cannot be allocated).
 */
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				&a0, &a1, wait);
		else
			/* older firmware only knows the legacy opcode */
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				&a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
832
/* Report the ASIC type and revision from the (cached) firmware info.
 * Returns 0 on success or the error from vnic_dev_fw_info(); outputs are
 * written only on success.
 */
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev)
{
	struct vnic_devcmd_fw_info *fw_info;
	int err = vnic_dev_fw_info(vdev, &fw_info);

	if (!err) {
		*asic_type = fw_info->asic_type;
		*asic_rev = fw_info->asic_rev;
	}

	return err;
}
847
/* Read a 1/2/4/8-byte value from the device-specific region. On MIPS the
 * region is read directly through the mapped RES_TYPE_DEV resource;
 * otherwise CMD_DEV_SPEC is issued and the value comes back in a0.
 * size must be 1, 2, 4 or 8 (BUG() on anything else).
 * Returns 0 on success or a negative error.
 */
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
#ifdef CONFIG_MIPS
	u8 *v = vnic_dev_get_res(vdev, RES_TYPE_DEV, 0);
	if (!v) {
		pr_err("vNIC device-specific region not found.\n");
		return -EINVAL;
	}

	switch (size) {
	case 1:
		*(u8 *)value = ioread8(v + offset);
		break;
	case 2:
		*(u16 *)value = ioread16(v + offset);
		break;
	case 4:
		*(u32 *)value = ioread32(v + offset);
		break;
	case 8:
		*(u64 *)value = readq(v + offset);
		break;
	default:
		BUG();
		break;
	}

	return 0;
#else
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	/* a0 carries the value read; truncate to the requested width */
	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
#endif
}
908
/* Ask firmware to zero the device statistics counters. */
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
915
/* Have firmware DMA the current statistics into a coherent buffer
 * (allocated on first use and kept for the device's lifetime) and return
 * it via *stats. Returns 0 on success, -ENOMEM or a devcmd error.
 */
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
934
/* Issue CMD_CLOSE to shut the device down. */
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
941
/** Deprecated. @see vnic_dev_enable_wait */
int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
949
vnic_dev_enable_wait(struct vnic_dev * vdev)950 int vnic_dev_enable_wait(struct vnic_dev *vdev)
951 {
952 u64 a0 = 0, a1 = 0;
953 int wait = 1000;
954
955 if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
956 return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
957 else
958 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
959 }
960
/* Issue CMD_DISABLE to stop the device. */
int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
967
/* Start opening the device (CMD_OPEN); poll vnic_dev_open_done() for
 * completion.
 */
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
974
vnic_dev_open_done(struct vnic_dev * vdev,int * done)975 int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
976 {
977 u64 a0 = 0, a1 = 0;
978 int wait = 1000;
979 int err;
980
981 *done = 0;
982
983 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
984 if (err)
985 return err;
986
987 *done = (a0 == 0);
988
989 return 0;
990 }
991
/* Start a soft reset (CMD_SOFT_RESET); poll vnic_dev_soft_reset_done(). */
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
999
/* Check whether a soft reset has completed. *done is set to 1 when the
 * reset has finished (status word reads 0), 0 otherwise. Returns 0 on
 * success or the devcmd error.
 */
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
1016
/* Recover from a hang: use CMD_HANG_RESET when firmware supports it;
 * otherwise fall back to a soft reset followed by re-init.
 */
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}
1033
/* Check whether a hang reset has completed; falls back to the soft-reset
 * status when CMD_HANG_RESET_STATUS is unsupported. *done is set to 1 on
 * completion. Returns 0 on success or the devcmd error.
 */
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}
1055
/* Tell firmware the host believes the device is hung. */
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
1062
/* Fetch the device's MAC address into mac_addr (ETH_ALEN bytes). On the
 * MIPS/mgmt build the first ETH_ALEN bytes of a u64 holding 0x02 are
 * used as a locally-administered address.
 * NOTE(review): that memcpy is byte-order dependent — on a big-endian
 * build the 0x02 would land outside the copied bytes; confirm intent.
 * Returns 0 on success or the devcmd error (mac_addr zeroed on entry).
 */
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
#if defined(CONFIG_MIPS) || defined(MGMT_VNIC)
	u64 laa = 0x02;
	memcpy(mac_addr, &laa, ETH_ALEN);
	return 0;
#else
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	/* firmware returns the address packed into a0 */
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
#endif
}
1087
/* Program the RX packet filter from the given boolean flags.
 * Returns 0 on success or the devcmd error (also logged).
 */
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	/* fold the flags into the CMD_PFILTER_* bitmask */
	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}
1107
/* Like vnic_dev_packet_filter(), but uses CMD_PACKET_FILTER_ALL so the
 * filter applies to all sub-vNICs.
 * Returns 0 on success or the devcmd error (also logged).
 */
int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
	int multicast, int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	/* fold the flags into the CMD_PFILTER_* bitmask */
	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}
1127
/*
 * Add a MAC address filter entry for this vNIC.
 * Returns 0 on success or a devcmd error code (logged on failure).
 */
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	/* Address travels packed in the low bytes of a0 */
	memcpy(&a0, addr, ETH_ALEN);

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}
1146
/*
 * Remove a MAC address filter entry from this vNIC.
 * Returns 0 on success or a devcmd error code (logged on failure).
 */
int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	/* Address travels packed in the low bytes of a0 */
	memcpy(&a0, addr, ETH_ALEN);

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}
1165
/*
 * Set the ingress VLAN rewrite mode.  Silently succeeds when the
 * firmware does not support the devcmd.
 */
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (!vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return 0;

	return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
}
1178
/*
 * Ask firmware to assert interrupt 'intr' (Interrupt Assert Register
 * devcmd).  Returns 0 on success or a devcmd error code (logged).
 */
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
{
	u64 a0 = intr, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
	if (err)
		pr_err("Failed to raise INTR[%d], err %d\n", intr, err);

	return err;
}
1191
/*
 * Register an already-allocated notify block with firmware.
 *
 * @notify_addr/@notify_pa: CPU/DMA addresses of the notify buffer.
 * @intr: interrupt to use for notify updates.
 *
 * Argument encoding: a0 carries the DMA address; a1 carries the intr
 * number in bits 47:32 and the buffer size in the low bits.  On success
 * firmware returns (in a1) the notify size it will actually use, which
 * is cached in vdev->notify_sz.
 */
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	/* notify_sz stays 0 on failure so vnic_dev_notify_ready() bails */
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
	return r;
}
1211
/*
 * Allocate a DMA-coherent notify block and register it with firmware.
 * Returns -EINVAL if a notify block is already registered, -ENOMEM if
 * allocation fails, otherwise the CMD_NOTIFY result.
 */
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		pr_err("notify block %p still allocated", vdev->notify);
		return -EINVAL;
	}

	/* Fixed: "&notify_pa" had been corrupted to "¬ify_pa"
	 * (stray HTML entity), which does not compile.
	 */
	notify_addr = pci_alloc_consistent(vdev->pdev,
		sizeof(struct vnic_devcmd_notify),
		&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
1230
/*
 * Tell firmware to stop using the notify block and clear the cached
 * notify state.  Uses the same CMD_NOTIFY encoding as setcmd, with
 * paddr 0 and intr -1 meaning "unset".
 */
static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	/* Clear cached state even if the devcmd failed */
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}
1248
/*
 * Free the notify block and deregister it with firmware.
 *
 * NOTE(review): the buffer is freed *before* firmware is told to stop
 * using it via vnic_dev_notify_unsetcmd(); presumably the device does
 * not write the block between these two steps — confirm against the
 * firmware contract before reordering.
 */
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
1260
/*
 * Take a consistent snapshot of the firmware notify block into
 * vdev->notify_copy.  Word 0 of the block is a checksum (sum of the
 * remaining 32-bit words); re-copy until the checksum matches so a
 * concurrent firmware update cannot leave us with a torn snapshot.
 * Returns 1 when a consistent copy is available, 0 when notify is not
 * set up.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
1281
/*
 * Initialize the vNIC with CMD_INITF_* flags in 'arg'.  Falls back to
 * the legacy CMD_INIT_v1 when CMD_INIT is not supported; in that case
 * the default-MAC behavior is emulated by fetching the MAC address and
 * programming it as a filter.
 */
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}
1302
/*
 * Poll init completion.  *done is set non-zero once CMD_INIT_STATUS
 * reports completion (a0 == 0); on completion *err receives the init
 * error status from a1.  Returns 0 unless the devcmd itself failed.
 */
int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int ret;

	*done = 0;

	ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
	if (ret)
		return ret;

	*done = (a0 == 0);

	*err = (a0 == 0) ? (int)a1 : 0;

	return 0;
}
1321
/*
 * Send a provisioning-info blob (len bytes at buf) to firmware.  The
 * blob is staged in a temporary DMA-coherent buffer which is freed
 * before return regardless of the devcmd result.
 */
int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}
1344
vnic_dev_deinit(struct vnic_dev * vdev)1345 int vnic_dev_deinit(struct vnic_dev *vdev)
1346 {
1347 u64 a0 = 0, a1 = 0;
1348 int wait = 1000;
1349
1350 return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
1351 }
1352
EXPORT_SYMBOL(vnic_dev_intr_coal_timer_info_default);
/*
 * Install the default usec<->hw conversion factors for the interrupt
 * coalescing timer, used when firmware cannot supply them.
 */
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs,
	 * i.e. usec = hw * div / mul = hw * 3 / 2.
	 */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}
1362
/*
 * Query firmware for the interrupt coalescing timer conversion factors
 * (mul, div, max_usec in args[0..2]).  Falls back to the built-in
 * defaults when the devcmd is unknown or returns unusable (zero)
 * factors.  Returns 0 on success (including the fallback path) or the
 * devcmd error.
 */
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = (*vdev->devcmd_rtn)(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
		(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		pr_warning("Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}
1394
/*
 * Current link state from the firmware notify block; 0 when the notify
 * block is not ready.  Hardwired up on MIPS builds.
 */
int vnic_dev_link_status(struct vnic_dev *vdev)
{
#ifdef CONFIG_MIPS
	return 1;
#else
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
#endif
}
1406
vnic_dev_port_speed(struct vnic_dev * vdev)1407 u32 vnic_dev_port_speed(struct vnic_dev *vdev)
1408 {
1409 if (!vnic_dev_notify_ready(vdev))
1410 return 0;
1411
1412 return vdev->notify_copy.port_speed;
1413 }
1414
vnic_dev_msg_lvl(struct vnic_dev * vdev)1415 u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
1416 {
1417 if (!vnic_dev_notify_ready(vdev))
1418 return 0;
1419
1420 return vdev->notify_copy.msglvl;
1421 }
1422
/*
 * MTU from the firmware notify block, or 0 if not ready.  Fixed at
 * 1500 on MIPS/management-vNIC builds.
 */
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
#if defined(CONFIG_MIPS) || defined(MGMT_VNIC)
	return 1500;
#else
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
#endif
}
1434
vnic_dev_link_down_cnt(struct vnic_dev * vdev)1435 u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
1436 {
1437 if (!vnic_dev_notify_ready(vdev))
1438 return 0;
1439
1440 return vdev->notify_copy.link_down_cnt;
1441 }
1442
vnic_dev_notify_status(struct vnic_dev * vdev)1443 u32 vnic_dev_notify_status(struct vnic_dev *vdev)
1444 {
1445 if (!vnic_dev_notify_ready(vdev))
1446 return 0;
1447
1448 return vdev->notify_copy.status;
1449 }
1450
vnic_dev_uif(struct vnic_dev * vdev)1451 u32 vnic_dev_uif(struct vnic_dev *vdev)
1452 {
1453 if (!vnic_dev_notify_ready(vdev))
1454 return 0;
1455
1456 return vdev->notify_copy.uif;
1457 }
1458
vnic_dev_perbi_rebuild_cnt(struct vnic_dev * vdev)1459 u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev)
1460 {
1461 if (!vnic_dev_notify_ready(vdev))
1462 return 0;
1463
1464 return vdev->notify_copy.perbi_rebuild_cnt;
1465 }
1466
EXPORT_SYMBOL(vnic_dev_set_intr_mode);
/* Record the interrupt mode (INTx/MSI/MSI-X) chosen for this vNIC. */
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
1473
EXPORT_SYMBOL(vnic_dev_get_intr_mode);
/* Return the interrupt mode previously set with vnic_dev_set_intr_mode(). */
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
1480
/* Convert an interrupt coalescing interval from microseconds to hw units. */
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}
1486
/* Convert an interrupt coalescing interval from hw units to microseconds. */
u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}
1492
/* Maximum supported interrupt coalescing interval, in microseconds. */
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}
1497
/*
 * Tear down a vnic_dev: free the notify, stats and fw_info DMA buffers
 * (if allocated), de-init devcmd2, then free the vnic_dev itself.
 * Safe to call with a NULL vdev.
 */
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
EXPORT_SYMBOL(vnic_dev_unregister);
1521
/*
 * Allocate a vnic_dev (unless the caller supplies one) and discover its
 * resources from the given BARs.  On discovery failure the vnic_dev is
 * unregistered (freeing it, including a caller-supplied one) and NULL
 * is returned.  Uses GFP_ATOMIC; may be called from atomic context.
 */
struct vnic_dev *vnic_dev_alloc_discover(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_alloc_discover);
1545
/*
 * Allocate/discover a vnic_dev and initialize the legacy (devcmd1)
 * command path.  Returns the vnic_dev or NULL on failure (the vnic_dev
 * is freed on the devcmd1-init failure path).
 */
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	vdev = vnic_dev_alloc_discover(vdev, priv, pdev, bar, num_bars);
	if (!vdev)
		goto err_out;

	if (vnic_dev_init_devcmd1(vdev))
		goto err_free;

	return vdev;

err_free:
	vnic_dev_unregister(vdev);
err_out:
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);
1565
/* Return the PCI device backing this vnic_dev. */
struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
1571
/*
 * Select and initialize the devcmd transport.  Prefers the DEVCMD2
 * resource when present; otherwise falls back to legacy devcmd1 only
 * if 'fallback' is non-zero.  No-op (returns 0) on MIPS/management
 * builds.
 */
int vnic_devcmd_init(struct vnic_dev *vdev, int fallback)
{
#if !defined(CONFIG_MIPS) && !defined(MGMT_VNIC)
	int err;
	void *p;

	p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p)
		err = vnic_dev_init_devcmd2(vdev);
	else if (fallback) {
		pr_warning("DEVCMD2 resource not found, fall back to devcmd\n");
		err = vnic_dev_init_devcmd1(vdev);
	} else {
		pr_err("DEVCMD2 resource not found, no fall back to devcmd allowed\n");
		err = -ENODEV;
	}

	return err;
#else
	return 0;
#endif
}
1594
/* Issue the INT13 devcmd with the given argument and sub-operation. */
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op)
{
	int wait = 1000;
	u64 a0 = arg, a1 = op;

	return vnic_dev_cmd(vdev, CMD_INT13, &a0, &a1, wait);
}
1604
/*
 * Issue the PERBI devcmd with the given argument and sub-operation.
 * Uses a longer (5s) devcmd timeout than most commands in this file.
 */
int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op)
{
	int wait = 5000;
	u64 a0 = arg, a1 = op;

	return vnic_dev_cmd(vdev, CMD_PERBI, &a0, &a1, wait);
}
1615
/*
 * Send a version-2 provisioning-info blob (len bytes at buf) to
 * firmware.  Same staging scheme as vnic_dev_init_prov(): the blob is
 * copied into a temporary DMA-coherent buffer which is freed before
 * return regardless of the devcmd result.
 */
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}
1638
vnic_dev_enable2(struct vnic_dev * vdev,int active)1639 int vnic_dev_enable2(struct vnic_dev *vdev, int active)
1640 {
1641 u64 a0, a1 = 0;
1642 int wait = 1000;
1643
1644 a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
1645
1646 return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
1647 }
1648
/*
 * Query the completion status of an asynchronous devcmd 'cmd' via
 * CMD_STATUS.  On success *status receives the command's status value.
 */
static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}
1662
/* Poll completion status of a previously issued CMD_ENABLE2. */
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}
1667
/* Poll completion status of a previously issued CMD_DEINIT. */
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}
1672
/* Program mac_addr[ETH_ALEN] as the vNIC's MAC address in firmware. */
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	/* Address travels packed in the low bytes of a0 */
	memcpy(&a0, mac_addr, ETH_ALEN);

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}
1684
1685 /*
1686 * vnic_dev_classifier: Add/Delete classifier entries
1687 * @vdev: vdev of the device
1688 * @cmd: CLSF_ADD for Add filter
1689 * CLSF_DEL for Delete filter
1690 * @entry: In case of ADD filter, the caller passes the RQ number in this variable.
1691 * This function stores the filter_id returned by the
1692 * firmware in the same variable before return;
1693 *
1694 * In case of DEL filter, the caller passes the RQ number. Return
1695 * value is irrelevant.
1696 * @data: filter data
1697 */
vnic_dev_classifier(struct vnic_dev * vdev,u8 cmd,u16 * entry,struct filter * data)1698 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter *data)
1699 {
1700 u64 a0, a1;
1701 int wait = 1000;
1702 dma_addr_t tlv_pa;
1703 int ret = -EINVAL;
1704 struct filter_tlv *tlv, *tlv_va;
1705 struct filter_action *action;
1706 u64 tlv_size;
1707
1708 if (cmd == CLSF_ADD) {
1709 tlv_size = sizeof(struct filter) +
1710 sizeof(struct filter_action) +
1711 2*sizeof(struct filter_tlv);
1712 tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
1713 if (!tlv_va)
1714 return -ENOMEM;
1715 tlv = tlv_va;
1716 a0 = tlv_pa;
1717 a1 = tlv_size;
1718 memset(tlv, 0, tlv_size);
1719 tlv->type = CLSF_TLV_FILTER;
1720 tlv->length = sizeof(struct filter);
1721 *(struct filter *)&tlv->val = *data;
1722
1723 tlv = (struct filter_tlv *)((char *)tlv +
1724 sizeof(struct filter_tlv) +
1725 sizeof(struct filter));
1726
1727 tlv->type = CLSF_TLV_ACTION;
1728 tlv->length = sizeof (struct filter_action);
1729 action = (struct filter_action *)&tlv->val;
1730 action->type = FILTER_ACTION_RQ_STEERING;
1731 action->u.rq_idx = *entry;
1732
1733 ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
1734 *entry = (u16)a0;
1735 pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
1736 } else if (cmd == CLSF_DEL) {
1737 a0 = *entry;
1738 a1 = 0;
1739 ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
1740 }
1741
1742 return ret;
1743 }
1744
/*
 * Enable/disable/update overlay offload for the given overlay type.
 * (Removed a dead `ret = -EINVAL` initializer that was unconditionally
 * overwritten by the devcmd result.)
 */
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay,
	u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}
1759
/*
 * Configure the UDP port used for the given overlay (e.g. VXLAN).
 * (Removed a dead `ret = -EINVAL` initializer that was unconditionally
 * overwritten by the devcmd result.)
 */
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
{
	u64 a0 = overlay;
	u64 a1 = vxlan_udp_port_number;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}
1774
/*
 * Query the firmware-supported version bitmap for 'feature'; on success
 * *supported_versions receives the bitmap.  (Removed a dead
 * `ret = -EINVAL` initializer that was unconditionally overwritten.)
 */
int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
	u64 *supported_versions)
{
	u64 a0 = feature, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	if (!ret)
		*supported_versions = a0;

	return ret;
}
1788