/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author: David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */

#ifdef CONFIG_ECORE_SRIOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(if_t ifp);
static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
    struct qlnx_link_output *if_link);
static int qlnx_transmit(if_t ifp, struct mbuf *mp);
static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
    struct mbuf *mp);
static void qlnx_qflush(if_t ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
    struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
    char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
    uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
    uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
    struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
    int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
    int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
    int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
    struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);

#ifndef QLNX_VF

#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */

static device_method_t qlnx_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
    DEVMETHOD(pci_iov_init, qlnx_iov_init),
    DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
    { 0, 0 }
};

static driver_t qlnx_pci_driver = {
    "ql", qlnx_pci_methods, sizeof(qlnx_host_t),
};

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

#else

static device_method_t qlnxv_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),
    { 0, 0 }
};

static driver_t qlnxv_pci_driver = {
    "ql", qlnxv_pci_methods, sizeof(qlnx_host_t),
};

MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC              0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634      0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644      0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656      0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654      0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070      0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090      0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");

/*
 * Note on the RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If that personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and the
 * personality configured in sysctl is QLNX_PERSONALITY_DEFAULT, use
 * the personality from NVRAM.
 * Otherwise use the personality configured in sysctl.
 */
#define QLNX_PERSONALITY_DEFAULT       0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY      0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP     0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE      0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC 4
#define QLNX_PERSONALIY_MASK           0xF
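/*
 * Worked example (derived from the defines above): with the default
 * qlnxe_rdma_configuration of 0x22222222, PCI function 3 selects nibble
 * (0x22222222 >> (3 * QLNX_PERSONALITY_BITS_PER_FUNC)) & QLNX_PERSONALIY_MASK
 * = 0x2, i.e. QLNX_PERSONALITY_ETH_IWARP; see qlnx_get_personality() below.
 */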

/*
 * RDMA configuration; the 64-bit field allows a setting for each of
 * 16 physical functions.
 */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");

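/*
 * Return convention used throughout this file: qlnx_vf_device() returns 0
 * when the device is the SR-IOV VF (device id 0x8090) and -1 otherwise,
 * so "qlnx_vf_device(ha) != 0" reads as "this function is a PF".
 */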
int
qlnx_vf_device(qlnx_host_t *ha)
{
    uint16_t device_id;

    device_id = ha->device_id;

    if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
        return 0;

    return -1;
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
    uint16_t device_id;

    device_id = ha->device_id;

#ifndef QLNX_VF
    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_8070))
        return 0;
#else
    if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
        return 0;

#endif /* #ifndef QLNX_VF */
    return -1;
}

#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
    uint16_t device_id;

    device_id = pci_get_device(ha->pci_dev);

    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_8070))
        return (0);

    return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */

/*
 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLogic adapter
 */
static int
qlnx_pci_probe(device_t dev)
{
    snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
    snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

    if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
        return (ENXIO);
    }

    switch (pci_get_device(dev)) {
#ifndef QLNX_VF

    case QLOGIC_PCI_DEVICE_ID_1644:
        device_set_descf(dev, "%s v%d.%d.%d",
            "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        break;

    case QLOGIC_PCI_DEVICE_ID_1634:
        device_set_descf(dev, "%s v%d.%d.%d",
            "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        break;

    case QLOGIC_PCI_DEVICE_ID_1656:
        device_set_descf(dev, "%s v%d.%d.%d",
            "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        break;

    case QLOGIC_PCI_DEVICE_ID_1654:
        device_set_descf(dev, "%s v%d.%d.%d",
            "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        break;

    case QLOGIC_PCI_DEVICE_ID_8070:
        device_set_descf(dev, "%s v%d.%d.%d",
            "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
            " Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        break;

#else
    case QLOGIC_PCI_DEVICE_ID_8090:
        device_set_descf(dev, "%s v%d.%d.%d",
            "Qlogic SRIOV PCI CNA (AH) "
            "Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        break;

#endif /* #ifndef QLNX_VF */

    default:
        return (ENXIO);
    }

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

    return (BUS_PROBE_DEFAULT);
}

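/*
 * The producer/consumer indices below are 16-bit values that wrap; the
 * unsigned 16-bit subtraction yields the number of outstanding transmit
 * completions modulo 2^16, which remains correct across index wrap-around.
 */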
static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq)
{
    u16 hw_bd_cons;
    u16 ecore_cons_idx;

    hw_bd_cons = le16toh(*txq->hw_cons_ptr);

    ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);

    return (hw_bd_cons - ecore_cons_idx);
}

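/*
 * Slowpath interrupt handling: the ISR only identifies which hw-function
 * raised the interrupt and defers the actual work (qlnx_sp_isr()) to the
 * per-hwfn slowpath taskqueue created in qlnx_create_sp_taskqueues().
 */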
static void
qlnx_sp_intr(void *arg)
{
    struct ecore_hwfn *p_hwfn;
    qlnx_host_t *ha;
    int i;

    p_hwfn = arg;

    if (p_hwfn == NULL) {
        printf("%s: spurious slowpath intr\n", __func__);
        return;
    }

    ha = (qlnx_host_t *)p_hwfn->p_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (&ha->cdev.hwfns[i] == p_hwfn) {
            taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
            break;
        }
    }
    QL_DPRINT2(ha, "exit\n");

    return;
}

static void
qlnx_sp_taskqueue(void *context, int pending)
{
    struct ecore_hwfn *p_hwfn;

    p_hwfn = context;

    if (p_hwfn != NULL) {
        qlnx_sp_isr(p_hwfn);
    }
    return;
}

static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
    int i;
    char tq_name[32];

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        bzero(tq_name, sizeof(tq_name));
        snprintf(tq_name, sizeof(tq_name), "ql_sp_tq_%d", i);

        TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

        ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

        if (ha->sp_taskqueue[i] == NULL)
            return (-1);

        taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
    }

    return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
    int i;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_taskqueue[i] != NULL) {
            taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
            taskqueue_free(ha->sp_taskqueue[i]);
        }
    }
    return;
}

static void
qlnx_fp_taskqueue(void *context, int pending)
{
    struct qlnx_fastpath *fp;
    qlnx_host_t *ha;
    if_t ifp;
#ifdef QLNX_TRACE_PERF_DATA
    /* snapshots of the counters updated by qlnx_transmit_locked() below */
    uint64_t tx_pkts;
    uint64_t tx_compl;
#endif

    fp = context;

    if (fp == NULL)
        return;

    ha = (qlnx_host_t *)fp->edev;

    ifp = ha->ifp;

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
        if (!drbr_empty(ifp, fp->tx_br)) {
            if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
                tx_pkts = fp->tx_pkts_transmitted;
                tx_compl = fp->tx_pkts_completed;
#endif

                qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
                fp->tx_pkts_trans_fp +=
                    (fp->tx_pkts_transmitted - tx_pkts);
                fp->tx_pkts_compl_fp +=
                    (fp->tx_pkts_completed - tx_compl);
#endif
                mtx_unlock(&fp->tx_mtx);
            }
        }
    }

    QL_DPRINT2(ha, "exit\n");
    return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    char tq_name[32];
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        bzero(tq_name, sizeof(tq_name));
        snprintf(tq_name, sizeof(tq_name), "ql_fp_tq_%d", i);

        TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue,
            &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
    }

    return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }
    return;
}

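/*
 * The hardware lock is dropped around taskqueue_drain() below so that the
 * caller does not hold it while waiting for a queued fastpath task to
 * finish; presumably this avoids stalling or deadlocking the drain.
 */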
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            QLNX_UNLOCK(ha);
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            QLNX_LOCK(ha);
        }
    }
    return;
}

static void
qlnx_get_params(qlnx_host_t *ha)
{
    if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
        device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
            qlnxe_queue_count);
        qlnxe_queue_count = 0;
    }
    return;
}

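/*
 * Error recovery sequence: stop the interface, tear down and restart the
 * slowpath (detaching/re-attaching the RDMA device around it when iWARP
 * is enabled), re-init the interface and re-arm the periodic timer.
 */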
static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
    qlnx_host_t *ha;

    ha = context;

    QL_DPRINT2(ha, "enter\n");

    QLNX_LOCK(ha);
    qlnx_stop(ha);
    QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

    qlnx_slowpath_stop(ha);
    qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

    qlnx_init(ha);

    callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

    QL_DPRINT2(ha, "exit\n");

    return;
}

static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
    char tq_name[32];

    bzero(tq_name, sizeof(tq_name));
    snprintf(tq_name, sizeof(tq_name), "ql_err_tq");

    TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

    ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
        taskqueue_thread_enqueue, &ha->err_taskqueue);

    if (ha->err_taskqueue == NULL)
        return (-1);

    taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

    QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

    return (0);
}

static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
    if (ha->err_taskqueue != NULL) {
        taskqueue_drain(ha->err_taskqueue, &ha->err_task);
        taskqueue_free(ha->err_taskqueue);
    }

    ha->err_taskqueue = NULL;

    return;
}

/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
    qlnx_host_t *ha = NULL;
    uint32_t rsrc_len_reg __unused = 0;
    uint32_t rsrc_len_dbells = 0;
    uint32_t rsrc_len_msix __unused = 0;
    int i;
    uint32_t mfw_ver;
    uint32_t num_sp_msix = 0;
    uint32_t num_rdma_irqs = 0;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof(qlnx_host_t));

    ha->device_id = pci_get_device(dev);

    if (qlnx_valid_device(ha) != 0) {
        device_printf(dev, "device is not a supported device\n");
        return (ENXIO);
    }
    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    sx_init(&ha->hw_lock, "qlnx_hw_lock");

    ha->flags.lock_init = 1;

    pci_enable_busmaster(dev);

    /*
     * map the PCI BARs
     */

    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
        RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map BAR0\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->reg_rid);

    ha->dbells_rid = PCIR_BAR(2);
    rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
        SYS_RES_MEMORY, ha->dbells_rid);
    if (rsrc_len_dbells) {
        ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &ha->dbells_rid, RF_ACTIVE);

        if (ha->pci_dbells == NULL) {
            device_printf(dev, "unable to map BAR1\n");
            goto qlnx_pci_attach_err;
        }
        ha->dbells_phys_addr = (uint64_t)
            bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

        ha->dbells_size = rsrc_len_dbells;
    } else {
        if (qlnx_vf_device(ha) != 0) {
            device_printf(dev, "BAR1 size is zero\n");
            goto qlnx_pci_attach_err;
        }
    }

    ha->msix_rid = PCIR_BAR(4);
    ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->msix_rid, RF_ACTIVE);

    if (ha->msix_bar == NULL) {
        device_printf(dev, "unable to map BAR2\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->msix_rid);

    ha->dbg_level = 0x0000;

    QL_DPRINT1(ha, "\n\t\t\t"
        "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
        "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
        "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
        " msix_avail = 0x%x "
        "\n\t\t\t[ncpus = %d]\n",
        ha->pci_dev, ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        mp_ncpus);

    /*
     * allocate dma tags
     */

    if (qlnx_alloc_parent_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_tx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_rx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_init_hw(ha) != 0)
        goto qlnx_pci_attach_err;

    ha->flags.hw_init = 1;

    qlnx_get_params(ha);

    if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
        (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
        qlnxe_queue_count = QLNX_MAX_RSS;
    }

    /*
     * Allocate MSI-x vectors
     */
    if (qlnx_vf_device(ha) != 0) {
        if (qlnxe_queue_count == 0)
            ha->num_rss = QLNX_DEFAULT_RSS;
        else
            ha->num_rss = qlnxe_queue_count;

        num_sp_msix = ha->cdev.num_hwfns;
    } else {
        uint8_t max_rxq;
        uint8_t max_txq;

        ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
        /* assumption: ecore_vf_get_num_txqs() mirrors the rxq query */
        ecore_vf_get_num_txqs(&ha->cdev.hwfns[0], &max_txq);

        if (max_rxq < max_txq)
            ha->num_rss = max_rxq;
        else
            ha->num_rss = max_txq;

        if (ha->num_rss > QLNX_MAX_VF_RSS)
            ha->num_rss = QLNX_MAX_VF_RSS;

        num_sp_msix = 0;
    }

    if (ha->num_rss > mp_ncpus)
        ha->num_rss = mp_ncpus;

    ha->num_tc = QLNX_MAX_TC;

    ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

    num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

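    /*
     * MSI-X budget: one vector per slowpath hw-function (PF only), one
     * per fastpath RSS queue, plus whatever the RDMA personality needs;
     * fail the attach when fewer than (num_sp_msix + 1 + num_rdma_irqs)
     * vectors are available.
     */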
    if (!ha->msix_count ||
        (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
        ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
    else
        ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

    QL_DPRINT1(ha, "\n\t\t\t"
        "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
        "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
        "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
        " msix_avail = 0x%x msix_alloc = 0x%x"
        "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
        ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        ha->msix_count = 0;
        goto qlnx_pci_attach_err;
    }

    /*
     * Initialize slow path interrupt and task queue
     */

    if (num_sp_msix) {
        if (qlnx_create_sp_taskqueues(ha) != 0)
            goto qlnx_pci_attach_err;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
            struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

            ha->sp_irq_rid[i] = i + 1;
            ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &ha->sp_irq_rid[i],
                (RF_ACTIVE | RF_SHAREABLE));
            if (ha->sp_irq[i] == NULL) {
                device_printf(dev,
                    "could not allocate slowpath interrupt\n");
                goto qlnx_pci_attach_err;
            }

            if (bus_setup_intr(dev, ha->sp_irq[i],
                (INTR_TYPE_NET | INTR_MPSAFE), NULL,
                qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
                device_printf(dev,
                    "could not setup slow path interrupt\n");
                goto qlnx_pci_attach_err;
            }

            QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
                " sp_irq %p sp_handle %p\n", p_hwfn,
                ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
        }
    }

    /*
     * initialize fast path interrupt
     */
    if (qlnx_create_fp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->num_rss; i++) {
        ha->irq_vec[i].rss_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->irq_vec[i].irq_rid,
            (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev,
                "could not allocate interrupt[%d] irq_rid = %d\n",
                i, ha->irq_vec[i].irq_rid);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
            device_printf(dev, "could not allocate tx_br[%d]\n", i);
            goto qlnx_pci_attach_err;
        }
    }

    if (qlnx_vf_device(ha) != 0) {
        callout_init(&ha->qlnx_callout, 1);
        ha->flags.callout_init = 1;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
            if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
                goto qlnx_pci_attach_err;
            if (ha->grcdump_size[i] == 0)
                goto qlnx_pci_attach_err;

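            /* the dump sizes are returned in dwords; convert to bytes */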
            ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
            QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
                i, ha->grcdump_size[i]);

            ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
            if (ha->grcdump[i] == NULL) {
                device_printf(dev, "grcdump alloc[%d] failed\n", i);
                goto qlnx_pci_attach_err;
            }

            if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
                goto qlnx_pci_attach_err;
            if (ha->idle_chk_size[i] == 0)
                goto qlnx_pci_attach_err;

            ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
            QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
                i, ha->idle_chk_size[i]);

            ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

            if (ha->idle_chk[i] == NULL) {
                device_printf(dev, "idle_chk alloc failed\n");
                goto qlnx_pci_attach_err;
            }
        }

        if (qlnx_create_error_recovery_taskqueue(ha) != 0)
            goto qlnx_pci_attach_err;
    }

    if (qlnx_slowpath_start(ha) != 0)
        goto qlnx_pci_attach_err;
    else
        ha->flags.slowpath_start = 1;

    if (qlnx_vf_device(ha) != 0) {
        if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
            qlnx_mdelay(__func__, 1000);
            qlnx_trigger_dump(ha);

            goto qlnx_pci_attach_err0;
        }

        if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
            qlnx_mdelay(__func__, 1000);
            qlnx_trigger_dump(ha);

            goto qlnx_pci_attach_err0;
        }
    } else {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
        ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
    }

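    /*
     * mfw_ver packs the management-firmware version as four bytes:
     * major.minor.revision.engineering, most significant byte first.
     */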
    snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
        ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
        ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
    snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
        FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
        FW_ENGINEERING_VERSION);

    QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
        ha->stormfw_ver, ha->mfw_ver);

    qlnx_init_ifnet(dev, ha);

    /*
     * add sysctls
     */
    qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
    /*
     * create ioctl device interface
     */
    if (qlnx_vf_device(ha) != 0) {
        if (qlnx_make_cdev(ha)) {
            device_printf(dev, "%s: qlnx_make_cdev failed\n", __func__);
            goto qlnx_pci_attach_err;
        }

#ifdef QLNX_ENABLE_IWARP
        qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

    if (qlnx_vf_device(ha) != 0)
        qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

    QL_DPRINT2(ha, "success\n");

    return (0);

qlnx_pci_attach_err:

    qlnx_release(ha);

    return (ENXIO);
}

/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
    qlnx_host_t *ha = NULL;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "%s: cannot get softc\n", __func__);
        return (ENOMEM);
    }

    if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
        int ret;

        ret = pci_iov_detach(dev);
        if (ret) {
            device_printf(dev, "%s: SRIOV in use\n", __func__);
            return (ret);
        }

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
        if (qlnx_rdma_dev_remove(ha) != 0)
            return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

    QLNX_LOCK(ha);
    qlnx_stop(ha);
    QLNX_UNLOCK(ha);

    qlnx_release(ha);

    return (0);
}

#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
    uint8_t personality;

    personality = (qlnxe_rdma_configuration >>
        (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
        QLNX_PERSONALIY_MASK;
    return (personality);
}

static void
qlnx_set_personality(qlnx_host_t *ha)
{
    uint8_t personality;

    personality = qlnx_get_personality(ha->pci_func);

    switch (personality) {
    case QLNX_PERSONALITY_DEFAULT:
        device_printf(ha->pci_dev, "%s: DEFAULT\n",
            __func__);
        ha->personality = ECORE_PCI_DEFAULT;
        break;

    case QLNX_PERSONALITY_ETH_ONLY:
        device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
            __func__);
        ha->personality = ECORE_PCI_ETH;
        break;

    case QLNX_PERSONALITY_ETH_IWARP:
        device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
            __func__);
        ha->personality = ECORE_PCI_ETH_IWARP;
        break;

    case QLNX_PERSONALITY_ETH_ROCE:
        device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
            __func__);
        ha->personality = ECORE_PCI_ETH_ROCE;
        break;
    }

    return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */

static int
qlnx_init_hw(qlnx_host_t *ha)
{
    int rval = 0;
    struct ecore_hw_prepare_params params;

    ha->cdev.ha = ha;
    ecore_init_struct(&ha->cdev);

    /* ha->dp_module = ECORE_MSG_PROBE |
            ECORE_MSG_INTR |
            ECORE_MSG_SP |
            ECORE_MSG_LINK |
            ECORE_MSG_SPQ |
            ECORE_MSG_RDMA;
       ha->dp_level = ECORE_LEVEL_VERBOSE; */
    //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
    ha->dp_level = ECORE_LEVEL_NOTICE;
    //ha->dp_level = ECORE_LEVEL_VERBOSE;

    ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

    ha->cdev.regview = ha->pci_reg;

    ha->personality = ECORE_PCI_DEFAULT;

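    /*
     * A VF may expose no separate doorbell BAR; in that case fall back
     * to the register BAR for doorbells (see the else clause below).
     */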
    if (qlnx_vf_device(ha) == 0) {
        ha->cdev.b_is_vf = true;

        if (ha->pci_dbells != NULL) {
            ha->cdev.doorbells = ha->pci_dbells;
            ha->cdev.db_phys_addr = ha->dbells_phys_addr;
            ha->cdev.db_size = ha->dbells_size;
        } else {
            ha->pci_dbells = ha->pci_reg;
        }
    } else {
        ha->cdev.doorbells = ha->pci_dbells;
        ha->cdev.db_phys_addr = ha->dbells_phys_addr;
        ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

        if (qlnx_rdma_supported(ha) == 0)
            qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
    }
    QL_DPRINT2(ha, "%s: %s\n", __func__,
        (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp" : "ethernet"));

    bzero(&params, sizeof(struct ecore_hw_prepare_params));

    params.personality = ha->personality;

    params.drv_resc_alloc = false;
    params.chk_reg_fifo = false;
    params.initiate_pf_flr = true;
    params.epoch = 0;

    ecore_hw_prepare(&ha->cdev, &params);

    qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

    QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
        ha, &ha->cdev, &ha->cdev.hwfns[0]);

    return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
        if (ha->idle_chk[i] != NULL) {
            free(ha->idle_chk[i], M_QLNXBUF);
            ha->idle_chk[i] = NULL;
        }

        if (ha->grcdump[i] != NULL) {
            free(ha->grcdump[i], M_QLNXBUF);
            ha->grcdump[i] = NULL;
        }
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    if (ha->flags.slowpath_start) {
        qlnx_slowpath_stop(ha);
    }

    if (ha->flags.hw_init)
        ecore_hw_remove(&ha->cdev);

    qlnx_del_cdev(ha);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qlnx_free_tx_dma_tag(ha);

    qlnx_free_rx_dma_tag(ha);

    qlnx_free_parent_dma_tag(ha);

    if (qlnx_vf_device(ha) != 0) {
        qlnx_destroy_error_recovery_taskqueue(ha);
    }

    for (i = 0; i < ha->num_rss; i++) {
        struct qlnx_fastpath *fp = &ha->fp_array[i];

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qlnx_free_tx_br(ha, fp);
    }
    qlnx_destroy_fp_taskqueues(ha);

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_handle[i])
            (void)bus_teardown_intr(dev, ha->sp_irq[i],
                ha->sp_handle[i]);

        if (ha->sp_irq[i])
            (void) bus_release_resource(dev, SYS_RES_IRQ,
                ha->sp_irq_rid[i], ha->sp_irq[i]);
    }

    qlnx_destroy_sp_taskqueues(ha);

    if (ha->msix_count)
        pci_release_msi(dev);

    if (ha->flags.lock_init) {
        sx_destroy(&ha->hw_lock);
    }

    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
            ha->pci_reg);

    if (ha->dbells_size && ha->pci_dbells)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
            ha->pci_dbells);

    if (ha->msix_bar)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
            ha->msix_bar);

    QL_DPRINT2(ha, "exit\n");
    return;
}

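/*
 * qlnx_trigger_dump() marks the interface down, flags error recovery and
 * captures a GRC dump and an idle-check dump for every hw-function; the
 * dump buffers were sized and allocated during attach.
 */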
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
    int i;

    if (ha->ifp != NULL)
        if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

    QL_DPRINT2(ha, "enter\n");

    if (qlnx_vf_device(ha) == 0)
        return;

    ha->error_recovery = 1;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
        qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
    int err, ret = 0;
    qlnx_host_t *ha;

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    if (ret == 1) {
        ha = (qlnx_host_t *)arg1;
        qlnx_trigger_dump(ha);
    }
    return (err);
}

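/*
 * The two sysctl handlers below accept an interrupt coalescing interval
 * of 1-255 microseconds and apply it to every fastpath queue; queue i is
 * served by hw-function (i % num_hwfns), mirroring the queue setup.
 */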
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    if (qlnx_vf_device(ha) == 0)
        return (-1);

    for (i = 0; i < ha->num_rss; i++) {
        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->txq[0]->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, 0,
                (uint16_t)usecs, fp->txq[0]->handle);
        }
    }

    if (!ret)
        ha->tx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}

static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    if (qlnx_vf_device(ha) == 0)
        return (-1);

    for (i = 0; i < ha->num_rss; i++) {
        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->rxq->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
                0, fp->rxq->handle);
        }
    }

    if (!ret)
        ha->rx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}

static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "sp_interrupts",
        CTLFLAG_RD, &ha->sp_interrupts,
        "No. of slowpath interrupts");

    return;
}

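/*
 * Per-queue statistics are published under one numbered child node per
 * RSS queue beneath "fpstat" (e.g. dev.ql.<unit>.fpstat.<queue>, assuming
 * the "ql" driver name registered with DRIVER_MODULE() above).
 */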
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid_list *node_children;
    struct sysctl_oid *ctx_oid;
    int i, j;
    char name_str[16];

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    for (i = 0; i < ha->num_rss; i++) {
        bzero(name_str, sizeof(name_str));
        snprintf(name_str, sizeof(name_str), "%d", i);

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
        node_children = SYSCTL_CHILDREN(ctx_oid);

        /* Tx Related */

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_processed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
            "No. of packets processed for transmission");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_freed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
            "No. of freed packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_transmitted",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
            "No. of transmitted packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_completed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
            "No. of transmit completions");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_non_tso_pkts",
            CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
            "No. of non-LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_trans_ctx",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
            "No. of transmitted packets in transmit context");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_compl_ctx",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
            "No. of transmit completions in transmit context");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_trans_fp",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
            "No. of transmitted packets in taskqueue");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_compl_fp",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
            "No. of transmit completions in taskqueue");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_compl_intr",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
            "No. of transmit completions in interrupt ctx");
#endif

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_tso_pkts",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
            "No. of LSO transmitted packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_lso_wnd_min_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
            "tx_lso_wnd_min_len");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
            "tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
            "tx_nsegs_gt_elem_left");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
            ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
            ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
            ha->fp_array[i].tx_tso_max_pkt_len,
            "tx_tso_max_pkt_len");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
            ha->fp_array[i].tx_tso_min_pkt_len,
            "tx_tso_min_pkt_len");

        for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
            bzero(name_str, sizeof(name_str));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_nseg_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts[j], name_str);
        }

#ifdef QLNX_TRACE_PERF_DATA
        for (j = 0; j < 18; j++) {
            bzero(name_str, sizeof(name_str));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_hist_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts_hist[j], name_str);
        }
        for (j = 0; j < 5; j++) {
            bzero(name_str, sizeof(name_str));
            snprintf(name_str, sizeof(name_str),
                "tx_comInt_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_comInt[j], name_str);
        }
        for (j = 0; j < 18; j++) {
            bzero(name_str, sizeof(name_str));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_q_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts_q[j], name_str);
        }
#endif

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
            "err_tx_nsegs_gt_elem_left");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_create",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
            "err_tx_dmamap_create");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
            "err_tx_defrag_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_non_tso_max_seg",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
            "err_tx_non_tso_max_seg");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
            "err_tx_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
            "err_tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_free_pkt_null",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
            "err_tx_free_pkt_null");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_cons_idx_conflict",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
            "err_tx_cons_idx_conflict");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_64",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
            "lro_cnt_64");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_128",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
            "lro_cnt_128");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_256",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
            "lro_cnt_256");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_512",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
            "lro_cnt_512");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_1024",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
            "lro_cnt_1024");

        /* Rx Related */

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "rx_pkts",
            CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
            "No. of received packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_start",
            CTLFLAG_RD, &ha->fp_array[i].tpa_start,
            "No. of tpa_start packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_cont",
            CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
            "No. of tpa_cont packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_end",
            CTLFLAG_RD, &ha->fp_array[i].tpa_end,
            "No. of tpa_end packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
            "err_m_getcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getjcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
            "err_m_getjcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_hw_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
            "err_rx_hw_errors");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_alloc_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
            "err_rx_alloc_errors");
    }

    return;
}

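/*
 * Hardware statistics come in two flavors: counters common to all
 * adapters (hw_stats.common) and counters specific to one chip family
 * (hw_stats.bb); both are exposed read-only under the "hwstat" node.
 */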
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "no_buff_discards",
        CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
        "No. of packets discarded due to lack of buffer");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "packet_too_big_discard",
        CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
        "No. of packets discarded because packet was too big");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "ttl0_discard",
        CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
        "ttl0_discard");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
        "rx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
        "rx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
        "rx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
        "rx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
        "rx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
        "rx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mftag_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
        "mftag_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mac_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
        "mac_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
        "tx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
        "tx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
        "tx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
        "tx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
        "tx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
        "tx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_err_drop_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
        "tx_err_drop_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
        "tpa_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_events",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
        "tpa_coalesced_events");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_aborts_num",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
        "tpa_aborts_num");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_not_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
        "tpa_not_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
        "tpa_coalesced_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
        "rx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_65_to_127_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
        "rx_65_to_127_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_128_to_255_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
        "rx_128_to_255_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_256_to_511_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
        "rx_256_to_511_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_512_to_1023_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
        "rx_512_to_1023_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1024_to_1518_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
        "rx_1024_to_1518_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1519_to_1522_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
        "rx_1519_to_1522_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1523_to_2047_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
        "rx_1523_to_2047_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_2048_to_4095_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
        "rx_2048_to_4095_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_4096_to_9216_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
        "rx_4096_to_9216_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_9217_to_16383_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
        "rx_9217_to_16383_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_crc_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
        "rx_crc_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_crtl_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
        "rx_mac_crtl_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_pause_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
        "rx_pause_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_pfc_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
        "rx_pfc_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_align_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
        "rx_align_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_carrier_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
        "rx_carrier_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_oversize_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
        "rx_oversize_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_jabbers",
        CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
        "rx_jabbers");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_undersize_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
        "rx_undersize_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_fragments",
        CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
        "rx_fragments");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
        "tx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_65_to_127_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
        "tx_65_to_127_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_128_to_255_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
        "tx_128_to_255_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_256_to_511_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
        "tx_256_to_511_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_512_to_1023_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
        "tx_512_to_1023_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_1024_to_1518_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
        "tx_1024_to_1518_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_1519_to_2047_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
        "tx_1519_to_2047_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_2048_to_4095_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
        "tx_2048_to_4095_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_4096_to_9216_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
        "tx_4096_to_9216_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_9217_to_16383_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
        "tx_9217_to_16383_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_pause_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
        "tx_pause_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_pfc_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
        "tx_pfc_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_lpi_entry_count",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
        "tx_lpi_entry_count");
2067
2068 SYSCTL_ADD_QUAD(ctx, children,
2069 OID_AUTO, "tx_total_collisions",
2070 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2071 "tx_total_collisions");
2072
2073 SYSCTL_ADD_QUAD(ctx, children,
2074 OID_AUTO, "brb_truncates",
2075 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2076 "brb_truncates");
2077
2078 SYSCTL_ADD_QUAD(ctx, children,
2079 OID_AUTO, "brb_discards",
2080 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2081 "brb_discards");
2082
2083 SYSCTL_ADD_QUAD(ctx, children,
2084 OID_AUTO, "rx_mac_bytes",
2085 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2086 "rx_mac_bytes");
2087
2088 SYSCTL_ADD_QUAD(ctx, children,
2089 OID_AUTO, "rx_mac_uc_packets",
2090 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2091 "rx_mac_uc_packets");
2092
2093 SYSCTL_ADD_QUAD(ctx, children,
2094 OID_AUTO, "rx_mac_mc_packets",
2095 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2096 "rx_mac_mc_packets");
2097
2098 SYSCTL_ADD_QUAD(ctx, children,
2099 OID_AUTO, "rx_mac_bc_packets",
2100 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2101 "rx_mac_bc_packets");
2102
2103 SYSCTL_ADD_QUAD(ctx, children,
2104 OID_AUTO, "rx_mac_frames_ok",
2105 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2106 "rx_mac_frames_ok");
2107
2108 SYSCTL_ADD_QUAD(ctx, children,
2109 OID_AUTO, "tx_mac_bytes",
2110 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2111 "tx_mac_bytes");
2112
2113 SYSCTL_ADD_QUAD(ctx, children,
2114 OID_AUTO, "tx_mac_uc_packets",
2115 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2116 "tx_mac_uc_packets");
2117
2118 SYSCTL_ADD_QUAD(ctx, children,
2119 OID_AUTO, "tx_mac_mc_packets",
2120 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2121 "tx_mac_mc_packets");
2122
2123 SYSCTL_ADD_QUAD(ctx, children,
2124 OID_AUTO, "tx_mac_bc_packets",
2125 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2126 "tx_mac_bc_packets");
2127
2128 SYSCTL_ADD_QUAD(ctx, children,
2129 OID_AUTO, "tx_mac_ctrl_frames",
2130 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2131 "tx_mac_ctrl_frames");
2132 return;
2133 }
2134
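/*
 * qlnx_add_sysctls() publishes the driver's tunables and counters under
 * the device's sysctl tree.  A minimal usage sketch (assuming the device
 * attaches as qlnxe0; the node names come from the OIDs added below):
 *
 *	sysctl dev.qlnxe.0.debug=1		# raise driver debug level
 *	sysctl dev.qlnxe.0.rx_coalesce_usecs	# read rx coalescing period
 *	sysctl dev.qlnxe.0.set_rx_coalesce_usecs=64
 */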
2135 static void
2136 qlnx_add_sysctls(qlnx_host_t *ha)
2137 {
2138 device_t dev = ha->pci_dev;
2139 struct sysctl_ctx_list *ctx;
2140 struct sysctl_oid_list *children;
2141
2142 ctx = device_get_sysctl_ctx(dev);
2143 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2144
2145 qlnx_add_fp_stats_sysctls(ha);
2146 qlnx_add_sp_stats_sysctls(ha);
2147
2148 if (qlnx_vf_device(ha) != 0)
2149 qlnx_add_hw_stats_sysctls(ha);
2150
2151 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2152 CTLFLAG_RD, qlnx_ver_str, 0,
2153 "Driver Version");
2154
2155 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2156 CTLFLAG_RD, ha->stormfw_ver, 0,
2157 "STORM Firmware Version");
2158
2159 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2160 CTLFLAG_RD, ha->mfw_ver, 0,
2161 "Management Firmware Version");
2162
2163 SYSCTL_ADD_UINT(ctx, children,
2164 OID_AUTO, "personality", CTLFLAG_RD,
2165 &ha->personality, ha->personality,
2166 "\tpersonality = 0 => Ethernet Only\n"
2167 "\tpersonality = 3 => Ethernet and RoCE\n"
2168 "\tpersonality = 4 => Ethernet and iWARP\n"
2169 "\tpersonality = 6 => Default in Shared Memory\n");
2170
2171 ha->dbg_level = 0;
2172 SYSCTL_ADD_UINT(ctx, children,
2173 OID_AUTO, "debug", CTLFLAG_RW,
2174 &ha->dbg_level, ha->dbg_level, "Debug Level");
2175
2176 ha->dp_level = 0x01;
2177 SYSCTL_ADD_UINT(ctx, children,
2178 OID_AUTO, "dp_level", CTLFLAG_RW,
2179 &ha->dp_level, ha->dp_level, "DP Level");
2180
2181 ha->dbg_trace_lro_cnt = 0;
2182 SYSCTL_ADD_UINT(ctx, children,
2183 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2184 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2185 "Trace LRO Counts");
2186
2187 ha->dbg_trace_tso_pkt_len = 0;
2188 SYSCTL_ADD_UINT(ctx, children,
2189 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2190 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2191 "Trace TSO packet lengths");
2192
2193 ha->dp_module = 0;
2194 SYSCTL_ADD_UINT(ctx, children,
2195 OID_AUTO, "dp_module", CTLFLAG_RW,
2196 &ha->dp_module, ha->dp_module, "DP Module");
2197
2198 ha->err_inject = 0;
2199
2200 SYSCTL_ADD_UINT(ctx, children,
2201 OID_AUTO, "err_inject", CTLFLAG_RW,
2202 &ha->err_inject, ha->err_inject, "Error Inject");
2203
2204 ha->storm_stats_enable = 0;
2205
2206 SYSCTL_ADD_UINT(ctx, children,
2207 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2208 &ha->storm_stats_enable, ha->storm_stats_enable,
2209 "Enable Storm Statistics Gathering");
2210
2211 ha->storm_stats_index = 0;
2212
2213 SYSCTL_ADD_UINT(ctx, children,
2214 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2215 &ha->storm_stats_index, ha->storm_stats_index,
2216 "Enable Storm Statistics Gathering Current Index");
2217
2218 ha->grcdump_taken = 0;
2219 SYSCTL_ADD_UINT(ctx, children,
2220 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2221 &ha->grcdump_taken, ha->grcdump_taken,
2222 "grcdump_taken");
2223
2224 ha->idle_chk_taken = 0;
2225 SYSCTL_ADD_UINT(ctx, children,
2226 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2227 &ha->idle_chk_taken, ha->idle_chk_taken,
2228 "idle_chk_taken");
2229
2230 SYSCTL_ADD_UINT(ctx, children,
2231 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2232 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2233 "rx_coalesce_usecs");
2234
2235 SYSCTL_ADD_UINT(ctx, children,
2236 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2237 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2238 "tx_coalesce_usecs");
2239
2240 SYSCTL_ADD_PROC(ctx, children,
2241 OID_AUTO, "trigger_dump",
2242 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2243 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2244
2245 SYSCTL_ADD_PROC(ctx, children,
2246 OID_AUTO, "set_rx_coalesce_usecs",
2247 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2248 (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2249 "rx interrupt coalesce period microseconds");
2250
2251 SYSCTL_ADD_PROC(ctx, children,
2252 OID_AUTO, "set_tx_coalesce_usecs",
2253 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2254 (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2255 "tx interrupt coalesce period microseconds");
2256
2257 ha->rx_pkt_threshold = 128;
2258 SYSCTL_ADD_UINT(ctx, children,
2259 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2260 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2261 "No. of Rx Pkts to process at a time");
2262
2263 ha->rx_jumbo_buf_eq_mtu = 0;
2264 SYSCTL_ADD_UINT(ctx, children,
2265 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2266 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2267 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2268 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
2269
2270 SYSCTL_ADD_QUAD(ctx, children,
2271 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2272 &ha->err_illegal_intr, "err_illegal_intr");
2273
2274 SYSCTL_ADD_QUAD(ctx, children,
2275 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2276 &ha->err_fp_null, "err_fp_null");
2277
2278 SYSCTL_ADD_QUAD(ctx, children,
2279 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2280 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2281 return;
2282 }
2283
2284 /*****************************************************************************
2285 * Operating System Network Interface Functions
2286 *****************************************************************************/
2287
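/*
 * qlnx_init_ifnet() allocates and attaches the ifnet: it sets the
 * baudrate from the PCI device id, installs the init/ioctl/transmit
 * callbacks, programs capabilities and TSO limits, and registers the
 * supported media types.
 */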
2288 static void
2289 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2290 {
2291 uint16_t device_id;
2292 if_t ifp;
2293
2294 ifp = ha->ifp = if_alloc(IFT_ETHER);
2295 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2296
2297 device_id = pci_get_device(ha->pci_dev);
2298
2299 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2300 if_setbaudrate(ifp, IF_Gbps(40));
2301 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2302 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2303 if_setbaudrate(ifp, IF_Gbps(25));
2304 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2305 if_setbaudrate(ifp, IF_Gbps(50));
2306 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2307 if_setbaudrate(ifp, IF_Gbps(100));
2308
2309 if_setcapabilities(ifp, IFCAP_LINKSTATE);
2310
2311 if_setinitfn(ifp, qlnx_init);
2312 if_setsoftc(ifp, ha);
2313 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2314 if_setioctlfn(ifp, qlnx_ioctl);
2315 if_settransmitfn(ifp, qlnx_transmit);
2316 if_setqflushfn(ifp, qlnx_qflush);
2317
2318 if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2319 if_setsendqready(ifp);
2320
2321 if_setgetcounterfn(ifp, qlnx_get_counter);
2322
2323 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2324
2325 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2326
2327 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2328 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2329 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2330 uint32_t rnd;
2331
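/*
 * No MAC address available from the hardware: synthesize one.
 * 00:0e:1e is a QLogic OUI; the device-specific low three octets
 * are filled from arc4random().
 */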
2332 rnd = arc4random();
2333
2334 ha->primary_mac[0] = 0x00;
2335 ha->primary_mac[1] = 0x0e;
2336 ha->primary_mac[2] = 0x1e;
2337 ha->primary_mac[3] = rnd & 0xFF;
2338 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2339 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2340 }
2341
2342 ether_ifattach(ifp, ha->primary_mac);
2343 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2344
2345 if_setcapabilities(ifp, IFCAP_HWCSUM);
2346 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
2347
2348 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
2349 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
2350 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2351 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
2352 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
2353 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
2354 if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
2355 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
2356
2357 if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE -
2358 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2359 if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2360 if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);
2361
2362 if_setcapenable(ifp, if_getcapabilities(ifp));
2363
2364 if_sethwassist(ifp, CSUM_IP);
2365 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
2366 if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
2367 if_sethwassistbits(ifp, CSUM_TSO, 0);
2368
2369 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2370
2371 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2372 qlnx_media_status);
2373
2374 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2375 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2376 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2377 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2378 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2379 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2380 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2381 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2382 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2383 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2384 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2385 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2386 ifmedia_add(&ha->media,
2387 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2388 ifmedia_add(&ha->media,
2389 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2390 ifmedia_add(&ha->media,
2391 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2392 }
2393
2394 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2395 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2396
2397 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2398
2399 QL_DPRINT2(ha, "exit\n");
2400
2401 return;
2402 }
2403
2404 static void
2405 qlnx_init_locked(qlnx_host_t *ha)
2406 {
2407 if_t ifp = ha->ifp;
2408
2409 QL_DPRINT1(ha, "Driver Initialization start \n");
2410
2411 qlnx_stop(ha);
2412
2413 if (qlnx_load(ha) == 0) {
2414 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2415 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2416
2417 #ifdef QLNX_ENABLE_IWARP
2418 if (qlnx_vf_device(ha) != 0) {
2419 qlnx_rdma_dev_open(ha);
2420 }
2421 #endif /* #ifdef QLNX_ENABLE_IWARP */
2422 }
2423
2424 return;
2425 }
2426
2427 static void
2428 qlnx_init(void *arg)
2429 {
2430 qlnx_host_t *ha;
2431
2432 ha = (qlnx_host_t *)arg;
2433
2434 QL_DPRINT2(ha, "enter\n");
2435
2436 QLNX_LOCK(ha);
2437 qlnx_init_locked(ha);
2438 QLNX_UNLOCK(ha);
2439
2440 QL_DPRINT2(ha, "exit\n");
2441
2442 return;
2443 }
2444
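/*
 * Add or remove a single multicast MAC through an ecore filter
 * command.  ECORE_SPQ_MODE_CB selects callback-style completion;
 * no completion cookie is passed here.
 */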
2445 static int
2446 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2447 {
2448 struct ecore_filter_mcast *mcast;
2449 struct ecore_dev *cdev;
2450 int rc;
2451
2452 cdev = &ha->cdev;
2453
2454 mcast = &ha->ecore_mcast;
2455 bzero(mcast, sizeof(struct ecore_filter_mcast));
2456
2457 if (add_mac)
2458 mcast->opcode = ECORE_FILTER_ADD;
2459 else
2460 mcast->opcode = ECORE_FILTER_REMOVE;
2461
2462 mcast->num_mc_addrs = 1;
2463 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2464
2465 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2466
2467 return (rc);
2468 }
2469
2470 static int
2471 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2472 {
2473 int i;
2474
2475 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2476 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2477 return 0; /* it's already been added */
2478 }
2479
2480 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2481 if ((ha->mcast[i].addr[0] == 0) &&
2482 (ha->mcast[i].addr[1] == 0) &&
2483 (ha->mcast[i].addr[2] == 0) &&
2484 (ha->mcast[i].addr[3] == 0) &&
2485 (ha->mcast[i].addr[4] == 0) &&
2486 (ha->mcast[i].addr[5] == 0)) {
2487 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2488 return (-1);
2489
2490 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2491 ha->nmcast++;
2492
2493 return 0;
2494 }
2495 }
2496 return 0;
2497 }
2498
2499 static int
2500 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2501 {
2502 int i;
2503
2504 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2505 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2506 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2507 return (-1);
2508
2509 ha->mcast[i].addr[0] = 0;
2510 ha->mcast[i].addr[1] = 0;
2511 ha->mcast[i].addr[2] = 0;
2512 ha->mcast[i].addr[3] = 0;
2513 ha->mcast[i].addr[4] = 0;
2514 ha->mcast[i].addr[5] = 0;
2515
2516 ha->nmcast--;
2517
2518 return 0;
2519 }
2520 }
2521 return 0;
2522 }
2523
2524 /*
2525 * Name: qlnx_hw_set_multi
2526 * Function: Sets the Multicast Addresses provided by the host O.S.
2527 * into the hardware (for the given interface)
2528 */
2529 static void
2530 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2531 uint32_t add_mac)
2532 {
2533 int i;
2534
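/*
 * Note: entries in mta[] are packed ETHER_HDR_LEN (14) bytes apart,
 * not ETHER_ADDR_LEN (6); this must match the stride used by
 * qlnx_copy_maddr() when it fills the array.
 */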
2535 for (i = 0; i < mcnt; i++) {
2536 if (add_mac) {
2537 if (qlnx_hw_add_mcast(ha, mta))
2538 break;
2539 } else {
2540 if (qlnx_hw_del_mcast(ha, mta))
2541 break;
2542 }
2543
2544 mta += ETHER_HDR_LEN;
2545 }
2546 return;
2547 }
2548
2549 static u_int
2550 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2551 {
2552 uint8_t *mta = arg;
2553
2554 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2555 return (0);
2556
2557 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2558
2559 return (1);
2560 }
2561
2562 static int
2563 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2564 {
2565 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2566 if_t ifp = ha->ifp;
2567 u_int mcnt;
2568
2569 if (qlnx_vf_device(ha) == 0)
2570 return (0);
2571
2572 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2573
2574 QLNX_LOCK(ha);
2575 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2576 QLNX_UNLOCK(ha);
2577
2578 return (0);
2579 }
2580
2581 static int
2582 qlnx_set_promisc(qlnx_host_t *ha, int enabled)
2583 {
2584 int rc = 0;
2585 uint8_t filter;
2586
2587 if (qlnx_vf_device(ha) == 0)
2588 return (0);
2589
2590 filter = ha->filter;
2591 if (enabled) {
2592 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2593 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2594 } else {
2595 filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2596 filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
2597 }
2598
2599 rc = qlnx_set_rx_accept_filter(ha, filter);
2600 return (rc);
2601 }
2602
2603 static int
2604 qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
2605 {
2606 int rc = 0;
2607 uint8_t filter;
2608
2609 if (qlnx_vf_device(ha) == 0)
2610 return (0);
2611
2612 filter = ha->filter;
2613 if (enabled) {
2614 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2615 } else {
2616 filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2617 }
2618 rc = qlnx_set_rx_accept_filter(ha, filter);
2619
2620 return (rc);
2621 }
2622
2623 static int
2624 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
2625 {
2626 int ret = 0, mask;
2627 struct ifreq *ifr = (struct ifreq *)data;
2628 #ifdef INET
2629 struct ifaddr *ifa = (struct ifaddr *)data;
2630 #endif
2631 qlnx_host_t *ha;
2632
2633 ha = (qlnx_host_t *)if_getsoftc(ifp);
2634
2635 switch (cmd) {
2636 case SIOCSIFADDR:
2637 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2638
2639 #ifdef INET
2640 if (ifa->ifa_addr->sa_family == AF_INET) {
2641 if_setflagbits(ifp, IFF_UP, 0);
2642 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2643 QLNX_LOCK(ha);
2644 qlnx_init_locked(ha);
2645 QLNX_UNLOCK(ha);
2646 }
2647 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2648 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2649
2650 arp_ifinit(ifp, ifa);
2651 break;
2652 }
2653 #endif
2654 ether_ioctl(ifp, cmd, data);
2655 break;
2656
2657 case SIOCSIFMTU:
2658 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2659
2660 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2661 ret = EINVAL;
2662 } else {
2663 QLNX_LOCK(ha);
2664 if_setmtu(ifp, ifr->ifr_mtu);
2665 ha->max_frame_size =
2666 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2667 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2668 qlnx_init_locked(ha);
2669 }
2670
2671 QLNX_UNLOCK(ha);
2672 }
2673
2674 break;
2675
2676 case SIOCSIFFLAGS:
2677 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2678
2679 QLNX_LOCK(ha);
2680
2681 if (if_getflags(ifp) & IFF_UP) {
2682 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2683 if ((if_getflags(ifp) ^ ha->if_flags) &
2684 IFF_PROMISC) {
2685 ret = qlnx_set_promisc(ha, if_getflags(ifp) & IFF_PROMISC);
2686 } else if ((if_getflags(ifp) ^ ha->if_flags) &
2687 IFF_ALLMULTI) {
2688 ret = qlnx_set_allmulti(ha, if_getflags(ifp) & IFF_ALLMULTI);
2689 }
2690 } else {
2691 ha->max_frame_size = if_getmtu(ifp) +
2692 ETHER_HDR_LEN + ETHER_CRC_LEN;
2693 qlnx_init_locked(ha);
2694 }
2695 } else {
2696 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2697 qlnx_stop(ha);
2698 }
2699
2700 ha->if_flags = if_getflags(ifp);
2701 QLNX_UNLOCK(ha);
2702 break;
2703
2704 case SIOCADDMULTI:
2705 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2706
2707 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2708 if (qlnx_set_multi(ha, 1))
2709 ret = EINVAL;
2710 }
2711 break;
2712
2713 case SIOCDELMULTI:
2714 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2715
2716 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2717 if (qlnx_set_multi(ha, 0))
2718 ret = EINVAL;
2719 }
2720 break;
2721
2722 case SIOCSIFMEDIA:
2723 case SIOCGIFMEDIA:
2724 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2725
2726 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2727 break;
2728
2729 case SIOCSIFCAP:
2730
2731 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2732
2733 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2734
2735 if (mask & IFCAP_HWCSUM)
2736 if_togglecapenable(ifp, IFCAP_HWCSUM);
2737 if (mask & IFCAP_TSO4)
2738 if_togglecapenable(ifp, IFCAP_TSO4);
2739 if (mask & IFCAP_TSO6)
2740 if_togglecapenable(ifp, IFCAP_TSO6);
2741 if (mask & IFCAP_VLAN_HWTAGGING)
2742 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2743 if (mask & IFCAP_VLAN_HWTSO)
2744 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2745 if (mask & IFCAP_LRO)
2746 if_togglecapenable(ifp, IFCAP_LRO);
2747
2748 QLNX_LOCK(ha);
2749
2750 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2751 qlnx_init_locked(ha);
2752
2753 QLNX_UNLOCK(ha);
2754
2755 VLAN_CAPABILITIES(ifp);
2756 break;
2757
2758 case SIOCGI2C:
2759 {
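/*
 * SFP/QSFP module access: device address 0xA0 is the module ID
 * EEPROM and 0xA2 the diagnostics page (per SFF-8472); any other
 * address is rejected below.
 */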
2760 struct ifi2creq i2c;
2761 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2762 struct ecore_ptt *p_ptt;
2763
2764 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2765
2766 if (ret)
2767 break;
2768
2769 if ((i2c.len > sizeof (i2c.data)) ||
2770 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2771 ret = EINVAL;
2772 break;
2773 }
2774
2775 p_ptt = ecore_ptt_acquire(p_hwfn);
2776
2777 if (!p_ptt) {
2778 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2779 ret = -1;
2780 break;
2781 }
2782
2783 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2784 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2785 i2c.len, &i2c.data[0]);
2786
2787 ecore_ptt_release(p_hwfn, p_ptt);
2788
2789 if (ret) {
2790 ret = -1;
2791 break;
2792 }
2793
2794 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2795
2796 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2797 len = %d addr = 0x%02x offset = 0x%04x \
2798 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2799 0x%02x 0x%02x 0x%02x\n",
2800 ret, i2c.len, i2c.dev_addr, i2c.offset,
2801 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2802 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2803 break;
2804 }
2805
2806 default:
2807 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2808 ret = ether_ioctl(ifp, cmd, data);
2809 break;
2810 }
2811
2812 return (ret);
2813 }
2814
2815 static int
2816 qlnx_media_change(if_t ifp)
2817 {
2818 qlnx_host_t *ha;
2819 struct ifmedia *ifm;
2820 int ret = 0;
2821
2822 ha = (qlnx_host_t *)if_getsoftc(ifp);
2823
2824 QL_DPRINT2(ha, "enter\n");
2825
2826 ifm = &ha->media;
2827
2828 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2829 ret = EINVAL;
2830
2831 QL_DPRINT2(ha, "exit\n");
2832
2833 return (ret);
2834 }
2835
2836 static void
2837 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2838 {
2839 qlnx_host_t *ha;
2840
2841 ha = (qlnx_host_t *)if_getsoftc(ifp);
2842
2843 QL_DPRINT2(ha, "enter\n");
2844
2845 ifmr->ifm_status = IFM_AVALID;
2846 ifmr->ifm_active = IFM_ETHER;
2847
2848 if (ha->link_up) {
2849 ifmr->ifm_status |= IFM_ACTIVE;
2850 ifmr->ifm_active |=
2851 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2852
2853 if (ha->if_link.link_partner_caps &
2854 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2855 ifmr->ifm_active |=
2856 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2857 }
2858
2859 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2860
2861 return;
2862 }
2863
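/*
 * Release one completed TX packet at sw_tx_cons: sync and unload its
 * DMA map, free the mbuf, then consume all of its BDs from the chain
 * (nbds is read back from the packet's first BD).
 */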
2864 static void
2865 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2866 struct qlnx_tx_queue *txq)
2867 {
2868 u16 idx;
2869 struct mbuf *mp;
2870 bus_dmamap_t map;
2871 int i;
2872 // struct eth_tx_bd *tx_data_bd;
2873 struct eth_tx_1st_bd *first_bd;
2874 int nbds = 0;
2875
2876 idx = txq->sw_tx_cons;
2877 mp = txq->sw_tx_ring[idx].mp;
2878 map = txq->sw_tx_ring[idx].map;
2879
2880 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2881 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2882
2883 QL_DPRINT1(ha, "(mp == NULL) "
2884 " tx_idx = 0x%x"
2885 " ecore_prod_idx = 0x%x"
2886 " ecore_cons_idx = 0x%x"
2887 " hw_bd_cons = 0x%x"
2888 " txq_db_last = 0x%x"
2889 " elem_left = 0x%x\n",
2890 fp->rss_id,
2891 ecore_chain_get_prod_idx(&txq->tx_pbl),
2892 ecore_chain_get_cons_idx(&txq->tx_pbl),
2893 le16toh(*txq->hw_cons_ptr),
2894 txq->tx_db.raw,
2895 ecore_chain_get_elem_left(&txq->tx_pbl));
2896
2897 fp->err_tx_free_pkt_null++;
2898
2899 //DEBUG
2900 qlnx_trigger_dump(ha);
2901
2902 return;
2903 } else {
2904 QLNX_INC_OPACKETS((ha->ifp));
2905 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2906
2907 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2908 bus_dmamap_unload(ha->tx_tag, map);
2909
2910 fp->tx_pkts_freed++;
2911 fp->tx_pkts_completed++;
2912
2913 m_freem(mp);
2914 }
2915
2916 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2917 nbds = first_bd->data.nbds;
2918
2919 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2920
2921 for (i = 1; i < nbds; i++) {
2922 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2923 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2924 }
2925 txq->sw_tx_ring[idx].flags = 0;
2926 txq->sw_tx_ring[idx].mp = NULL;
2927 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2928
2929 return;
2930 }
2931
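/*
 * Reclaim completed TX descriptors: advance the chain's consumer
 * index up to the completion index the firmware last wrote back via
 * *txq->hw_cons_ptr, freeing one packet per iteration.
 */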
2932 static void
2933 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2934 struct qlnx_tx_queue *txq)
2935 {
2936 u16 hw_bd_cons;
2937 u16 ecore_cons_idx;
2938 uint16_t diff;
2939 uint16_t idx, idx2;
2940
2941 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2942
2943 while (hw_bd_cons !=
2944 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2945 diff = hw_bd_cons - ecore_cons_idx;
2946 if ((diff > TX_RING_SIZE) ||
2947 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2948 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2949
2950 QL_DPRINT1(ha, "(diff = 0x%x) "
2951 " tx_idx = 0x%x"
2952 " ecore_prod_idx = 0x%x"
2953 " ecore_cons_idx = 0x%x"
2954 " hw_bd_cons = 0x%x"
2955 " txq_db_last = 0x%x"
2956 " elem_left = 0x%x\n",
2957 diff,
2958 fp->rss_id,
2959 ecore_chain_get_prod_idx(&txq->tx_pbl),
2960 ecore_chain_get_cons_idx(&txq->tx_pbl),
2961 le16toh(*txq->hw_cons_ptr),
2962 txq->tx_db.raw,
2963 ecore_chain_get_elem_left(&txq->tx_pbl));
2964
2965 fp->err_tx_cons_idx_conflict++;
2966
2967 //DEBUG
2968 qlnx_trigger_dump(ha);
2969 }
2970
2971 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2972 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2973 prefetch(txq->sw_tx_ring[idx].mp);
2974 prefetch(txq->sw_tx_ring[idx2].mp);
2975
2976 qlnx_free_tx_pkt(ha, fp, txq);
2977
2978 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2979 }
2980 return;
2981 }
2982
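/*
 * Called with fp->tx_mtx held.  New work is enqueued on the buf ring
 * first, then packets are peeked/advanced so that a failed
 * qlnx_send() can put the mbuf back without reordering the queue.
 */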
2983 static int
2984 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
2985 {
2986 int ret = 0;
2987 struct qlnx_tx_queue *txq;
2988 qlnx_host_t * ha;
2989 uint16_t elem_left;
2990
2991 txq = fp->txq[0];
2992 ha = (qlnx_host_t *)fp->edev;
2993
2994 if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2995 if(mp != NULL)
2996 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2997 return (ret);
2998 }
2999
3000 if(mp != NULL)
3001 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3002
3003 mp = drbr_peek(ifp, fp->tx_br);
3004
3005 while (mp != NULL) {
3006 if (qlnx_send(ha, fp, &mp)) {
3007 if (mp != NULL) {
3008 drbr_putback(ifp, fp->tx_br, mp);
3009 } else {
3010 fp->tx_pkts_processed++;
3011 drbr_advance(ifp, fp->tx_br);
3012 }
3013 goto qlnx_transmit_locked_exit;
3014
3015 } else {
3016 drbr_advance(ifp, fp->tx_br);
3017 fp->tx_pkts_transmitted++;
3018 fp->tx_pkts_processed++;
3019 }
3020
3021 mp = drbr_peek(ifp, fp->tx_br);
3022 }
3023
3024 qlnx_transmit_locked_exit:
3025 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3026 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3027 < QLNX_TX_ELEM_MAX_THRESH))
3028 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3029
3030 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3031 return ret;
3032 }
3033
3034 static int
3035 qlnx_transmit(if_t ifp, struct mbuf *mp)
3036 {
3037 qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp);
3038 struct qlnx_fastpath *fp;
3039 int rss_id = 0, ret = 0;
3040
3041 #ifdef QLNX_TRACEPERF_DATA
3042 uint64_t tx_pkts = 0, tx_compl = 0;
3043 #endif
3044
3045 QL_DPRINT2(ha, "enter\n");
3046
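/*
 * Pick a fastpath from the mbuf's RSS hash: fold the flowid through
 * the hardware indirection table size, then through the number of
 * RSS queues actually configured.
 */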
3047 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3048 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3049 ha->num_rss;
3050
3051 fp = &ha->fp_array[rss_id];
3052
3053 if (fp->tx_br == NULL) {
3054 ret = EINVAL;
3055 goto qlnx_transmit_exit;
3056 }
3057
3058 if (mtx_trylock(&fp->tx_mtx)) {
3059 #ifdef QLNX_TRACEPERF_DATA
3060 tx_pkts = fp->tx_pkts_transmitted;
3061 tx_compl = fp->tx_pkts_completed;
3062 #endif
3063
3064 ret = qlnx_transmit_locked(ifp, fp, mp);
3065
3066 #ifdef QLNX_TRACEPERF_DATA
3067 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3068 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3069 #endif
3070 mtx_unlock(&fp->tx_mtx);
3071 } else {
3072 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3073 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3074 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3075 }
3076 }
3077
3078 qlnx_transmit_exit:
3079
3080 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3081 return ret;
3082 }
3083
3084 static void
3085 qlnx_qflush(if_t ifp)
3086 {
3087 int rss_id;
3088 struct qlnx_fastpath *fp;
3089 struct mbuf *mp;
3090 qlnx_host_t *ha;
3091
3092 ha = (qlnx_host_t *)if_getsoftc(ifp);
3093
3094 QL_DPRINT2(ha, "enter\n");
3095
3096 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3097 fp = &ha->fp_array[rss_id];
3098
3099 if (fp == NULL)
3100 continue;
3101
3102 if (fp->tx_br) {
3103 mtx_lock(&fp->tx_mtx);
3104
3105 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3106 fp->tx_pkts_freed++;
3107 m_freem(mp);
3108 }
3109 mtx_unlock(&fp->tx_mtx);
3110 }
3111 }
3112 QL_DPRINT2(ha, "exit\n");
3113
3114 return;
3115 }
3116
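/*
 * Write the TX producer value to the queue's doorbell register.  The
 * read barriers that follow appear intended to flush the posted
 * doorbell write to the device before the caller proceeds.
 */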
3117 static void
3118 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3119 {
3120 uint32_t offset;
3121
3122 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3123
3124 bus_write_4(ha->pci_dbells, offset, value);
3125 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3126 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
3127
3128 return;
3129 }
3130
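/*
 * Return the total header length (L2 + L3 + L4) of a TSO frame;
 * qlnx_send() uses this to place the headers in their own BDs.  Note
 * the IPv4 path assumes a fixed-size IP header (no options): ip_hlen
 * is sizeof(struct ip) and the TCP header is taken at (ip + 1).
 */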
3131 static uint32_t
3132 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3133 {
3134 struct ether_vlan_header *eh = NULL;
3135 struct ip *ip = NULL;
3136 struct ip6_hdr *ip6 = NULL;
3137 struct tcphdr *th = NULL;
3138 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3139 uint16_t etype = 0;
3140 uint8_t buf[sizeof(struct ip6_hdr)];
3141
3142 eh = mtod(mp, struct ether_vlan_header *);
3143
3144 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3145 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3146 etype = ntohs(eh->evl_proto);
3147 } else {
3148 ehdrlen = ETHER_HDR_LEN;
3149 etype = ntohs(eh->evl_encap_proto);
3150 }
3151
3152 switch (etype) {
3153 case ETHERTYPE_IP:
3154 ip = (struct ip *)(mp->m_data + ehdrlen);
3155
3156 ip_hlen = sizeof (struct ip);
3157
3158 if (mp->m_len < (ehdrlen + ip_hlen)) {
3159 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3160 ip = (struct ip *)buf;
3161 }
3162
3163 th = (struct tcphdr *)(ip + 1);
3164 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3165 break;
3166
3167 case ETHERTYPE_IPV6:
3168 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3169
3170 ip_hlen = sizeof(struct ip6_hdr);
3171
3172 if (mp->m_len < (ehdrlen + ip_hlen)) {
3173 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3174 buf);
3175 ip6 = (struct ip6_hdr *)buf;
3176 }
3177 th = (struct tcphdr *)(ip6 + 1);
3178 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3179 break;
3180
3181 default:
3182 break;
3183 }
3184
3185 return (offset);
3186 }
3187
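/*
 * Firmware LSO constraint: any ETH_TX_LSO_WINDOW_BDS_NUM consecutive
 * BDs must carry at least ETH_TX_LSO_WINDOW_MIN_LEN bytes of data.
 * Slide that window across the DMA segment list (after skipping the
 * segments holding the headers) and return -1 if any window falls
 * short; the caller then defragments the mbuf chain.
 */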
3188 static __inline int
3189 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3190 uint32_t offset)
3191 {
3192 int i;
3193 uint32_t sum, nbds_in_hdr = 1;
3194 uint32_t window;
3195 bus_dma_segment_t *s_seg;
3196
3197 /* If the header spans multiple segments, skip those segments */
3198
3199 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3200 return (0);
3201
3202 i = 0;
3203
3204 while ((i < nsegs) && (offset >= segs->ds_len)) {
3205 offset = offset - segs->ds_len;
3206 segs++;
3207 i++;
3208 nbds_in_hdr++;
3209 }
3210
3211 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3212
3213 nsegs = nsegs - i;
3214
3215 while (nsegs >= window) {
3216 sum = 0;
3217 s_seg = segs;
3218
3219 for (i = 0; i < window; i++){
3220 sum += s_seg->ds_len;
3221 s_seg++;
3222 }
3223
3224 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3225 fp->tx_lso_wnd_min_len++;
3226 return (-1);
3227 }
3228
3229 nsegs = nsegs - 1;
3230 segs++;
3231 }
3232
3233 return (0);
3234 }
3235
3236 static int
3237 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3238 {
3239 bus_dma_segment_t *segs;
3240 bus_dmamap_t map = 0;
3241 uint32_t nsegs = 0;
3242 int ret = -1;
3243 struct mbuf *m_head = *m_headp;
3244 uint16_t idx = 0;
3245 uint16_t elem_left;
3246
3247 uint8_t nbd = 0;
3248 struct qlnx_tx_queue *txq;
3249
3250 struct eth_tx_1st_bd *first_bd;
3251 struct eth_tx_2nd_bd *second_bd;
3252 struct eth_tx_3rd_bd *third_bd;
3253 struct eth_tx_bd *tx_data_bd;
3254
3255 int seg_idx = 0;
3256 uint32_t nbds_in_hdr = 0;
3257 uint32_t offset = 0;
3258
3259 #ifdef QLNX_TRACE_PERF_DATA
3260 uint16_t bd_used;
3261 #endif
3262
3263 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3264
3265 if (!ha->link_up)
3266 return (-1);
3267
3268 first_bd = NULL;
3269 second_bd = NULL;
3270 third_bd = NULL;
3271 tx_data_bd = NULL;
3272
3273 txq = fp->txq[0];
3274
3275 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3276 QLNX_TX_ELEM_MIN_THRESH) {
3277 fp->tx_nsegs_gt_elem_left++;
3278 fp->err_tx_nsegs_gt_elem_left++;
3279
3280 return (ENOBUFS);
3281 }
3282
3283 idx = txq->sw_tx_prod;
3284
3285 map = txq->sw_tx_ring[idx].map;
3286 segs = txq->segs;
3287
3288 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3289 BUS_DMA_NOWAIT);
3290
3291 if (ha->dbg_trace_tso_pkt_len) {
3292 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3293 if (!fp->tx_tso_min_pkt_len) {
3294 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3295 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3296 } else {
3297 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3298 fp->tx_tso_min_pkt_len =
3299 m_head->m_pkthdr.len;
3300 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3301 fp->tx_tso_max_pkt_len =
3302 m_head->m_pkthdr.len;
3303 }
3304 }
3305 }
3306
3307 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3308 offset = qlnx_tcp_offset(ha, m_head);
3309
3310 if ((ret == EFBIG) ||
3311 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3312 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3313 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3314 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3315 struct mbuf *m;
3316
3317 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3318
3319 fp->tx_defrag++;
3320
3321 m = m_defrag(m_head, M_NOWAIT);
3322 if (m == NULL) {
3323 fp->err_tx_defrag++;
3324 fp->tx_pkts_freed++;
3325 m_freem(m_head);
3326 *m_headp = NULL;
3327 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3328 return (ENOBUFS);
3329 }
3330
3331 m_head = m;
3332 *m_headp = m_head;
3333
3334 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3335 segs, &nsegs, BUS_DMA_NOWAIT))) {
3336 fp->err_tx_defrag_dmamap_load++;
3337
3338 QL_DPRINT1(ha,
3339 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3340 ret, m_head->m_pkthdr.len);
3341
3342 fp->tx_pkts_freed++;
3343 m_freem(m_head);
3344 *m_headp = NULL;
3345
3346 return (ret);
3347 }
3348
3349 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3350 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3351 fp->err_tx_non_tso_max_seg++;
3352
3353 QL_DPRINT1(ha,
3354 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3355 ret, nsegs, m_head->m_pkthdr.len);
3356
3357 fp->tx_pkts_freed++;
3358 m_freem(m_head);
3359 *m_headp = NULL;
3360
3361 return (ret);
3362 }
3363 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3364 offset = qlnx_tcp_offset(ha, m_head);
3365
3366 } else if (ret) {
3367 fp->err_tx_dmamap_load++;
3368
3369 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3370 ret, m_head->m_pkthdr.len);
3371 fp->tx_pkts_freed++;
3372 m_freem(m_head);
3373 *m_headp = NULL;
3374 return (ret);
3375 }
3376
3377 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3378
3379 if (ha->dbg_trace_tso_pkt_len) {
3380 if (nsegs < QLNX_FP_MAX_SEGS)
3381 fp->tx_pkts[(nsegs - 1)]++;
3382 else
3383 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3384 }
3385
3386 #ifdef QLNX_TRACE_PERF_DATA
3387 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3388 if(m_head->m_pkthdr.len <= 2048)
3389 fp->tx_pkts_hist[0]++;
3390 else if((m_head->m_pkthdr.len > 2048) &&
3391 (m_head->m_pkthdr.len <= 4096))
3392 fp->tx_pkts_hist[1]++;
3393 else if((m_head->m_pkthdr.len > 4096) &&
3394 (m_head->m_pkthdr.len <= 8192))
3395 fp->tx_pkts_hist[2]++;
3396 else if((m_head->m_pkthdr.len > 8192) &&
3397 (m_head->m_pkthdr.len <= 12288))
3398 fp->tx_pkts_hist[3]++;
3399 else if((m_head->m_pkthdr.len > 12288) &&
3400 (m_head->m_pkthdr.len <= 16384))
3401 fp->tx_pkts_hist[4]++;
3402 else if((m_head->m_pkthdr.len > 16384) &&
3403 (m_head->m_pkthdr.len <= 20480))
3404 fp->tx_pkts_hist[5]++;
3405 else if((m_head->m_pkthdr.len > 20480) &&
3406 (m_head->m_pkthdr.len <= 24576))
3407 fp->tx_pkts_hist[6]++;
3408 else if((m_head->m_pkthdr.len > 24576) &&
3409 (m_head->m_pkthdr.len <= 28672))
3410 fp->tx_pkts_hist[7]++;
3411 else if((m_head->m_pkthdr.len > 28672) &&
3412 (m_head->m_pkthdr.len <= 32768))
3413 fp->tx_pkts_hist[8]++;
3414 else if((m_head->m_pkthdr.len > 32768) &&
3415 (m_head->m_pkthdr.len <= 36864))
3416 fp->tx_pkts_hist[9]++;
3417 else if((m_head->m_pkthdr.len > 36864) &&
3418 (m_head->m_pkthdr.len <= 40960))
3419 fp->tx_pkts_hist[10]++;
3420 else if((m_head->m_pkthdr.len > 40960) &&
3421 (m_head->m_pkthdr.len <= 45056))
3422 fp->tx_pkts_hist[11]++;
3423 else if((m_head->m_pkthdr.len > 45056) &&
3424 (m_head->m_pkthdr.len <= 49152))
3425 fp->tx_pkts_hist[12]++;
3426 else if((m_head->m_pkthdr.len > 49152) &&
3427 (m_head->m_pkthdr.len <= 53248))
3428 fp->tx_pkts_hist[13]++;
3429 else if((m_head->m_pkthdr.len > 53248) &&
3430 (m_head->m_pkthdr.len <= 57344))
3431 fp->tx_pkts_hist[14]++;
3432 else if((m_head->m_pkthdr.len > 57344) &&
3433 (m_head->m_pkthdr.len <= 61440))
3434 fp->tx_pkts_hist[15]++;
3435 else if((m_head->m_pkthdr.len > 61440) &&
3436 (m_head->m_pkthdr.len <= 65536))
3437 fp->tx_pkts_hist[16]++;
3438 else
3439 fp->tx_pkts_hist[17]++;
3440 }
3441
3442 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3443 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3444 bd_used = TX_RING_SIZE - elem_left;
3445
3446 if(bd_used <= 100)
3447 fp->tx_pkts_q[0]++;
3448 else if((bd_used > 100) && (bd_used <= 500))
3449 fp->tx_pkts_q[1]++;
3450 else if((bd_used > 500) && (bd_used <= 1000))
3451 fp->tx_pkts_q[2]++;
3452 else if((bd_used > 1000) && (bd_used <= 2000))
3453 fp->tx_pkts_q[3]++;
3454 else if((bd_used > 2000) && (bd_used <= 4000))
3455 fp->tx_pkts_q[4]++;
3456 else if((bd_used > 4000) && (bd_used <= 5000))
3457 fp->tx_pkts_q[5]++;
3458 else if((bd_used > 5000) && (bd_used <= 7000))
3459 fp->tx_pkts_q[6]++;
3460 else if((bd_used > 7000) && (bd_used <= 8000))
3461 fp->tx_pkts_q[7]++;
3462 else if((bd_used > 8000) && (bd_used <= 9000))
3463 fp->tx_pkts_q[8]++;
3464 else if((bd_used > 9000) && (bd_used <= 10000))
3465 fp->tx_pkts_q[9]++;
3466 else if((bd_used > 10000) && (bd_used <= 11000))
3467 fp->tx_pkts_q[10]++;
3468 else if((bd_used > 11000) && (bd_used <= 12000))
3469 fp->tx_pkts_q[11]++;
3470 else if((bd_used > 12000) && (bd_used <= 13000))
3471 fp->tx_pkts_q[12]++;
3472 else if((bd_used > 13000) && (bd_used <= 14000))
3473 fp->tx_pkts_q[13]++;
3474 else if((bd_used > 14000) && (bd_used <= 15000))
3475 fp->tx_pkts_q[14]++;
3476 else if((bd_used > 15000) && (bd_used <= 16000))
3477 fp->tx_pkts_q[15]++;
3478 else
3479 fp->tx_pkts_q[16]++;
3480 }
3481
3482 #endif /* end of QLNX_TRACE_PERF_DATA */
3483
3484 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3485 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3486 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3487 " in chain[%d] trying to free packets\n",
3488 nsegs, elem_left, fp->rss_id);
3489
3490 fp->tx_nsegs_gt_elem_left++;
3491
3492 (void)qlnx_tx_int(ha, fp, txq);
3493
3494 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3495 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3496 QL_DPRINT1(ha,
3497 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3498 nsegs, elem_left, fp->rss_id);
3499
3500 fp->err_tx_nsegs_gt_elem_left++;
3501 fp->tx_ring_full = 1;
3502 if (ha->storm_stats_enable)
3503 ha->storm_stats_gather = 1;
3504 return (ENOBUFS);
3505 }
3506 }
3507
3508 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3509
3510 txq->sw_tx_ring[idx].mp = m_head;
3511
3512 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3513
3514 memset(first_bd, 0, sizeof(*first_bd));
3515
3516 first_bd->data.bd_flags.bitfields =
3517 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3518
3519 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3520
3521 nbd++;
3522
3523 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3524 first_bd->data.bd_flags.bitfields |=
3525 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3526 }
3527
3528 if (m_head->m_pkthdr.csum_flags &
3529 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3530 first_bd->data.bd_flags.bitfields |=
3531 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3532 }
3533
3534 if (m_head->m_flags & M_VLANTAG) {
3535 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3536 first_bd->data.bd_flags.bitfields |=
3537 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3538 }
3539
3540 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3541 first_bd->data.bd_flags.bitfields |=
3542 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3543 first_bd->data.bd_flags.bitfields |=
3544 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3545
3546 nbds_in_hdr = 1;
3547
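/*
 * Three cases for carving the LSO headers (the first "offset"
 * bytes) into BDs:
 *  1) the header ends exactly on the first segment boundary,
 *  2) the header ends inside the first segment (split it),
 *  3) the header spans several segments (walk until consumed).
 * The firmware needs nbds_in_hdr in the 3rd BD to know how many
 * BDs contain header bytes.
 */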
3548 if (offset == segs->ds_len) {
3549 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3550 segs++;
3551 seg_idx++;
3552
3553 second_bd = (struct eth_tx_2nd_bd *)
3554 ecore_chain_produce(&txq->tx_pbl);
3555 memset(second_bd, 0, sizeof(*second_bd));
3556 nbd++;
3557
3558 if (seg_idx < nsegs) {
3559 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3560 (segs->ds_addr), (segs->ds_len));
3561 segs++;
3562 seg_idx++;
3563 }
3564
3565 third_bd = (struct eth_tx_3rd_bd *)
3566 ecore_chain_produce(&txq->tx_pbl);
3567 memset(third_bd, 0, sizeof(*third_bd));
3568 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3569 third_bd->data.bitfields |=
3570 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3571 nbd++;
3572
3573 if (seg_idx < nsegs) {
3574 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3575 (segs->ds_addr), (segs->ds_len));
3576 segs++;
3577 seg_idx++;
3578 }
3579
3580 for (; seg_idx < nsegs; seg_idx++) {
3581 tx_data_bd = (struct eth_tx_bd *)
3582 ecore_chain_produce(&txq->tx_pbl);
3583 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3584 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3585 segs->ds_addr,\
3586 segs->ds_len);
3587 segs++;
3588 nbd++;
3589 }
3590
3591 } else if (offset < segs->ds_len) {
3592 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3593
3594 second_bd = (struct eth_tx_2nd_bd *)
3595 ecore_chain_produce(&txq->tx_pbl);
3596 memset(second_bd, 0, sizeof(*second_bd));
3597 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3598 (segs->ds_addr + offset),\
3599 (segs->ds_len - offset));
3600 nbd++;
3601 segs++;
3602
3603 third_bd = (struct eth_tx_3rd_bd *)
3604 ecore_chain_produce(&txq->tx_pbl);
3605 memset(third_bd, 0, sizeof(*third_bd));
3606
3607 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3608 segs->ds_addr,\
3609 segs->ds_len);
3610 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3611 third_bd->data.bitfields |=
3612 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3613 segs++;
3614 nbd++;
3615
3616 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3617 tx_data_bd = (struct eth_tx_bd *)
3618 ecore_chain_produce(&txq->tx_pbl);
3619 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3620 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3621 segs->ds_addr,\
3622 segs->ds_len);
3623 segs++;
3624 nbd++;
3625 }
3626
3627 } else {
3628 offset = offset - segs->ds_len;
3629 segs++;
3630
3631 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3632 if (offset)
3633 nbds_in_hdr++;
3634
3635 tx_data_bd = (struct eth_tx_bd *)
3636 ecore_chain_produce(&txq->tx_pbl);
3637 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3638
3639 if (second_bd == NULL) {
3640 second_bd = (struct eth_tx_2nd_bd *)
3641 tx_data_bd;
3642 } else if (third_bd == NULL) {
3643 third_bd = (struct eth_tx_3rd_bd *)
3644 tx_data_bd;
3645 }
3646
3647 if (offset && (offset < segs->ds_len)) {
3648 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3649 segs->ds_addr, offset);
3650
3651 tx_data_bd = (struct eth_tx_bd *)
3652 ecore_chain_produce(&txq->tx_pbl);
3653
3654 memset(tx_data_bd, 0,
3655 sizeof(*tx_data_bd));
3656
3657 if (second_bd == NULL) {
3658 second_bd =
3659 (struct eth_tx_2nd_bd *)tx_data_bd;
3660 } else if (third_bd == NULL) {
3661 third_bd =
3662 (struct eth_tx_3rd_bd *)tx_data_bd;
3663 }
3664 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3665 (segs->ds_addr + offset), \
3666 (segs->ds_len - offset));
3667 nbd++;
3668 offset = 0;
3669 } else {
3670 if (offset)
3671 offset = offset - segs->ds_len;
3672 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3673 segs->ds_addr, segs->ds_len);
3674 }
3675 segs++;
3676 nbd++;
3677 }
3678
3679 if (third_bd == NULL) {
3680 third_bd = (struct eth_tx_3rd_bd *)
3681 ecore_chain_produce(&txq->tx_pbl);
3682 memset(third_bd, 0, sizeof(*third_bd));
3683 }
3684
3685 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3686 third_bd->data.bitfields |=
3687 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3688 }
3689 fp->tx_tso_pkts++;
3690 } else {
3691 segs++;
3692 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3693 tx_data_bd = (struct eth_tx_bd *)
3694 ecore_chain_produce(&txq->tx_pbl);
3695 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3696 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3697 segs->ds_len);
3698 segs++;
3699 nbd++;
3700 }
3701 first_bd->data.bitfields =
3702 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3703 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3704 first_bd->data.bitfields =
3705 htole16(first_bd->data.bitfields);
3706 fp->tx_non_tso_pkts++;
3707 }
3708
3709 first_bd->data.nbds = nbd;
3710
3711 if (ha->dbg_trace_tso_pkt_len) {
3712 if (fp->tx_tso_max_nsegs < nsegs)
3713 fp->tx_tso_max_nsegs = nsegs;
3714
3715 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3716 fp->tx_tso_min_nsegs = nsegs;
3717 }
3718
3719 txq->sw_tx_ring[idx].nsegs = nsegs;
3720 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3721
3722 txq->tx_db.data.bd_prod =
3723 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3724
3725 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3726
3727 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3728 return (0);
3729 }
3730
3731 static void
3732 qlnx_stop(qlnx_host_t *ha)
3733 {
3734 if_t ifp = ha->ifp;
3735 int i;
3736
3737 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3738
3739 /*
3740 * We simply lock and unlock each fp->tx_mtx to
3741 * propagate the if_drv_flags
3742 * state to each tx thread
3743 */
3744 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3745
3746 if (ha->state == QLNX_STATE_OPEN) {
3747 for (i = 0; i < ha->num_rss; i++) {
3748 struct qlnx_fastpath *fp = &ha->fp_array[i];
3749
3750 mtx_lock(&fp->tx_mtx);
3751 mtx_unlock(&fp->tx_mtx);
3752
3753 if (fp->fp_taskqueue != NULL)
3754 taskqueue_enqueue(fp->fp_taskqueue,
3755 &fp->fp_task);
3756 }
3757 }
3758 #ifdef QLNX_ENABLE_IWARP
3759 if (qlnx_vf_device(ha) != 0) {
3760 qlnx_rdma_dev_close(ha);
3761 }
3762 #endif /* #ifdef QLNX_ENABLE_IWARP */
3763
3764 qlnx_unload(ha);
3765
3766 return;
3767 }
3768
3769 static int
3770 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3771 {
3772 return(TX_RING_SIZE - 1);
3773 }
3774
3775 uint8_t *
3776 qlnx_get_mac_addr(qlnx_host_t *ha)
3777 {
3778 struct ecore_hwfn *p_hwfn;
3779 unsigned char mac[ETHER_ADDR_LEN];
3780 uint8_t p_is_forced;
3781
3782 p_hwfn = &ha->cdev.hwfns[0];
3783
3784 if (qlnx_vf_device(ha) != 0)
3785 return (p_hwfn->hw_info.hw_mac_addr);
3786
3787 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3788 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3789 true) {
3790 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3791 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3792 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3793 memcpy(ha->primary_mac, mac, ETH_ALEN);
3794 }
3795
3796 return (ha->primary_mac);
3797 }
3798
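/*
 * Map the link speed/media type reported by management firmware to
 * an ifmedia subtype; if_link->speed is in Mbps, hence the
 * (N * 1000) comparisons.
 */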
3799 static uint32_t
3800 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3801 {
3802 uint32_t ifm_type = 0;
3803
3804 switch (if_link->media_type) {
3805 case MEDIA_MODULE_FIBER:
3806 case MEDIA_UNSPECIFIED:
3807 if (if_link->speed == (100 * 1000))
3808 ifm_type = QLNX_IFM_100G_SR4;
3809 else if (if_link->speed == (40 * 1000))
3810 ifm_type = IFM_40G_SR4;
3811 else if (if_link->speed == (25 * 1000))
3812 ifm_type = QLNX_IFM_25G_SR;
3813 else if (if_link->speed == (10 * 1000))
3814 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3815 else if (if_link->speed == (1 * 1000))
3816 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3817
3818 break;
3819
3820 case MEDIA_DA_TWINAX:
3821 if (if_link->speed == (100 * 1000))
3822 ifm_type = QLNX_IFM_100G_CR4;
3823 else if (if_link->speed == (40 * 1000))
3824 ifm_type = IFM_40G_CR4;
3825 else if (if_link->speed == (25 * 1000))
3826 ifm_type = QLNX_IFM_25G_CR;
3827 else if (if_link->speed == (10 * 1000))
3828 ifm_type = IFM_10G_TWINAX;
3829
3830 break;
3831
3832 default :
3833 ifm_type = IFM_UNKNOWN;
3834 break;
3835 }
3836 return (ifm_type);
3837 }
3838
3839 /*****************************************************************************
3840 * Interrupt Service Functions
3841 *****************************************************************************/
3842
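/*
 * Gather the continuation buffers of a received frame that spans
 * multiple RX buffers ("len" is the byte count still outstanding
 * after the first buffer) and chain them onto mp_head.
 */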
3843 static int
3844 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3845 struct mbuf *mp_head, uint16_t len)
3846 {
3847 struct mbuf *mp, *mpf, *mpl;
3848 struct sw_rx_data *sw_rx_data;
3849 struct qlnx_rx_queue *rxq;
3850 uint16_t len_in_buffer;
3851
3852 rxq = fp->rxq;
3853 mpf = mpl = mp = NULL;
3854
3855 while (len) {
3856 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3857
3858 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3859 mp = sw_rx_data->data;
3860
3861 if (mp == NULL) {
3862 QL_DPRINT1(ha, "mp = NULL\n");
3863 fp->err_rx_mp_null++;
3864 rxq->sw_rx_cons =
3865 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3866
3867 if (mpf != NULL)
3868 m_freem(mpf);
3869
3870 return (-1);
3871 }
3872 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3873 BUS_DMASYNC_POSTREAD);
3874
3875 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3876 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3877 " incoming packet and reusing its buffer\n");
3878
3879 qlnx_reuse_rx_data(rxq);
3880 fp->err_rx_alloc_errors++;
3881
3882 if (mpf != NULL)
3883 m_freem(mpf);
3884
3885 return (-1);
3886 }
3887 ecore_chain_consume(&rxq->rx_bd_ring);
3888
3889 if (len > rxq->rx_buf_size)
3890 len_in_buffer = rxq->rx_buf_size;
3891 else
3892 len_in_buffer = len;
3893
3894 len = len - len_in_buffer;
3895
3896 mp->m_flags &= ~M_PKTHDR;
3897 mp->m_next = NULL;
3898 mp->m_len = len_in_buffer;
3899
3900 if (mpf == NULL)
3901 mpf = mpl = mp;
3902 else {
3903 mpl->m_next = mp;
3904 mpl = mp;
3905 }
3906 }
3907
3908 if (mpf != NULL)
3909 mp_head->m_next = mpf;
3910
3911 return (0);
3912 }
3913
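/*
 * TPA (transparent packet aggregation) is the hardware LRO engine.
 * On a tpa_start cqe, take the current RX buffer as the head of the
 * aggregation, post a replacement buffer, and park the in-progress
 * chain in rxq->tpa_info[agg_index] until the matching tpa_cont/
 * tpa_end cqes arrive.
 */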
3914 static void
3915 qlnx_tpa_start(qlnx_host_t *ha,
3916 struct qlnx_fastpath *fp,
3917 struct qlnx_rx_queue *rxq,
3918 struct eth_fast_path_rx_tpa_start_cqe *cqe)
3919 {
3920 uint32_t agg_index;
3921 if_t ifp = ha->ifp;
3922 struct mbuf *mp;
3923 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3924 struct sw_rx_data *sw_rx_data;
3925 dma_addr_t addr;
3926 bus_dmamap_t map;
3927 struct eth_rx_bd *rx_bd;
3928 int i;
3929 uint8_t hash_type;
3930
3931 agg_index = cqe->tpa_agg_index;
3932
3933 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3934 \t type = 0x%x\n \
3935 \t bitfields = 0x%x\n \
3936 \t seg_len = 0x%x\n \
3937 \t pars_flags = 0x%x\n \
3938 \t vlan_tag = 0x%x\n \
3939 \t rss_hash = 0x%x\n \
3940 \t len_on_first_bd = 0x%x\n \
3941 \t placement_offset = 0x%x\n \
3942 \t tpa_agg_index = 0x%x\n \
3943 \t header_len = 0x%x\n \
3944 \t ext_bd_len_list[0] = 0x%x\n \
3945 \t ext_bd_len_list[1] = 0x%x\n \
3946 \t ext_bd_len_list[2] = 0x%x\n \
3947 \t ext_bd_len_list[3] = 0x%x\n \
3948 \t ext_bd_len_list[4] = 0x%x\n",
3949 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3950 cqe->pars_flags.flags, cqe->vlan_tag,
3951 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3952 cqe->tpa_agg_index, cqe->header_len,
3953 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3954 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3955 cqe->ext_bd_len_list[4]);
3956
3957 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3958 fp->err_rx_tpa_invalid_agg_num++;
3959 return;
3960 }
3961
3962 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3963 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3964 mp = sw_rx_data->data;
3965
3966 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3967
3968 if (mp == NULL) {
3969 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3970 fp->err_rx_mp_null++;
3971 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3972
3973 return;
3974 }
3975
3976 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3977 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3978 " flags = %x, dropping incoming packet\n", fp->rss_id,
3979 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3980
3981 fp->err_rx_hw_errors++;
3982
3983 qlnx_reuse_rx_data(rxq);
3984
3985 QLNX_INC_IERRORS(ifp);
3986
3987 return;
3988 }
3989
3990 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3991 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3992 " dropping incoming packet and reusing its buffer\n",
3993 fp->rss_id);
3994
3995 fp->err_rx_alloc_errors++;
3996 QLNX_INC_IQDROPS(ifp);
3997
3998 /*
3999 * Load the tpa mbuf into the rx ring and save the
4000 * posted mbuf
4001 */
4002
4003 map = sw_rx_data->map;
4004 addr = sw_rx_data->dma_addr;
4005
4006 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4007
4008 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4009 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4010 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4011
4012 rxq->tpa_info[agg_index].rx_buf.data = mp;
4013 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4014 rxq->tpa_info[agg_index].rx_buf.map = map;
4015
4016 rx_bd = (struct eth_rx_bd *)
4017 ecore_chain_produce(&rxq->rx_bd_ring);
4018
4019 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4020 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4021
4022 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4023 BUS_DMASYNC_PREREAD);
4024
4025 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4026 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4027
4028 ecore_chain_consume(&rxq->rx_bd_ring);
4029
4030 /* Now reuse any buffers posted in ext_bd_len_list */
4031 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4032 if (cqe->ext_bd_len_list[i] == 0)
4033 break;
4034
4035 qlnx_reuse_rx_data(rxq);
4036 }
4037
4038 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4039 return;
4040 }
4041
4042 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4043 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4044 " dropping incoming packet and reusing its buffer\n",
4045 fp->rss_id);
4046
4047 QLNX_INC_IQDROPS(ifp);
4048
4049 		/* if an mbuf head already exists for this aggregation, free it */
4050 if (rxq->tpa_info[agg_index].mpf) {
4051 m_freem(rxq->tpa_info[agg_index].mpf);
4052 rxq->tpa_info[agg_index].mpl = NULL;
4053 }
4054 rxq->tpa_info[agg_index].mpf = mp;
4055 rxq->tpa_info[agg_index].mpl = NULL;
4056
4057 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4058 ecore_chain_consume(&rxq->rx_bd_ring);
4059
4060 /* Now reuse any buffers posted in ext_bd_len_list */
4061 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4062 if (cqe->ext_bd_len_list[i] == 0)
4063 break;
4064
4065 qlnx_reuse_rx_data(rxq);
4066 }
4067 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4068
4069 return;
4070 }
4071
4072 /*
4073 * first process the ext_bd_len_list
4074 * if this fails then we simply drop the packet
4075 */
4076 ecore_chain_consume(&rxq->rx_bd_ring);
4077 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4078
4079 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4080 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4081
4082 if (cqe->ext_bd_len_list[i] == 0)
4083 break;
4084
4085 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4086 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4087 BUS_DMASYNC_POSTREAD);
4088
4089 mpc = sw_rx_data->data;
4090
4091 if (mpc == NULL) {
4092 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4093 fp->err_rx_mp_null++;
4094 if (mpf != NULL)
4095 m_freem(mpf);
4096 mpf = mpl = NULL;
4097 rxq->tpa_info[agg_index].agg_state =
4098 QLNX_AGG_STATE_ERROR;
4099 ecore_chain_consume(&rxq->rx_bd_ring);
4100 rxq->sw_rx_cons =
4101 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4102 continue;
4103 }
4104
4105 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4106 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4107 " dropping incoming packet and reusing its"
4108 " buffer\n", fp->rss_id);
4109
4110 qlnx_reuse_rx_data(rxq);
4111
4112 if (mpf != NULL)
4113 m_freem(mpf);
4114 mpf = mpl = NULL;
4115
4116 rxq->tpa_info[agg_index].agg_state =
4117 QLNX_AGG_STATE_ERROR;
4118
4119 ecore_chain_consume(&rxq->rx_bd_ring);
4120 rxq->sw_rx_cons =
4121 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4122
4123 continue;
4124 }
4125
4126 mpc->m_flags &= ~M_PKTHDR;
4127 mpc->m_next = NULL;
4128 mpc->m_len = cqe->ext_bd_len_list[i];
4129
4130 if (mpf == NULL) {
4131 mpf = mpl = mpc;
4132 } else {
4133 mpl->m_len = ha->rx_buf_size;
4134 mpl->m_next = mpc;
4135 mpl = mpc;
4136 }
4137
4138 ecore_chain_consume(&rxq->rx_bd_ring);
4139 rxq->sw_rx_cons =
4140 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4141 }
4142
4143 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4144 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4145 " incoming packet and reusing its buffer\n",
4146 fp->rss_id);
4147
4148 QLNX_INC_IQDROPS(ifp);
4149
4150 rxq->tpa_info[agg_index].mpf = mp;
4151 rxq->tpa_info[agg_index].mpl = NULL;
4152
4153 return;
4154 }
4155
4156 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4157
4158 if (mpf != NULL) {
4159 mp->m_len = ha->rx_buf_size;
4160 mp->m_next = mpf;
4161 rxq->tpa_info[agg_index].mpf = mp;
4162 rxq->tpa_info[agg_index].mpl = mpl;
4163 } else {
4164 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4165 rxq->tpa_info[agg_index].mpf = mp;
4166 rxq->tpa_info[agg_index].mpl = mp;
4167 mp->m_next = NULL;
4168 }
4169
4170 mp->m_flags |= M_PKTHDR;
4171
4172 	/* assign packet to this interface */
4173 mp->m_pkthdr.rcvif = ifp;
4174
4175 	/* assume no hardware checksum has been completed */
4176 mp->m_pkthdr.csum_flags = 0;
4177
4178 //mp->m_pkthdr.flowid = fp->rss_id;
4179 mp->m_pkthdr.flowid = cqe->rss_hash;
4180
4181 hash_type = cqe->bitfields &
4182 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4183 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4184
4185 switch (hash_type) {
4186 case RSS_HASH_TYPE_IPV4:
4187 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4188 break;
4189
4190 case RSS_HASH_TYPE_TCP_IPV4:
4191 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4192 break;
4193
4194 case RSS_HASH_TYPE_IPV6:
4195 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4196 break;
4197
4198 case RSS_HASH_TYPE_TCP_IPV6:
4199 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4200 break;
4201
4202 default:
4203 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4204 break;
4205 }
4206
4207 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4208 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4209
4210 mp->m_pkthdr.csum_data = 0xFFFF;
4211
4212 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4213 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4214 mp->m_flags |= M_VLANTAG;
4215 }
4216
4217 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4218
4219 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4220 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4221 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4222
4223 return;
4224 }
4225
4226 static void
4227 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4228 struct qlnx_rx_queue *rxq,
4229 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4230 {
4231 struct sw_rx_data *sw_rx_data;
4232 int i;
4233 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4234 struct mbuf *mp;
4235 uint32_t agg_index;
4236
4237 QL_DPRINT7(ha, "[%d]: enter\n \
4238 \t type = 0x%x\n \
4239 \t tpa_agg_index = 0x%x\n \
4240 \t len_list[0] = 0x%x\n \
4241 \t len_list[1] = 0x%x\n \
4242 \t len_list[2] = 0x%x\n \
4243 \t len_list[3] = 0x%x\n \
4244 \t len_list[4] = 0x%x\n \
4245 \t len_list[5] = 0x%x\n",
4246 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4247 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4248 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4249
4250 agg_index = cqe->tpa_agg_index;
4251
4252 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4253 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4254 fp->err_rx_tpa_invalid_agg_num++;
4255 return;
4256 }
4257
4258 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4259 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4260
4261 if (cqe->len_list[i] == 0)
4262 break;
4263
4264 if (rxq->tpa_info[agg_index].agg_state !=
4265 QLNX_AGG_STATE_START) {
4266 qlnx_reuse_rx_data(rxq);
4267 continue;
4268 }
4269
4270 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4271 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4272 BUS_DMASYNC_POSTREAD);
4273
4274 mpc = sw_rx_data->data;
4275
4276 if (mpc == NULL) {
4277 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4278
4279 fp->err_rx_mp_null++;
4280 if (mpf != NULL)
4281 m_freem(mpf);
4282 mpf = mpl = NULL;
4283 rxq->tpa_info[agg_index].agg_state =
4284 QLNX_AGG_STATE_ERROR;
4285 ecore_chain_consume(&rxq->rx_bd_ring);
4286 rxq->sw_rx_cons =
4287 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4288 continue;
4289 }
4290
4291 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4292 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4293 " dropping incoming packet and reusing its"
4294 " buffer\n", fp->rss_id);
4295
4296 qlnx_reuse_rx_data(rxq);
4297
4298 if (mpf != NULL)
4299 m_freem(mpf);
4300 mpf = mpl = NULL;
4301
4302 rxq->tpa_info[agg_index].agg_state =
4303 QLNX_AGG_STATE_ERROR;
4304
4305 ecore_chain_consume(&rxq->rx_bd_ring);
4306 rxq->sw_rx_cons =
4307 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4308
4309 continue;
4310 }
4311
4312 mpc->m_flags &= ~M_PKTHDR;
4313 mpc->m_next = NULL;
4314 mpc->m_len = cqe->len_list[i];
4315
4316 if (mpf == NULL) {
4317 mpf = mpl = mpc;
4318 } else {
4319 mpl->m_len = ha->rx_buf_size;
4320 mpl->m_next = mpc;
4321 mpl = mpc;
4322 }
4323
4324 ecore_chain_consume(&rxq->rx_bd_ring);
4325 rxq->sw_rx_cons =
4326 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4327 }
4328
4329 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4330 fp->rss_id, mpf, mpl);
4331
4332 if (mpf != NULL) {
4333 mp = rxq->tpa_info[agg_index].mpl;
4334 mp->m_len = ha->rx_buf_size;
4335 mp->m_next = mpf;
4336 rxq->tpa_info[agg_index].mpl = mpl;
4337 }
4338
4339 return;
4340 }
4341
4342 static int
4343 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4344 struct qlnx_rx_queue *rxq,
4345 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4346 {
4347 struct sw_rx_data *sw_rx_data;
4348 int i;
4349 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4350 struct mbuf *mp;
4351 uint32_t agg_index;
4352 uint32_t len = 0;
4353 if_t ifp = ha->ifp;
4354
4355 QL_DPRINT7(ha, "[%d]: enter\n \
4356 \t type = 0x%x\n \
4357 \t tpa_agg_index = 0x%x\n \
4358 \t total_packet_len = 0x%x\n \
4359 \t num_of_bds = 0x%x\n \
4360 \t end_reason = 0x%x\n \
4361 \t num_of_coalesced_segs = 0x%x\n \
4362 \t ts_delta = 0x%x\n \
4363 \t len_list[0] = 0x%x\n \
4364 \t len_list[1] = 0x%x\n \
4365 \t len_list[2] = 0x%x\n \
4366 \t len_list[3] = 0x%x\n",
4367 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4368 cqe->total_packet_len, cqe->num_of_bds,
4369 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4370 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4371 cqe->len_list[3]);
4372
4373 agg_index = cqe->tpa_agg_index;
4374
4375 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4376 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4377
4378 fp->err_rx_tpa_invalid_agg_num++;
4379 return (0);
4380 }
4381
4382 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4383 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4384
4385 if (cqe->len_list[i] == 0)
4386 break;
4387
4388 if (rxq->tpa_info[agg_index].agg_state !=
4389 QLNX_AGG_STATE_START) {
4390 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4391
4392 qlnx_reuse_rx_data(rxq);
4393 continue;
4394 }
4395
4396 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4397 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4398 BUS_DMASYNC_POSTREAD);
4399
4400 mpc = sw_rx_data->data;
4401
4402 if (mpc == NULL) {
4403 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4404
4405 fp->err_rx_mp_null++;
4406 if (mpf != NULL)
4407 m_freem(mpf);
4408 mpf = mpl = NULL;
4409 rxq->tpa_info[agg_index].agg_state =
4410 QLNX_AGG_STATE_ERROR;
4411 ecore_chain_consume(&rxq->rx_bd_ring);
4412 rxq->sw_rx_cons =
4413 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4414 continue;
4415 }
4416
4417 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4418 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4419 " dropping incoming packet and reusing its"
4420 " buffer\n", fp->rss_id);
4421
4422 qlnx_reuse_rx_data(rxq);
4423
4424 if (mpf != NULL)
4425 m_freem(mpf);
4426 mpf = mpl = NULL;
4427
4428 rxq->tpa_info[agg_index].agg_state =
4429 QLNX_AGG_STATE_ERROR;
4430
4431 ecore_chain_consume(&rxq->rx_bd_ring);
4432 rxq->sw_rx_cons =
4433 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4434
4435 continue;
4436 }
4437
4438 mpc->m_flags &= ~M_PKTHDR;
4439 mpc->m_next = NULL;
4440 mpc->m_len = cqe->len_list[i];
4441
4442 if (mpf == NULL) {
4443 mpf = mpl = mpc;
4444 } else {
4445 mpl->m_len = ha->rx_buf_size;
4446 mpl->m_next = mpc;
4447 mpl = mpc;
4448 }
4449
4450 ecore_chain_consume(&rxq->rx_bd_ring);
4451 rxq->sw_rx_cons =
4452 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4453 }
4454
4455 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4456
4457 if (mpf != NULL) {
4458 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4459
4460 mp = rxq->tpa_info[agg_index].mpl;
4461 mp->m_len = ha->rx_buf_size;
4462 mp->m_next = mpf;
4463 }
4464
4465 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4466 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4467
4468 if (rxq->tpa_info[agg_index].mpf != NULL)
4469 m_freem(rxq->tpa_info[agg_index].mpf);
4470 rxq->tpa_info[agg_index].mpf = NULL;
4471 rxq->tpa_info[agg_index].mpl = NULL;
4472 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4473 return (0);
4474 }
4475
4476 mp = rxq->tpa_info[agg_index].mpf;
4477 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4478 mp->m_pkthdr.len = cqe->total_packet_len;
4479
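	/*
	 * Reconcile the chain length with the firmware's total_packet_len:
	 * intermediate mbufs were forced to the full buffer size while
	 * chaining, so if the sum of m_len still falls short, the last
	 * mbuf (mpl) is stretched below to make up the difference.
	 */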
4480 if (mp->m_next == NULL)
4481 mp->m_len = mp->m_pkthdr.len;
4482 else {
4483 /* compute the total packet length */
4484 mpf = mp;
4485 while (mpf != NULL) {
4486 len += mpf->m_len;
4487 mpf = mpf->m_next;
4488 }
4489
4490 if (cqe->total_packet_len > len) {
4491 mpl = rxq->tpa_info[agg_index].mpl;
4492 mpl->m_len += (cqe->total_packet_len - len);
4493 }
4494 }
4495
4496 QLNX_INC_IPACKETS(ifp);
4497 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4498
4499 	QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIx64 "\n \
4500 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4501 fp->rss_id, mp->m_pkthdr.csum_data,
4502 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4503
4504 if_input(ifp, mp);
4505
4506 rxq->tpa_info[agg_index].mpf = NULL;
4507 rxq->tpa_info[agg_index].mpl = NULL;
4508 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4509
4510 return (cqe->num_of_coalesced_segs);
4511 }
4512
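/*
 * qlnx_rx_int() drains up to 'budget' completions from the fastpath
 * completion ring: slow-path CQEs are handed back to ecore, TPA CQEs go
 * through the tpa_start/cont/end handlers above, and regular CQEs are
 * turned into mbufs and passed up the stack (or queued for soft LRO).
 */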
4513 static int
4514 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4515 int lro_enable)
4516 {
4517 uint16_t hw_comp_cons, sw_comp_cons;
4518 int rx_pkt = 0;
4519 struct qlnx_rx_queue *rxq = fp->rxq;
4520 if_t ifp = ha->ifp;
4521 struct ecore_dev *cdev = &ha->cdev;
4522 struct ecore_hwfn *p_hwfn;
4523
4524 #ifdef QLNX_SOFT_LRO
4525 struct lro_ctrl *lro;
4526
4527 lro = &rxq->lro;
4528 #endif /* #ifdef QLNX_SOFT_LRO */
4529
4530 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4531 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4532
4533 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4534
4535 	/* A memory barrier is needed here to prevent the CPU from speculatively
4536 	 * reading a CQE/BD in the while-loop below before hw_comp_cons has been
4537 	 * read; otherwise the firmware may write the CQE and status block after
4538 	 * the speculative read, and the CPU would pair the fresh hw_comp_cons
4539 	 * with a stale CQE. */
4540
4541 /* Loop to complete all indicated BDs */
4542 while (sw_comp_cons != hw_comp_cons) {
4543 union eth_rx_cqe *cqe;
4544 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4545 struct sw_rx_data *sw_rx_data;
4546 register struct mbuf *mp;
4547 enum eth_rx_cqe_type cqe_type;
4548 uint16_t len, pad, len_on_first_bd;
4549 uint8_t *data;
4550 uint8_t hash_type;
4551
4552 /* Get the CQE from the completion ring */
4553 cqe = (union eth_rx_cqe *)
4554 ecore_chain_consume(&rxq->rx_comp_ring);
4555 cqe_type = cqe->fast_path_regular.type;
4556
4557 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4558 			QL_DPRINT3(ha, "Got a slowpath CQE\n");
4559
4560 ecore_eth_cqe_completion(p_hwfn,
4561 (struct eth_slow_path_rx_cqe *)cqe);
4562 goto next_cqe;
4563 }
4564
4565 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4566 switch (cqe_type) {
4567 case ETH_RX_CQE_TYPE_TPA_START:
4568 qlnx_tpa_start(ha, fp, rxq,
4569 &cqe->fast_path_tpa_start);
4570 fp->tpa_start++;
4571 break;
4572
4573 case ETH_RX_CQE_TYPE_TPA_CONT:
4574 qlnx_tpa_cont(ha, fp, rxq,
4575 &cqe->fast_path_tpa_cont);
4576 fp->tpa_cont++;
4577 break;
4578
4579 case ETH_RX_CQE_TYPE_TPA_END:
4580 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4581 &cqe->fast_path_tpa_end);
4582 fp->tpa_end++;
4583 break;
4584
4585 default:
4586 break;
4587 }
4588
4589 goto next_cqe;
4590 }
4591
4592 /* Get the data from the SW ring */
4593 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4594 mp = sw_rx_data->data;
4595
4596 if (mp == NULL) {
4597 QL_DPRINT1(ha, "mp = NULL\n");
4598 fp->err_rx_mp_null++;
4599 rxq->sw_rx_cons =
4600 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4601 goto next_cqe;
4602 }
4603 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4604 BUS_DMASYNC_POSTREAD);
4605
4606 /* non GRO */
4607 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4608 len = le16toh(fp_cqe->pkt_len);
4609 pad = fp_cqe->placement_offset;
4610 #if 0
4611 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4612 " len %u, parsing flags = %d pad = %d\n",
4613 cqe_type, fp_cqe->bitfields,
4614 le16toh(fp_cqe->vlan_tag),
4615 len, le16toh(fp_cqe->pars_flags.flags), pad);
4616 #endif
4617 data = mtod(mp, uint8_t *);
4618 data = data + pad;
4619
4620 if (0)
4621 qlnx_dump_buf8(ha, __func__, data, len);
4622
4623 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4624 * is always with a fixed size. If allocation fails, we take the
4625 * consumed BD and return it to the ring in the PROD position.
4626 * The packet that was received on that BD will be dropped (and
4627 * not passed to the upper stack).
4628 */
4629 /* If this is an error packet then drop it */
4630 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4631 CQE_FLAGS_ERR) {
4632 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4633 " dropping incoming packet\n", sw_comp_cons,
4634 le16toh(cqe->fast_path_regular.pars_flags.flags));
4635 fp->err_rx_hw_errors++;
4636
4637 qlnx_reuse_rx_data(rxq);
4638
4639 QLNX_INC_IERRORS(ifp);
4640
4641 goto next_cqe;
4642 }
4643
4644 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4645 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4646 " incoming packet and reusing its buffer\n");
4647 qlnx_reuse_rx_data(rxq);
4648
4649 fp->err_rx_alloc_errors++;
4650
4651 QLNX_INC_IQDROPS(ifp);
4652
4653 goto next_cqe;
4654 }
4655
4656 ecore_chain_consume(&rxq->rx_bd_ring);
4657
4658 len_on_first_bd = fp_cqe->len_on_first_bd;
4659 m_adj(mp, pad);
4660 mp->m_pkthdr.len = len;
4661
4662 if ((len > 60 ) && (len > len_on_first_bd)) {
4663 mp->m_len = len_on_first_bd;
4664
4665 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4666 (len - len_on_first_bd)) != 0) {
4667 m_freem(mp);
4668
4669 QLNX_INC_IQDROPS(ifp);
4670
4671 goto next_cqe;
4672 }
4673
4674 } else if (len_on_first_bd < len) {
4675 fp->err_rx_jumbo_chain_pkts++;
4676 } else {
4677 mp->m_len = len;
4678 }
4679
4680 mp->m_flags |= M_PKTHDR;
4681
4682 		/* assign packet to this interface */
4683 mp->m_pkthdr.rcvif = ifp;
4684
4685 		/* assume no hardware checksum has been completed */
4686 mp->m_pkthdr.csum_flags = 0;
4687
4688 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4689
4690 hash_type = fp_cqe->bitfields &
4691 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4692 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4693
4694 switch (hash_type) {
4695 case RSS_HASH_TYPE_IPV4:
4696 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4697 break;
4698
4699 case RSS_HASH_TYPE_TCP_IPV4:
4700 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4701 break;
4702
4703 case RSS_HASH_TYPE_IPV6:
4704 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4705 break;
4706
4707 case RSS_HASH_TYPE_TCP_IPV6:
4708 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4709 break;
4710
4711 default:
4712 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4713 break;
4714 }
4715
4716 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4717 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4718 }
4719
4720 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4721 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4722 }
4723
4724 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4725 mp->m_pkthdr.csum_data = 0xFFFF;
4726 mp->m_pkthdr.csum_flags |=
4727 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4728 }
4729
4730 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4731 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4732 mp->m_flags |= M_VLANTAG;
4733 }
4734
4735 QLNX_INC_IPACKETS(ifp);
4736 QLNX_INC_IBYTES(ifp, len);
4737
4738 #ifdef QLNX_SOFT_LRO
4739 if (lro_enable)
4740 tcp_lro_queue_mbuf(lro, mp);
4741 else
4742 if_input(ifp, mp);
4743 #else
4744
4745 if_input(ifp, mp);
4746
4747 #endif /* #ifdef QLNX_SOFT_LRO */
4748
4749 rx_pkt++;
4750
4751 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4752
4753 next_cqe: /* don't consume bd rx buffer */
4754 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4755 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4756
4757 /* CR TPA - revisit how to handle budget in TPA perhaps
4758 increase on "end" */
4759 if (rx_pkt == budget)
4760 break;
4761 } /* repeat while sw_comp_cons != hw_comp_cons... */
4762
4763 /* Update producers */
4764 qlnx_update_rx_prod(p_hwfn, rxq);
4765
4766 return rx_pkt;
4767 }
4768
4769 /*
4770 * fast path interrupt
4771 */
4772
4773 static void
4774 qlnx_fp_isr(void *arg)
4775 {
4776 qlnx_ivec_t *ivec = arg;
4777 qlnx_host_t *ha;
4778 struct qlnx_fastpath *fp = NULL;
4779 int idx;
4780
4781 ha = ivec->ha;
4782
4783 if (ha->state != QLNX_STATE_OPEN) {
4784 return;
4785 }
4786
4787 	idx = ivec->rss_idx;
4788 
4789 	if (idx >= ha->num_rss) {
4790 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4791 ha->err_illegal_intr++;
4792 return;
4793 }
4794 fp = &ha->fp_array[idx];
4795
4796 if (fp == NULL) {
4797 ha->err_fp_null++;
4798 } else {
4799 int rx_int = 0;
4800 #ifdef QLNX_SOFT_LRO
4801 int total_rx_count = 0;
4802 #endif
4803 int lro_enable, tc;
4804 struct qlnx_tx_queue *txq;
4805 uint16_t elem_left;
4806
4807 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4808
4809 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4810
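		/*
		 * Reap tx completions for any tx PBL that is running low on
		 * free elements, then poll rx; repeat until an rx pass finds
		 * no work. The status block is only re-armed after the loop.
		 */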
4811 do {
4812 for (tc = 0; tc < ha->num_tc; tc++) {
4813 txq = fp->txq[tc];
4814
4815 if((int)(elem_left =
4816 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4817 QLNX_TX_ELEM_THRESH) {
4818 if (mtx_trylock(&fp->tx_mtx)) {
4819 #ifdef QLNX_TRACE_PERF_DATA
4820 					uint64_t tx_compl = fp->tx_pkts_completed;
4821 #endif
4822
4823 qlnx_tx_int(ha, fp, fp->txq[tc]);
4824 #ifdef QLNX_TRACE_PERF_DATA
4825 fp->tx_pkts_compl_intr +=
4826 (fp->tx_pkts_completed - tx_compl);
4827 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4828 fp->tx_comInt[0]++;
4829 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4830 ((fp->tx_pkts_completed - tx_compl) <= 64))
4831 fp->tx_comInt[1]++;
4832 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4833 ((fp->tx_pkts_completed - tx_compl) <= 128))
4834 fp->tx_comInt[2]++;
4835 else if(((fp->tx_pkts_completed - tx_compl) > 128))
4836 fp->tx_comInt[3]++;
4837 #endif
4838 mtx_unlock(&fp->tx_mtx);
4839 }
4840 }
4841 }
4842
4843 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4844 lro_enable);
4845
4846 if (rx_int) {
4847 fp->rx_pkts += rx_int;
4848 #ifdef QLNX_SOFT_LRO
4849 total_rx_count += rx_int;
4850 #endif
4851 }
4852
4853 } while (rx_int);
4854
4855 #ifdef QLNX_SOFT_LRO
4856 {
4857 struct lro_ctrl *lro;
4858
4859 lro = &fp->rxq->lro;
4860
4861 if (lro_enable && total_rx_count) {
4862
4863 #ifdef QLNX_TRACE_LRO_CNT
4864 if (lro->lro_mbuf_count & ~1023)
4865 fp->lro_cnt_1024++;
4866 else if (lro->lro_mbuf_count & ~511)
4867 fp->lro_cnt_512++;
4868 else if (lro->lro_mbuf_count & ~255)
4869 fp->lro_cnt_256++;
4870 else if (lro->lro_mbuf_count & ~127)
4871 fp->lro_cnt_128++;
4872 else if (lro->lro_mbuf_count & ~63)
4873 fp->lro_cnt_64++;
4874 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4875
4876 tcp_lro_flush_all(lro);
4877 }
4878 }
4879 #endif /* #ifdef QLNX_SOFT_LRO */
4880
4881 ecore_sb_update_sb_idx(fp->sb_info);
4882 rmb();
4883 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4884 }
4885
4886 return;
4887 }
4888
4889 /*
4890 * slow path interrupt processing function
4891 * can be invoked in polled mode or in interrupt mode via taskqueue.
4892 */
4893 void
4894 qlnx_sp_isr(void *arg)
4895 {
4896 struct ecore_hwfn *p_hwfn;
4897 qlnx_host_t *ha;
4898
4899 p_hwfn = arg;
4900
4901 ha = (qlnx_host_t *)p_hwfn->p_dev;
4902
4903 ha->sp_interrupts++;
4904
4905 QL_DPRINT2(ha, "enter\n");
4906
4907 ecore_int_sp_dpc(p_hwfn);
4908
4909 QL_DPRINT2(ha, "exit\n");
4910
4911 return;
4912 }
4913
4914 /*****************************************************************************
4915 * Support Functions for DMA'able Memory
4916 *****************************************************************************/
4917
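/*
 * bus_dmamap_load(9) reports the physical segments through a callback;
 * qlnx_dmamap_callback() copies the address of the first (and, with
 * nsegments = 1, only) segment back through the opaque 'arg' pointer.
 */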
4918 static void
4919 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4920 {
4921 *((bus_addr_t *)arg) = 0;
4922
4923 if (error) {
4924 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4925 return;
4926 }
4927
4928 *((bus_addr_t *)arg) = segs[0].ds_addr;
4929
4930 return;
4931 }
4932
4933 static int
4934 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4935 {
4936 int ret = 0;
4937 bus_addr_t b_addr;
4938
4939 ret = bus_dma_tag_create(
4940 ha->parent_tag,/* parent */
4941 dma_buf->alignment,
4942 ((bus_size_t)(1ULL << 32)),/* boundary */
4943 BUS_SPACE_MAXADDR, /* lowaddr */
4944 BUS_SPACE_MAXADDR, /* highaddr */
4945 NULL, NULL, /* filter, filterarg */
4946 dma_buf->size, /* maxsize */
4947 1, /* nsegments */
4948 dma_buf->size, /* maxsegsize */
4949 0, /* flags */
4950 NULL, NULL, /* lockfunc, lockarg */
4951 &dma_buf->dma_tag);
4952
4953 if (ret) {
4954 QL_DPRINT1(ha, "could not create dma tag\n");
4955 goto qlnx_alloc_dmabuf_exit;
4956 }
4957 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4958 (void **)&dma_buf->dma_b,
4959 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4960 &dma_buf->dma_map);
4961 if (ret) {
4962 bus_dma_tag_destroy(dma_buf->dma_tag);
4963 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4964 goto qlnx_alloc_dmabuf_exit;
4965 }
4966
4967 ret = bus_dmamap_load(dma_buf->dma_tag,
4968 dma_buf->dma_map,
4969 dma_buf->dma_b,
4970 dma_buf->size,
4971 qlnx_dmamap_callback,
4972 &b_addr, BUS_DMA_NOWAIT);
4973
4974 if (ret || !b_addr) {
4975 bus_dma_tag_destroy(dma_buf->dma_tag);
4976 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4977 dma_buf->dma_map);
4978 ret = -1;
4979 goto qlnx_alloc_dmabuf_exit;
4980 }
4981
4982 dma_buf->dma_addr = b_addr;
4983
4984 qlnx_alloc_dmabuf_exit:
4985
4986 return ret;
4987 }
4988
4989 static void
4990 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4991 {
4992 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4993 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4994 bus_dma_tag_destroy(dma_buf->dma_tag);
4995 return;
4996 }
4997
4998 void *
4999 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5000 {
5001 qlnx_dma_t dma_buf;
5002 qlnx_dma_t *dma_p;
5003 qlnx_host_t *ha __unused;
5004
5005 ha = (qlnx_host_t *)ecore_dev;
5006
5007 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5008
5009 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5010
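	/*
	 * Layout of the allocation (sketch):
	 *
	 *   dma_b                              dma_b + size
	 *   |<-------------- size ------------>|<- qlnx_dma_t copy ->|
	 *
	 * An extra page is allocated and a copy of the qlnx_dma_t is stashed
	 * just past the requested region, so qlnx_dma_free_coherent() can
	 * recover the tag/map/address from only the virtual address and the
	 * (page-rounded) size.
	 */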
5011 dma_buf.size = size + PAGE_SIZE;
5012 dma_buf.alignment = 8;
5013
5014 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5015 return (NULL);
5016 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5017
5018 *phys = dma_buf.dma_addr;
5019
5020 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5021
5022 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5023
5024 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5025 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5026 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5027
5028 return (dma_buf.dma_b);
5029 }
5030
5031 void
5032 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5033 uint32_t size)
5034 {
5035 qlnx_dma_t dma_buf, *dma_p;
5036 qlnx_host_t *ha;
5037
5038 ha = (qlnx_host_t *)ecore_dev;
5039
5040 if (v_addr == NULL)
5041 return;
5042
5043 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5044
5045 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5046
5047 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5048 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5049 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5050
5051 dma_buf = *dma_p;
5052
5053 if (!ha->qlnxr_debug)
5054 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5055 return;
5056 }
5057
5058 static int
5059 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5060 {
5061 int ret;
5062 device_t dev;
5063
5064 dev = ha->pci_dev;
5065
5066 /*
5067 * Allocate parent DMA Tag
5068 */
5069 ret = bus_dma_tag_create(
5070 bus_get_dma_tag(dev), /* parent */
5071 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5072 BUS_SPACE_MAXADDR, /* lowaddr */
5073 BUS_SPACE_MAXADDR, /* highaddr */
5074 NULL, NULL, /* filter, filterarg */
5075 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5076 0, /* nsegments */
5077 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5078 0, /* flags */
5079 NULL, NULL, /* lockfunc, lockarg */
5080 &ha->parent_tag);
5081
5082 if (ret) {
5083 QL_DPRINT1(ha, "could not create parent dma tag\n");
5084 return (-1);
5085 }
5086
5087 ha->flags.parent_tag = 1;
5088
5089 return (0);
5090 }
5091
5092 static void
5093 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5094 {
5095 if (ha->parent_tag != NULL) {
5096 bus_dma_tag_destroy(ha->parent_tag);
5097 ha->parent_tag = NULL;
5098 }
5099 return;
5100 }
5101
5102 static int
5103 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5104 {
5105 if (bus_dma_tag_create(NULL, /* parent */
5106 1, 0, /* alignment, bounds */
5107 BUS_SPACE_MAXADDR, /* lowaddr */
5108 BUS_SPACE_MAXADDR, /* highaddr */
5109 NULL, NULL, /* filter, filterarg */
5110 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5111 QLNX_MAX_SEGMENTS, /* nsegments */
5112 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5113 0, /* flags */
5114 NULL, /* lockfunc */
5115 NULL, /* lockfuncarg */
5116 &ha->tx_tag)) {
5117 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5118 return (-1);
5119 }
5120
5121 return (0);
5122 }
5123
5124 static void
5125 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5126 {
5127 if (ha->tx_tag != NULL) {
5128 bus_dma_tag_destroy(ha->tx_tag);
5129 ha->tx_tag = NULL;
5130 }
5131 return;
5132 }
5133
5134 static int
5135 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5136 {
5137 if (bus_dma_tag_create(NULL, /* parent */
5138 1, 0, /* alignment, bounds */
5139 BUS_SPACE_MAXADDR, /* lowaddr */
5140 BUS_SPACE_MAXADDR, /* highaddr */
5141 NULL, NULL, /* filter, filterarg */
5142 MJUM9BYTES, /* maxsize */
5143 1, /* nsegments */
5144 MJUM9BYTES, /* maxsegsize */
5145 0, /* flags */
5146 NULL, /* lockfunc */
5147 NULL, /* lockfuncarg */
5148 &ha->rx_tag)) {
5149 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5150
5151 return (-1);
5152 }
5153 return (0);
5154 }
5155
5156 static void
5157 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5158 {
5159 if (ha->rx_tag != NULL) {
5160 bus_dma_tag_destroy(ha->rx_tag);
5161 ha->rx_tag = NULL;
5162 }
5163 return;
5164 }
5165
5166 /*********************************
5167 * Exported functions
5168 *********************************/
5169 uint32_t
5170 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5171 {
5172 uint32_t bar_size;
5173
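	/*
	 * The doubling below assumes each memory BAR is 64-bit and thus
	 * occupies two 32-bit BAR registers in config space (inferred from
	 * the PCIR_BAR() indexing, not from the datasheet).
	 */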
5174 bar_id = bar_id * 2;
5175
5176 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5177 SYS_RES_MEMORY,
5178 PCIR_BAR(bar_id));
5179
5180 return (bar_size);
5181 }
5182
5183 uint32_t
5184 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5185 {
5186 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5187 pci_reg, 1);
5188 return 0;
5189 }
5190
5191 uint32_t
5192 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5193 uint16_t *reg_value)
5194 {
5195 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5196 pci_reg, 2);
5197 return 0;
5198 }
5199
5200 uint32_t
5201 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5202 uint32_t *reg_value)
5203 {
5204 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5205 pci_reg, 4);
5206 return 0;
5207 }
5208
5209 void
5210 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5211 {
5212 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5213 pci_reg, reg_value, 1);
5214 return;
5215 }
5216
5217 void
5218 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5219 uint16_t reg_value)
5220 {
5221 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5222 pci_reg, reg_value, 2);
5223 return;
5224 }
5225
5226 void
5227 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5228 uint32_t reg_value)
5229 {
5230 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5231 pci_reg, reg_value, 4);
5232 return;
5233 }
5234
5235 int
5236 qlnx_pci_find_capability(void *ecore_dev, int cap)
5237 {
5238 int reg;
5239 qlnx_host_t *ha;
5240
5241 ha = ecore_dev;
5242
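	/* note: the 'cap' argument is ignored; the lookup below is hard-wired
	 * to PCIY_EXPRESS */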
5243 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5244 return reg;
5245 else {
5246 QL_DPRINT1(ha, "failed\n");
5247 return 0;
5248 }
5249 }
5250
5251 int
5252 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5253 {
5254 int reg;
5255 qlnx_host_t *ha;
5256
5257 ha = ecore_dev;
5258
5259 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5260 return reg;
5261 else {
5262 QL_DPRINT1(ha, "failed\n");
5263 return 0;
5264 }
5265 }
5266
5267 uint32_t
5268 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5269 {
5270 uint32_t data32;
5271 struct ecore_hwfn *p_hwfn;
5272
5273 p_hwfn = hwfn;
5274
5275 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5276 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5277
5278 return (data32);
5279 }
5280
5281 void
5282 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5283 {
5284 struct ecore_hwfn *p_hwfn = hwfn;
5285
5286 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5287 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5288
5289 return;
5290 }
5291
5292 void
5293 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5294 {
5295 struct ecore_hwfn *p_hwfn = hwfn;
5296
5297 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5298 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5299 return;
5300 }
5301
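/*
 * Doorbell writes go through the separate doorbell BAR (pci_dbells); the
 * pointer handed in by ecore is translated back into an offset from the
 * hwfn's doorbell window before the bus write.
 */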
5302 void
5303 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5304 {
5305 struct ecore_dev *cdev;
5306 struct ecore_hwfn *p_hwfn;
5307 uint32_t offset;
5308
5309 p_hwfn = hwfn;
5310
5311 cdev = p_hwfn->p_dev;
5312
5313 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5314 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5315
5316 return;
5317 }
5318
5319 void
5320 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5321 {
5322 struct ecore_hwfn *p_hwfn = hwfn;
5323
5324 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5325 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5326
5327 return;
5328 }
5329
5330 uint32_t
5331 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5332 {
5333 uint32_t data32;
5334 bus_size_t offset;
5335 struct ecore_dev *cdev;
5336
5337 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5338 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5339
5340 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5341
5342 return (data32);
5343 }
5344
5345 void
5346 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5347 {
5348 bus_size_t offset;
5349 struct ecore_dev *cdev;
5350
5351 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5352 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5353
5354 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5355
5356 return;
5357 }
5358
5359 void
5360 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5361 {
5362 bus_size_t offset;
5363 struct ecore_dev *cdev;
5364
5365 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5366 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5367
5368 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5369 return;
5370 }
5371
5372 void *
5373 qlnx_zalloc(uint32_t size)
5374 {
5375 caddr_t va;
5376
5377 	va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
5378 	/* M_ZERO zeroes the buffer in the allocator; a failed M_NOWAIT allocation simply returns NULL */
5379 return ((void *)va);
5380 }
5381
5382 void
5383 qlnx_barrier(void *p_dev)
5384 {
5385 qlnx_host_t *ha;
5386
5387 ha = ((struct ecore_dev *) p_dev)->ha;
5388 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5389 }
5390
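/*
 * Called back from ecore when the management firmware reports a link
 * change: refresh the cached link state, notify the stack on an up/down
 * transition, and, for PFs with SR-IOV initialized, forward the new
 * state to the VFs.
 */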
5391 void
5392 qlnx_link_update(void *p_hwfn)
5393 {
5394 qlnx_host_t *ha;
5395 int prev_link_state;
5396
5397 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5398
5399 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5400
5401 prev_link_state = ha->link_up;
5402 ha->link_up = ha->if_link.link_up;
5403
5404 if (prev_link_state != ha->link_up) {
5405 if (ha->link_up) {
5406 if_link_state_change(ha->ifp, LINK_STATE_UP);
5407 } else {
5408 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5409 }
5410 }
5411 #ifndef QLNX_VF
5412 #ifdef CONFIG_ECORE_SRIOV
5413
5414 if (qlnx_vf_device(ha) != 0) {
5415 if (ha->sriov_initialized)
5416 qlnx_inform_vf_link_state(p_hwfn, ha);
5417 }
5418
5419 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5420 #endif /* #ifdef QLNX_VF */
5421
5422 return;
5423 }
5424
5425 static void
5426 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5427 struct ecore_vf_acquire_sw_info *p_sw_info)
5428 {
5429 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5430 (QLNX_VERSION_MINOR << 16) |
5431 QLNX_VERSION_BUILD;
5432 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5433
5434 return;
5435 }
5436
5437 void
5438 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5439 void *p_sw_info)
5440 {
5441 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5442
5443 return;
5444 }
5445
5446 void
5447 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5448 struct qlnx_link_output *if_link)
5449 {
5450 struct ecore_mcp_link_params link_params;
5451 struct ecore_mcp_link_state link_state;
5452 uint8_t p_change;
5453 struct ecore_ptt *p_ptt = NULL;
5454
5455 memset(if_link, 0, sizeof(*if_link));
5456 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5457 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5458
5459 ha = (qlnx_host_t *)hwfn->p_dev;
5460
5461 /* Prepare source inputs */
5462 	/* PFs query the MFW directly; VFs fall back to the bulletin board */
5463 if (qlnx_vf_device(ha) != 0) {
5464 p_ptt = ecore_ptt_acquire(hwfn);
5465
5466 if (p_ptt == NULL) {
5467 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5468 return;
5469 }
5470
5471 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5472 ecore_ptt_release(hwfn, p_ptt);
5473
5474 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5475 sizeof(link_params));
5476 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5477 sizeof(link_state));
5478 } else {
5479 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5480 ecore_vf_read_bulletin(hwfn, &p_change);
5481 ecore_vf_get_link_params(hwfn, &link_params);
5482 ecore_vf_get_link_state(hwfn, &link_state);
5483 }
5484
5485 /* Set the link parameters to pass to protocol driver */
5486 if (link_state.link_up) {
5487 if_link->link_up = true;
5488 if_link->speed = link_state.speed;
5489 }
5490
5491 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5492
5493 if (link_params.speed.autoneg)
5494 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5495
5496 if (link_params.pause.autoneg ||
5497 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5498 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5499
5500 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5501 link_params.pause.forced_tx)
5502 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5503
5504 if (link_params.speed.advertised_speeds &
5505 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5506 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5507 QLNX_LINK_CAP_1000baseT_Full;
5508
5509 if (link_params.speed.advertised_speeds &
5510 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5511 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5512
5513 if (link_params.speed.advertised_speeds &
5514 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5515 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5516
5517 if (link_params.speed.advertised_speeds &
5518 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5519 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5520
5521 if (link_params.speed.advertised_speeds &
5522 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5523 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5524
5525 if (link_params.speed.advertised_speeds &
5526 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5527 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5528
5529 if_link->advertised_caps = if_link->supported_caps;
5530
5531 if_link->autoneg = link_params.speed.autoneg;
5532 if_link->duplex = QLNX_LINK_DUPLEX;
5533
5534 /* Link partner capabilities */
5535
5536 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5537 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5538
5539 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5540 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5541
5542 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5543 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5544
5545 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5546 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5547
5548 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5549 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5550
5551 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5552 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5553
5554 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5555 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5556
5557 if (link_state.an_complete)
5558 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5559
5560 if (link_state.partner_adv_pause)
5561 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5562
5563 if ((link_state.partner_adv_pause ==
5564 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5565 (link_state.partner_adv_pause ==
5566 ECORE_LINK_PARTNER_BOTH_PAUSE))
5567 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5568
5569 return;
5570 }
5571
5572 void
5573 qlnx_schedule_recovery(void *p_hwfn)
5574 {
5575 qlnx_host_t *ha;
5576
5577 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5578
5579 if (qlnx_vf_device(ha) != 0) {
5580 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5581 }
5582
5583 return;
5584 }
5585
5586 static int
5587 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5588 {
5589 int rc, i;
5590
5591 for (i = 0; i < cdev->num_hwfns; i++) {
5592 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5593 p_hwfn->pf_params = *func_params;
5594
5595 #ifdef QLNX_ENABLE_IWARP
5596 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5597 p_hwfn->using_ll2 = true;
5598 }
5599 #endif /* #ifdef QLNX_ENABLE_IWARP */
5600 }
5601
5602 rc = ecore_resc_alloc(cdev);
5603 if (rc)
5604 goto qlnx_nic_setup_exit;
5605
5606 ecore_resc_setup(cdev);
5607
5608 qlnx_nic_setup_exit:
5609
5610 return rc;
5611 }
5612
5613 static int
5614 qlnx_nic_start(struct ecore_dev *cdev)
5615 {
5616 int rc;
5617 struct ecore_hw_init_params params;
5618
5619 	bzero(&params, sizeof (struct ecore_hw_init_params));
5620
5621 params.p_tunn = NULL;
5622 params.b_hw_start = true;
5623 params.int_mode = cdev->int_mode;
5624 params.allow_npar_tx_switch = true;
5625 params.bin_fw_data = NULL;
5626
5627 	rc = ecore_hw_init(cdev, &params);
5628 if (rc) {
5629 ecore_resc_free(cdev);
5630 return rc;
5631 }
5632
5633 return 0;
5634 }
5635
5636 static int
5637 qlnx_slowpath_start(qlnx_host_t *ha)
5638 {
5639 struct ecore_dev *cdev;
5640 struct ecore_pf_params pf_params;
5641 int rc;
5642
5643 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5644 pf_params.eth_pf_params.num_cons =
5645 (ha->num_rss) * (ha->num_tc + 1);
5646
5647 #ifdef QLNX_ENABLE_IWARP
5648 if (qlnx_vf_device(ha) != 0) {
5649 if(ha->personality == ECORE_PCI_ETH_IWARP) {
5650 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5651 pf_params.rdma_pf_params.num_qps = 1024;
5652 pf_params.rdma_pf_params.num_srqs = 1024;
5653 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5654 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5655 } else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5656 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5657 pf_params.rdma_pf_params.num_qps = 8192;
5658 pf_params.rdma_pf_params.num_srqs = 8192;
5659 //pf_params.rdma_pf_params.min_dpis = 0;
5660 pf_params.rdma_pf_params.min_dpis = 8;
5661 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5662 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5663 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5664 }
5665 }
5666 #endif /* #ifdef QLNX_ENABLE_IWARP */
5667
5668 cdev = &ha->cdev;
5669
5670 rc = qlnx_nic_setup(cdev, &pf_params);
5671 if (rc)
5672 goto qlnx_slowpath_start_exit;
5673
5674 cdev->int_mode = ECORE_INT_MODE_MSIX;
5675 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5676
5677 #ifdef QLNX_MAX_COALESCE
5678 cdev->rx_coalesce_usecs = 255;
5679 cdev->tx_coalesce_usecs = 255;
5680 #endif
5681
5682 rc = qlnx_nic_start(cdev);
5683
5684 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5685 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5686
5687 #ifdef QLNX_USER_LLDP
5688 (void)qlnx_set_lldp_tlvx(ha, NULL);
5689 #endif /* #ifdef QLNX_USER_LLDP */
5690
5691 qlnx_slowpath_start_exit:
5692
5693 return (rc);
5694 }
5695
5696 static int
5697 qlnx_slowpath_stop(qlnx_host_t *ha)
5698 {
5699 struct ecore_dev *cdev;
5700 device_t dev = ha->pci_dev;
5701 int i;
5702
5703 cdev = &ha->cdev;
5704
5705 ecore_hw_stop(cdev);
5706
5707 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5708 if (ha->sp_handle[i])
5709 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5710 ha->sp_handle[i]);
5711
5712 ha->sp_handle[i] = NULL;
5713
5714 if (ha->sp_irq[i])
5715 (void) bus_release_resource(dev, SYS_RES_IRQ,
5716 ha->sp_irq_rid[i], ha->sp_irq[i]);
5717 ha->sp_irq[i] = NULL;
5718 }
5719
5720 ecore_resc_free(cdev);
5721
5722 return 0;
5723 }
5724
5725 static void
5726 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5727 char ver_str[VER_SIZE])
5728 {
5729 int i;
5730
5731 memcpy(cdev->name, name, NAME_SIZE);
5732
5733 for_each_hwfn(cdev, i) {
5734 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5735 }
5736
5737 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5738
5739 return ;
5740 }
5741
5742 void
5743 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5744 {
5745 enum ecore_mcp_protocol_type type;
5746 union ecore_mcp_protocol_stats *stats;
5747 struct ecore_eth_stats eth_stats;
5748 qlnx_host_t *ha;
5749
5750 ha = cdev;
5751 stats = proto_stats;
5752 type = proto_type;
5753
5754 switch (type) {
5755 case ECORE_MCP_LAN_STATS:
5756 		ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5757 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5758 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5759 stats->lan_stats.fcs_err = -1;
5760 break;
5761
5762 default:
5763 ha->err_get_proto_invalid_type++;
5764
5765 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5766 break;
5767 }
5768 return;
5769 }
5770
5771 static int
5772 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5773 {
5774 struct ecore_hwfn *p_hwfn;
5775 struct ecore_ptt *p_ptt;
5776
5777 p_hwfn = &ha->cdev.hwfns[0];
5778 p_ptt = ecore_ptt_acquire(p_hwfn);
5779
5780 if (p_ptt == NULL) {
5781 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5782 return (-1);
5783 }
5784 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5785
5786 ecore_ptt_release(p_hwfn, p_ptt);
5787
5788 return (0);
5789 }
5790
5791 static int
5792 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5793 {
5794 struct ecore_hwfn *p_hwfn;
5795 struct ecore_ptt *p_ptt;
5796
5797 p_hwfn = &ha->cdev.hwfns[0];
5798 p_ptt = ecore_ptt_acquire(p_hwfn);
5799
5800 if (p_ptt == NULL) {
5801 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5802 return (-1);
5803 }
5804 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5805
5806 ecore_ptt_release(p_hwfn, p_ptt);
5807
5808 return (0);
5809 }
5810
5811 static int
5812 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5813 {
5814 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5815 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5816 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5817
5818 return 0;
5819 }
5820
5821 static void
5822 qlnx_init_fp(qlnx_host_t *ha)
5823 {
5824 int rss_id, txq_array_index, tc;
5825
5826 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5827 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5828
5829 fp->rss_id = rss_id;
5830 fp->edev = ha;
5831 fp->sb_info = &ha->sb_array[rss_id];
5832 fp->rxq = &ha->rxq_array[rss_id];
5833 fp->rxq->rxq_id = rss_id;
5834
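		/*
		 * txq_array is laid out as num_tc consecutive blocks of
		 * num_rss queues, so queue (tc, rss_id) lives at index
		 * tc * num_rss + rss_id (e.g. num_rss = 4: tc 1, rss 2 -> 6).
		 */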
5835 for (tc = 0; tc < ha->num_tc; tc++) {
5836 txq_array_index = tc * ha->num_rss + rss_id;
5837 fp->txq[tc] = &ha->txq_array[txq_array_index];
5838 fp->txq[tc]->index = txq_array_index;
5839 }
5840
5841 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5842 rss_id);
5843
5844 fp->tx_ring_full = 0;
5845
5846 /* reset all the statistics counters */
5847
5848 fp->tx_pkts_processed = 0;
5849 fp->tx_pkts_freed = 0;
5850 fp->tx_pkts_transmitted = 0;
5851 fp->tx_pkts_completed = 0;
5852
5853 #ifdef QLNX_TRACE_PERF_DATA
5854 fp->tx_pkts_trans_ctx = 0;
5855 fp->tx_pkts_compl_ctx = 0;
5856 fp->tx_pkts_trans_fp = 0;
5857 fp->tx_pkts_compl_fp = 0;
5858 fp->tx_pkts_compl_intr = 0;
5859 #endif
5860 fp->tx_lso_wnd_min_len = 0;
5861 fp->tx_defrag = 0;
5862 fp->tx_nsegs_gt_elem_left = 0;
5863 fp->tx_tso_max_nsegs = 0;
5864 fp->tx_tso_min_nsegs = 0;
5865 fp->err_tx_nsegs_gt_elem_left = 0;
5866 fp->err_tx_dmamap_create = 0;
5867 fp->err_tx_defrag_dmamap_load = 0;
5868 fp->err_tx_non_tso_max_seg = 0;
5869 fp->err_tx_dmamap_load = 0;
5870 fp->err_tx_defrag = 0;
5871 fp->err_tx_free_pkt_null = 0;
5872 fp->err_tx_cons_idx_conflict = 0;
5873
5874 fp->rx_pkts = 0;
5875 fp->err_m_getcl = 0;
5876 fp->err_m_getjcl = 0;
5877 }
5878 return;
5879 }
5880
5881 void
5882 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5883 {
5884 struct ecore_dev *cdev;
5885
5886 cdev = &ha->cdev;
5887
5888 if (sb_info->sb_virt) {
5889 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5890 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5891 sb_info->sb_virt = NULL;
5892 }
5893 }
5894
5895 static int
5896 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5897 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5898 {
5899 struct ecore_hwfn *p_hwfn;
5900 int hwfn_index, rc;
5901 u16 rel_sb_id;
5902
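	/*
	 * Status blocks are striped across the hw-functions of a multi-
	 * function device: sb_id % num_hwfns selects the owning hwfn and
	 * sb_id / num_hwfns is the index relative to that hwfn (e.g. with
	 * 2 hwfns, sb_id 5 -> hwfn 1, rel_sb_id 2).
	 */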
5903 hwfn_index = sb_id % cdev->num_hwfns;
5904 p_hwfn = &cdev->hwfns[hwfn_index];
5905 rel_sb_id = sb_id / cdev->num_hwfns;
5906
5907 QL_DPRINT2(((qlnx_host_t *)cdev),
5908 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5909 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5910 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5911 sb_virt_addr, (void *)sb_phy_addr);
5912
5913 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5914 sb_virt_addr, sb_phy_addr, rel_sb_id);
5915
5916 return rc;
5917 }
5918
5919 /* This function allocates fast-path status block memory */
5920 int
5921 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5922 {
5923 struct status_block_e4 *sb_virt;
5924 bus_addr_t sb_phys;
5925 int rc;
5926 uint32_t size;
5927 struct ecore_dev *cdev;
5928
5929 cdev = &ha->cdev;
5930
5931 size = sizeof(*sb_virt);
5932 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5933
5934 if (!sb_virt) {
5935 QL_DPRINT1(ha, "Status block allocation failed\n");
5936 return -ENOMEM;
5937 }
5938
5939 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5940 if (rc) {
5941 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5942 }
5943
5944 return rc;
5945 }
5946
5947 static void
5948 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5949 {
5950 int i;
5951 struct sw_rx_data *rx_buf;
5952
5953 for (i = 0; i < rxq->num_rx_buffers; i++) {
5954 rx_buf = &rxq->sw_rx_ring[i];
5955
5956 if (rx_buf->data != NULL) {
5957 if (rx_buf->map != NULL) {
5958 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5959 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5960 rx_buf->map = NULL;
5961 }
5962 m_freem(rx_buf->data);
5963 rx_buf->data = NULL;
5964 }
5965 }
5966 return;
5967 }
5968
5969 static void
5970 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5971 {
5972 struct ecore_dev *cdev;
5973 int i;
5974
5975 cdev = &ha->cdev;
5976
5977 qlnx_free_rx_buffers(ha, rxq);
5978
5979 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5980 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5981 if (rxq->tpa_info[i].mpf != NULL)
5982 m_freem(rxq->tpa_info[i].mpf);
5983 }
5984
5985 bzero((void *)&rxq->sw_rx_ring[0],
5986 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5987
5988 /* Free the real RQ ring used by FW */
5989 if (rxq->rx_bd_ring.p_virt_addr) {
5990 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5991 rxq->rx_bd_ring.p_virt_addr = NULL;
5992 }
5993
5994 /* Free the real completion ring used by FW */
5995 if (rxq->rx_comp_ring.p_virt_addr &&
5996 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5997 ecore_chain_free(cdev, &rxq->rx_comp_ring);
5998 rxq->rx_comp_ring.p_virt_addr = NULL;
5999 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6000 }
6001
6002 #ifdef QLNX_SOFT_LRO
6003 {
6004 struct lro_ctrl *lro;
6005
6006 lro = &rxq->lro;
6007 tcp_lro_free(lro);
6008 }
6009 #endif /* #ifdef QLNX_SOFT_LRO */
6010
6011 return;
6012 }
6013
6014 static int
6015 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6016 {
6017 register struct mbuf *mp;
6018 uint16_t rx_buf_size;
6019 struct sw_rx_data *sw_rx_data;
6020 struct eth_rx_bd *rx_bd;
6021 dma_addr_t dma_addr;
6022 bus_dmamap_t map;
6023 bus_dma_segment_t segs[1];
6024 int nsegs;
6025 int ret;
6026
6027 rx_buf_size = rxq->rx_buf_size;
6028
6029 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6030
6031 if (mp == NULL) {
6032 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6033 return -ENOMEM;
6034 }
6035
6036 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6037
6038 map = (bus_dmamap_t)0;
6039
6040 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6041 BUS_DMA_NOWAIT);
6042 dma_addr = segs[0].ds_addr;
6043
6044 if (ret || !dma_addr || (nsegs != 1)) {
6045 m_freem(mp);
6046 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6047 ret, (long long unsigned int)dma_addr, nsegs);
6048 return -ENOMEM;
6049 }
6050
6051 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6052 sw_rx_data->data = mp;
6053 sw_rx_data->dma_addr = dma_addr;
6054 sw_rx_data->map = map;
6055
6056 /* Advance PROD and get BD pointer */
6057 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6058 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6059 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6060 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6061
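/* The wrap-around mask below assumes RX_RING_SIZE is a power of two. */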
6062 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6063
6064 return 0;
6065 }
6066
6067 static int
6068 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6069 struct qlnx_agg_info *tpa)
6070 {
6071 struct mbuf *mp;
6072 dma_addr_t dma_addr;
6073 bus_dmamap_t map;
6074 bus_dma_segment_t segs[1];
6075 int nsegs;
6076 int ret;
6077 struct sw_rx_data *rx_buf;
6078
6079 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6080
6081 if (mp == NULL) {
6082 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6083 return -ENOMEM;
6084 }
6085
6086 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6087
6088 map = (bus_dmamap_t)0;
6089
6090 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6091 BUS_DMA_NOWAIT);
6092 dma_addr = segs[0].ds_addr;
6093
6094 if (ret || !dma_addr || (nsegs != 1)) {
6095 m_freem(mp);
6096 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6097 ret, (long long unsigned int)dma_addr, nsegs);
6098 return -ENOMEM;
6099 }
6100
6101 rx_buf = &tpa->rx_buf;
6102
6103 memset(rx_buf, 0, sizeof (struct sw_rx_data));
6104
6105 rx_buf->data = mp;
6106 rx_buf->dma_addr = dma_addr;
6107 rx_buf->map = map;
6108
6109 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6110
6111 return (0);
6112 }
6113
6114 static void
6115 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6116 {
6117 struct sw_rx_data *rx_buf;
6118
6119 rx_buf = &tpa->rx_buf;
6120
6121 if (rx_buf->data != NULL) {
6122 if (rx_buf->map != NULL) {
6123 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6124 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6125 rx_buf->map = NULL;
6126 }
6127 m_freem(rx_buf->data);
6128 rx_buf->data = NULL;
6129 }
6130 return;
6131 }
6132
6133 /* This function allocates all memory needed per Rx queue */
6134 static int
6135 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6136 {
6137 int i, rc, num_allocated;
6138 struct ecore_dev *cdev;
6139
6140 cdev = &ha->cdev;
6141
6142 rxq->num_rx_buffers = RX_RING_SIZE;
6143
6144 rxq->rx_buf_size = ha->rx_buf_size;
6145
6146 /* Allocate the parallel driver ring for Rx buffers */
6147 bzero((void *)&rxq->sw_rx_ring[0],
6148 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6149
6150 /* Allocate FW Rx ring */
6151
6152 rc = ecore_chain_alloc(cdev,
6153 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6154 ECORE_CHAIN_MODE_NEXT_PTR,
6155 ECORE_CHAIN_CNT_TYPE_U16,
6156 RX_RING_SIZE,
6157 sizeof(struct eth_rx_bd),
6158 &rxq->rx_bd_ring, NULL);
6159
6160 if (rc)
6161 goto err;
6162
6163 /* Allocate FW completion ring */
6164 rc = ecore_chain_alloc(cdev,
6165 ECORE_CHAIN_USE_TO_CONSUME,
6166 ECORE_CHAIN_MODE_PBL,
6167 ECORE_CHAIN_CNT_TYPE_U16,
6168 RX_RING_SIZE,
6169 sizeof(union eth_rx_cqe),
6170 &rxq->rx_comp_ring, NULL);
6171
6172 if (rc)
6173 goto err;
6174
6175 /* Allocate the TPA aggregation buffers, then the Rx ring buffers */
6176
6177 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6178 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6179 &rxq->tpa_info[i]);
6180 if (rc)
6181 break;
6182 }
6183
6184 for (i = 0; i < rxq->num_rx_buffers; i++) {
6185 rc = qlnx_alloc_rx_buffer(ha, rxq);
6186 if (rc)
6187 break;
6188 }
6189 num_allocated = i;
6190 if (!num_allocated) {
6191 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6192 goto err;
6193 } else if (num_allocated < rxq->num_rx_buffers) {
6194 QL_DPRINT1(ha, "Allocated less buffers than"
6195 " desired (%d allocated)\n", num_allocated);
6196 }
6197
6198 #ifdef QLNX_SOFT_LRO
6199
6200 {
6201 struct lro_ctrl *lro;
6202
6203 lro = &rxq->lro;
6204
6205 if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
6206 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6207 rxq->rxq_id);
6208 goto err;
6209 }
6210
6211 lro->ifp = ha->ifp;
6212 }
6213 #endif /* #ifdef QLNX_SOFT_LRO */
6214 return 0;
6215
6216 err:
6217 qlnx_free_mem_rxq(ha, rxq);
6218 return -ENOMEM;
6219 }
6220
6221 static void
6222 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6223 struct qlnx_tx_queue *txq)
6224 {
6225 struct ecore_dev *cdev;
6226
6227 cdev = &ha->cdev;
6228
6229 bzero((void *)&txq->sw_tx_ring[0],
6230 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6231
6232 /* Free the real Tx ring used by FW */
6233 if (txq->tx_pbl.p_virt_addr) {
6234 ecore_chain_free(cdev, &txq->tx_pbl);
6235 txq->tx_pbl.p_virt_addr = NULL;
6236 }
6237 return;
6238 }
6239
6240 /* This function allocates all memory needed per Tx queue */
6241 static int
6242 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6243 struct qlnx_tx_queue *txq)
6244 {
6245 int ret = ECORE_SUCCESS;
6246 union eth_tx_bd_types *p_virt;
6247 struct ecore_dev *cdev;
6248
6249 cdev = &ha->cdev;
6250
6251 bzero((void *)&txq->sw_tx_ring[0],
6252 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6253
6254 /* Allocate the real Tx ring to be used by FW */
6255 ret = ecore_chain_alloc(cdev,
6256 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6257 ECORE_CHAIN_MODE_PBL,
6258 ECORE_CHAIN_CNT_TYPE_U16,
6259 TX_RING_SIZE,
6260 sizeof(*p_virt),
6261 &txq->tx_pbl, NULL);
6262
6263 if (ret != ECORE_SUCCESS) {
6264 goto err;
6265 }
6266
6267 txq->num_tx_buffers = TX_RING_SIZE;
6268
6269 return 0;
6270
6271 err:
6272 qlnx_free_mem_txq(ha, fp, txq);
6273 return -ENOMEM;
6274 }
6275
6276 static void
6277 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6278 {
6279 struct mbuf *mp;
6280 if_t ifp = ha->ifp;
6281
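/*
 * If the ring was set up, drain any mbufs still queued on it under the
 * Tx lock before the ring and the lock itself are destroyed.
 */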
6282 if (mtx_initialized(&fp->tx_mtx)) {
6283 if (fp->tx_br != NULL) {
6284 mtx_lock(&fp->tx_mtx);
6285
6286 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6287 fp->tx_pkts_freed++;
6288 m_freem(mp);
6289 }
6290
6291 mtx_unlock(&fp->tx_mtx);
6292
6293 buf_ring_free(fp->tx_br, M_DEVBUF);
6294 fp->tx_br = NULL;
6295 }
6296 mtx_destroy(&fp->tx_mtx);
6297 }
6298 return;
6299 }
6300
6301 static void
6302 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6303 {
6304 int tc;
6305
6306 qlnx_free_mem_sb(ha, fp->sb_info);
6307
6308 qlnx_free_mem_rxq(ha, fp->rxq);
6309
6310 for (tc = 0; tc < ha->num_tc; tc++)
6311 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6312
6313 return;
6314 }
6315
6316 static int
6317 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6318 {
6319 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6320 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6321
6322 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6323
6324 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6325 M_NOWAIT, &fp->tx_mtx);
6326 if (fp->tx_br == NULL) {
6327 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6328 ha->dev_unit, fp->rss_id);
6329 return -ENOMEM;
6330 }
6331 return 0;
6332 }
6333
6334 static int
6335 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6336 {
6337 int rc, tc;
6338
6339 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6340 if (rc)
6341 goto err;
6342
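/*
 * Pick the smallest mbuf cluster size that holds a full frame. With
 * rx_jumbo_buf_eq_mtu set, 9K/16K clusters may be used; otherwise Rx
 * buffers are capped at page-sized jumbo clusters.
 */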
6343 if (ha->rx_jumbo_buf_eq_mtu) {
6344 if (ha->max_frame_size <= MCLBYTES)
6345 ha->rx_buf_size = MCLBYTES;
6346 else if (ha->max_frame_size <= MJUMPAGESIZE)
6347 ha->rx_buf_size = MJUMPAGESIZE;
6348 else if (ha->max_frame_size <= MJUM9BYTES)
6349 ha->rx_buf_size = MJUM9BYTES;
6350 else if (ha->max_frame_size <= MJUM16BYTES)
6351 ha->rx_buf_size = MJUM16BYTES;
6352 } else {
6353 if (ha->max_frame_size <= MCLBYTES)
6354 ha->rx_buf_size = MCLBYTES;
6355 else
6356 ha->rx_buf_size = MJUMPAGESIZE;
6357 }
6358
6359 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6360 if (rc)
6361 goto err;
6362
6363 for (tc = 0; tc < ha->num_tc; tc++) {
6364 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6365 if (rc)
6366 goto err;
6367 }
6368
6369 return 0;
6370
6371 err:
6372 qlnx_free_mem_fp(ha, fp);
6373 return -ENOMEM;
6374 }
6375
6376 static void
6377 qlnx_free_mem_load(qlnx_host_t *ha)
6378 {
6379 int i;
6380
6381 for (i = 0; i < ha->num_rss; i++) {
6382 struct qlnx_fastpath *fp = &ha->fp_array[i];
6383
6384 qlnx_free_mem_fp(ha, fp);
6385 }
6386 return;
6387 }
6388
6389 static int
6390 qlnx_alloc_mem_load(qlnx_host_t *ha)
6391 {
6392 int rc = 0, rss_id;
6393
6394 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6395 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6396
6397 rc = qlnx_alloc_mem_fp(ha, fp);
6398 if (rc)
6399 break;
6400 }
6401 return (rc);
6402 }
6403
6404 static int
6405 qlnx_start_vport(struct ecore_dev *cdev,
6406 u8 vport_id,
6407 u16 mtu,
6408 u8 drop_ttl0_flg,
6409 u8 inner_vlan_removal_en_flg,
6410 u8 tx_switching,
6411 u8 hw_lro_enable)
6412 {
6413 int rc, i;
6414 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6415 qlnx_host_t *ha __unused;
6416
6417 ha = (qlnx_host_t *)cdev;
6418
6419 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6420 vport_start_params.tx_switching = 0;
6421 vport_start_params.handle_ptp_pkts = 0;
6422 vport_start_params.only_untagged = 0;
6423 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6424
6425 vport_start_params.tpa_mode =
6426 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6427 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6428
6429 vport_start_params.vport_id = vport_id;
6430 vport_start_params.mtu = mtu;
6431
6432 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6433
6434 for_each_hwfn(cdev, i) {
6435 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6436
6437 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6438 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6439
6440 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6441
6442 if (rc) {
6443 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6444 " with MTU %d\n" , vport_id, mtu);
6445 return -ENOMEM;
6446 }
6447
6448 ecore_hw_start_fastpath(p_hwfn);
6449
6450 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6451 vport_id, mtu);
6452 }
6453 return 0;
6454 }
6455
6456 static int
6457 qlnx_update_vport(struct ecore_dev *cdev,
6458 struct qlnx_update_vport_params *params)
6459 {
6460 struct ecore_sp_vport_update_params sp_params;
6461 int rc, i, j, fp_index;
6462 struct ecore_hwfn *p_hwfn;
6463 struct ecore_rss_params *rss;
6464 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6465 struct qlnx_fastpath *fp;
6466
6467 memset(&sp_params, 0, sizeof(sp_params));
6468 /* Translate protocol params into sp params */
6469 sp_params.vport_id = params->vport_id;
6470
6471 sp_params.update_vport_active_rx_flg =
6472 params->update_vport_active_rx_flg;
6473 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6474
6475 sp_params.update_vport_active_tx_flg =
6476 params->update_vport_active_tx_flg;
6477 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6478
6479 sp_params.update_inner_vlan_removal_flg =
6480 params->update_inner_vlan_removal_flg;
6481 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6482
6483 sp_params.sge_tpa_params = params->sge_tpa_params;
6484
6485 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6486 * We need to re-fix the rss values per engine for CMT.
6487 */
6488 if (params->rss_params->update_rss_config)
6489 sp_params.rss_params = params->rss_params;
6490 else
6491 sp_params.rss_params = NULL;
6492
6493 for_each_hwfn(cdev, i) {
6494 p_hwfn = &cdev->hwfns[i];
6495
6496 if ((cdev->num_hwfns > 1) &&
6497 params->rss_params->update_rss_config &&
6498 params->rss_params->rss_enable) {
6499 rss = params->rss_params;
6500
6501 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6502 fp_index = ((cdev->num_hwfns * j) + i) %
6503 ha->num_rss;
6504
6505 fp = &ha->fp_array[fp_index];
6506 rss->rss_ind_table[j] = fp->rxq->handle;
6507 }
6508
6509 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6510 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6511 rss->rss_ind_table[j],
6512 rss->rss_ind_table[j+1],
6513 rss->rss_ind_table[j+2],
6514 rss->rss_ind_table[j+3],
6515 rss->rss_ind_table[j+4],
6516 rss->rss_ind_table[j+5],
6517 rss->rss_ind_table[j+6],
6518 rss->rss_ind_table[j+7]);
6519 j += 8;
6520 }
6521 }
6522
6523 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6524
6525 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6526
6527 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6528 ECORE_SPQ_MODE_EBLOCK, NULL);
6529 if (rc) {
6530 QL_DPRINT1(ha, "Failed to update VPORT\n");
6531 return rc;
6532 }
6533
6534 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6535 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6536 params->vport_id, params->vport_active_tx_flg,
6537 params->vport_active_rx_flg,
6538 params->update_vport_active_tx_flg,
6539 params->update_vport_active_rx_flg);
6540 }
6541
6542 return 0;
6543 }
6544
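/*
 * Recycle the Rx buffer at the consumer index by re-posting it at the
 * producer index, so no fresh allocation is needed (used when a packet
 * is dropped or a replacement mbuf cannot be allocated).
 */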
6545 static void
6546 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6547 {
6548 struct eth_rx_bd *rx_bd_cons =
6549 ecore_chain_consume(&rxq->rx_bd_ring);
6550 struct eth_rx_bd *rx_bd_prod =
6551 ecore_chain_produce(&rxq->rx_bd_ring);
6552 struct sw_rx_data *sw_rx_data_cons =
6553 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6554 struct sw_rx_data *sw_rx_data_prod =
6555 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6556
6557 sw_rx_data_prod->data = sw_rx_data_cons->data;
6558 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6559
6560 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6561 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6562
6563 return;
6564 }
6565
6566 static void
6567 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6568 {
6569
6570 uint16_t bd_prod;
6571 uint16_t cqe_prod;
6572 union {
6573 struct eth_rx_prod_data rx_prod_data;
6574 uint32_t data32;
6575 } rx_prods;
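/* The union lets both 16-bit producers be posted in a single 32-bit write. */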
6576
6577 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6578 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6579
6580 /* Update producers */
6581 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6582 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6583
6584 /* Make sure that the BD and SGE data is updated before updating the
6585 * producers since FW might read the BD/SGE right after the producer
6586 * is updated.
6587 */
6588 wmb();
6589
6590 #ifdef ECORE_CONFIG_DIRECT_HWFN
6591 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6592 sizeof(rx_prods), &rx_prods.data32);
6593 #else
6594 internal_ram_wr(rxq->hw_rxq_prod_addr,
6595 sizeof(rx_prods), &rx_prods.data32);
6596 #endif
6597
6598 /* mmiowb is needed to synchronize doorbell writes from more than one
6599 * processor. It guarantees that the write arrives to the device before
6600 * the napi lock is released and another qlnx_poll is called (possibly
6601 * on another CPU). Without this barrier, the next doorbell can bypass
6602 * this doorbell. This is applicable to IA64/Altix systems.
6603 */
6604 wmb();
6605
6606 return;
6607 }
6608
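/*
 * Default RSS hash key: the standard 40-byte Toeplitz key from the
 * Microsoft RSS specification, packed big-endian into dwords.
 */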
6609 static uint32_t qlnx_hash_key[] = {
6610 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6611 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6612 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6613 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6614 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6615 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6616 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6617 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6618 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6619 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6620
6621 static int
6622 qlnx_start_queues(qlnx_host_t *ha)
6623 {
6624 int rc, tc, i, vport_id = 0,
6625 drop_ttl0_flg = 1, vlan_removal_en = 1,
6626 tx_switching = 0, hw_lro_enable = 0;
6627 struct ecore_dev *cdev = &ha->cdev;
6628 struct ecore_rss_params *rss_params = &ha->rss_params;
6629 struct qlnx_update_vport_params vport_update_params;
6630 if_t ifp;
6631 struct ecore_hwfn *p_hwfn;
6632 struct ecore_sge_tpa_params tpa_params;
6633 struct ecore_queue_start_common_params qparams;
6634 struct qlnx_fastpath *fp;
6635
6636 ifp = ha->ifp;
6637
6638 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6639
6640 if (!ha->num_rss) {
6641 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6642 " are no Rx queues\n");
6643 return -EINVAL;
6644 }
6645
6646 #ifndef QLNX_SOFT_LRO
6647 hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
6648 #endif /* #ifndef QLNX_SOFT_LRO */
6649
6650 rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
6651 vlan_removal_en, tx_switching, hw_lro_enable);
6652
6653 if (rc) {
6654 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6655 return rc;
6656 }
6657
6658 QL_DPRINT2(ha, "Start vport ramrod passed, "
6659 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6660 vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);
6661
6662 for_each_rss(i) {
6663 struct ecore_rxq_start_ret_params rx_ret_params;
6664 struct ecore_txq_start_ret_params tx_ret_params;
6665
6666 fp = &ha->fp_array[i];
6667 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6668
6669 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6670 bzero(&rx_ret_params,
6671 sizeof (struct ecore_rxq_start_ret_params));
6672
6673 qparams.queue_id = i;
6674 qparams.vport_id = vport_id;
6675 qparams.stats_id = vport_id;
6676 qparams.p_sb = fp->sb_info;
6677 qparams.sb_idx = RX_PI;
6678
6679
6680 rc = ecore_eth_rx_queue_start(p_hwfn,
6681 p_hwfn->hw_info.opaque_fid,
6682 &qparams,
6683 fp->rxq->rx_buf_size, /* bd_max_bytes */
6684 /* bd_chain_phys_addr */
6685 fp->rxq->rx_bd_ring.p_phys_addr,
6686 /* cqe_pbl_addr */
6687 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6688 /* cqe_pbl_size */
6689 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6690 &rx_ret_params);
6691
6692 if (rc) {
6693 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6694 return rc;
6695 }
6696
6697 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6698 fp->rxq->handle = rx_ret_params.p_handle;
6699 fp->rxq->hw_cons_ptr =
6700 &fp->sb_info->sb_virt->pi_array[RX_PI];
6701
6702 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6703
6704 for (tc = 0; tc < ha->num_tc; tc++) {
6705 struct qlnx_tx_queue *txq = fp->txq[tc];
6706
6707 bzero(&qparams,
6708 sizeof(struct ecore_queue_start_common_params));
6709 bzero(&tx_ret_params,
6710 sizeof (struct ecore_txq_start_ret_params));
6711
6712 qparams.queue_id = txq->index / cdev->num_hwfns;
6713 qparams.vport_id = vport_id;
6714 qparams.stats_id = vport_id;
6715 qparams.p_sb = fp->sb_info;
6716 qparams.sb_idx = TX_PI(tc);
6717
6718 rc = ecore_eth_tx_queue_start(p_hwfn,
6719 p_hwfn->hw_info.opaque_fid,
6720 &qparams, tc,
6721 /* bd_chain_phys_addr */
6722 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6723 ecore_chain_get_page_cnt(&txq->tx_pbl),
6724 &tx_ret_params);
6725
6726 if (rc) {
6727 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6728 txq->index, rc);
6729 return rc;
6730 }
6731
6732 txq->doorbell_addr = tx_ret_params.p_doorbell;
6733 txq->handle = tx_ret_params.p_handle;
6734
6735 txq->hw_cons_ptr =
6736 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6737 SET_FIELD(txq->tx_db.data.params,
6738 ETH_DB_DATA_DEST, DB_DEST_XCM);
6739 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6740 DB_AGG_CMD_SET);
6741 SET_FIELD(txq->tx_db.data.params,
6742 ETH_DB_DATA_AGG_VAL_SEL,
6743 DQ_XCM_ETH_TX_BD_PROD_CMD);
6744
6745 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6746 }
6747 }
6748
6749 /* Fill struct with RSS params */
6750 if (ha->num_rss > 1) {
6751 rss_params->update_rss_config = 1;
6752 rss_params->rss_enable = 1;
6753 rss_params->update_rss_capabilities = 1;
6754 rss_params->update_rss_ind_table = 1;
6755 rss_params->update_rss_key = 1;
6756 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6757 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6758 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6759
6760 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6761 fp = &ha->fp_array[(i % ha->num_rss)];
6762 rss_params->rss_ind_table[i] = fp->rxq->handle;
6763 }
6764
6765 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6766 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6767
6768 } else {
6769 memset(rss_params, 0, sizeof(*rss_params));
6770 }
6771
6772 /* Prepare and send the vport enable */
6773 memset(&vport_update_params, 0, sizeof(vport_update_params));
6774 vport_update_params.vport_id = vport_id;
6775 vport_update_params.update_vport_active_tx_flg = 1;
6776 vport_update_params.vport_active_tx_flg = 1;
6777 vport_update_params.update_vport_active_rx_flg = 1;
6778 vport_update_params.vport_active_rx_flg = 1;
6779 vport_update_params.rss_params = rss_params;
6780 vport_update_params.update_inner_vlan_removal_flg = 1;
6781 vport_update_params.inner_vlan_removal_flg = 1;
6782
6783 if (hw_lro_enable) {
6784 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6785
6786 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6787
6788 tpa_params.update_tpa_en_flg = 1;
6789 tpa_params.tpa_ipv4_en_flg = 1;
6790 tpa_params.tpa_ipv6_en_flg = 1;
6791
6792 tpa_params.update_tpa_param_flg = 1;
6793 tpa_params.tpa_pkt_split_flg = 0;
6794 tpa_params.tpa_hdr_data_split_flg = 0;
6795 tpa_params.tpa_gro_consistent_flg = 0;
6796 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6797 tpa_params.tpa_max_size = (uint16_t)(-1);
6798 tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
6799 tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;
6800
6801 vport_update_params.sge_tpa_params = &tpa_params;
6802 }
6803
6804 rc = qlnx_update_vport(cdev, &vport_update_params);
6805 if (rc) {
6806 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6807 return rc;
6808 }
6809
6810 return 0;
6811 }
6812
6813 static int
6814 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6815 struct qlnx_tx_queue *txq)
6816 {
6817 uint16_t hw_bd_cons;
6818 uint16_t ecore_cons_idx;
6819
6820 QL_DPRINT2(ha, "enter\n");
6821
6822 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6823
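/*
 * Poll until the hardware consumer reported in the status block catches
 * up with the chain's consumer index, reaping completed Tx packets and
 * sleeping 2ms between passes.
 */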
6824 while (hw_bd_cons !=
6825 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6826 mtx_lock(&fp->tx_mtx);
6827
6828 (void)qlnx_tx_int(ha, fp, txq);
6829
6830 mtx_unlock(&fp->tx_mtx);
6831
6832 qlnx_mdelay(__func__, 2);
6833
6834 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6835 }
6836
6837 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6838
6839 return 0;
6840 }
6841
6842 static int
6843 qlnx_stop_queues(qlnx_host_t *ha)
6844 {
6845 struct qlnx_update_vport_params vport_update_params;
6846 struct ecore_dev *cdev;
6847 struct qlnx_fastpath *fp;
6848 int rc, tc, i;
6849
6850 cdev = &ha->cdev;
6851
6852 /* Disable the vport */
6853
6854 memset(&vport_update_params, 0, sizeof(vport_update_params));
6855
6856 vport_update_params.vport_id = 0;
6857 vport_update_params.update_vport_active_tx_flg = 1;
6858 vport_update_params.vport_active_tx_flg = 0;
6859 vport_update_params.update_vport_active_rx_flg = 1;
6860 vport_update_params.vport_active_rx_flg = 0;
6861 vport_update_params.rss_params = &ha->rss_params;
6862 vport_update_params.rss_params->update_rss_config = 0;
6863 vport_update_params.rss_params->rss_enable = 0;
6864 vport_update_params.update_inner_vlan_removal_flg = 0;
6865 vport_update_params.inner_vlan_removal_flg = 0;
6866
6867 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6868
6869 rc = qlnx_update_vport(cdev, &vport_update_params);
6870 if (rc) {
6871 QL_DPRINT1(ha, "Failed to update vport\n");
6872 return rc;
6873 }
6874
6875 /* Flush Tx queues. If needed, request drain from MCP */
6876 for_each_rss(i) {
6877 fp = &ha->fp_array[i];
6878
6879 for (tc = 0; tc < ha->num_tc; tc++) {
6880 struct qlnx_tx_queue *txq = fp->txq[tc];
6881
6882 rc = qlnx_drain_txq(ha, fp, txq);
6883 if (rc)
6884 return rc;
6885 }
6886 }
6887
6888 /* Stop all queues in reverse order */
6889 for (i = ha->num_rss - 1; i >= 0; i--) {
6890 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6891
6892 fp = &ha->fp_array[i];
6893
6894 /* Stop the Tx queue(s) */
6895 for (tc = 0; tc < ha->num_tc; tc++) {
6896 int tx_queue_id __unused;
6897
6898 tx_queue_id = tc * ha->num_rss + i;
6899 rc = ecore_eth_tx_queue_stop(p_hwfn,
6900 fp->txq[tc]->handle);
6901
6902 if (rc) {
6903 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6904 tx_queue_id);
6905 return rc;
6906 }
6907 }
6908
6909 /* Stop the Rx queue */
6910 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6911 false);
6912 if (rc) {
6913 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6914 return rc;
6915 }
6916 }
6917
6918 /* Stop the vport */
6919 for_each_hwfn(cdev, i) {
6920 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6921
6922 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6923
6924 if (rc) {
6925 QL_DPRINT1(ha, "Failed to stop VPORT\n");
6926 return rc;
6927 }
6928 }
6929
6930 return rc;
6931 }
6932
6933 static int
6934 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6935 enum ecore_filter_opcode opcode,
6936 unsigned char mac[ETH_ALEN])
6937 {
6938 struct ecore_filter_ucast ucast;
6939 struct ecore_dev *cdev;
6940 int rc;
6941
6942 cdev = &ha->cdev;
6943
6944 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6945
6946 ucast.opcode = opcode;
6947 ucast.type = ECORE_FILTER_MAC;
6948 ucast.is_rx_filter = 1;
6949 ucast.vport_to_add_to = 0;
6950 memcpy(&ucast.mac[0], mac, ETH_ALEN);
6951
6952 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6953
6954 return (rc);
6955 }
6956
6957 static int
6958 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6959 {
6960 struct ecore_filter_ucast ucast;
6961 struct ecore_dev *cdev;
6962 int rc;
6963
6964 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6965
6966 ucast.opcode = ECORE_FILTER_REPLACE;
6967 ucast.type = ECORE_FILTER_MAC;
6968 ucast.is_rx_filter = 1;
6969
6970 cdev = &ha->cdev;
6971
6972 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6973
6974 return (rc);
6975 }
6976
6977 static int
6978 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6979 {
6980 struct ecore_filter_mcast *mcast;
6981 struct ecore_dev *cdev;
6982 int rc, i;
6983
6984 cdev = &ha->cdev;
6985
6986 mcast = &ha->ecore_mcast;
6987 bzero(mcast, sizeof(struct ecore_filter_mcast));
6988
6989 mcast->opcode = ECORE_FILTER_REMOVE;
6990
6991 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6992 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6993 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6994 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6995 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
6996 mcast->num_mc_addrs++;
6997 }
6998 }
6999 mcast = &ha->ecore_mcast;
7000
7001 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7002
7003 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7004 ha->nmcast = 0;
7005
7006 return (rc);
7007 }
7008
7009 static int
7010 qlnx_clean_filters(qlnx_host_t *ha)
7011 {
7012 int rc = 0;
7013
7014 /* Remove all unicast macs */
7015 rc = qlnx_remove_all_ucast_mac(ha);
7016 if (rc)
7017 return rc;
7018
7019 /* Remove all multicast macs */
7020 rc = qlnx_remove_all_mcast_mac(ha);
7021 if (rc)
7022 return rc;
7023
7024 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7025
7026 return (rc);
7027 }
7028
7029 static int
7030 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7031 {
7032 struct ecore_filter_accept_flags accept;
7033 int rc = 0;
7034 struct ecore_dev *cdev;
7035
7036 cdev = &ha->cdev;
7037
7038 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7039
7040 accept.update_rx_mode_config = 1;
7041 accept.rx_accept_filter = filter;
7042
7043 accept.update_tx_mode_config = 1;
7044 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7045 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7046
7047 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7048 ECORE_SPQ_MODE_CB, NULL);
7049
7050 return (rc);
7051 }
7052
7053 static int
7054 qlnx_set_rx_mode(qlnx_host_t *ha)
7055 {
7056 int rc = 0;
7057 uint8_t filter;
7058
7059 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7060 if (rc)
7061 return rc;
7062
7063 rc = qlnx_remove_all_mcast_mac(ha);
7064 if (rc)
7065 return rc;
7066
7067 filter = ECORE_ACCEPT_UCAST_MATCHED |
7068 ECORE_ACCEPT_MCAST_MATCHED |
7069 ECORE_ACCEPT_BCAST;
7070
7071 if (qlnx_vf_device(ha) == 0 || (if_getflags(ha->ifp) & IFF_PROMISC)) {
7072 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7073 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7074 } else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
7075 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7076 }
7077 ha->filter = filter;
7078
7079 rc = qlnx_set_rx_accept_filter(ha, filter);
7080
7081 return (rc);
7082 }
7083
7084 static int
7085 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7086 {
7087 int i, rc = 0;
7088 struct ecore_dev *cdev;
7089 struct ecore_hwfn *hwfn;
7090 struct ecore_ptt *ptt;
7091
7092 if (qlnx_vf_device(ha) == 0)
7093 return (0);
7094
7095 cdev = &ha->cdev;
7096
7097 for_each_hwfn(cdev, i) {
7098 hwfn = &cdev->hwfns[i];
7099
7100 ptt = ecore_ptt_acquire(hwfn);
7101 if (!ptt)
7102 return -EBUSY;
7103
7104 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7105
7106 ecore_ptt_release(hwfn, ptt);
7107
7108 if (rc)
7109 return rc;
7110 }
7111 return (rc);
7112 }
7113
7114 static uint64_t
7115 qlnx_get_counter(if_t ifp, ift_counter cnt)
7116 {
7117 qlnx_host_t *ha;
7118 uint64_t count;
7119
7120 ha = (qlnx_host_t *)if_getsoftc(ifp);
7121
7122 switch (cnt) {
7123 case IFCOUNTER_IPACKETS:
7124 count = ha->hw_stats.common.rx_ucast_pkts +
7125 ha->hw_stats.common.rx_mcast_pkts +
7126 ha->hw_stats.common.rx_bcast_pkts;
7127 break;
7128
7129 case IFCOUNTER_IERRORS:
7130 count = ha->hw_stats.common.rx_crc_errors +
7131 ha->hw_stats.common.rx_align_errors +
7132 ha->hw_stats.common.rx_oversize_packets +
7133 ha->hw_stats.common.rx_undersize_packets;
7134 break;
7135
7136 case IFCOUNTER_OPACKETS:
7137 count = ha->hw_stats.common.tx_ucast_pkts +
7138 ha->hw_stats.common.tx_mcast_pkts +
7139 ha->hw_stats.common.tx_bcast_pkts;
7140 break;
7141
7142 case IFCOUNTER_OERRORS:
7143 count = ha->hw_stats.common.tx_err_drop_pkts;
7144 break;
7145
7146 case IFCOUNTER_COLLISIONS:
7147 return (0);
7148
7149 case IFCOUNTER_IBYTES:
7150 count = ha->hw_stats.common.rx_ucast_bytes +
7151 ha->hw_stats.common.rx_mcast_bytes +
7152 ha->hw_stats.common.rx_bcast_bytes;
7153 break;
7154
7155 case IFCOUNTER_OBYTES:
7156 count = ha->hw_stats.common.tx_ucast_bytes +
7157 ha->hw_stats.common.tx_mcast_bytes +
7158 ha->hw_stats.common.tx_bcast_bytes;
7159 break;
7160
7161 case IFCOUNTER_IMCASTS:
7162 count = ha->hw_stats.common.rx_mcast_pkts;
7163 break;
7164
7165 case IFCOUNTER_OMCASTS:
7166 count = ha->hw_stats.common.tx_mcast_pkts;
7167 break;
7168
7169 case IFCOUNTER_IQDROPS:
7170 case IFCOUNTER_OQDROPS:
7171 case IFCOUNTER_NOPROTO:
7172
7173 default:
7174 return (if_get_counter_default(ifp, cnt));
7175 }
7176 return (count);
7177 }
7178
7179 static void
7180 qlnx_timer(void *arg)
7181 {
7182 qlnx_host_t *ha;
7183
7184 ha = (qlnx_host_t *)arg;
7185
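/*
 * Once-a-second watchdog: kick the error recovery task if flagged,
 * refresh the vport statistics and optionally sample the storm
 * processor counters, then re-arm the callout.
 */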
7186 if (ha->error_recovery) {
7187 ha->error_recovery = 0;
7188 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7189 return;
7190 }
7191
7192 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7193
7194 if (ha->storm_stats_gather)
7195 qlnx_sample_storm_stats(ha);
7196
7197 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7198
7199 return;
7200 }
7201
7202 static int
7203 qlnx_load(qlnx_host_t *ha)
7204 {
7205 int i;
7206 int rc = 0;
7207 device_t dev;
7208
7209 dev = ha->pci_dev;
7210
7211 QL_DPRINT2(ha, "enter\n");
7212
7213 rc = qlnx_alloc_mem_arrays(ha);
7214 if (rc)
7215 goto qlnx_load_exit0;
7216
7217 qlnx_init_fp(ha);
7218
7219 rc = qlnx_alloc_mem_load(ha);
7220 if (rc)
7221 goto qlnx_load_exit1;
7222
7223 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7224 ha->num_rss, ha->num_tc);
7225
7226 for (i = 0; i < ha->num_rss; i++) {
7227 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7228 (INTR_TYPE_NET | INTR_MPSAFE),
7229 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7230 &ha->irq_vec[i].handle))) {
7231 QL_DPRINT1(ha, "could not setup interrupt\n");
7232 goto qlnx_load_exit2;
7233 }
7234
7235 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7236 irq %p handle %p\n", i,
7237 ha->irq_vec[i].irq_rid,
7238 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7239
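/* Distribute the fastpath interrupt vectors round-robin across CPUs. */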
7240 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7241 }
7242
7243 rc = qlnx_start_queues(ha);
7244 if (rc)
7245 goto qlnx_load_exit2;
7246
7247 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7248
7249 /* Add primary mac and set Rx filters */
7250 rc = qlnx_set_rx_mode(ha);
7251 if (rc)
7252 goto qlnx_load_exit2;
7253
7254 /* Ask for link-up using current configuration */
7255 qlnx_set_link(ha, true);
7256
7257 if (qlnx_vf_device(ha) == 0)
7258 qlnx_link_update(&ha->cdev.hwfns[0]);
7259
7260 ha->state = QLNX_STATE_OPEN;
7261
7262 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7263
7264 if (ha->flags.callout_init)
7265 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7266
7267 goto qlnx_load_exit0;
7268
7269 qlnx_load_exit2:
7270 qlnx_free_mem_load(ha);
7271
7272 qlnx_load_exit1:
7273 ha->num_rss = 0;
7274
7275 qlnx_load_exit0:
7276 QL_DPRINT2(ha, "exit [%d]\n", rc);
7277 return rc;
7278 }
7279
7280 static void
7281 qlnx_drain_soft_lro(qlnx_host_t *ha)
7282 {
7283 #ifdef QLNX_SOFT_LRO
7284
7285 if_t ifp;
7286 int i;
7287
7288 ifp = ha->ifp;
7289
7290 if (if_getcapenable(ifp) & IFCAP_LRO) {
7291 for (i = 0; i < ha->num_rss; i++) {
7292 struct qlnx_fastpath *fp = &ha->fp_array[i];
7293 struct lro_ctrl *lro;
7294
7295 lro = &fp->rxq->lro;
7296
7297 tcp_lro_flush_all(lro);
7298 }
7299 }
7300
7301 #endif /* #ifdef QLNX_SOFT_LRO */
7302
7303 return;
7304 }
7305
7306 static void
7307 qlnx_unload(qlnx_host_t *ha)
7308 {
7309 struct ecore_dev *cdev;
7310 device_t dev;
7311 int i;
7312
7313 cdev = &ha->cdev;
7314 dev = ha->pci_dev;
7315
7316 QL_DPRINT2(ha, "enter\n");
7317 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7318
7319 if (ha->state == QLNX_STATE_OPEN) {
7320 qlnx_set_link(ha, false);
7321 qlnx_clean_filters(ha);
7322 qlnx_stop_queues(ha);
7323 ecore_hw_stop_fastpath(cdev);
7324
7325 for (i = 0; i < ha->num_rss; i++) {
7326 if (ha->irq_vec[i].handle) {
7327 (void)bus_teardown_intr(dev,
7328 ha->irq_vec[i].irq,
7329 ha->irq_vec[i].handle);
7330 ha->irq_vec[i].handle = NULL;
7331 }
7332 }
7333
7334 qlnx_drain_fp_taskqueues(ha);
7335 qlnx_drain_soft_lro(ha);
7336 qlnx_free_mem_load(ha);
7337 }
7338
7339 if (ha->flags.callout_init)
7340 callout_drain(&ha->qlnx_callout);
7341
7342 qlnx_mdelay(__func__, 1000);
7343
7344 ha->state = QLNX_STATE_CLOSED;
7345
7346 QL_DPRINT2(ha, "exit\n");
7347 return;
7348 }
7349
7350 static int
7351 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7352 {
7353 int rval = -1;
7354 struct ecore_hwfn *p_hwfn;
7355 struct ecore_ptt *p_ptt;
7356
7357 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7358
7359 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7360 p_ptt = ecore_ptt_acquire(p_hwfn);
7361
7362 if (!p_ptt) {
7363 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7364 return (rval);
7365 }
7366
7367 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7368
7369 if (rval == DBG_STATUS_OK)
7370 rval = 0;
7371 else {
7372 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7373 "[0x%x]\n", rval);
7374 }
7375
7376 ecore_ptt_release(p_hwfn, p_ptt);
7377
7378 return (rval);
7379 }
7380
7381 static int
7382 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7383 {
7384 int rval = -1;
7385 struct ecore_hwfn *p_hwfn;
7386 struct ecore_ptt *p_ptt;
7387
7388 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7389
7390 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7391 p_ptt = ecore_ptt_acquire(p_hwfn);
7392
7393 if (!p_ptt) {
7394 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7395 return (rval);
7396 }
7397
7398 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7399
7400 if (rval == DBG_STATUS_OK)
7401 rval = 0;
7402 else {
7403 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7404 " [0x%x]\n", rval);
7405 }
7406
7407 ecore_ptt_release(p_hwfn, p_ptt);
7408
7409 return (rval);
7410 }
7411
7412 static void
7413 qlnx_sample_storm_stats(qlnx_host_t *ha)
7414 {
7415 int i, index;
7416 struct ecore_dev *cdev;
7417 qlnx_storm_stats_t *s_stats;
7418 uint32_t reg;
7419 struct ecore_ptt *p_ptt;
7420 struct ecore_hwfn *hwfn;
7421
7422 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7423 ha->storm_stats_gather = 0;
7424 return;
7425 }
7426
7427 cdev = &ha->cdev;
7428
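/*
 * For every hwfn, snapshot the active/stall/sleeping/inactive cycle
 * counters of each storm processor (X/Y/P/T/M/U) from its fast-memory
 * window into the next per-hwfn sample slot.
 */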
7429 for_each_hwfn(cdev, i) {
7430 hwfn = &cdev->hwfns[i];
7431
7432 p_ptt = ecore_ptt_acquire(hwfn);
7433 if (!p_ptt)
7434 return;
7435
7436 index = ha->storm_stats_index +
7437 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7438
7439 s_stats = &ha->storm_stats[index];
7440
7441 /* XSTORM */
7442 reg = XSEM_REG_FAST_MEMORY +
7443 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7444 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7445
7446 reg = XSEM_REG_FAST_MEMORY +
7447 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7448 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7449
7450 reg = XSEM_REG_FAST_MEMORY +
7451 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7452 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7453
7454 reg = XSEM_REG_FAST_MEMORY +
7455 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7456 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7457
7458 /* YSTORM */
7459 reg = YSEM_REG_FAST_MEMORY +
7460 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7461 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7462
7463 reg = YSEM_REG_FAST_MEMORY +
7464 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7465 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7466
7467 reg = YSEM_REG_FAST_MEMORY +
7468 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7469 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7470
7471 reg = YSEM_REG_FAST_MEMORY +
7472 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7473 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7474
7475 /* PSTORM */
7476 reg = PSEM_REG_FAST_MEMORY +
7477 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7478 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7479
7480 reg = PSEM_REG_FAST_MEMORY +
7481 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7482 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7483
7484 reg = PSEM_REG_FAST_MEMORY +
7485 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7486 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7487
7488 reg = PSEM_REG_FAST_MEMORY +
7489 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7490 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7491
7492 /* TSTORM */
7493 reg = TSEM_REG_FAST_MEMORY +
7494 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7495 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7496
7497 reg = TSEM_REG_FAST_MEMORY +
7498 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7499 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7500
7501 reg = TSEM_REG_FAST_MEMORY +
7502 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7503 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7504
7505 reg = TSEM_REG_FAST_MEMORY +
7506 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7507 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7508
7509 /* MSTORM */
7510 reg = MSEM_REG_FAST_MEMORY +
7511 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7512 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7513
7514 reg = MSEM_REG_FAST_MEMORY +
7515 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7516 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7517
7518 reg = MSEM_REG_FAST_MEMORY +
7519 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7520 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7521
7522 reg = MSEM_REG_FAST_MEMORY +
7523 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7524 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7525
7526 /* USTORM */
7527 reg = USEM_REG_FAST_MEMORY +
7528 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7529 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7530
7531 reg = USEM_REG_FAST_MEMORY +
7532 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7533 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7534
7535 reg = USEM_REG_FAST_MEMORY +
7536 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7537 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7538
7539 reg = USEM_REG_FAST_MEMORY +
7540 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7541 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7542
7543 ecore_ptt_release(hwfn, p_ptt);
7544 }
7545
7546 ha->storm_stats_index++;
7547
7548 return;
7549 }
7550
7551 /*
7552 * Name: qlnx_dump_buf8
7553 * Function: dumps a buffer as bytes
7554 */
7555 static void
7556 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7557 {
7558 device_t dev;
7559 uint32_t i = 0;
7560 uint8_t *buf;
7561
7562 dev = ha->pci_dev;
7563 buf = dbuf;
7564
7565 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7566
7567 while (len >= 16) {
7568 device_printf(dev,"0x%08x:"
7569 " %02x %02x %02x %02x %02x %02x %02x %02x"
7570 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7571 buf[0], buf[1], buf[2], buf[3],
7572 buf[4], buf[5], buf[6], buf[7],
7573 buf[8], buf[9], buf[10], buf[11],
7574 buf[12], buf[13], buf[14], buf[15]);
7575 i += 16;
7576 len -= 16;
7577 buf += 16;
7578 }
7579 switch (len) {
7580 case 1:
7581 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7582 break;
7583 case 2:
7584 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7585 break;
7586 case 3:
7587 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7588 i, buf[0], buf[1], buf[2]);
7589 break;
7590 case 4:
7591 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7592 buf[0], buf[1], buf[2], buf[3]);
7593 break;
7594 case 5:
7595 device_printf(dev,"0x%08x:"
7596 " %02x %02x %02x %02x %02x\n", i,
7597 buf[0], buf[1], buf[2], buf[3], buf[4]);
7598 break;
7599 case 6:
7600 device_printf(dev,"0x%08x:"
7601 " %02x %02x %02x %02x %02x %02x\n", i,
7602 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7603 break;
7604 case 7:
7605 device_printf(dev,"0x%08x:"
7606 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7607 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7608 break;
7609 case 8:
7610 device_printf(dev,"0x%08x:"
7611 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7612 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7613 buf[7]);
7614 break;
7615 case 9:
7616 device_printf(dev,"0x%08x:"
7617 " %02x %02x %02x %02x %02x %02x %02x %02x"
7618 " %02x\n", i,
7619 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7620 buf[7], buf[8]);
7621 break;
7622 case 10:
7623 device_printf(dev,"0x%08x:"
7624 " %02x %02x %02x %02x %02x %02x %02x %02x"
7625 " %02x %02x\n", i,
7626 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7627 buf[7], buf[8], buf[9]);
7628 break;
7629 case 11:
7630 device_printf(dev,"0x%08x:"
7631 " %02x %02x %02x %02x %02x %02x %02x %02x"
7632 " %02x %02x %02x\n", i,
7633 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7634 buf[7], buf[8], buf[9], buf[10]);
7635 break;
7636 case 12:
7637 device_printf(dev,"0x%08x:"
7638 " %02x %02x %02x %02x %02x %02x %02x %02x"
7639 " %02x %02x %02x %02x\n", i,
7640 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7641 buf[7], buf[8], buf[9], buf[10], buf[11]);
7642 break;
7643 case 13:
7644 device_printf(dev,"0x%08x:"
7645 " %02x %02x %02x %02x %02x %02x %02x %02x"
7646 " %02x %02x %02x %02x %02x\n", i,
7647 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7648 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7649 break;
7650 case 14:
7651 device_printf(dev,"0x%08x:"
7652 " %02x %02x %02x %02x %02x %02x %02x %02x"
7653 " %02x %02x %02x %02x %02x %02x\n", i,
7654 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7655 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7656 buf[13]);
7657 break;
7658 case 15:
7659 device_printf(dev,"0x%08x:"
7660 " %02x %02x %02x %02x %02x %02x %02x %02x"
7661 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7662 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7663 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7664 buf[13], buf[14]);
7665 break;
7666 default:
7667 break;
7668 }
7669
7670 device_printf(dev, "%s: %s dump end\n", __func__, msg);
7671
7672 return;
7673 }
7674
7675 #ifdef CONFIG_ECORE_SRIOV
7676
7677 static void
7678 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7679 {
7680 struct ecore_public_vf_info *vf_info;
7681
7682 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7683
7684 if (!vf_info)
7685 return;
7686
7687 /* Clear the VF mac */
7688 memset(vf_info->forced_mac, 0, ETH_ALEN);
7689
7690 vf_info->forced_vlan = 0;
7691
7692 return;
7693 }
7694
7695 void
7696 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7697 {
7698 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7699 return;
7700 }
7701
7702 static int
7703 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7704 struct ecore_filter_ucast *params)
7705 {
7706 struct ecore_public_vf_info *vf;
7707
7708 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7709 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7710 "VF[%d] vport not initialized\n", vfid);
7711 return ECORE_INVAL;
7712 }
7713
7714 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7715 if (!vf)
7716 return -EINVAL;
7717
7718 /* No real decision to make; Store the configured MAC */
7719 if (params->type == ECORE_FILTER_MAC ||
7720 params->type == ECORE_FILTER_MAC_VLAN)
7721 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7722
7723 return 0;
7724 }
7725
7726 int
7727 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7728 {
7729 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7730 }
7731
7732 static int
7733 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7734 struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
7735 {
7736 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7737 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7738 "VF[%d] vport not initialized\n", vfid);
7739 return ECORE_INVAL;
7740 }
7741
7742 /* Untrusted VFs can't even be trusted to know that fact.
7743 * Simply indicate everything is configured fine, and trace
7744 * configuration 'behind their back'.
7745 */
7746 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7747 return 0;
7748
7749 return 0;
7750
7751 }
7752 int
7753 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7754 {
7755 return (__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7756 }
7757
7758 static int
7759 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7760 {
7761 int i;
7762 struct ecore_dev *cdev;
7763
7764 cdev = p_hwfn->p_dev;
7765
7766 for (i = 0; i < cdev->num_hwfns; i++) {
7767 if (&cdev->hwfns[i] == p_hwfn)
7768 break;
7769 }
7770
7771 if (i >= cdev->num_hwfns)
7772 return (-1);
7773
7774 return (i);
7775 }
7776
7777 static int
7778 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7779 {
7780 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7781 int i;
7782
7783 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7784 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7785
7786 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7787 return (-1);
7788
7789 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7790 atomic_testandset_32(&ha->sriov_task[i].flags,
7791 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7792
7793 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7794 &ha->sriov_task[i].pf_task);
7795 }
7796
7797 return (ECORE_SUCCESS);
7798 }
7799
7800 int
7801 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7802 {
7803 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7804 }
7805
7806 static void
7807 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7808 {
7809 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7810 int i;
7811
7812 if (!ha->sriov_initialized)
7813 return;
7814
7815 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7816 ha, p_hwfn->p_dev, p_hwfn);
7817
7818 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7819 return;
7820
7821 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7822 atomic_testandset_32(&ha->sriov_task[i].flags,
7823 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7824
7825 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7826 &ha->sriov_task[i].pf_task);
7827 }
7828
7829 return;
7830 }
7831
7832 void
7833 qlnx_vf_flr_update(void *p_hwfn)
7834 {
7835 __qlnx_vf_flr_update(p_hwfn);
7836
7837 return;
7838 }
7839
7840 #ifndef QLNX_VF
7841
7842 static void
7843 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7844 {
7845 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7846 int i;
7847
7848 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7849 ha, p_hwfn->p_dev, p_hwfn);
7850
7851 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7852 return;
7853
7854 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7855 ha, p_hwfn->p_dev, p_hwfn, i);
7856
7857 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7858 atomic_testandset_32(&ha->sriov_task[i].flags,
7859 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7860
7861 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7862 &ha->sriov_task[i].pf_task);
7863 }
7864 }
7865
7866 static void
7867 qlnx_initialize_sriov(qlnx_host_t *ha)
7868 {
7869 device_t dev;
7870 nvlist_t *pf_schema, *vf_schema;
7871 int iov_error;
7872
7873 dev = ha->pci_dev;
7874
7875 pf_schema = pci_iov_schema_alloc_node();
7876 vf_schema = pci_iov_schema_alloc_node();
7877
7878 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7879 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7880 IOV_SCHEMA_HASDEFAULT, FALSE);
7881 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7882 IOV_SCHEMA_HASDEFAULT, FALSE);
7883 pci_iov_schema_add_uint16(vf_schema, "num-queues",
7884 IOV_SCHEMA_HASDEFAULT, 1);
7885
7886 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7887
7888 if (iov_error != 0) {
7889 ha->sriov_initialized = 0;
7890 } else {
7891 device_printf(dev, "SRIOV initialized\n");
7892 ha->sriov_initialized = 1;
7893 }
7894
7895 return;
7896 }
7897
7898 static void
7899 qlnx_sriov_disable(qlnx_host_t *ha)
7900 {
7901 struct ecore_dev *cdev;
7902 int i, j;
7903
7904 cdev = &ha->cdev;
7905
7906 ecore_iov_set_vfs_to_disable(cdev, true);
7907
7908 for_each_hwfn(cdev, i) {
7909 struct ecore_hwfn *hwfn = &cdev->hwfns[i];
7910 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
7911
7912 if (!ptt) {
7913 QL_DPRINT1(ha, "Failed to acquire ptt\n");
7914 return;
7915 }
7916 /* Clean WFQ db and configure equal weight for all vports */
7917 ecore_clean_wfq_db(hwfn, ptt);
7918
7919 ecore_for_each_vf(hwfn, j) {
7920 int k = 0;
7921
7922 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
7923 continue;
7924
7925 if (ecore_iov_is_vf_started(hwfn, j)) {
7926 /* Wait until VF is disabled before releasing */
7927
7928 for (k = 0; k < 100; k++) {
7929 if (!ecore_iov_is_vf_stopped(hwfn, j)) {
7930 qlnx_mdelay(__func__, 10);
7931 } else
7932 break;
7933 }
7934 }
7935
7936 if (k < 100)
7937 ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
7938 ptt, j);
7939 else {
7940 QL_DPRINT1(ha,
7941 "Timeout waiting for VF's FLR to end\n");
7942 }
7943 }
7944 ecore_ptt_release(hwfn, ptt);
7945 }
7946
7947 ecore_iov_set_vfs_to_disable(cdev, false);
7948
7949 return;
7950 }
7951
7952 static void
7953 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
7954 struct ecore_iov_vf_init_params *params)
7955 {
7956 u16 base, i;
7957
7958 /* Since we have an equal resource distribution per-VF, and we assume
7959 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
7960 * sequentially from there.
7961 */
7962 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7963
7964 params->rel_vf_id = vfid;
7965
7966 for (i = 0; i < params->num_queues; i++) {
7967 params->req_rx_queue[i] = base + i;
7968 params->req_tx_queue[i] = base + i;
7969 }
7970
7971 /* PF uses indices 0 for itself; Set vport/RSS afterwards */
7972 params->vport_id = vfid + 1;
7973 params->rss_eng_id = vfid + 1;
7974
7975 return;
7976 }
7977
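/*
 * PCI_IOV_INIT handler (presumably registered in the driver's device
 * method table): create the per-hwfn PF taskqueues, validate num_vfs
 * against the available vports, allocate per-VF attribute storage and
 * program the hardware for each VF. Any failure unwinds everything.
 */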
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t *ha;
	struct ecore_dev *cdev;
	struct ecore_iov_vf_init_params params;
	int ret, j, i;
	uint32_t max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha, "dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	/* One vport is reserved for the PF itself, so at most
	 * (max_vfs - 1) VFs can be started.
	 */
	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(max_vfs - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(sizeof(qlnx_vf_attr_t) * num_vfs, M_QLNXBUF,
		M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
			(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
			16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha, "dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}

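/*
 * PCI_IOV_UNINIT handler: disable all VFs, destroy the PF taskqueues
 * and release the per-VF attribute array.
 */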
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t *ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha, "dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;

	ha->num_vfs = 0;

	QL_DPRINT2(ha, "dev = %p exit\n", dev);
	return;
}

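/*
 * PCI_IOV_ADD_VF handler: record the VF's configured parameters. If a
 * "mac-addr" was supplied, cache it and publish it to the VF through
 * the bulletin board of hwfn 0.
 */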
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t *ha;
	qlnx_vf_attr_t *vf_attr;
	const unsigned char *mac;
	size_t size;
	struct ecore_hwfn *p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha, "dev = %p enter vfnum = %d\n", dev, vfnum);

	if (vfnum > (ha->num_vfs - 1)) {
		QL_DPRINT1(ha, "VF[%d] exceeds max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
		/* Bail out rather than index past the end of vf_attr[] */
		return (-1);
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);
		p_hwfn = &ha->cdev.hwfns[0];
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha, "dev = %p exit vfnum = %d\n", dev, vfnum);
	return (0);
}

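/*
 * Taskqueue-context handler for VF->PF mailbox messages: read the
 * pending-events bitmap, then copy and process the mailbox request of
 * every VF that has a bit set. If no PTT can be acquired, re-arm the
 * task and retry later.
 */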
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	QL_DPRINT2(ha, "Event mask of VF events: "
		"0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy the VF's message into the PF's request buffer for
		 * that VF.
		 */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

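/*
 * Taskqueue-context handler for VF FLR (function-level reset) events;
 * delegates the cleanup to ecore. As above, a failure to acquire a PTT
 * causes the task to be re-armed.
 */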
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int ret;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);

	if (ret) {
		QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed\n");
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

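/*
 * Taskqueue-context handler that posts the current bulletin-board
 * contents to every VF of the given hwfn.
 */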
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

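/*
 * PF taskqueue worker: dispatch whichever of the three SR-IOV events
 * (VF message, VF FLR, bulletin update) are flagged for this hwfn,
 * clearing each flag atomically as it is consumed.
 */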
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn *p_hwfn;
	qlnx_host_t *ha;
	int i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}

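/*
 * Create one single-threaded taskqueue ("ql_pf_tq_<n>") per hwfn to
 * service SR-IOV events. Returns 0 on success, -1 if any taskqueue
 * cannot be created (the caller is expected to unwind via
 * qlnx_destroy_pf_taskqueues()).
 */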
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int i;
	char tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof(tq_name));
		snprintf(tq_name, sizeof(tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}

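/*
 * Drain and free every per-hwfn PF taskqueue; safe to call even if
 * only some (or none) of the taskqueues were created.
 */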
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}
	return;
}

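/*
 * Propagate the PF's current link parameters/state/capabilities into
 * the bulletin board of every possible VF, then schedule a bulletin
 * update so the VFs actually see it.
 */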
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update the bulletin of all future possible VFs with the link
	 * configuration.
	 */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */

		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Report the maximum speed supported by the HW:
			 * 100G for CMT-mode (dual-hwfn) devices, the
			 * MCP-reported link speed otherwise.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
				100000 : link.speed;
		}
		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */