/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author: David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */

#ifdef CONFIG_ECORE_SRIOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(if_t ifp);
static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
		struct qlnx_link_output *if_link);
static int qlnx_transmit(if_t ifp, struct mbuf *mp);
static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
		struct mbuf *mp);
static void qlnx_qflush(if_t ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);

#ifndef QLNX_VF

#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */

static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
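
/*
 * Usage sketch (assumes the stock build produces if_qlnxe.ko): the PF
 * driver is typically loaded at runtime with "kldload if_qlnxe", or at
 * boot via if_qlnxe_load="YES" in /boot/loader.conf. The hw.qlnxe.*
 * tunables defined below are CTLFLAG_RDTUN, so they must be set before
 * the driver attaches.
 */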

#else

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");
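
/*
 * Example (hypothetical value): to pin the driver to 4 RSS queues instead
 * of auto-sizing, set the tunable before load, e.g. in /boot/loader.conf:
 *
 *	hw.qlnxe.queue_count="4"
 *
 * 0 selects the default (QLNX_DEFAULT_RSS, clamped at attach time to
 * mp_ncpus and the available MSI-x vectors).
 */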

/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If that personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the personality configured in the sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured in the sysctl.
 */
#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALIY_MASK		0xF

/* RDMA configuration; 64bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");
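
/*
 * Example (hypothetical values): each PCI function owns one 4-bit nibble
 * of this field, so the default 0x22222222 requests ETH_IWARP (0x2) on
 * functions 0-7. To request ETH_ROCE on function 1 and ETH_ONLY on
 * functions 0, 2 and 3 of a 4-function adapter, one would set
 *
 *	hw.qlnxe.rdma_configuration="0x1131"
 *
 * in /boot/loader.conf (the OID is CTLFLAG_RDTUN, so it must be set
 * before the driver loads).
 */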

int
qlnx_vf_device(qlnx_host_t *ha)
{
	uint16_t device_id;

	device_id = ha->device_id;

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;

	return -1;
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
	uint16_t device_id;

	device_id = ha->device_id;

#ifndef QLNX_VF
	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return 0;
#else
	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;

#endif /* #ifndef QLNX_VF */
	return -1;
}

#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	uint16_t device_id;

	device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);

	return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */

/*
 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLogic adapter
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
	    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {
#ifndef QLNX_VF

	case QLOGIC_PCI_DEVICE_ID_1644:
		device_set_descf(dev, "%s v%d.%d.%d",
		    "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
		    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
		    QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		device_set_descf(dev, "%s v%d.%d.%d",
		    "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
		    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
		    QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		device_set_descf(dev, "%s v%d.%d.%d",
		    "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
		    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
		    QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		device_set_descf(dev, "%s v%d.%d.%d",
		    "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
		    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
		    QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
		device_set_descf(dev, "%s v%d.%d.%d",
		    "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
		    " Adapter-Ethernet Function",
		    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
		    QLNX_VERSION_BUILD);
		break;

#else
	case QLOGIC_PCI_DEVICE_ID_8090:
		device_set_descf(dev, "%s v%d.%d.%d",
		    "Qlogic SRIOV PCI CNA (AH) "
		    "Adapter-Ethernet Function",
		    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
		    QLNX_VERSION_BUILD);
		break;

#endif /* #ifndef QLNX_VF */

	default:
		return (ENXIO);
	}

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

	return (BUS_PROBE_DEFAULT);
}

static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq)
{
	u16 hw_bd_cons;
	u16 ecore_cons_idx;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);

	return (hw_bd_cons - ecore_cons_idx);
}
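
/*
 * Worked example: hw_bd_cons and ecore_cons_idx are free-running 16-bit
 * indices, so the unsigned subtraction above remains correct across wrap:
 * with hw_bd_cons == 0x0003 and ecore_cons_idx == 0xfffe the result is
 * (uint16_t)(0x0003 - 0xfffe) == 5, i.e. five tx completions are pending.
 */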

static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn *p_hwfn;
	qlnx_host_t *ha;
	int i;

	p_hwfn = arg;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
			break;
		}
	}
	QL_DPRINT2(ha, "exit\n");

	return;
}

static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn *p_hwfn;

	p_hwfn = context;

	if (p_hwfn != NULL) {
		qlnx_sp_isr(p_hwfn);
	}
	return;
}

static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int i;
	uint8_t tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
		    taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
		    tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
	}

	return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
		}
	}
	return;
}

static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath *fp;
	qlnx_host_t *ha;
	if_t ifp;
#ifdef QLNX_TRACE_PERF_DATA
	/* snapshots of the per-fp counters sampled around the transmit call */
	uint64_t tx_pkts, tx_compl;
#endif

	fp = context;

	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;

	ifp = ha->ifp;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
				tx_pkts = fp->tx_pkts_transmitted;
				tx_compl = fp->tx_pkts_completed;
#endif

				qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
				fp->tx_pkts_trans_fp +=
				    (fp->tx_pkts_transmitted - tx_pkts);
				fp->tx_pkts_compl_fp +=
				    (fp->tx_pkts_completed - tx_compl);
#endif
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	QL_DPRINT2(ha, "exit\n");
	return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	uint8_t tq_name[32];
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
		    taskqueue_thread_enqueue,
		    &fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
		    tq_name);

		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
	}

	return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
	return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			QLNX_UNLOCK(ha);
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			QLNX_LOCK(ha);
		}
	}
	return;
}

static void
qlnx_get_params(qlnx_host_t *ha)
{
	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
		    qlnxe_queue_count);
		qlnxe_queue_count = 0;
	}
	return;
}

static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
	qlnx_host_t *ha;

	ha = context;

	QL_DPRINT2(ha, "enter\n");

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_init(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");

	return;
}

static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
	uint8_t tq_name[32];

	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "ql_err_tq");

	TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

	ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &ha->err_taskqueue);

	if (ha->err_taskqueue == NULL)
		return (-1);

	taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

	QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

	return (0);
}

static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
	if (ha->err_taskqueue != NULL) {
		taskqueue_drain(ha->err_taskqueue, &ha->err_task);
		taskqueue_free(ha->err_taskqueue);
	}

	ha->err_taskqueue = NULL;

	return;
}

/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t *ha = NULL;
	uint32_t rsrc_len_reg __unused = 0;
	uint32_t rsrc_len_dbells = 0;
	uint32_t rsrc_len_msix __unused = 0;
	int i;
	uint32_t mfw_ver;
	uint32_t num_sp_msix = 0;
	uint32_t num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "unsupported device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	sx_init(&ha->hw_lock, "qlnx_hw_lock");

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the PCI BARs
	 */

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
	    RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
	    ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
	    SYS_RES_MEMORY,
	    ha->dbells_rid);
	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
		    bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, "BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
	    ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
	    "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
	    "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
	    "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
	    " msix_avail = 0x%x "
	    "\n\t\t\t[ncpus = %d]\n",
	    ha->pci_dev, ha->pci_reg, rsrc_len_reg,
	    ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
	    ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
	    mp_ncpus);
	/*
	 * allocate dma tags
	 */

	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;

	qlnx_get_params(ha);

	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
	    (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		uint8_t max_rxq;
		uint8_t max_txq;

		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		ecore_vf_get_num_txqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;

		num_sp_msix = 0;
	}

	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;

	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

	if (!ha->msix_count ||
	    (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
		    ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
	else
		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
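
	/*
	 * Vector-budget example (hypothetical counts): qlnx_vf_device()
	 * returns 0 only for the SR-IOV VF device (0x8090), so the
	 * "qlnx_vf_device(ha) != 0" tests above select the PF path. On a
	 * 2-hwfn PF with no RDMA IRQs and pci_msix_count() == 16, at least
	 * num_sp_msix + 1 == 3 vectors are required; with num_rss == 8 the
	 * vector count is then trimmed to 8 + 2 == 10, while with
	 * num_rss == 16 num_rss is instead trimmed to 16 - 2 == 14.
	 */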

	QL_DPRINT1(ha, "\n\t\t\t"
	    "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
	    "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
	    "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
	    " msix_avail = 0x%x msix_alloc = 0x%x"
	    "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
	    ha->pci_reg, rsrc_len_reg,
	    ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
	    ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
	    ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
		    ha->msix_count);
		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */

	if (num_sp_msix) {
		if (qlnx_create_sp_taskqueues(ha) != 0)
			goto qlnx_pci_attach_err;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

			ha->sp_irq_rid[i] = i + 1;
			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			    &ha->sp_irq_rid[i],
			    (RF_ACTIVE | RF_SHAREABLE));
			if (ha->sp_irq[i] == NULL) {
				device_printf(dev,
				    "could not allocate mbx interrupt\n");
				goto qlnx_pci_attach_err;
			}

			if (bus_setup_intr(dev, ha->sp_irq[i],
			    (INTR_TYPE_NET | INTR_MPSAFE), NULL,
			    qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
				device_printf(dev,
				    "could not setup slow path interrupt\n");
				goto qlnx_pci_attach_err;
			}

			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
			    " sp_irq %p sp_handle %p\n", p_hwfn,
			    ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
		}
	}

	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &ha->irq_vec[i].irq_rid,
		    (RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
			    "could not allocate interrupt[%d] irq_rid = %d\n",
			    i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;

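			/*
			 * ecore reports the dump sizes in dwords; convert
			 * them to bytes before sizing the allocations below.
			 */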
			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
			    i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
			    i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}

	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;
	else
		ha->flags.slowpath_start = 1;

	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}

	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
	    ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
	    ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
	    FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
	    FW_ENGINEERING_VERSION);
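
	/*
	 * Example: the MFW packs one version component per byte, so
	 * mfw_ver == 0x08210000 formats as "8.33.0.0" above
	 * (0x08 == 8, 0x21 == 33).
	 */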

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
	    ha->stormfw_ver, ha->mfw_ver);

	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: qlnx_make_cdev failed\n",
			    __func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

	return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);
}

/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t *ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}

#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
	uint8_t personality;

	personality = (qlnxe_rdma_configuration >>
	    (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
	    QLNX_PERSONALIY_MASK;
	return (personality);
}
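
/*
 * Worked example for qlnx_get_personality() above: with the default
 * qlnxe_rdma_configuration of 0x22222222 and pci_func == 3, bits [15:12]
 * are selected: (0x22222222 >> 12) & 0xF == 0x2, i.e.
 * QLNX_PERSONALITY_ETH_IWARP.
 */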

static void
qlnx_set_personality(qlnx_host_t *ha)
{
	uint8_t personality;

	personality = qlnx_get_personality(ha->pci_func);

	switch (personality) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
		    __func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
		    __func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
		    __func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
		    __func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;
	}

	return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */

static int
qlnx_init_hw(qlnx_host_t *ha)
{
	int rval = 0;
	struct ecore_hw_prepare_params params;

	ha->cdev.ha = ha;
	ecore_init_struct(&ha->cdev);

	/* ha->dp_module = ECORE_MSG_PROBE |
			ECORE_MSG_INTR |
			ECORE_MSG_SP |
			ECORE_MSG_LINK |
			ECORE_MSG_SPQ |
			ECORE_MSG_RDMA;
	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
	ha->dp_level = ECORE_LEVEL_NOTICE;
	//ha->dp_level = ECORE_LEVEL_VERBOSE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;

	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}
	QL_DPRINT2(ha, "%s: %s\n", __func__,
	    (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp" : "ethernet"));

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;
	params.epoch = 0;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
	    ha, &ha->cdev, &ha->cdev.hwfns[0]);

	return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
			    ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
			    ha->irq_vec[i].irq_rid,
			    ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
			    ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
			    ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		sx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
		    ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
		    ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
		    ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");
	return;
}

static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int i;

	if (ha->ifp != NULL)
		if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, "exit\n");

	return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qlnx_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qlnx_host_t *)arg1;
		qlnx_trigger_dump(ha);
	}
	return (err);
}

static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int err, i, ret = 0, usecs = 0;
	qlnx_host_t *ha;
	struct ecore_hwfn *p_hwfn;
	struct qlnx_fastpath *fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->txq[0]->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, 0,
			    (uint16_t)usecs, fp->txq[0]->handle);
		}
	}

	if (!ret)
		ha->tx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}

static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int err, i, ret = 0, usecs = 0;
	qlnx_host_t *ha;
	struct ecore_hwfn *p_hwfn;
	struct qlnx_fastpath *fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->rxq->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
			    0, fp->rxq->handle);
		}
	}

	if (!ret)
		ha->rx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}

static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "sp_interrupts",
	    CTLFLAG_RD, &ha->sp_interrupts,
	    "No. of slowpath interrupts");

	return;
}

static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *node_children;
	struct sysctl_oid *ctx_oid;
	int i, j;
	uint8_t name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {
		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		/* Tx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_processed",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
		    "No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_freed",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
		    "No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_transmitted",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
		    "No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_completed",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
		    "No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_non_tso_pkts",
		    CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
		    "No. of non-LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_trans_ctx",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
		    "No. of transmitted packets in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_compl_ctx",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
		    "No. of transmit completions in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_trans_fp",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
		    "No. of transmitted packets in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_compl_fp",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
		    "No. of transmit completions in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_pkts_compl_intr",
		    CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
		    "No. of transmit completions in interrupt ctx");
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_tso_pkts",
		    CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
		    "No. of LSO transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_lso_wnd_min_len",
		    CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
		    "tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_defrag",
		    CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
		    "tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tx_nsegs_gt_elem_left",
		    CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
		    "tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
		    OID_AUTO, "tx_tso_max_nsegs",
		    CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
		    ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
		    OID_AUTO, "tx_tso_min_nsegs",
		    CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
		    ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
		    OID_AUTO, "tx_tso_max_pkt_len",
		    CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
		    ha->fp_array[i].tx_tso_max_pkt_len,
		    "tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
		    OID_AUTO, "tx_tso_min_pkt_len",
		    CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
		    ha->fp_array[i].tx_tso_min_pkt_len,
		    "tx_tso_min_pkt_len");

		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
			    "tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
			    OID_AUTO, name_str, CTLFLAG_RD,
			    &ha->fp_array[i].tx_pkts[j], name_str);
		}

#ifdef QLNX_TRACE_PERF_DATA
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
			    "tx_pkts_hist_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
			    OID_AUTO, name_str, CTLFLAG_RD,
			    &ha->fp_array[i].tx_pkts_hist[j], name_str);
		}
		for (j = 0; j < 5; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
			    "tx_comInt_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
			    OID_AUTO, name_str, CTLFLAG_RD,
			    &ha->fp_array[i].tx_comInt[j], name_str);
		}
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
			    "tx_pkts_q_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
			    OID_AUTO, name_str, CTLFLAG_RD,
			    &ha->fp_array[i].tx_pkts_q[j], name_str);
		}
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_nsegs_gt_elem_left",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
		    "err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_dmamap_create",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
		    "err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_defrag_dmamap_load",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
		    "err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_non_tso_max_seg",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
		    "err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_dmamap_load",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
		    "err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_defrag",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
		    "err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_free_pkt_null",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
		    "err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_tx_cons_idx_conflict",
		    CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
		    "err_tx_cons_idx_conflict");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "lro_cnt_64",
		    CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
		    "lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "lro_cnt_128",
		    CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
		    "lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "lro_cnt_256",
		    CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
		    "lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "lro_cnt_512",
		    CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
		    "lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "lro_cnt_1024",
		    CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
		    "lro_cnt_1024");

		/* Rx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "rx_pkts",
		    CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
		    "No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tpa_start",
		    CTLFLAG_RD, &ha->fp_array[i].tpa_start,
		    "No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tpa_cont",
		    CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
		    "No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "tpa_end",
		    CTLFLAG_RD, &ha->fp_array[i].tpa_end,
		    "No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_m_getcl",
		    CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
		    "err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_m_getjcl",
		    CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
		    "err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_rx_hw_errors",
		    CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
		    "err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
		    OID_AUTO, "err_rx_alloc_errors",
		    CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
		    "err_rx_alloc_errors");
	}

	return;
}

static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "no_buff_discards",
	    CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
	    "No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "packet_too_big_discard",
	    CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
	    "No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "ttl0_discard",
	    CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
	    "ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_ucast_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
	    "rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_mcast_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
	    "rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_bcast_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
	    "rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_ucast_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
	    "rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_mcast_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
	    "rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_bcast_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
	    "rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "mftag_filter_discards",
	    CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
	    "mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "mac_filter_discards",
	    CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
	    "mac_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_ucast_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
	    "tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_mcast_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
	    "tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_bcast_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
	    "tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_ucast_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
	    "tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_mcast_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
	    "tx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_bcast_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
	    "tx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_err_drop_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
	    "tx_err_drop_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tpa_coalesced_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
	    "tpa_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tpa_coalesced_events",
	    CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
	    "tpa_coalesced_events");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tpa_aborts_num",
	    CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
	    "tpa_aborts_num");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tpa_not_coalesced_pkts",
	    CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
	    "tpa_not_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tpa_coalesced_bytes",
	    CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
	    "tpa_coalesced_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_64_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
	    "rx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_65_to_127_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
	    "rx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_128_to_255_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
	    "rx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_256_to_511_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
	    "rx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_512_to_1023_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
	    "rx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_1024_to_1518_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
	    "rx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_1519_to_1522_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
	    "rx_1519_to_1522_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_1523_to_2047_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
	    "rx_1523_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_2048_to_4095_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
	    "rx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_4096_to_9216_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
	    "rx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_9217_to_16383_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
	    "rx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_crc_errors",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
	    "rx_crc_errors");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_mac_crtl_frames",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
	    "rx_mac_crtl_frames");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_pause_frames",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
	    "rx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_pfc_frames",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
	    "rx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_align_errors",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
	    "rx_align_errors");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_carrier_errors",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
	    "rx_carrier_errors");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_oversize_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
	    "rx_oversize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_jabbers",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
	    "rx_jabbers");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_undersize_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
	    "rx_undersize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "rx_fragments",
	    CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
	    "rx_fragments");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_64_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
	    "tx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_65_to_127_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
	    "tx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_128_to_255_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
	    "tx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_256_to_511_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
	    "tx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_512_to_1023_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
	    "tx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_1024_to_1518_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
	    "tx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_1519_to_2047_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
	    "tx_1519_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_2048_to_4095_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
	    "tx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_4096_to_9216_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
	    "tx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_9217_to_16383_byte_packets",
	    CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
	    "tx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_pause_frames",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
	    "tx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_pfc_frames",
	    CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
	    "tx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
	    OID_AUTO, "tx_lpi_entry_count",
	    CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
	    "tx_lpi_entry_count");
2067
2068 SYSCTL_ADD_QUAD(ctx, children,
2069 OID_AUTO, "tx_total_collisions",
2070 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2071 "tx_total_collisions");
2072
2073 SYSCTL_ADD_QUAD(ctx, children,
2074 OID_AUTO, "brb_truncates",
2075 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2076 "brb_truncates");
2077
2078 SYSCTL_ADD_QUAD(ctx, children,
2079 OID_AUTO, "brb_discards",
2080 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2081 "brb_discards");
2082
2083 SYSCTL_ADD_QUAD(ctx, children,
2084 OID_AUTO, "rx_mac_bytes",
2085 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2086 "rx_mac_bytes");
2087
2088 SYSCTL_ADD_QUAD(ctx, children,
2089 OID_AUTO, "rx_mac_uc_packets",
2090 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2091 "rx_mac_uc_packets");
2092
2093 SYSCTL_ADD_QUAD(ctx, children,
2094 OID_AUTO, "rx_mac_mc_packets",
2095 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2096 "rx_mac_mc_packets");
2097
2098 SYSCTL_ADD_QUAD(ctx, children,
2099 OID_AUTO, "rx_mac_bc_packets",
2100 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2101 "rx_mac_bc_packets");
2102
2103 SYSCTL_ADD_QUAD(ctx, children,
2104 OID_AUTO, "rx_mac_frames_ok",
2105 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2106 "rx_mac_frames_ok");
2107
2108 SYSCTL_ADD_QUAD(ctx, children,
2109 OID_AUTO, "tx_mac_bytes",
2110 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2111 "tx_mac_bytes");
2112
2113 SYSCTL_ADD_QUAD(ctx, children,
2114 OID_AUTO, "tx_mac_uc_packets",
2115 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2116 "tx_mac_uc_packets");
2117
2118 SYSCTL_ADD_QUAD(ctx, children,
2119 OID_AUTO, "tx_mac_mc_packets",
2120 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2121 "tx_mac_mc_packets");
2122
2123 SYSCTL_ADD_QUAD(ctx, children,
2124 OID_AUTO, "tx_mac_bc_packets",
2125 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2126 "tx_mac_bc_packets");
2127
2128 SYSCTL_ADD_QUAD(ctx, children,
2129 OID_AUTO, "tx_mac_ctrl_frames",
2130 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2131 "tx_mac_ctrl_frames");
2132 return;
2133 }
2134
2135 static void
2136 qlnx_add_sysctls(qlnx_host_t *ha)
2137 {
2138 device_t dev = ha->pci_dev;
2139 struct sysctl_ctx_list *ctx;
2140 struct sysctl_oid_list *children;
2141
2142 ctx = device_get_sysctl_ctx(dev);
2143 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2144
2145 qlnx_add_fp_stats_sysctls(ha);
2146 qlnx_add_sp_stats_sysctls(ha);
2147
2148 if (qlnx_vf_device(ha) != 0)
2149 qlnx_add_hw_stats_sysctls(ha);
2150
2151 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2152 CTLFLAG_RD, qlnx_ver_str, 0,
2153 "Driver Version");
2154
2155 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2156 CTLFLAG_RD, ha->stormfw_ver, 0,
2157 "STORM Firmware Version");
2158
2159 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2160 CTLFLAG_RD, ha->mfw_ver, 0,
2161 "Management Firmware Version");
2162
2163 SYSCTL_ADD_UINT(ctx, children,
2164 OID_AUTO, "personality", CTLFLAG_RD,
2165 &ha->personality, ha->personality,
2166 "\tpersonality = 0 => Ethernet Only\n"
2167 "\tpersonality = 3 => Ethernet and RoCE\n"
2168 "\tpersonality = 4 => Ethernet and iWARP\n"
2169 "\tpersonality = 6 => Default in Shared Memory\n");
2170
2171 ha->dbg_level = 0;
2172 SYSCTL_ADD_UINT(ctx, children,
2173 OID_AUTO, "debug", CTLFLAG_RW,
2174 &ha->dbg_level, ha->dbg_level, "Debug Level");
2175
2176 ha->dp_level = 0x01;
2177 SYSCTL_ADD_UINT(ctx, children,
2178 OID_AUTO, "dp_level", CTLFLAG_RW,
2179 &ha->dp_level, ha->dp_level, "DP Level");
2180
2181 ha->dbg_trace_lro_cnt = 0;
2182 SYSCTL_ADD_UINT(ctx, children,
2183 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2184 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2185 "Trace LRO Counts");
2186
2187 ha->dbg_trace_tso_pkt_len = 0;
2188 SYSCTL_ADD_UINT(ctx, children,
2189 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2190 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2191 "Trace TSO packet lengths");
2192
2193 ha->dp_module = 0;
2194 SYSCTL_ADD_UINT(ctx, children,
2195 OID_AUTO, "dp_module", CTLFLAG_RW,
2196 &ha->dp_module, ha->dp_module, "DP Module");
2197
2198 ha->err_inject = 0;
2199
2200 SYSCTL_ADD_UINT(ctx, children,
2201 OID_AUTO, "err_inject", CTLFLAG_RW,
2202 &ha->err_inject, ha->err_inject, "Error Inject");
2203
2204 ha->storm_stats_enable = 0;
2205
2206 SYSCTL_ADD_UINT(ctx, children,
2207 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2208 &ha->storm_stats_enable, ha->storm_stats_enable,
2209 "Enable Storm Statistics Gathering");
2210
2211 ha->storm_stats_index = 0;
2212
2213 SYSCTL_ADD_UINT(ctx, children,
2214 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2215 &ha->storm_stats_index, ha->storm_stats_index,
2216 "Enable Storm Statistics Gathering Current Index");
2217
2218 ha->grcdump_taken = 0;
2219 SYSCTL_ADD_UINT(ctx, children,
2220 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2221 &ha->grcdump_taken, ha->grcdump_taken,
2222 "grcdump_taken");
2223
2224 ha->idle_chk_taken = 0;
2225 SYSCTL_ADD_UINT(ctx, children,
2226 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2227 &ha->idle_chk_taken, ha->idle_chk_taken,
2228 "idle_chk_taken");
2229
2230 SYSCTL_ADD_UINT(ctx, children,
2231 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2232 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2233 "rx_coalesce_usecs");
2234
2235 SYSCTL_ADD_UINT(ctx, children,
2236 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2237 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2238 "tx_coalesce_usecs");
2239
2240 SYSCTL_ADD_PROC(ctx, children,
2241 OID_AUTO, "trigger_dump",
2242 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2243 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2244
2245 SYSCTL_ADD_PROC(ctx, children,
2246 OID_AUTO, "set_rx_coalesce_usecs",
2247 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2248 (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2249 "rx interrupt coalesce period microseconds");
2250
2251 SYSCTL_ADD_PROC(ctx, children,
2252 OID_AUTO, "set_tx_coalesce_usecs",
2253 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2254 (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2255 "tx interrupt coalesce period microseconds");
2256
2257 ha->rx_pkt_threshold = 128;
2258 SYSCTL_ADD_UINT(ctx, children,
2259 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2260 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2261 "No. of Rx Pkts to process at a time");
2262
2263 ha->rx_jumbo_buf_eq_mtu = 0;
2264 SYSCTL_ADD_UINT(ctx, children,
2265 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2266 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2267 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2268 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
2269
2270 SYSCTL_ADD_QUAD(ctx, children,
2271 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2272 &ha->err_illegal_intr, "err_illegal_intr");
2273
2274 SYSCTL_ADD_QUAD(ctx, children,
2275 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2276 &ha->err_fp_null, "err_fp_null");
2277
2278 SYSCTL_ADD_QUAD(ctx, children,
2279 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2280 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2281 return;
2282 }
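
/*
 * Usage sketch (illustrative, not part of this file): the OIDs registered
 * above hang off the device's sysctl tree and can be read or set with
 * sysctl(8). The node name and unit number below are assumptions:
 *
 *	# sysctl dev.qlnxe.0.debug=1
 *	# sysctl dev.qlnxe.0.storm_stats_enable=1
 *	# sysctl dev.qlnxe.0.rx_pkt_threshold
 */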
2283
2284 /*****************************************************************************
2285 * Operating System Network Interface Functions
2286 *****************************************************************************/
2287
2288 static void
2289 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2290 {
2291 uint16_t device_id;
2292 if_t ifp;
2293
2294 ifp = ha->ifp = if_alloc(IFT_ETHER);
2295
2296 if (ifp == NULL)
2297 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2298
2299 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2300
2301 device_id = pci_get_device(ha->pci_dev);
2302
2303 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2304 if_setbaudrate(ifp, IF_Gbps(40));
2305 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2306 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2307 if_setbaudrate(ifp, IF_Gbps(25));
2308 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2309 if_setbaudrate(ifp, IF_Gbps(50));
2310 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2311 if_setbaudrate(ifp, IF_Gbps(100));
2312
2313 if_setcapabilities(ifp, IFCAP_LINKSTATE);
2314
2315 if_setinitfn(ifp, qlnx_init);
2316 if_setsoftc(ifp, ha);
2317 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2318 if_setioctlfn(ifp, qlnx_ioctl);
2319 if_settransmitfn(ifp, qlnx_transmit);
2320 if_setqflushfn(ifp, qlnx_qflush);
2321
2322 if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2323 if_setsendqready(ifp);
2324
2325 if_setgetcounterfn(ifp, qlnx_get_counter);
2326
2327 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2328
2329 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2330
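        /*
         * If NVRAM supplied an all-zero MAC address, synthesize one: keep a
         * fixed 00:0e:1e prefix (a QLogic OUI) and randomize the low three
         * octets from arc4random().
         */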
2331 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2332 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2333 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2334 uint32_t rnd;
2335
2336 rnd = arc4random();
2337
2338 ha->primary_mac[0] = 0x00;
2339 ha->primary_mac[1] = 0x0e;
2340 ha->primary_mac[2] = 0x1e;
2341 ha->primary_mac[3] = rnd & 0xFF;
2342 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2343 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2344 }
2345
2346 ether_ifattach(ifp, ha->primary_mac);
2347 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2348
2349 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
2350 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
2351
2352 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
2353 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
2354 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2355 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
2356 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
2357 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
2358 if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
2359 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
2360
2361 if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE -
2362 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2363 if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2364 if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);
2365
2366 if_setcapenable(ifp, if_getcapabilities(ifp));
2367
2368 if_sethwassist(ifp, CSUM_IP);
2369 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
2370 if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
2371 if_sethwassistbits(ifp, CSUM_TSO, 0);
2372
2373 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2374
2375 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
2376 qlnx_media_status);
2377
2378 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2379 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2380 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2381 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2382 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2383 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2384 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2385 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2386 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2387 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2388 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2389 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2390 ifmedia_add(&ha->media,
2391 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2392 ifmedia_add(&ha->media,
2393 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2394 ifmedia_add(&ha->media,
2395 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2396 }
2397
2398 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2399 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2400
2401 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2402
2403 QL_DPRINT2(ha, "exit\n");
2404
2405 return;
2406 }
2407
2408 static void
2409 qlnx_init_locked(qlnx_host_t *ha)
2410 {
2411 if_t ifp = ha->ifp;
2412
2413 QL_DPRINT1(ha, "Driver Initialization start \n");
2414
2415 qlnx_stop(ha);
2416
2417 if (qlnx_load(ha) == 0) {
2418 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2419 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2420
2421 #ifdef QLNX_ENABLE_IWARP
2422 if (qlnx_vf_device(ha) != 0) {
2423 qlnx_rdma_dev_open(ha);
2424 }
2425 #endif /* #ifdef QLNX_ENABLE_IWARP */
2426 }
2427
2428 return;
2429 }
2430
2431 static void
2432 qlnx_init(void *arg)
2433 {
2434 qlnx_host_t *ha;
2435
2436 ha = (qlnx_host_t *)arg;
2437
2438 QL_DPRINT2(ha, "enter\n");
2439
2440 QLNX_LOCK(ha);
2441 qlnx_init_locked(ha);
2442 QLNX_UNLOCK(ha);
2443
2444 QL_DPRINT2(ha, "exit\n");
2445
2446 return;
2447 }
2448
2449 static int
2450 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2451 {
2452 struct ecore_filter_mcast *mcast;
2453 struct ecore_dev *cdev;
2454 int rc;
2455
2456 cdev = &ha->cdev;
2457
2458 mcast = &ha->ecore_mcast;
2459 bzero(mcast, sizeof(struct ecore_filter_mcast));
2460
2461 if (add_mac)
2462 mcast->opcode = ECORE_FILTER_ADD;
2463 else
2464 mcast->opcode = ECORE_FILTER_REMOVE;
2465
2466 mcast->num_mc_addrs = 1;
2467 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2468
2469 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2470
2471 return (rc);
2472 }
2473
2474 static int
2475 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2476 {
2477 int i;
2478
2479 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2480 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2481 return 0; /* it has already been added */
2482 }
2483
2484 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2485 if ((ha->mcast[i].addr[0] == 0) &&
2486 (ha->mcast[i].addr[1] == 0) &&
2487 (ha->mcast[i].addr[2] == 0) &&
2488 (ha->mcast[i].addr[3] == 0) &&
2489 (ha->mcast[i].addr[4] == 0) &&
2490 (ha->mcast[i].addr[5] == 0)) {
2491 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2492 return (-1);
2493
2494 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2495 ha->nmcast++;
2496
2497 return 0;
2498 }
2499 }
2500 return 0;
2501 }
2502
2503 static int
2504 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2505 {
2506 int i;
2507
2508 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2509 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2510 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2511 return (-1);
2512
2513 ha->mcast[i].addr[0] = 0;
2514 ha->mcast[i].addr[1] = 0;
2515 ha->mcast[i].addr[2] = 0;
2516 ha->mcast[i].addr[3] = 0;
2517 ha->mcast[i].addr[4] = 0;
2518 ha->mcast[i].addr[5] = 0;
2519
2520 ha->nmcast--;
2521
2522 return 0;
2523 }
2524 }
2525 return 0;
2526 }
2527
2528 /*
2529 * Name: qlnx_hw_set_multi
2530 * Function: Sets the multicast addresses provided by the host O.S.
2531 * into the hardware (for the given interface)
2532 */
2533 static void
2534 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2535 uint32_t add_mac)
2536 {
2537 int i;
2538
2539 for (i = 0; i < mcnt; i++) {
2540 if (add_mac) {
2541 if (qlnx_hw_add_mcast(ha, mta))
2542 break;
2543 } else {
2544 if (qlnx_hw_del_mcast(ha, mta))
2545 break;
2546 }
2547
2548 mta += ETHER_HDR_LEN;
2549 }
2550 return;
2551 }
2552
2553 static u_int
2554 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2555 {
2556 uint8_t *mta = arg;
2557
2558 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2559 return (0);
2560
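        /*
         * Note: entries are packed at a stride of ETHER_HDR_LEN (14) bytes,
         * not ETHER_ADDR_LEN (6); qlnx_hw_set_multi() walks the table with
         * the same stride, so the two must stay in agreement.
         */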
2561 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2562
2563 return (1);
2564 }
2565
2566 static int
2567 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2568 {
2569 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2570 if_t ifp = ha->ifp;
2571 u_int mcnt;
2572
2573 if (qlnx_vf_device(ha) == 0)
2574 return (0);
2575
2576 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2577
2578 QLNX_LOCK(ha);
2579 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2580 QLNX_UNLOCK(ha);
2581
2582 return (0);
2583 }
2584
2585 static int
2586 qlnx_set_promisc(qlnx_host_t *ha, int enabled)
2587 {
2588 int rc = 0;
2589 uint8_t filter;
2590
2591 if (qlnx_vf_device(ha) == 0)
2592 return (0);
2593
2594 filter = ha->filter;
2595 if (enabled) {
2596 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2597 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2598 } else {
2599 filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2600 filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
2601 }
2602
2603 rc = qlnx_set_rx_accept_filter(ha, filter);
2604 return (rc);
2605 }
2606
2607 static int
2608 qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
2609 {
2610 int rc = 0;
2611 uint8_t filter;
2612
2613 if (qlnx_vf_device(ha) == 0)
2614 return (0);
2615
2616 filter = ha->filter;
2617 if (enabled) {
2618 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2619 } else {
2620 filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2621 }
2622 rc = qlnx_set_rx_accept_filter(ha, filter);
2623
2624 return (rc);
2625 }
2626
2627 static int
2628 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
2629 {
2630 int ret = 0, mask;
2631 struct ifreq *ifr = (struct ifreq *)data;
2632 #ifdef INET
2633 struct ifaddr *ifa = (struct ifaddr *)data;
2634 #endif
2635 qlnx_host_t *ha;
2636
2637 ha = (qlnx_host_t *)if_getsoftc(ifp);
2638
2639 switch (cmd) {
2640 case SIOCSIFADDR:
2641 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2642
2643 #ifdef INET
2644 if (ifa->ifa_addr->sa_family == AF_INET) {
2645 if_setflagbits(ifp, IFF_UP, 0);
2646 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2647 QLNX_LOCK(ha);
2648 qlnx_init_locked(ha);
2649 QLNX_UNLOCK(ha);
2650 }
2651 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2652 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2653
2654 arp_ifinit(ifp, ifa);
2655 break;
2656 }
2657 #endif
2658 ether_ioctl(ifp, cmd, data);
2659 break;
2660
2661 case SIOCSIFMTU:
2662 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2663
2664 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2665 ret = EINVAL;
2666 } else {
2667 QLNX_LOCK(ha);
2668 if_setmtu(ifp, ifr->ifr_mtu);
2669 ha->max_frame_size =
2670 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2671 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2672 qlnx_init_locked(ha);
2673 }
2674
2675 QLNX_UNLOCK(ha);
2676 }
2677
2678 break;
2679
2680 case SIOCSIFFLAGS:
2681 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2682
2683 QLNX_LOCK(ha);
2684
2685 if (if_getflags(ifp) & IFF_UP) {
2686 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2687 if ((if_getflags(ifp) ^ ha->if_flags) &
2688 IFF_PROMISC) {
2689 ret = qlnx_set_promisc(ha, if_getflags(ifp) & IFF_PROMISC);
2690 } else if ((if_getflags(ifp) ^ ha->if_flags) &
2691 IFF_ALLMULTI) {
2692 ret = qlnx_set_allmulti(ha, if_getflags(ifp) & IFF_ALLMULTI);
2693 }
2694 } else {
2695 ha->max_frame_size = if_getmtu(ifp) +
2696 ETHER_HDR_LEN + ETHER_CRC_LEN;
2697 qlnx_init_locked(ha);
2698 }
2699 } else {
2700 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2701 qlnx_stop(ha);
2702 }
2703
2704 ha->if_flags = if_getflags(ifp);
2705 QLNX_UNLOCK(ha);
2706 break;
2707
2708 case SIOCADDMULTI:
2709 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2710
2711 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2712 if (qlnx_set_multi(ha, 1))
2713 ret = EINVAL;
2714 }
2715 break;
2716
2717 case SIOCDELMULTI:
2718 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2719
2720 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2721 if (qlnx_set_multi(ha, 0))
2722 ret = EINVAL;
2723 }
2724 break;
2725
2726 case SIOCSIFMEDIA:
2727 case SIOCGIFMEDIA:
2728 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2729
2730 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2731 break;
2732
2733 case SIOCSIFCAP:
2734
2735 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2736
2737 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2738
2739 if (mask & IFCAP_HWCSUM)
2740 if_togglecapenable(ifp, IFCAP_HWCSUM);
2741 if (mask & IFCAP_TSO4)
2742 if_togglecapenable(ifp, IFCAP_TSO4);
2743 if (mask & IFCAP_TSO6)
2744 if_togglecapenable(ifp, IFCAP_TSO6);
2745 if (mask & IFCAP_VLAN_HWTAGGING)
2746 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2747 if (mask & IFCAP_VLAN_HWTSO)
2748 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2749 if (mask & IFCAP_LRO)
2750 if_togglecapenable(ifp, IFCAP_LRO);
2751
2752 QLNX_LOCK(ha);
2753
2754 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2755 qlnx_init_locked(ha);
2756
2757 QLNX_UNLOCK(ha);
2758
2759 VLAN_CAPABILITIES(ifp);
2760 break;
2761
2762 case SIOCGI2C:
2763 {
2764 struct ifi2creq i2c;
2765 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2766 struct ecore_ptt *p_ptt;
2767
2768 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2769
2770 if (ret)
2771 break;
2772
2773 if ((i2c.len > sizeof (i2c.data)) ||
2774 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2775 ret = EINVAL;
2776 break;
2777 }
2778
2779 p_ptt = ecore_ptt_acquire(p_hwfn);
2780
2781 if (!p_ptt) {
2782 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2783 ret = -1;
2784 break;
2785 }
2786
2787 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2788 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2789 i2c.len, &i2c.data[0]);
2790
2791 ecore_ptt_release(p_hwfn, p_ptt);
2792
2793 if (ret) {
2794 ret = -1;
2795 break;
2796 }
2797
2798 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2799
2800 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2801 len = %d addr = 0x%02x offset = 0x%04x \
2802 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2803 0x%02x 0x%02x 0x%02x\n",
2804 ret, i2c.len, i2c.dev_addr, i2c.offset,
2805 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2806 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2807 break;
2808 }
2809
2810 default:
2811 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2812 ret = ether_ioctl(ifp, cmd, data);
2813 break;
2814 }
2815
2816 return (ret);
2817 }
2818
2819 static int
2820 qlnx_media_change(if_t ifp)
2821 {
2822 qlnx_host_t *ha;
2823 struct ifmedia *ifm;
2824 int ret = 0;
2825
2826 ha = (qlnx_host_t *)if_getsoftc(ifp);
2827
2828 QL_DPRINT2(ha, "enter\n");
2829
2830 ifm = &ha->media;
2831
2832 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2833 ret = EINVAL;
2834
2835 QL_DPRINT2(ha, "exit\n");
2836
2837 return (ret);
2838 }
2839
2840 static void
2841 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2842 {
2843 qlnx_host_t *ha;
2844
2845 ha = (qlnx_host_t *)if_getsoftc(ifp);
2846
2847 QL_DPRINT2(ha, "enter\n");
2848
2849 ifmr->ifm_status = IFM_AVALID;
2850 ifmr->ifm_active = IFM_ETHER;
2851
2852 if (ha->link_up) {
2853 ifmr->ifm_status |= IFM_ACTIVE;
2854 ifmr->ifm_active |=
2855 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2856
2857 if (ha->if_link.link_partner_caps &
2858 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2859 ifmr->ifm_active |=
2860 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2861 }
2862
2863 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2864
2865 return;
2866 }
2867
2868 static void
2869 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2870 struct qlnx_tx_queue *txq)
2871 {
2872 u16 idx;
2873 struct mbuf *mp;
2874 bus_dmamap_t map;
2875 int i;
2876 // struct eth_tx_bd *tx_data_bd;
2877 struct eth_tx_1st_bd *first_bd;
2878 int nbds = 0;
2879
2880 idx = txq->sw_tx_cons;
2881 mp = txq->sw_tx_ring[idx].mp;
2882 map = txq->sw_tx_ring[idx].map;
2883
2884 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2885 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2886
2887 QL_DPRINT1(ha, "(mp == NULL) "
2888 " tx_idx = 0x%x"
2889 " ecore_prod_idx = 0x%x"
2890 " ecore_cons_idx = 0x%x"
2891 " hw_bd_cons = 0x%x"
2892 " txq_db_last = 0x%x"
2893 " elem_left = 0x%x\n",
2894 fp->rss_id,
2895 ecore_chain_get_prod_idx(&txq->tx_pbl),
2896 ecore_chain_get_cons_idx(&txq->tx_pbl),
2897 le16toh(*txq->hw_cons_ptr),
2898 txq->tx_db.raw,
2899 ecore_chain_get_elem_left(&txq->tx_pbl));
2900
2901 fp->err_tx_free_pkt_null++;
2902
2903 //DEBUG
2904 qlnx_trigger_dump(ha);
2905
2906 return;
2907 } else {
2908 QLNX_INC_OPACKETS((ha->ifp));
2909 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2910
2911 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2912 bus_dmamap_unload(ha->tx_tag, map);
2913
2914 fp->tx_pkts_freed++;
2915 fp->tx_pkts_completed++;
2916
2917 m_freem(mp);
2918 }
2919
2920 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2921 nbds = first_bd->data.nbds;
2922
2923 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2924
2925 for (i = 1; i < nbds; i++) {
2926 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2927 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2928 }
2929 txq->sw_tx_ring[idx].flags = 0;
2930 txq->sw_tx_ring[idx].mp = NULL;
2931 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2932
2933 return;
2934 }
2935
2936 static void
2937 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2938 struct qlnx_tx_queue *txq)
2939 {
2940 u16 hw_bd_cons;
2941 u16 ecore_cons_idx;
2942 uint16_t diff;
2943 uint16_t idx, idx2;
2944
2945 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2946
2947 while (hw_bd_cons !=
2948 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
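                /*
                 * Both indices are 16-bit, so the modulo-2^16 subtraction
                 * yields the correct distance even after the producer wraps.
                 */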
2949 diff = hw_bd_cons - ecore_cons_idx;
2950 if ((diff > TX_RING_SIZE) ||
2951 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2952 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2953
2954 QL_DPRINT1(ha, "(diff = 0x%x) "
2955 " tx_idx = 0x%x"
2956 " ecore_prod_idx = 0x%x"
2957 " ecore_cons_idx = 0x%x"
2958 " hw_bd_cons = 0x%x"
2959 " txq_db_last = 0x%x"
2960 " elem_left = 0x%x\n",
2961 diff,
2962 fp->rss_id,
2963 ecore_chain_get_prod_idx(&txq->tx_pbl),
2964 ecore_chain_get_cons_idx(&txq->tx_pbl),
2965 le16toh(*txq->hw_cons_ptr),
2966 txq->tx_db.raw,
2967 ecore_chain_get_elem_left(&txq->tx_pbl));
2968
2969 fp->err_tx_cons_idx_conflict++;
2970
2971 //DEBUG
2972 qlnx_trigger_dump(ha);
2973 }
2974
2975 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2976 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2977 prefetch(txq->sw_tx_ring[idx].mp);
2978 prefetch(txq->sw_tx_ring[idx2].mp);
2979
2980 qlnx_free_tx_pkt(ha, fp, txq);
2981
2982 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2983 }
2984 return;
2985 }
2986
2987 static int
2988 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
2989 {
2990 int ret = 0;
2991 struct qlnx_tx_queue *txq;
2992 qlnx_host_t * ha;
2993 uint16_t elem_left;
2994
2995 txq = fp->txq[0];
2996 ha = (qlnx_host_t *)fp->edev;
2997
2998 if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2999 if(mp != NULL)
3000 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3001 return (ret);
3002 }
3003
3004 if(mp != NULL)
3005 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3006
3007 mp = drbr_peek(ifp, fp->tx_br);
3008
3009 while (mp != NULL) {
3010 if (qlnx_send(ha, fp, &mp)) {
3011 if (mp != NULL) {
3012 drbr_putback(ifp, fp->tx_br, mp);
3013 } else {
3014 fp->tx_pkts_processed++;
3015 drbr_advance(ifp, fp->tx_br);
3016 }
3017 goto qlnx_transmit_locked_exit;
3018
3019 } else {
3020 drbr_advance(ifp, fp->tx_br);
3021 fp->tx_pkts_transmitted++;
3022 fp->tx_pkts_processed++;
3023 }
3024
3025 mp = drbr_peek(ifp, fp->tx_br);
3026 }
3027
3028 qlnx_transmit_locked_exit:
3029 if ((qlnx_num_tx_compl(ha, fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3030 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3031 < QLNX_TX_ELEM_MAX_THRESH))
3032 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3033
3034 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3035 return ret;
3036 }
3037
3038 static int
3039 qlnx_transmit(if_t ifp, struct mbuf *mp)
3040 {
3041 qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp);
3042 struct qlnx_fastpath *fp;
3043 int rss_id = 0, ret = 0;
3044
3045 #ifdef QLNX_TRACEPERF_DATA
3046 uint64_t tx_pkts = 0, tx_compl = 0;
3047 #endif
3048
3049 QL_DPRINT2(ha, "enter\n");
3050
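        /*
         * Pick a fastpath from the mbuf's flow hash: fold the flowid into
         * the RSS indirection table range first, then into the number of
         * RSS queues actually configured.
         */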
3051 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3052 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3053 ha->num_rss;
3054
3055 fp = &ha->fp_array[rss_id];
3056
3057 if (fp->tx_br == NULL) {
3058 ret = EINVAL;
3059 goto qlnx_transmit_exit;
3060 }
3061
3062 if (mtx_trylock(&fp->tx_mtx)) {
3063 #ifdef QLNX_TRACEPERF_DATA
3064 tx_pkts = fp->tx_pkts_transmitted;
3065 tx_compl = fp->tx_pkts_completed;
3066 #endif
3067
3068 ret = qlnx_transmit_locked(ifp, fp, mp);
3069
3070 #ifdef QLNX_TRACEPERF_DATA
3071 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3072 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3073 #endif
3074 mtx_unlock(&fp->tx_mtx);
3075 } else {
3076 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3077 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3078 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3079 }
3080 }
3081
3082 qlnx_transmit_exit:
3083
3084 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3085 return ret;
3086 }
3087
3088 static void
3089 qlnx_qflush(if_t ifp)
3090 {
3091 int rss_id;
3092 struct qlnx_fastpath *fp;
3093 struct mbuf *mp;
3094 qlnx_host_t *ha;
3095
3096 ha = (qlnx_host_t *)if_getsoftc(ifp);
3097
3098 QL_DPRINT2(ha, "enter\n");
3099
3100 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3101 fp = &ha->fp_array[rss_id];
3102
3103 if (fp == NULL)
3104 continue;
3105
3106 if (fp->tx_br) {
3107 mtx_lock(&fp->tx_mtx);
3108
3109 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3110 fp->tx_pkts_freed++;
3111 m_freem(mp);
3112 }
3113 mtx_unlock(&fp->tx_mtx);
3114 }
3115 }
3116 QL_DPRINT2(ha, "exit\n");
3117
3118 return;
3119 }
3120
3121 static void
3122 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3123 {
3124 uint32_t offset;
3125
3126 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3127
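        /*
         * Post the new producer value through the doorbell BAR, then issue
         * read barriers on both the register and doorbell resources so the
         * posted write is flushed to the adapter before we return.
         */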
3128 bus_write_4(ha->pci_dbells, offset, value);
3129 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3130 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
3131
3132 return;
3133 }
3134
3135 static uint32_t
3136 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3137 {
3138 struct ether_vlan_header *eh = NULL;
3139 struct ip *ip = NULL;
3140 struct ip6_hdr *ip6 = NULL;
3141 struct tcphdr *th = NULL;
3142 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3143 uint16_t etype = 0;
3144 uint8_t buf[sizeof(struct ip6_hdr)];
3145
3146 eh = mtod(mp, struct ether_vlan_header *);
3147
3148 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3149 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3150 etype = ntohs(eh->evl_proto);
3151 } else {
3152 ehdrlen = ETHER_HDR_LEN;
3153 etype = ntohs(eh->evl_encap_proto);
3154 }
3155
3156 switch (etype) {
3157 case ETHERTYPE_IP:
3158 ip = (struct ip *)(mp->m_data + ehdrlen);
3159
3160 ip_hlen = sizeof (struct ip);
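                /* Assumes an IPv4 header without options (fixed ip_hlen). */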
3161
3162 if (mp->m_len < (ehdrlen + ip_hlen)) {
3163 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3164 ip = (struct ip *)buf;
3165 }
3166
3167 th = (struct tcphdr *)(ip + 1);
3168 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3169 break;
3170
3171 case ETHERTYPE_IPV6:
3172 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3173
3174 ip_hlen = sizeof(struct ip6_hdr);
3175
3176 if (mp->m_len < (ehdrlen + ip_hlen)) {
3177 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3178 buf);
3179 ip6 = (struct ip6_hdr *)buf;
3180 }
3181 th = (struct tcphdr *)(ip6 + 1);
3182 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3183 break;
3184
3185 default:
3186 break;
3187 }
3188
3189 return (offset);
3190 }
3191
3192 static __inline int
3193 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3194 uint32_t offset)
3195 {
3196 int i;
3197 uint32_t sum, nbds_in_hdr = 1;
3198 uint32_t window;
3199 bus_dma_segment_t *s_seg;
3200
3201 /* If the header spans multiple segments, skip those segments */
3202
3203 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3204 return (0);
3205
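        /*
         * Sliding-window check: every run of
         * (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr) consecutive payload
         * segments must cover at least ETH_TX_LSO_WINDOW_MIN_LEN bytes;
         * returning -1 sends the caller down its m_defrag() path.
         */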
3206 i = 0;
3207
3208 while ((i < nsegs) && (offset >= segs->ds_len)) {
3209 offset = offset - segs->ds_len;
3210 segs++;
3211 i++;
3212 nbds_in_hdr++;
3213 }
3214
3215 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3216
3217 nsegs = nsegs - i;
3218
3219 while (nsegs >= window) {
3220 sum = 0;
3221 s_seg = segs;
3222
3223 for (i = 0; i < window; i++){
3224 sum += s_seg->ds_len;
3225 s_seg++;
3226 }
3227
3228 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3229 fp->tx_lso_wnd_min_len++;
3230 return (-1);
3231 }
3232
3233 nsegs = nsegs - 1;
3234 segs++;
3235 }
3236
3237 return (0);
3238 }
3239
3240 static int
3241 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3242 {
3243 bus_dma_segment_t *segs;
3244 bus_dmamap_t map = 0;
3245 uint32_t nsegs = 0;
3246 int ret = -1;
3247 struct mbuf *m_head = *m_headp;
3248 uint16_t idx = 0;
3249 uint16_t elem_left;
3250
3251 uint8_t nbd = 0;
3252 struct qlnx_tx_queue *txq;
3253
3254 struct eth_tx_1st_bd *first_bd;
3255 struct eth_tx_2nd_bd *second_bd;
3256 struct eth_tx_3rd_bd *third_bd;
3257 struct eth_tx_bd *tx_data_bd;
3258
3259 int seg_idx = 0;
3260 uint32_t nbds_in_hdr = 0;
3261 uint32_t offset = 0;
3262
3263 #ifdef QLNX_TRACE_PERF_DATA
3264 uint16_t bd_used;
3265 #endif
3266
3267 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3268
3269 if (!ha->link_up)
3270 return (-1);
3271
3272 first_bd = NULL;
3273 second_bd = NULL;
3274 third_bd = NULL;
3275 tx_data_bd = NULL;
3276
3277 txq = fp->txq[0];
3278
3279 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3280 QLNX_TX_ELEM_MIN_THRESH) {
3281 fp->tx_nsegs_gt_elem_left++;
3282 fp->err_tx_nsegs_gt_elem_left++;
3283
3284 return (ENOBUFS);
3285 }
3286
3287 idx = txq->sw_tx_prod;
3288
3289 map = txq->sw_tx_ring[idx].map;
3290 segs = txq->segs;
3291
3292 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3293 BUS_DMA_NOWAIT);
3294
3295 if (ha->dbg_trace_tso_pkt_len) {
3296 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3297 if (!fp->tx_tso_min_pkt_len) {
3298 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3299 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3300 } else {
3301 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3302 fp->tx_tso_min_pkt_len =
3303 m_head->m_pkthdr.len;
3304 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3305 fp->tx_tso_max_pkt_len =
3306 m_head->m_pkthdr.len;
3307 }
3308 }
3309 }
3310
3311 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3312 offset = qlnx_tcp_offset(ha, m_head);
3313
3314 if ((ret == EFBIG) ||
3315 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3316 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3317 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3318 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3319 struct mbuf *m;
3320
3321 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3322
3323 fp->tx_defrag++;
3324
3325 m = m_defrag(m_head, M_NOWAIT);
3326 if (m == NULL) {
3327 fp->err_tx_defrag++;
3328 fp->tx_pkts_freed++;
3329 m_freem(m_head);
3330 *m_headp = NULL;
3331 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3332 return (ENOBUFS);
3333 }
3334
3335 m_head = m;
3336 *m_headp = m_head;
3337
3338 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3339 segs, &nsegs, BUS_DMA_NOWAIT))) {
3340 fp->err_tx_defrag_dmamap_load++;
3341
3342 QL_DPRINT1(ha,
3343 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3344 ret, m_head->m_pkthdr.len);
3345
3346 fp->tx_pkts_freed++;
3347 m_freem(m_head);
3348 *m_headp = NULL;
3349
3350 return (ret);
3351 }
3352
3353 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3354 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3355 fp->err_tx_non_tso_max_seg++;
3356
3357 QL_DPRINT1(ha,
3358 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3359 ret, nsegs, m_head->m_pkthdr.len);
3360
3361 fp->tx_pkts_freed++;
3362 m_freem(m_head);
3363 *m_headp = NULL;
3364
3365 return (ret);
3366 }
3367 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3368 offset = qlnx_tcp_offset(ha, m_head);
3369
3370 } else if (ret) {
3371 fp->err_tx_dmamap_load++;
3372
3373 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3374 ret, m_head->m_pkthdr.len);
3375 fp->tx_pkts_freed++;
3376 m_freem(m_head);
3377 *m_headp = NULL;
3378 return (ret);
3379 }
3380
3381 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3382
3383 if (ha->dbg_trace_tso_pkt_len) {
3384 if (nsegs < QLNX_FP_MAX_SEGS)
3385 fp->tx_pkts[(nsegs - 1)]++;
3386 else
3387 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3388 }
3389
3390 #ifdef QLNX_TRACE_PERF_DATA
3391 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3392 if(m_head->m_pkthdr.len <= 2048)
3393 fp->tx_pkts_hist[0]++;
3394 else if((m_head->m_pkthdr.len > 2048) &&
3395 (m_head->m_pkthdr.len <= 4096))
3396 fp->tx_pkts_hist[1]++;
3397 else if((m_head->m_pkthdr.len > 4096) &&
3398 (m_head->m_pkthdr.len <= 8192))
3399 fp->tx_pkts_hist[2]++;
3400 else if((m_head->m_pkthdr.len > 8192) &&
3401 (m_head->m_pkthdr.len <= 12288 ))
3402 fp->tx_pkts_hist[3]++;
3403 else if((m_head->m_pkthdr.len > 12288) &&
3404 (m_head->m_pkthdr.len <= 16384))
3405 fp->tx_pkts_hist[4]++;
3406 else if((m_head->m_pkthdr.len > 16384) &&
3407 (m_head->m_pkthdr.len <= 20480))
3408 fp->tx_pkts_hist[5]++;
3409 else if((m_head->m_pkthdr.len > 20480) &&
3410 (m_head->m_pkthdr.len <= 24576))
3411 fp->tx_pkts_hist[6]++;
3412 else if((m_head->m_pkthdr.len > 24576) &&
3413 (m_head->m_pkthdr.len <= 28672))
3414 fp->tx_pkts_hist[7]++;
3415 else if((m_head->m_pkthdr.len > 28672) &&
3416 (m_head->m_pkthdr.len <= 32768))
3417 fp->tx_pkts_hist[8]++;
3418 else if((m_head->m_pkthdr.len > 32768) &&
3419 (m_head->m_pkthdr.len <= 36864))
3420 fp->tx_pkts_hist[9]++;
3421 else if((m_head->m_pkthdr.len > 36864) &&
3422 (m_head->m_pkthdr.len <= 40960))
3423 fp->tx_pkts_hist[10]++;
3424 else if((m_head->m_pkthdr.len > 40960) &&
3425 (m_head->m_pkthdr.len <= 45056))
3426 fp->tx_pkts_hist[11]++;
3427 else if((m_head->m_pkthdr.len > 45056) &&
3428 (m_head->m_pkthdr.len <= 49152))
3429 fp->tx_pkts_hist[12]++;
3430 else if((m_head->m_pkthdr.len > 49152) &&
3431 (m_head->m_pkthdr.len <= 53248))
3432 fp->tx_pkts_hist[13]++;
3433 else if((m_head->m_pkthdr.len > 53248) &&
3434 (m_head->m_pkthdr.len <= 57344))
3435 fp->tx_pkts_hist[14]++;
3436 else if((m_head->m_pkthdr.len > 57344) &&
3437 (m_head->m_pkthdr.len <= 61440))
3438 fp->tx_pkts_hist[15]++;
3439 else if((m_head->m_pkthdr.len > 61440) &&
3440 (m_head->m_pkthdr.len <= 65536))
3441 fp->tx_pkts_hist[16]++;
3442 else
3443 fp->tx_pkts_hist[17]++;
3444 }
3445
3446 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3447 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3448 bd_used = TX_RING_SIZE - elem_left;
3449
3450 if(bd_used <= 100)
3451 fp->tx_pkts_q[0]++;
3452 else if((bd_used > 100) && (bd_used <= 500))
3453 fp->tx_pkts_q[1]++;
3454 else if((bd_used > 500) && (bd_used <= 1000))
3455 fp->tx_pkts_q[2]++;
3456 else if((bd_used > 1000) && (bd_used <= 2000))
3457 fp->tx_pkts_q[3]++;
3458 else if((bd_used > 2000) && (bd_used <= 4000))
3459 fp->tx_pkts_q[4]++;
3460 else if((bd_used > 4000) && (bd_used <= 5000))
3461 fp->tx_pkts_q[5]++;
3462 else if((bd_used > 5000) && (bd_used <= 7000))
3463 fp->tx_pkts_q[6]++;
3464 else if((bd_used > 7000) && (bd_used <= 8000))
3465 fp->tx_pkts_q[7]++;
3466 else if((bd_used > 8000) && (bd_used <= 9000))
3467 fp->tx_pkts_q[8]++;
3468 else if((bd_used > 9000) && (bd_used <= 10000))
3469 fp->tx_pkts_q[9]++;
3470 else if((bd_used > 10000) && (bd_used <= 11000))
3471 fp->tx_pkts_q[10]++;
3472 else if((bd_used > 11000) && (bd_used <= 12000))
3473 fp->tx_pkts_q[11]++;
3474 else if((bd_used > 12000) && (bd_used <= 13000))
3475 fp->tx_pkts_q[12]++;
3476 else if((bd_used > 13000) && (bd_used <= 14000))
3477 fp->tx_pkts_q[13]++;
3478 else if((bd_used > 14000) && (bd_used <= 15000))
3479 fp->tx_pkts_q[14]++;
3480 else if((bd_used > 15000) && (bd_used <= 16000))
3481 fp->tx_pkts_q[15]++;
3482 else
3483 fp->tx_pkts_q[16]++;
3484 }
3485
3486 #endif /* end of QLNX_TRACE_PERF_DATA */
3487
3488 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3489 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3490 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3491 " in chain[%d] trying to free packets\n",
3492 nsegs, elem_left, fp->rss_id);
3493
3494 fp->tx_nsegs_gt_elem_left++;
3495
3496 (void)qlnx_tx_int(ha, fp, txq);
3497
3498 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3499 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3500 QL_DPRINT1(ha,
3501 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3502 nsegs, elem_left, fp->rss_id);
3503
3504 fp->err_tx_nsegs_gt_elem_left++;
3505 fp->tx_ring_full = 1;
3506 if (ha->storm_stats_enable)
3507 ha->storm_stats_gather = 1;
3508 return (ENOBUFS);
3509 }
3510 }
3511
3512 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3513
3514 txq->sw_tx_ring[idx].mp = m_head;
3515
3516 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3517
3518 memset(first_bd, 0, sizeof(*first_bd));
3519
3520 first_bd->data.bd_flags.bitfields =
3521 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3522
3523 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3524
3525 nbd++;
3526
3527 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3528 first_bd->data.bd_flags.bitfields |=
3529 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3530 }
3531
3532 if (m_head->m_pkthdr.csum_flags &
3533 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3534 first_bd->data.bd_flags.bitfields |=
3535 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3536 }
3537
3538 if (m_head->m_flags & M_VLANTAG) {
3539 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3540 first_bd->data.bd_flags.bitfields |=
3541 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3542 }
3543
3544 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3545 first_bd->data.bd_flags.bitfields |=
3546 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3547 first_bd->data.bd_flags.bitfields |=
3548 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3549
3550 nbds_in_hdr = 1;
3551
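                /*
                 * Three layouts, keyed on where the headers end (offset)
                 * relative to the first DMA segment:
                 *   1) offset == first segment length: the header exactly
                 *      fills the first BD; payload starts in the second.
                 *   2) offset <  first segment length: split the first
                 *      segment so BD1 carries only header bytes.
                 *   3) offset >  first segment length: the header spills
                 *      across several segments; count them in nbds_in_hdr
                 *      for the third BD.
                 */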
3552 if (offset == segs->ds_len) {
3553 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3554 segs++;
3555 seg_idx++;
3556
3557 second_bd = (struct eth_tx_2nd_bd *)
3558 ecore_chain_produce(&txq->tx_pbl);
3559 memset(second_bd, 0, sizeof(*second_bd));
3560 nbd++;
3561
3562 if (seg_idx < nsegs) {
3563 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3564 (segs->ds_addr), (segs->ds_len));
3565 segs++;
3566 seg_idx++;
3567 }
3568
3569 third_bd = (struct eth_tx_3rd_bd *)
3570 ecore_chain_produce(&txq->tx_pbl);
3571 memset(third_bd, 0, sizeof(*third_bd));
3572 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3573 third_bd->data.bitfields |=
3574 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3575 nbd++;
3576
3577 if (seg_idx < nsegs) {
3578 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3579 (segs->ds_addr), (segs->ds_len));
3580 segs++;
3581 seg_idx++;
3582 }
3583
3584 for (; seg_idx < nsegs; seg_idx++) {
3585 tx_data_bd = (struct eth_tx_bd *)
3586 ecore_chain_produce(&txq->tx_pbl);
3587 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3588 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3589 segs->ds_addr,\
3590 segs->ds_len);
3591 segs++;
3592 nbd++;
3593 }
3594
3595 } else if (offset < segs->ds_len) {
3596 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3597
3598 second_bd = (struct eth_tx_2nd_bd *)
3599 ecore_chain_produce(&txq->tx_pbl);
3600 memset(second_bd, 0, sizeof(*second_bd));
3601 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3602 (segs->ds_addr + offset),\
3603 (segs->ds_len - offset));
3604 nbd++;
3605 segs++;
3606
3607 third_bd = (struct eth_tx_3rd_bd *)
3608 ecore_chain_produce(&txq->tx_pbl);
3609 memset(third_bd, 0, sizeof(*third_bd));
3610
3611 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3612 segs->ds_addr,\
3613 segs->ds_len);
3614 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3615 third_bd->data.bitfields |=
3616 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3617 segs++;
3618 nbd++;
3619
3620 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3621 tx_data_bd = (struct eth_tx_bd *)
3622 ecore_chain_produce(&txq->tx_pbl);
3623 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3624 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3625 segs->ds_addr,\
3626 segs->ds_len);
3627 segs++;
3628 nbd++;
3629 }
3630
3631 } else {
3632 offset = offset - segs->ds_len;
3633 segs++;
3634
3635 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3636 if (offset)
3637 nbds_in_hdr++;
3638
3639 tx_data_bd = (struct eth_tx_bd *)
3640 ecore_chain_produce(&txq->tx_pbl);
3641 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3642
3643 if (second_bd == NULL) {
3644 second_bd = (struct eth_tx_2nd_bd *)
3645 tx_data_bd;
3646 } else if (third_bd == NULL) {
3647 third_bd = (struct eth_tx_3rd_bd *)
3648 tx_data_bd;
3649 }
3650
3651 if (offset && (offset < segs->ds_len)) {
3652 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3653 segs->ds_addr, offset);
3654
3655 tx_data_bd = (struct eth_tx_bd *)
3656 ecore_chain_produce(&txq->tx_pbl);
3657
3658 memset(tx_data_bd, 0,
3659 sizeof(*tx_data_bd));
3660
3661 if (second_bd == NULL) {
3662 second_bd =
3663 (struct eth_tx_2nd_bd *)tx_data_bd;
3664 } else if (third_bd == NULL) {
3665 third_bd =
3666 (struct eth_tx_3rd_bd *)tx_data_bd;
3667 }
3668 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3669 (segs->ds_addr + offset), \
3670 (segs->ds_len - offset));
3671 nbd++;
3672 offset = 0;
3673 } else {
3674 if (offset)
3675 offset = offset - segs->ds_len;
3676 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3677 segs->ds_addr, segs->ds_len);
3678 }
3679 segs++;
3680 nbd++;
3681 }
3682
3683 if (third_bd == NULL) {
3684 third_bd = (struct eth_tx_3rd_bd *)
3685 ecore_chain_produce(&txq->tx_pbl);
3686 memset(third_bd, 0, sizeof(*third_bd));
3687 }
3688
3689 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3690 third_bd->data.bitfields |=
3691 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3692 }
3693 fp->tx_tso_pkts++;
3694 } else {
3695 segs++;
3696 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3697 tx_data_bd = (struct eth_tx_bd *)
3698 ecore_chain_produce(&txq->tx_pbl);
3699 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3700 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3701 segs->ds_len);
3702 segs++;
3703 nbd++;
3704 }
3705 first_bd->data.bitfields =
3706 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3707 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3708 first_bd->data.bitfields =
3709 htole16(first_bd->data.bitfields);
3710 fp->tx_non_tso_pkts++;
3711 }
3712
3713 first_bd->data.nbds = nbd;
3714
3715 if (ha->dbg_trace_tso_pkt_len) {
3716 if (fp->tx_tso_max_nsegs < nsegs)
3717 fp->tx_tso_max_nsegs = nsegs;
3718
3719 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3720 fp->tx_tso_min_nsegs = nsegs;
3721 }
3722
3723 txq->sw_tx_ring[idx].nsegs = nsegs;
3724 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3725
3726 txq->tx_db.data.bd_prod =
3727 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3728
3729 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3730
3731 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3732 return (0);
3733 }
3734
3735 static void
3736 qlnx_stop(qlnx_host_t *ha)
3737 {
3738 if_t ifp = ha->ifp;
3739 int i;
3740
3741 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3742
3743 /*
3744 * We simply lock and unlock each fp->tx_mtx to
3745 * propagate the if_drv_flags
3746 * state to each tx thread
3747 */
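        /*
         * This works because a tx thread already inside
         * qlnx_transmit_locked() holds fp->tx_mtx; once we acquire and
         * release the mutex, that thread has drained and will observe the
         * cleared IFF_DRV_RUNNING on its next entry.
         */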
3748 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3749
3750 if (ha->state == QLNX_STATE_OPEN) {
3751 for (i = 0; i < ha->num_rss; i++) {
3752 struct qlnx_fastpath *fp = &ha->fp_array[i];
3753
3754 mtx_lock(&fp->tx_mtx);
3755 mtx_unlock(&fp->tx_mtx);
3756
3757 if (fp->fp_taskqueue != NULL)
3758 taskqueue_enqueue(fp->fp_taskqueue,
3759 &fp->fp_task);
3760 }
3761 }
3762 #ifdef QLNX_ENABLE_IWARP
3763 if (qlnx_vf_device(ha) != 0) {
3764 qlnx_rdma_dev_close(ha);
3765 }
3766 #endif /* #ifdef QLNX_ENABLE_IWARP */
3767
3768 qlnx_unload(ha);
3769
3770 return;
3771 }
3772
3773 static int
3774 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3775 {
3776 return(TX_RING_SIZE - 1);
3777 }
3778
3779 uint8_t *
3780 qlnx_get_mac_addr(qlnx_host_t *ha)
3781 {
3782 struct ecore_hwfn *p_hwfn;
3783 unsigned char mac[ETHER_ADDR_LEN];
3784 uint8_t p_is_forced;
3785
3786 p_hwfn = &ha->cdev.hwfns[0];
3787
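        /*
         * A PF (qlnx_vf_device() != 0) uses the MAC from hw_info directly;
         * a VF consults the PF's bulletin board for a forced MAC first.
         */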
3788 if (qlnx_vf_device(ha) != 0)
3789 return (p_hwfn->hw_info.hw_mac_addr);
3790
3791 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3792 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3793 true) {
3794 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3795 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3796 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3797 memcpy(ha->primary_mac, mac, ETH_ALEN);
3798 }
3799
3800 return (ha->primary_mac);
3801 }
3802
3803 static uint32_t
3804 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3805 {
3806 uint32_t ifm_type = 0;
3807
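        /* if_link->speed is in Mb/s; e.g. 100 Gb/s arrives as 100 * 1000. */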
3808 switch (if_link->media_type) {
3809 case MEDIA_MODULE_FIBER:
3810 case MEDIA_UNSPECIFIED:
3811 if (if_link->speed == (100 * 1000))
3812 ifm_type = QLNX_IFM_100G_SR4;
3813 else if (if_link->speed == (40 * 1000))
3814 ifm_type = IFM_40G_SR4;
3815 else if (if_link->speed == (25 * 1000))
3816 ifm_type = QLNX_IFM_25G_SR;
3817 else if (if_link->speed == (10 * 1000))
3818 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3819 else if (if_link->speed == (1 * 1000))
3820 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3821
3822 break;
3823
3824 case MEDIA_DA_TWINAX:
3825 if (if_link->speed == (100 * 1000))
3826 ifm_type = QLNX_IFM_100G_CR4;
3827 else if (if_link->speed == (40 * 1000))
3828 ifm_type = IFM_40G_CR4;
3829 else if (if_link->speed == (25 * 1000))
3830 ifm_type = QLNX_IFM_25G_CR;
3831 else if (if_link->speed == (10 * 1000))
3832 ifm_type = IFM_10G_TWINAX;
3833
3834 break;
3835
3836 default :
3837 ifm_type = IFM_UNKNOWN;
3838 break;
3839 }
3840 return (ifm_type);
3841 }
3842
3843 /*****************************************************************************
3844 * Interrupt Service Functions
3845 *****************************************************************************/
3846
3847 static int
3848 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3849 struct mbuf *mp_head, uint16_t len)
3850 {
3851 struct mbuf *mp, *mpf, *mpl;
3852 struct sw_rx_data *sw_rx_data;
3853 struct qlnx_rx_queue *rxq;
3854 uint16_t len_in_buffer;
3855
3856 rxq = fp->rxq;
3857 mpf = mpl = mp = NULL;
3858
3859 while (len) {
3860 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3861
3862 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3863 mp = sw_rx_data->data;
3864
3865 if (mp == NULL) {
3866 QL_DPRINT1(ha, "mp = NULL\n");
3867 fp->err_rx_mp_null++;
3868 rxq->sw_rx_cons =
3869 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3870
3871 if (mpf != NULL)
3872 m_freem(mpf);
3873
3874 return (-1);
3875 }
3876 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3877 BUS_DMASYNC_POSTREAD);
3878
3879 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3880 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3881 " incoming packet and reusing its buffer\n");
3882
3883 qlnx_reuse_rx_data(rxq);
3884 fp->err_rx_alloc_errors++;
3885
3886 if (mpf != NULL)
3887 m_freem(mpf);
3888
3889 return (-1);
3890 }
3891 ecore_chain_consume(&rxq->rx_bd_ring);
3892
3893 if (len > rxq->rx_buf_size)
3894 len_in_buffer = rxq->rx_buf_size;
3895 else
3896 len_in_buffer = len;
3897
3898 len = len - len_in_buffer;
3899
3900 mp->m_flags &= ~M_PKTHDR;
3901 mp->m_next = NULL;
3902 mp->m_len = len_in_buffer;
3903
3904 if (mpf == NULL)
3905 mpf = mpl = mp;
3906 else {
3907 mpl->m_next = mp;
3908 mpl = mp;
3909 }
3910 }
3911
3912 if (mpf != NULL)
3913 mp_head->m_next = mpf;
3914
3915 return (0);
3916 }
3917
3918 static void
3919 qlnx_tpa_start(qlnx_host_t *ha,
3920 struct qlnx_fastpath *fp,
3921 struct qlnx_rx_queue *rxq,
3922 struct eth_fast_path_rx_tpa_start_cqe *cqe)
3923 {
3924 uint32_t agg_index;
3925 if_t ifp = ha->ifp;
3926 struct mbuf *mp;
3927 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3928 struct sw_rx_data *sw_rx_data;
3929 dma_addr_t addr;
3930 bus_dmamap_t map;
3931 struct eth_rx_bd *rx_bd;
3932 int i;
3933 uint8_t hash_type;
3934
3935 agg_index = cqe->tpa_agg_index;
3936
3937 QL_DPRINT7(ha, "[rss_id = %d]: enter\n"
3938 "\t type = 0x%x\n"
3939 "\t bitfields = 0x%x\n"
3940 "\t seg_len = 0x%x\n"
3941 "\t pars_flags = 0x%x\n"
3942 "\t vlan_tag = 0x%x\n"
3943 "\t rss_hash = 0x%x\n"
3944 "\t len_on_first_bd = 0x%x\n"
3945 "\t placement_offset = 0x%x\n"
3946 "\t tpa_agg_index = 0x%x\n"
3947 "\t header_len = 0x%x\n"
3948 "\t ext_bd_len_list[0] = 0x%x\n"
3949 "\t ext_bd_len_list[1] = 0x%x\n"
3950 "\t ext_bd_len_list[2] = 0x%x\n"
3951 "\t ext_bd_len_list[3] = 0x%x\n"
3952 "\t ext_bd_len_list[4] = 0x%x\n",
3953 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3954 cqe->pars_flags.flags, cqe->vlan_tag,
3955 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3956 cqe->tpa_agg_index, cqe->header_len,
3957 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3958 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3959 cqe->ext_bd_len_list[4]);
3960
3961 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3962 fp->err_rx_tpa_invalid_agg_num++;
3963 return;
3964 }
3965
3966 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3967 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3968 mp = sw_rx_data->data;
3969
3970 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3971
3972 if (mp == NULL) {
3973 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3974 fp->err_rx_mp_null++;
3975 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3976
3977 return;
3978 }
3979
3980 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3981 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3982 " flags = %x, dropping incoming packet\n", fp->rss_id,
3983 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3984
3985 fp->err_rx_hw_errors++;
3986
3987 qlnx_reuse_rx_data(rxq);
3988
3989 QLNX_INC_IERRORS(ifp);
3990
3991 return;
3992 }
3993
3994 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3995 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3996 " dropping incoming packet and reusing its buffer\n",
3997 fp->rss_id);
3998
3999 fp->err_rx_alloc_errors++;
4000 QLNX_INC_IQDROPS(ifp);
4001
4002 /*
4003 * Load the tpa mbuf into the rx ring and save the
4004 * posted mbuf
4005 */
4006
4007 map = sw_rx_data->map;
4008 addr = sw_rx_data->dma_addr;
4009
4010 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4011
4012 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4013 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4014 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4015
4016 rxq->tpa_info[agg_index].rx_buf.data = mp;
4017 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4018 rxq->tpa_info[agg_index].rx_buf.map = map;
4019
4020 rx_bd = (struct eth_rx_bd *)
4021 ecore_chain_produce(&rxq->rx_bd_ring);
4022
4023 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4024 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4025
4026 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4027 BUS_DMASYNC_PREREAD);
4028
4029 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4030 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4031
4032 ecore_chain_consume(&rxq->rx_bd_ring);
4033
4034 /* Now reuse any buffers posted in ext_bd_len_list */
4035 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4036 if (cqe->ext_bd_len_list[i] == 0)
4037 break;
4038
4039 qlnx_reuse_rx_data(rxq);
4040 }
4041
4042 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4043 return;
4044 }
4045
4046 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4047 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4048 " dropping incoming packet and reusing its buffer\n",
4049 fp->rss_id);
4050
4051 QLNX_INC_IQDROPS(ifp);
4052
4053 /* if we already have mbuf head in aggregation free it */
4054 if (rxq->tpa_info[agg_index].mpf) {
4055 m_freem(rxq->tpa_info[agg_index].mpf);
4056 rxq->tpa_info[agg_index].mpl = NULL;
4057 }
4058 rxq->tpa_info[agg_index].mpf = mp;
4059 rxq->tpa_info[agg_index].mpl = NULL;
4060
4061 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4062 ecore_chain_consume(&rxq->rx_bd_ring);
4063
4064 /* Now reuse any buffers posted in ext_bd_len_list */
4065 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4066 if (cqe->ext_bd_len_list[i] == 0)
4067 break;
4068
4069 qlnx_reuse_rx_data(rxq);
4070 }
4071 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4072
4073 return;
4074 }
4075
4076 /*
4077 * first process the ext_bd_len_list
4078 * if this fails then we simply drop the packet
4079 */
4080 ecore_chain_consume(&rxq->rx_bd_ring);
4081 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4082
4083 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4084 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4085
4086 if (cqe->ext_bd_len_list[i] == 0)
4087 break;
4088
4089 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4090 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4091 BUS_DMASYNC_POSTREAD);
4092
4093 mpc = sw_rx_data->data;
4094
4095 if (mpc == NULL) {
4096 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4097 fp->err_rx_mp_null++;
4098 if (mpf != NULL)
4099 m_freem(mpf);
4100 mpf = mpl = NULL;
4101 rxq->tpa_info[agg_index].agg_state =
4102 QLNX_AGG_STATE_ERROR;
4103 ecore_chain_consume(&rxq->rx_bd_ring);
4104 rxq->sw_rx_cons =
4105 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4106 continue;
4107 }
4108
4109 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4110 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4111 " dropping incoming packet and reusing its"
4112 " buffer\n", fp->rss_id);
4113
4114 qlnx_reuse_rx_data(rxq);
4115
4116 if (mpf != NULL)
4117 m_freem(mpf);
4118 mpf = mpl = NULL;
4119
4120 rxq->tpa_info[agg_index].agg_state =
4121 QLNX_AGG_STATE_ERROR;
4122
4123 ecore_chain_consume(&rxq->rx_bd_ring);
4124 rxq->sw_rx_cons =
4125 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4126
4127 continue;
4128 }
4129
4130 mpc->m_flags &= ~M_PKTHDR;
4131 mpc->m_next = NULL;
4132 mpc->m_len = cqe->ext_bd_len_list[i];
4133
4134 if (mpf == NULL) {
4135 mpf = mpl = mpc;
4136 } else {
4137 mpl->m_len = ha->rx_buf_size;
4138 mpl->m_next = mpc;
4139 mpl = mpc;
4140 }
4141
4142 ecore_chain_consume(&rxq->rx_bd_ring);
4143 rxq->sw_rx_cons =
4144 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4145 }
4146
4147 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4148 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4149 " incoming packet and reusing its buffer\n",
4150 fp->rss_id);
4151
4152 QLNX_INC_IQDROPS(ifp);
4153
4154 rxq->tpa_info[agg_index].mpf = mp;
4155 rxq->tpa_info[agg_index].mpl = NULL;
4156
4157 return;
4158 }
4159
4160 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4161
4162 if (mpf != NULL) {
4163 mp->m_len = ha->rx_buf_size;
4164 mp->m_next = mpf;
4165 rxq->tpa_info[agg_index].mpf = mp;
4166 rxq->tpa_info[agg_index].mpl = mpl;
4167 } else {
4168 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4169 rxq->tpa_info[agg_index].mpf = mp;
4170 rxq->tpa_info[agg_index].mpl = mp;
4171 mp->m_next = NULL;
4172 }
4173
4174 mp->m_flags |= M_PKTHDR;
4175
4176 	/* assign packet to this interface */
4177 mp->m_pkthdr.rcvif = ifp;
4178
4179 	/* assume no hardware checksum has been completed */
4180 mp->m_pkthdr.csum_flags = 0;
4181
4182 //mp->m_pkthdr.flowid = fp->rss_id;
4183 mp->m_pkthdr.flowid = cqe->rss_hash;
4184
4185 hash_type = cqe->bitfields &
4186 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4187 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4188
4189 switch (hash_type) {
4190 case RSS_HASH_TYPE_IPV4:
4191 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4192 break;
4193
4194 case RSS_HASH_TYPE_TCP_IPV4:
4195 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4196 break;
4197
4198 case RSS_HASH_TYPE_IPV6:
4199 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4200 break;
4201
4202 case RSS_HASH_TYPE_TCP_IPV6:
4203 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4204 break;
4205
4206 default:
4207 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4208 break;
4209 }
4210
4211 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4212 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4213
4214 mp->m_pkthdr.csum_data = 0xFFFF;
4215
4216 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4217 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4218 mp->m_flags |= M_VLANTAG;
4219 }
4220
4221 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4222
4223 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4224 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4225 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4226
4227 return;
4228 }
4229
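/*
 * Handle a TPA-continuation CQE: append the buffers named in len_list
 * to the in-progress aggregation for this agg_index.
 */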
4230 static void
4231 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4232 struct qlnx_rx_queue *rxq,
4233 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4234 {
4235 struct sw_rx_data *sw_rx_data;
4236 int i;
4237 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4238 struct mbuf *mp;
4239 uint32_t agg_index;
4240
4241 QL_DPRINT7(ha, "[%d]: enter\n \
4242 \t type = 0x%x\n \
4243 \t tpa_agg_index = 0x%x\n \
4244 \t len_list[0] = 0x%x\n \
4245 \t len_list[1] = 0x%x\n \
4246 \t len_list[2] = 0x%x\n \
4247 \t len_list[3] = 0x%x\n \
4248 \t len_list[4] = 0x%x\n \
4249 \t len_list[5] = 0x%x\n",
4250 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4251 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4252 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4253
4254 agg_index = cqe->tpa_agg_index;
4255
4256 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4257 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4258 fp->err_rx_tpa_invalid_agg_num++;
4259 return;
4260 }
4261
4262 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4263 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4264
4265 if (cqe->len_list[i] == 0)
4266 break;
4267
4268 if (rxq->tpa_info[agg_index].agg_state !=
4269 QLNX_AGG_STATE_START) {
4270 qlnx_reuse_rx_data(rxq);
4271 continue;
4272 }
4273
4274 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4275 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4276 BUS_DMASYNC_POSTREAD);
4277
4278 mpc = sw_rx_data->data;
4279
4280 if (mpc == NULL) {
4281 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4282
4283 fp->err_rx_mp_null++;
4284 if (mpf != NULL)
4285 m_freem(mpf);
4286 mpf = mpl = NULL;
4287 rxq->tpa_info[agg_index].agg_state =
4288 QLNX_AGG_STATE_ERROR;
4289 ecore_chain_consume(&rxq->rx_bd_ring);
4290 rxq->sw_rx_cons =
4291 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4292 continue;
4293 }
4294
4295 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4296 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4297 " dropping incoming packet and reusing its"
4298 " buffer\n", fp->rss_id);
4299
4300 qlnx_reuse_rx_data(rxq);
4301
4302 if (mpf != NULL)
4303 m_freem(mpf);
4304 mpf = mpl = NULL;
4305
4306 rxq->tpa_info[agg_index].agg_state =
4307 QLNX_AGG_STATE_ERROR;
4308
4309 ecore_chain_consume(&rxq->rx_bd_ring);
4310 rxq->sw_rx_cons =
4311 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4312
4313 continue;
4314 }
4315
4316 mpc->m_flags &= ~M_PKTHDR;
4317 mpc->m_next = NULL;
4318 mpc->m_len = cqe->len_list[i];
4319
4320 if (mpf == NULL) {
4321 mpf = mpl = mpc;
4322 } else {
4323 mpl->m_len = ha->rx_buf_size;
4324 mpl->m_next = mpc;
4325 mpl = mpc;
4326 }
4327
4328 ecore_chain_consume(&rxq->rx_bd_ring);
4329 rxq->sw_rx_cons =
4330 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4331 }
4332
4333 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4334 fp->rss_id, mpf, mpl);
4335
4336 if (mpf != NULL) {
4337 mp = rxq->tpa_info[agg_index].mpl;
4338 mp->m_len = ha->rx_buf_size;
4339 mp->m_next = mpf;
4340 rxq->tpa_info[agg_index].mpl = mpl;
4341 }
4342
4343 return;
4344 }
4345
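/*
 * Handle a TPA-end CQE: append any trailing buffers, fix up the mbuf
 * chain lengths to match total_packet_len, pass the aggregated packet
 * to the stack and return the number of coalesced segments (0 on error).
 */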
4346 static int
4347 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4348 struct qlnx_rx_queue *rxq,
4349 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4350 {
4351 struct sw_rx_data *sw_rx_data;
4352 int i;
4353 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4354 struct mbuf *mp;
4355 uint32_t agg_index;
4356 uint32_t len = 0;
4357 if_t ifp = ha->ifp;
4358
4359 QL_DPRINT7(ha, "[%d]: enter\n \
4360 \t type = 0x%x\n \
4361 \t tpa_agg_index = 0x%x\n \
4362 \t total_packet_len = 0x%x\n \
4363 \t num_of_bds = 0x%x\n \
4364 \t end_reason = 0x%x\n \
4365 \t num_of_coalesced_segs = 0x%x\n \
4366 \t ts_delta = 0x%x\n \
4367 \t len_list[0] = 0x%x\n \
4368 \t len_list[1] = 0x%x\n \
4369 \t len_list[2] = 0x%x\n \
4370 \t len_list[3] = 0x%x\n",
4371 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4372 cqe->total_packet_len, cqe->num_of_bds,
4373 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4374 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4375 cqe->len_list[3]);
4376
4377 agg_index = cqe->tpa_agg_index;
4378
4379 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4380 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4381
4382 fp->err_rx_tpa_invalid_agg_num++;
4383 return (0);
4384 }
4385
4386 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4387 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4388
4389 if (cqe->len_list[i] == 0)
4390 break;
4391
4392 if (rxq->tpa_info[agg_index].agg_state !=
4393 QLNX_AGG_STATE_START) {
4394 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4395
4396 qlnx_reuse_rx_data(rxq);
4397 continue;
4398 }
4399
4400 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4401 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4402 BUS_DMASYNC_POSTREAD);
4403
4404 mpc = sw_rx_data->data;
4405
4406 if (mpc == NULL) {
4407 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4408
4409 fp->err_rx_mp_null++;
4410 if (mpf != NULL)
4411 m_freem(mpf);
4412 mpf = mpl = NULL;
4413 rxq->tpa_info[agg_index].agg_state =
4414 QLNX_AGG_STATE_ERROR;
4415 ecore_chain_consume(&rxq->rx_bd_ring);
4416 rxq->sw_rx_cons =
4417 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4418 continue;
4419 }
4420
4421 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4422 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4423 " dropping incoming packet and reusing its"
4424 " buffer\n", fp->rss_id);
4425
4426 qlnx_reuse_rx_data(rxq);
4427
4428 if (mpf != NULL)
4429 m_freem(mpf);
4430 mpf = mpl = NULL;
4431
4432 rxq->tpa_info[agg_index].agg_state =
4433 QLNX_AGG_STATE_ERROR;
4434
4435 ecore_chain_consume(&rxq->rx_bd_ring);
4436 rxq->sw_rx_cons =
4437 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4438
4439 continue;
4440 }
4441
4442 mpc->m_flags &= ~M_PKTHDR;
4443 mpc->m_next = NULL;
4444 mpc->m_len = cqe->len_list[i];
4445
4446 if (mpf == NULL) {
4447 mpf = mpl = mpc;
4448 } else {
4449 mpl->m_len = ha->rx_buf_size;
4450 mpl->m_next = mpc;
4451 mpl = mpc;
4452 }
4453
4454 ecore_chain_consume(&rxq->rx_bd_ring);
4455 rxq->sw_rx_cons =
4456 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4457 }
4458
4459 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4460
4461 if (mpf != NULL) {
4462 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4463
4464 mp = rxq->tpa_info[agg_index].mpl;
4465 mp->m_len = ha->rx_buf_size;
4466 mp->m_next = mpf;
4467 }
4468
4469 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4470 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4471
4472 if (rxq->tpa_info[agg_index].mpf != NULL)
4473 m_freem(rxq->tpa_info[agg_index].mpf);
4474 rxq->tpa_info[agg_index].mpf = NULL;
4475 rxq->tpa_info[agg_index].mpl = NULL;
4476 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4477 return (0);
4478 }
4479
4480 mp = rxq->tpa_info[agg_index].mpf;
4481 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4482 mp->m_pkthdr.len = cqe->total_packet_len;
4483
4484 if (mp->m_next == NULL)
4485 mp->m_len = mp->m_pkthdr.len;
4486 else {
4487 /* compute the total packet length */
4488 mpf = mp;
4489 while (mpf != NULL) {
4490 len += mpf->m_len;
4491 mpf = mpf->m_next;
4492 }
4493
4494 if (cqe->total_packet_len > len) {
4495 mpl = rxq->tpa_info[agg_index].mpl;
4496 mpl->m_len += (cqe->total_packet_len - len);
4497 }
4498 }
4499
4500 QLNX_INC_IPACKETS(ifp);
4501 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4502
4503 	QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIx64 "\n \
4504 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4505 fp->rss_id, mp->m_pkthdr.csum_data,
4506 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4507
4508 if_input(ifp, mp);
4509
4510 rxq->tpa_info[agg_index].mpf = NULL;
4511 rxq->tpa_info[agg_index].mpl = NULL;
4512 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4513
4514 return (cqe->num_of_coalesced_segs);
4515 }
4516
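/*
 * Process up to "budget" rx completions on this fastpath: slowpath and
 * TPA CQEs are dispatched to their handlers, regular frames are error
 * checked, replenished and handed to the stack (or soft LRO). Returns
 * the number of packets processed.
 */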
4517 static int
4518 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4519 int lro_enable)
4520 {
4521 uint16_t hw_comp_cons, sw_comp_cons;
4522 int rx_pkt = 0;
4523 struct qlnx_rx_queue *rxq = fp->rxq;
4524 if_t ifp = ha->ifp;
4525 struct ecore_dev *cdev = &ha->cdev;
4526 struct ecore_hwfn *p_hwfn;
4527
4528 #ifdef QLNX_SOFT_LRO
4529 struct lro_ctrl *lro;
4530
4531 lro = &rxq->lro;
4532 #endif /* #ifdef QLNX_SOFT_LRO */
4533
4534 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4535 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4536
4537 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4538
4539 	/* Memory barrier to prevent the CPU from doing speculative reads of
4540 	 * CQE/BD in the while-loop below before reading hw_comp_cons. If a
4541 	 * CQE were read before FW has written it, the CPU could pair a stale
4542 	 * CQE with an already-updated hw_comp_cons.
4543 	 */
4544
4545 /* Loop to complete all indicated BDs */
4546 while (sw_comp_cons != hw_comp_cons) {
4547 union eth_rx_cqe *cqe;
4548 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4549 struct sw_rx_data *sw_rx_data;
4550 register struct mbuf *mp;
4551 enum eth_rx_cqe_type cqe_type;
4552 uint16_t len, pad, len_on_first_bd;
4553 uint8_t *data;
4554 uint8_t hash_type;
4555
4556 /* Get the CQE from the completion ring */
4557 cqe = (union eth_rx_cqe *)
4558 ecore_chain_consume(&rxq->rx_comp_ring);
4559 cqe_type = cqe->fast_path_regular.type;
4560
4561 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4562 			QL_DPRINT3(ha, "Got a slowpath CQE\n");
4563
4564 ecore_eth_cqe_completion(p_hwfn,
4565 (struct eth_slow_path_rx_cqe *)cqe);
4566 goto next_cqe;
4567 }
4568
4569 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4570 switch (cqe_type) {
4571 case ETH_RX_CQE_TYPE_TPA_START:
4572 qlnx_tpa_start(ha, fp, rxq,
4573 &cqe->fast_path_tpa_start);
4574 fp->tpa_start++;
4575 break;
4576
4577 case ETH_RX_CQE_TYPE_TPA_CONT:
4578 qlnx_tpa_cont(ha, fp, rxq,
4579 &cqe->fast_path_tpa_cont);
4580 fp->tpa_cont++;
4581 break;
4582
4583 case ETH_RX_CQE_TYPE_TPA_END:
4584 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4585 &cqe->fast_path_tpa_end);
4586 fp->tpa_end++;
4587 break;
4588
4589 default:
4590 break;
4591 }
4592
4593 goto next_cqe;
4594 }
4595
4596 /* Get the data from the SW ring */
4597 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4598 mp = sw_rx_data->data;
4599
4600 if (mp == NULL) {
4601 QL_DPRINT1(ha, "mp = NULL\n");
4602 fp->err_rx_mp_null++;
4603 rxq->sw_rx_cons =
4604 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4605 goto next_cqe;
4606 }
4607 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4608 BUS_DMASYNC_POSTREAD);
4609
4610 /* non GRO */
4611 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4612 len = le16toh(fp_cqe->pkt_len);
4613 pad = fp_cqe->placement_offset;
4614 #if 0
4615 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4616 " len %u, parsing flags = %d pad = %d\n",
4617 cqe_type, fp_cqe->bitfields,
4618 le16toh(fp_cqe->vlan_tag),
4619 len, le16toh(fp_cqe->pars_flags.flags), pad);
4620 #endif
4621 data = mtod(mp, uint8_t *);
4622 data = data + pad;
4623
4624 if (0)
4625 qlnx_dump_buf8(ha, __func__, data, len);
4626
4627 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4628 * is always with a fixed size. If allocation fails, we take the
4629 * consumed BD and return it to the ring in the PROD position.
4630 * The packet that was received on that BD will be dropped (and
4631 * not passed to the upper stack).
4632 */
4633 /* If this is an error packet then drop it */
4634 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4635 CQE_FLAGS_ERR) {
4636 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4637 " dropping incoming packet\n", sw_comp_cons,
4638 le16toh(cqe->fast_path_regular.pars_flags.flags));
4639 fp->err_rx_hw_errors++;
4640
4641 qlnx_reuse_rx_data(rxq);
4642
4643 QLNX_INC_IERRORS(ifp);
4644
4645 goto next_cqe;
4646 }
4647
4648 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4649 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4650 " incoming packet and reusing its buffer\n");
4651 qlnx_reuse_rx_data(rxq);
4652
4653 fp->err_rx_alloc_errors++;
4654
4655 QLNX_INC_IQDROPS(ifp);
4656
4657 goto next_cqe;
4658 }
4659
4660 ecore_chain_consume(&rxq->rx_bd_ring);
4661
4662 len_on_first_bd = fp_cqe->len_on_first_bd;
4663 m_adj(mp, pad);
4664 mp->m_pkthdr.len = len;
4665
4666 if ((len > 60 ) && (len > len_on_first_bd)) {
4667 mp->m_len = len_on_first_bd;
4668
4669 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4670 (len - len_on_first_bd)) != 0) {
4671 m_freem(mp);
4672
4673 QLNX_INC_IQDROPS(ifp);
4674
4675 goto next_cqe;
4676 }
4677
4678 } else if (len_on_first_bd < len) {
4679 fp->err_rx_jumbo_chain_pkts++;
4680 } else {
4681 mp->m_len = len;
4682 }
4683
4684 mp->m_flags |= M_PKTHDR;
4685
4686 		/* assign packet to this interface */
4687 mp->m_pkthdr.rcvif = ifp;
4688
4689 		/* assume no hardware checksum has been completed */
4690 mp->m_pkthdr.csum_flags = 0;
4691
4692 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4693
4694 hash_type = fp_cqe->bitfields &
4695 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4696 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4697
4698 switch (hash_type) {
4699 case RSS_HASH_TYPE_IPV4:
4700 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4701 break;
4702
4703 case RSS_HASH_TYPE_TCP_IPV4:
4704 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4705 break;
4706
4707 case RSS_HASH_TYPE_IPV6:
4708 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4709 break;
4710
4711 case RSS_HASH_TYPE_TCP_IPV6:
4712 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4713 break;
4714
4715 default:
4716 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4717 break;
4718 }
4719
4720 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4721 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4722 }
4723
4724 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4725 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4726 }
4727
4728 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4729 mp->m_pkthdr.csum_data = 0xFFFF;
4730 mp->m_pkthdr.csum_flags |=
4731 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4732 }
4733
4734 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4735 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4736 mp->m_flags |= M_VLANTAG;
4737 }
4738
4739 QLNX_INC_IPACKETS(ifp);
4740 QLNX_INC_IBYTES(ifp, len);
4741
4742 #ifdef QLNX_SOFT_LRO
4743 if (lro_enable)
4744 tcp_lro_queue_mbuf(lro, mp);
4745 else
4746 if_input(ifp, mp);
4747 #else
4748
4749 if_input(ifp, mp);
4750
4751 #endif /* #ifdef QLNX_SOFT_LRO */
4752
4753 rx_pkt++;
4754
4755 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4756
4757 next_cqe: /* don't consume bd rx buffer */
4758 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4759 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4760
4761 /* CR TPA - revisit how to handle budget in TPA perhaps
4762 increase on "end" */
4763 if (rx_pkt == budget)
4764 break;
4765 } /* repeat while sw_comp_cons != hw_comp_cons... */
4766
4767 /* Update producers */
4768 qlnx_update_rx_prod(p_hwfn, rxq);
4769
4770 return rx_pkt;
4771 }
4772
4773 /*
4774 * fast path interrupt
4775 */
4776
4777 static void
4778 qlnx_fp_isr(void *arg)
4779 {
4780 qlnx_ivec_t *ivec = arg;
4781 qlnx_host_t *ha;
4782 struct qlnx_fastpath *fp = NULL;
4783 int idx;
4784
4785 ha = ivec->ha;
4786
4787 if (ha->state != QLNX_STATE_OPEN) {
4788 return;
4789 }
4790
4791 	idx = ivec->rss_idx;
4792 
4793 	if (idx >= ha->num_rss) {
4794 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4795 ha->err_illegal_intr++;
4796 return;
4797 }
4798 fp = &ha->fp_array[idx];
4799
4800 if (fp == NULL) {
4801 ha->err_fp_null++;
4802 } else {
4803 int rx_int = 0;
4804 #ifdef QLNX_SOFT_LRO
4805 int total_rx_count = 0;
4806 #endif
4807 int lro_enable, tc;
4808 struct qlnx_tx_queue *txq;
4809 		uint16_t elem_left;
#ifdef QLNX_TRACE_PERF_DATA
		/* snapshot of fp->tx_pkts_completed, taken before reaping;
		 * needed for the tracing arithmetic below to compile */
		uint64_t tx_compl;
#endif
4810
4811 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4812
4813 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4814
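		/*
		 * Reap tx completions first whenever a tx PBL is running low
		 * on free elements, then poll rx; repeat until an rx pass
		 * completes no packets.
		 */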
4815 do {
4816 for (tc = 0; tc < ha->num_tc; tc++) {
4817 txq = fp->txq[tc];
4818
4819 if((int)(elem_left =
4820 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4821 QLNX_TX_ELEM_THRESH) {
4822 if (mtx_trylock(&fp->tx_mtx)) {
4823 #ifdef QLNX_TRACE_PERF_DATA
4824 tx_compl = fp->tx_pkts_completed;
4825 #endif
4826
4827 qlnx_tx_int(ha, fp, fp->txq[tc]);
4828 #ifdef QLNX_TRACE_PERF_DATA
4829 fp->tx_pkts_compl_intr +=
4830 (fp->tx_pkts_completed - tx_compl);
4831 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4832 fp->tx_comInt[0]++;
4833 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4834 ((fp->tx_pkts_completed - tx_compl) <= 64))
4835 fp->tx_comInt[1]++;
4836 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4837 ((fp->tx_pkts_completed - tx_compl) <= 128))
4838 fp->tx_comInt[2]++;
4839 else if(((fp->tx_pkts_completed - tx_compl) > 128))
4840 fp->tx_comInt[3]++;
4841 #endif
4842 mtx_unlock(&fp->tx_mtx);
4843 }
4844 }
4845 }
4846
4847 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4848 lro_enable);
4849
4850 if (rx_int) {
4851 fp->rx_pkts += rx_int;
4852 #ifdef QLNX_SOFT_LRO
4853 total_rx_count += rx_int;
4854 #endif
4855 }
4856
4857 } while (rx_int);
4858
4859 #ifdef QLNX_SOFT_LRO
4860 {
4861 struct lro_ctrl *lro;
4862
4863 lro = &fp->rxq->lro;
4864
4865 if (lro_enable && total_rx_count) {
4866
4867 #ifdef QLNX_TRACE_LRO_CNT
4868 if (lro->lro_mbuf_count & ~1023)
4869 fp->lro_cnt_1024++;
4870 else if (lro->lro_mbuf_count & ~511)
4871 fp->lro_cnt_512++;
4872 else if (lro->lro_mbuf_count & ~255)
4873 fp->lro_cnt_256++;
4874 else if (lro->lro_mbuf_count & ~127)
4875 fp->lro_cnt_128++;
4876 else if (lro->lro_mbuf_count & ~63)
4877 fp->lro_cnt_64++;
4878 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4879
4880 tcp_lro_flush_all(lro);
4881 }
4882 }
4883 #endif /* #ifdef QLNX_SOFT_LRO */
4884
4885 ecore_sb_update_sb_idx(fp->sb_info);
4886 rmb();
4887 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4888 }
4889
4890 return;
4891 }
4892
4893 /*
4894 * slow path interrupt processing function
4895 * can be invoked in polled mode or in interrupt mode via taskqueue.
4896 */
4897 void
4898 qlnx_sp_isr(void *arg)
4899 {
4900 struct ecore_hwfn *p_hwfn;
4901 qlnx_host_t *ha;
4902
4903 p_hwfn = arg;
4904
4905 ha = (qlnx_host_t *)p_hwfn->p_dev;
4906
4907 ha->sp_interrupts++;
4908
4909 QL_DPRINT2(ha, "enter\n");
4910
4911 ecore_int_sp_dpc(p_hwfn);
4912
4913 QL_DPRINT2(ha, "exit\n");
4914
4915 return;
4916 }
4917
4918 /*****************************************************************************
4919 * Support Functions for DMA'able Memory
4920 *****************************************************************************/
4921
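/*
 * busdma load callback: stores the bus address of the (single) segment
 * into the caller-supplied bus_addr_t, or leaves it 0 if the load failed.
 */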
4922 static void
4923 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4924 {
4925 *((bus_addr_t *)arg) = 0;
4926
4927 if (error) {
4928 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4929 return;
4930 }
4931
4932 *((bus_addr_t *)arg) = segs[0].ds_addr;
4933
4934 return;
4935 }
4936
4937 static int
4938 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4939 {
4940 int ret = 0;
4941 bus_addr_t b_addr;
4942
4943 ret = bus_dma_tag_create(
4944 ha->parent_tag,/* parent */
4945 dma_buf->alignment,
4946 ((bus_size_t)(1ULL << 32)),/* boundary */
4947 BUS_SPACE_MAXADDR, /* lowaddr */
4948 BUS_SPACE_MAXADDR, /* highaddr */
4949 NULL, NULL, /* filter, filterarg */
4950 dma_buf->size, /* maxsize */
4951 1, /* nsegments */
4952 dma_buf->size, /* maxsegsize */
4953 0, /* flags */
4954 NULL, NULL, /* lockfunc, lockarg */
4955 &dma_buf->dma_tag);
4956
4957 if (ret) {
4958 QL_DPRINT1(ha, "could not create dma tag\n");
4959 goto qlnx_alloc_dmabuf_exit;
4960 }
4961 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4962 (void **)&dma_buf->dma_b,
4963 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4964 &dma_buf->dma_map);
4965 if (ret) {
4966 bus_dma_tag_destroy(dma_buf->dma_tag);
4967 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4968 goto qlnx_alloc_dmabuf_exit;
4969 }
4970
4971 ret = bus_dmamap_load(dma_buf->dma_tag,
4972 dma_buf->dma_map,
4973 dma_buf->dma_b,
4974 dma_buf->size,
4975 qlnx_dmamap_callback,
4976 &b_addr, BUS_DMA_NOWAIT);
4977
4978 if (ret || !b_addr) {
4979 bus_dma_tag_destroy(dma_buf->dma_tag);
4980 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4981 dma_buf->dma_map);
4982 ret = -1;
4983 goto qlnx_alloc_dmabuf_exit;
4984 }
4985
4986 dma_buf->dma_addr = b_addr;
4987
4988 qlnx_alloc_dmabuf_exit:
4989
4990 return ret;
4991 }
4992
4993 static void
4994 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4995 {
4996 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4997 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4998 bus_dma_tag_destroy(dma_buf->dma_tag);
4999 return;
5000 }
5001
5002 void *
5003 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5004 {
5005 qlnx_dma_t dma_buf;
5006 qlnx_dma_t *dma_p;
5007 qlnx_host_t *ha __unused;
5008
5009 ha = (qlnx_host_t *)ecore_dev;
5010
5011 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5012
5013 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5014
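	/*
	 * Allocate one extra page so a copy of this qlnx_dma_t can be
	 * stashed just past the requested size; qlnx_dma_free_coherent()
	 * recovers it from there to unload and free the buffer.
	 */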
5015 dma_buf.size = size + PAGE_SIZE;
5016 dma_buf.alignment = 8;
5017
5018 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5019 return (NULL);
5020 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5021
5022 *phys = dma_buf.dma_addr;
5023
5024 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5025
5026 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5027
5028 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5029 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5030 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5031
5032 return (dma_buf.dma_b);
5033 }
5034
5035 void
5036 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5037 uint32_t size)
5038 {
5039 qlnx_dma_t dma_buf, *dma_p;
5040 qlnx_host_t *ha;
5041
5042 ha = (qlnx_host_t *)ecore_dev;
5043
5044 if (v_addr == NULL)
5045 return;
5046
5047 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5048
5049 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5050
5051 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5052 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5053 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5054
5055 dma_buf = *dma_p;
5056
5057 if (!ha->qlnxr_debug)
5058 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5059 return;
5060 }
5061
5062 static int
5063 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5064 {
5065 int ret;
5066 device_t dev;
5067
5068 dev = ha->pci_dev;
5069
5070 /*
5071 * Allocate parent DMA Tag
5072 */
5073 ret = bus_dma_tag_create(
5074 bus_get_dma_tag(dev), /* parent */
5075 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5076 BUS_SPACE_MAXADDR, /* lowaddr */
5077 BUS_SPACE_MAXADDR, /* highaddr */
5078 NULL, NULL, /* filter, filterarg */
5079 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5080 0, /* nsegments */
5081 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5082 0, /* flags */
5083 NULL, NULL, /* lockfunc, lockarg */
5084 &ha->parent_tag);
5085
5086 if (ret) {
5087 QL_DPRINT1(ha, "could not create parent dma tag\n");
5088 return (-1);
5089 }
5090
5091 ha->flags.parent_tag = 1;
5092
5093 return (0);
5094 }
5095
5096 static void
5097 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5098 {
5099 if (ha->parent_tag != NULL) {
5100 bus_dma_tag_destroy(ha->parent_tag);
5101 ha->parent_tag = NULL;
5102 }
5103 return;
5104 }
5105
5106 static int
5107 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5108 {
5109 if (bus_dma_tag_create(NULL, /* parent */
5110 1, 0, /* alignment, bounds */
5111 BUS_SPACE_MAXADDR, /* lowaddr */
5112 BUS_SPACE_MAXADDR, /* highaddr */
5113 NULL, NULL, /* filter, filterarg */
5114 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5115 QLNX_MAX_SEGMENTS, /* nsegments */
5116 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5117 0, /* flags */
5118 NULL, /* lockfunc */
5119 NULL, /* lockfuncarg */
5120 &ha->tx_tag)) {
5121 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5122 return (-1);
5123 }
5124
5125 return (0);
5126 }
5127
5128 static void
5129 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5130 {
5131 if (ha->tx_tag != NULL) {
5132 bus_dma_tag_destroy(ha->tx_tag);
5133 ha->tx_tag = NULL;
5134 }
5135 return;
5136 }
5137
5138 static int
5139 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5140 {
5141 if (bus_dma_tag_create(NULL, /* parent */
5142 1, 0, /* alignment, bounds */
5143 BUS_SPACE_MAXADDR, /* lowaddr */
5144 BUS_SPACE_MAXADDR, /* highaddr */
5145 NULL, NULL, /* filter, filterarg */
5146 MJUM9BYTES, /* maxsize */
5147 1, /* nsegments */
5148 MJUM9BYTES, /* maxsegsize */
5149 0, /* flags */
5150 NULL, /* lockfunc */
5151 NULL, /* lockfuncarg */
5152 &ha->rx_tag)) {
5153 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5154
5155 return (-1);
5156 }
5157 return (0);
5158 }
5159
5160 static void
5161 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5162 {
5163 if (ha->rx_tag != NULL) {
5164 bus_dma_tag_destroy(ha->rx_tag);
5165 ha->rx_tag = NULL;
5166 }
5167 return;
5168 }
5169
5170 /*********************************
5171 * Exported functions
5172 *********************************/
5173 uint32_t
5174 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5175 {
5176 uint32_t bar_size;
5177
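	/* each ecore bar_id refers to a 64-bit BAR, which occupies two
	 * 32-bit BAR registers in config space */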
5178 bar_id = bar_id * 2;
5179
5180 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5181 SYS_RES_MEMORY,
5182 PCIR_BAR(bar_id));
5183
5184 return (bar_size);
5185 }
5186
5187 uint32_t
5188 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5189 {
5190 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5191 pci_reg, 1);
5192 return 0;
5193 }
5194
5195 uint32_t
5196 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5197 uint16_t *reg_value)
5198 {
5199 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5200 pci_reg, 2);
5201 return 0;
5202 }
5203
5204 uint32_t
5205 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5206 uint32_t *reg_value)
5207 {
5208 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5209 pci_reg, 4);
5210 return 0;
5211 }
5212
5213 void
5214 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5215 {
5216 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5217 pci_reg, reg_value, 1);
5218 return;
5219 }
5220
5221 void
5222 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5223 uint16_t reg_value)
5224 {
5225 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5226 pci_reg, reg_value, 2);
5227 return;
5228 }
5229
5230 void
5231 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5232 uint32_t reg_value)
5233 {
5234 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5235 pci_reg, reg_value, 4);
5236 return;
5237 }
5238
5239 int
5240 qlnx_pci_find_capability(void *ecore_dev, int cap)
5241 {
5242 int reg;
5243 qlnx_host_t *ha;
5244
5245 ha = ecore_dev;
5246
5247 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5248 return reg;
5249 else {
5250 QL_DPRINT1(ha, "failed\n");
5251 return 0;
5252 }
5253 }
5254
5255 int
5256 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5257 {
5258 int reg;
5259 qlnx_host_t *ha;
5260
5261 ha = ecore_dev;
5262
5263 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5264 return reg;
5265 else {
5266 QL_DPRINT1(ha, "failed\n");
5267 return 0;
5268 }
5269 }
5270
5271 uint32_t
5272 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5273 {
5274 uint32_t data32;
5275 struct ecore_hwfn *p_hwfn;
5276
5277 p_hwfn = hwfn;
5278
5279 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5280 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5281
5282 return (data32);
5283 }
5284
5285 void
5286 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5287 {
5288 struct ecore_hwfn *p_hwfn = hwfn;
5289
5290 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5291 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5292
5293 return;
5294 }
5295
5296 void
5297 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5298 {
5299 struct ecore_hwfn *p_hwfn = hwfn;
5300
5301 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5302 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5303 return;
5304 }
5305
5306 void
5307 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5308 {
5309 struct ecore_dev *cdev;
5310 struct ecore_hwfn *p_hwfn;
5311 uint32_t offset;
5312
5313 p_hwfn = hwfn;
5314
5315 cdev = p_hwfn->p_dev;
5316
5317 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5318 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5319
5320 return;
5321 }
5322
5323 void
5324 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5325 {
5326 struct ecore_hwfn *p_hwfn = hwfn;
5327
5328 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5329 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5330
5331 return;
5332 }
5333
5334 uint32_t
5335 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5336 {
5337 uint32_t data32;
5338 bus_size_t offset;
5339 struct ecore_dev *cdev;
5340
5341 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5342 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5343
5344 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5345
5346 return (data32);
5347 }
5348
5349 void
5350 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5351 {
5352 bus_size_t offset;
5353 struct ecore_dev *cdev;
5354
5355 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5356 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5357
5358 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5359
5360 return;
5361 }
5362
5363 void
5364 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5365 {
5366 bus_size_t offset;
5367 struct ecore_dev *cdev;
5368
5369 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5370 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5371
5372 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5373 return;
5374 }
5375
5376 void *
5377 qlnx_zalloc(uint32_t size)
5378 {
5379 	caddr_t va;
5380 
5381 	/* M_ZERO zeroes the buffer and avoids bzero()'ing NULL on failure */
5382 	va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
5383 	return ((void *)va);
5384 }
5385
5386 void
5387 qlnx_barrier(void *p_dev)
5388 {
5389 qlnx_host_t *ha;
5390
5391 ha = ((struct ecore_dev *) p_dev)->ha;
5392 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5393 }
5394
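/*
 * Invoked from ecore on a link state change: refresh the cached link
 * info, notify the network stack if the state flipped, and propagate
 * the new state to any active VFs.
 */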
5395 void
5396 qlnx_link_update(void *p_hwfn)
5397 {
5398 qlnx_host_t *ha;
5399 int prev_link_state;
5400
5401 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5402
5403 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5404
5405 prev_link_state = ha->link_up;
5406 ha->link_up = ha->if_link.link_up;
5407
5408 if (prev_link_state != ha->link_up) {
5409 if (ha->link_up) {
5410 if_link_state_change(ha->ifp, LINK_STATE_UP);
5411 } else {
5412 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5413 }
5414 }
5415 #ifndef QLNX_VF
5416 #ifdef CONFIG_ECORE_SRIOV
5417
5418 if (qlnx_vf_device(ha) != 0) {
5419 if (ha->sriov_initialized)
5420 qlnx_inform_vf_link_state(p_hwfn, ha);
5421 }
5422
5423 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5424 #endif /* #ifdef QLNX_VF */
5425
5426 return;
5427 }
5428
5429 static void
5430 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5431 struct ecore_vf_acquire_sw_info *p_sw_info)
5432 {
5433 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5434 (QLNX_VERSION_MINOR << 16) |
5435 QLNX_VERSION_BUILD;
5436 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5437
5438 return;
5439 }
5440
5441 void
5442 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5443 void *p_sw_info)
5444 {
5445 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5446
5447 return;
5448 }
5449
5450 void
5451 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5452 struct qlnx_link_output *if_link)
5453 {
5454 struct ecore_mcp_link_params link_params;
5455 struct ecore_mcp_link_state link_state;
5456 uint8_t p_change;
5457 struct ecore_ptt *p_ptt = NULL;
5458
5459 memset(if_link, 0, sizeof(*if_link));
5460 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5461 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5462
5463 ha = (qlnx_host_t *)hwfn->p_dev;
5464
5465 	/* Prepare source inputs */
5466 	/* PFs read the link info via the management FW; VFs via the bulletin */
5467 if (qlnx_vf_device(ha) != 0) {
5468 p_ptt = ecore_ptt_acquire(hwfn);
5469
5470 if (p_ptt == NULL) {
5471 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5472 return;
5473 }
5474
5475 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5476 ecore_ptt_release(hwfn, p_ptt);
5477
5478 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5479 sizeof(link_params));
5480 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5481 sizeof(link_state));
5482 } else {
5483 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5484 ecore_vf_read_bulletin(hwfn, &p_change);
5485 ecore_vf_get_link_params(hwfn, &link_params);
5486 ecore_vf_get_link_state(hwfn, &link_state);
5487 }
5488
5489 /* Set the link parameters to pass to protocol driver */
5490 if (link_state.link_up) {
5491 if_link->link_up = true;
5492 if_link->speed = link_state.speed;
5493 }
5494
5495 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5496
5497 if (link_params.speed.autoneg)
5498 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5499
5500 if (link_params.pause.autoneg ||
5501 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5502 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5503
5504 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5505 link_params.pause.forced_tx)
5506 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5507
5508 if (link_params.speed.advertised_speeds &
5509 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5510 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5511 QLNX_LINK_CAP_1000baseT_Full;
5512
5513 if (link_params.speed.advertised_speeds &
5514 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5515 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5516
5517 if (link_params.speed.advertised_speeds &
5518 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5519 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5520
5521 if (link_params.speed.advertised_speeds &
5522 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5523 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5524
5525 if (link_params.speed.advertised_speeds &
5526 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5527 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5528
5529 if (link_params.speed.advertised_speeds &
5530 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5531 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5532
5533 if_link->advertised_caps = if_link->supported_caps;
5534
5535 if_link->autoneg = link_params.speed.autoneg;
5536 if_link->duplex = QLNX_LINK_DUPLEX;
5537
5538 /* Link partner capabilities */
5539
5540 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5541 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5542
5543 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5544 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5545
5546 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5547 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5548
5549 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5550 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5551
5552 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5553 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5554
5555 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5556 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5557
5558 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5559 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5560
5561 if (link_state.an_complete)
5562 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5563
5564 if (link_state.partner_adv_pause)
5565 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5566
5567 if ((link_state.partner_adv_pause ==
5568 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5569 (link_state.partner_adv_pause ==
5570 ECORE_LINK_PARTNER_BOTH_PAUSE))
5571 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5572
5573 return;
5574 }
5575
5576 void
5577 qlnx_schedule_recovery(void *p_hwfn)
5578 {
5579 qlnx_host_t *ha;
5580
5581 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5582
5583 if (qlnx_vf_device(ha) != 0) {
5584 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5585 }
5586
5587 return;
5588 }
5589
5590 static int
5591 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5592 {
5593 int rc, i;
5594
5595 for (i = 0; i < cdev->num_hwfns; i++) {
5596 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5597 p_hwfn->pf_params = *func_params;
5598
5599 #ifdef QLNX_ENABLE_IWARP
5600 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5601 p_hwfn->using_ll2 = true;
5602 }
5603 #endif /* #ifdef QLNX_ENABLE_IWARP */
5604 }
5605
5606 rc = ecore_resc_alloc(cdev);
5607 if (rc)
5608 goto qlnx_nic_setup_exit;
5609
5610 ecore_resc_setup(cdev);
5611
5612 qlnx_nic_setup_exit:
5613
5614 return rc;
5615 }
5616
5617 static int
5618 qlnx_nic_start(struct ecore_dev *cdev)
5619 {
5620 int rc;
5621 struct ecore_hw_init_params params;
5622
5623 	bzero(&params, sizeof (struct ecore_hw_init_params));
5624
5625 params.p_tunn = NULL;
5626 params.b_hw_start = true;
5627 params.int_mode = cdev->int_mode;
5628 params.allow_npar_tx_switch = true;
5629 params.bin_fw_data = NULL;
5630
5631 	rc = ecore_hw_init(cdev, &params);
5632 if (rc) {
5633 ecore_resc_free(cdev);
5634 return rc;
5635 }
5636
5637 return 0;
5638 }
5639
5640 static int
5641 qlnx_slowpath_start(qlnx_host_t *ha)
5642 {
5643 struct ecore_dev *cdev;
5644 struct ecore_pf_params pf_params;
5645 int rc;
5646
5647 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
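	/*
	 * One rx connection plus one tx connection per traffic class,
	 * for each RSS ring.
	 */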
5648 pf_params.eth_pf_params.num_cons =
5649 (ha->num_rss) * (ha->num_tc + 1);
5650
5651 #ifdef QLNX_ENABLE_IWARP
5652 if (qlnx_vf_device(ha) != 0) {
5653 if(ha->personality == ECORE_PCI_ETH_IWARP) {
5654 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5655 pf_params.rdma_pf_params.num_qps = 1024;
5656 pf_params.rdma_pf_params.num_srqs = 1024;
5657 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5658 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5659 } else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5660 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5661 pf_params.rdma_pf_params.num_qps = 8192;
5662 pf_params.rdma_pf_params.num_srqs = 8192;
5663 //pf_params.rdma_pf_params.min_dpis = 0;
5664 pf_params.rdma_pf_params.min_dpis = 8;
5665 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5666 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5667 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5668 }
5669 }
5670 #endif /* #ifdef QLNX_ENABLE_IWARP */
5671
5672 cdev = &ha->cdev;
5673
5674 rc = qlnx_nic_setup(cdev, &pf_params);
5675 if (rc)
5676 goto qlnx_slowpath_start_exit;
5677
5678 cdev->int_mode = ECORE_INT_MODE_MSIX;
5679 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5680
5681 #ifdef QLNX_MAX_COALESCE
5682 cdev->rx_coalesce_usecs = 255;
5683 cdev->tx_coalesce_usecs = 255;
5684 #endif
5685
5686 rc = qlnx_nic_start(cdev);
5687
5688 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5689 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5690
5691 #ifdef QLNX_USER_LLDP
5692 (void)qlnx_set_lldp_tlvx(ha, NULL);
5693 #endif /* #ifdef QLNX_USER_LLDP */
5694
5695 qlnx_slowpath_start_exit:
5696
5697 return (rc);
5698 }
5699
5700 static int
5701 qlnx_slowpath_stop(qlnx_host_t *ha)
5702 {
5703 struct ecore_dev *cdev;
5704 device_t dev = ha->pci_dev;
5705 int i;
5706
5707 cdev = &ha->cdev;
5708
5709 ecore_hw_stop(cdev);
5710
5711 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5712 if (ha->sp_handle[i])
5713 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5714 ha->sp_handle[i]);
5715
5716 ha->sp_handle[i] = NULL;
5717
5718 if (ha->sp_irq[i])
5719 (void) bus_release_resource(dev, SYS_RES_IRQ,
5720 ha->sp_irq_rid[i], ha->sp_irq[i]);
5721 ha->sp_irq[i] = NULL;
5722 }
5723
5724 ecore_resc_free(cdev);
5725
5726 return 0;
5727 }
5728
5729 static void
5730 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5731 char ver_str[VER_SIZE])
5732 {
5733 int i;
5734
5735 memcpy(cdev->name, name, NAME_SIZE);
5736
5737 for_each_hwfn(cdev, i) {
5738 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5739 }
5740
5741 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5742
5743 return ;
5744 }
5745
5746 void
5747 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5748 {
5749 enum ecore_mcp_protocol_type type;
5750 union ecore_mcp_protocol_stats *stats;
5751 struct ecore_eth_stats eth_stats;
5752 qlnx_host_t *ha;
5753
5754 ha = cdev;
5755 stats = proto_stats;
5756 type = proto_type;
5757
5758 switch (type) {
5759 case ECORE_MCP_LAN_STATS:
5760 		ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5761 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5762 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5763 stats->lan_stats.fcs_err = -1;
5764 break;
5765
5766 default:
5767 ha->err_get_proto_invalid_type++;
5768
5769 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5770 break;
5771 }
5772 return;
5773 }
5774
5775 static int
5776 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5777 {
5778 struct ecore_hwfn *p_hwfn;
5779 struct ecore_ptt *p_ptt;
5780
5781 p_hwfn = &ha->cdev.hwfns[0];
5782 p_ptt = ecore_ptt_acquire(p_hwfn);
5783
5784 if (p_ptt == NULL) {
5785 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5786 return (-1);
5787 }
5788 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5789
5790 ecore_ptt_release(p_hwfn, p_ptt);
5791
5792 return (0);
5793 }
5794
5795 static int
5796 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5797 {
5798 struct ecore_hwfn *p_hwfn;
5799 struct ecore_ptt *p_ptt;
5800
5801 p_hwfn = &ha->cdev.hwfns[0];
5802 p_ptt = ecore_ptt_acquire(p_hwfn);
5803
5804 if (p_ptt == NULL) {
5805 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5806 return (-1);
5807 }
5808 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5809
5810 ecore_ptt_release(p_hwfn, p_ptt);
5811
5812 return (0);
5813 }
5814
5815 static int
5816 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5817 {
5818 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5819 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5820 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5821
5822 return 0;
5823 }
5824
5825 static void
5826 qlnx_init_fp(qlnx_host_t *ha)
5827 {
5828 int rss_id, txq_array_index, tc;
5829
5830 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5831 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5832
5833 fp->rss_id = rss_id;
5834 fp->edev = ha;
5835 fp->sb_info = &ha->sb_array[rss_id];
5836 fp->rxq = &ha->rxq_array[rss_id];
5837 fp->rxq->rxq_id = rss_id;
5838
5839 for (tc = 0; tc < ha->num_tc; tc++) {
5840 txq_array_index = tc * ha->num_rss + rss_id;
5841 fp->txq[tc] = &ha->txq_array[txq_array_index];
5842 fp->txq[tc]->index = txq_array_index;
5843 }
5844
5845 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5846 rss_id);
5847
5848 fp->tx_ring_full = 0;
5849
5850 /* reset all the statistics counters */
5851
5852 fp->tx_pkts_processed = 0;
5853 fp->tx_pkts_freed = 0;
5854 fp->tx_pkts_transmitted = 0;
5855 fp->tx_pkts_completed = 0;
5856
5857 #ifdef QLNX_TRACE_PERF_DATA
5858 fp->tx_pkts_trans_ctx = 0;
5859 fp->tx_pkts_compl_ctx = 0;
5860 fp->tx_pkts_trans_fp = 0;
5861 fp->tx_pkts_compl_fp = 0;
5862 fp->tx_pkts_compl_intr = 0;
5863 #endif
5864 fp->tx_lso_wnd_min_len = 0;
5865 fp->tx_defrag = 0;
5866 fp->tx_nsegs_gt_elem_left = 0;
5867 fp->tx_tso_max_nsegs = 0;
5868 fp->tx_tso_min_nsegs = 0;
5869 fp->err_tx_nsegs_gt_elem_left = 0;
5870 fp->err_tx_dmamap_create = 0;
5871 fp->err_tx_defrag_dmamap_load = 0;
5872 fp->err_tx_non_tso_max_seg = 0;
5873 fp->err_tx_dmamap_load = 0;
5874 fp->err_tx_defrag = 0;
5875 fp->err_tx_free_pkt_null = 0;
5876 fp->err_tx_cons_idx_conflict = 0;
5877
5878 fp->rx_pkts = 0;
5879 fp->err_m_getcl = 0;
5880 fp->err_m_getjcl = 0;
5881 }
5882 return;
5883 }
5884
5885 void
5886 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5887 {
5888 struct ecore_dev *cdev;
5889
5890 cdev = &ha->cdev;
5891
5892 if (sb_info->sb_virt) {
5893 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5894 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5895 sb_info->sb_virt = NULL;
5896 }
5897 }
5898
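/*
 * Status blocks are striped round-robin across the hwfns of a
 * multi-function device: sb_id % num_hwfns selects the hwfn and
 * sb_id / num_hwfns is the status block index within that hwfn.
 */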
5899 static int
5900 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5901 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5902 {
5903 struct ecore_hwfn *p_hwfn;
5904 int hwfn_index, rc;
5905 u16 rel_sb_id;
5906
5907 hwfn_index = sb_id % cdev->num_hwfns;
5908 p_hwfn = &cdev->hwfns[hwfn_index];
5909 rel_sb_id = sb_id / cdev->num_hwfns;
5910
5911 QL_DPRINT2(((qlnx_host_t *)cdev),
5912 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5913 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5914 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5915 sb_virt_addr, (void *)sb_phy_addr);
5916
5917 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5918 sb_virt_addr, sb_phy_addr, rel_sb_id);
5919
5920 return rc;
5921 }
5922
5923 /* This function allocates fast-path status block memory */
5924 int
5925 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5926 {
5927 struct status_block_e4 *sb_virt;
5928 bus_addr_t sb_phys;
5929 int rc;
5930 uint32_t size;
5931 struct ecore_dev *cdev;
5932
5933 cdev = &ha->cdev;
5934
5935 size = sizeof(*sb_virt);
5936 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5937
5938 if (!sb_virt) {
5939 QL_DPRINT1(ha, "Status block allocation failed\n");
5940 return -ENOMEM;
5941 }
5942
5943 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5944 if (rc) {
5945 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5946 }
5947
5948 return rc;
5949 }
5950
5951 static void
qlnx_free_rx_buffers(qlnx_host_t * ha,struct qlnx_rx_queue * rxq)5952 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5953 {
5954 int i;
5955 struct sw_rx_data *rx_buf;
5956
5957 for (i = 0; i < rxq->num_rx_buffers; i++) {
5958 rx_buf = &rxq->sw_rx_ring[i];
5959
5960 if (rx_buf->data != NULL) {
5961 if (rx_buf->map != NULL) {
5962 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5963 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5964 rx_buf->map = NULL;
5965 }
5966 m_freem(rx_buf->data);
5967 rx_buf->data = NULL;
5968 }
5969 }
5970 return;
5971 }
5972
5973 static void
qlnx_free_mem_rxq(qlnx_host_t * ha,struct qlnx_rx_queue * rxq)5974 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5975 {
5976 struct ecore_dev *cdev;
5977 int i;
5978
5979 cdev = &ha->cdev;
5980
5981 qlnx_free_rx_buffers(ha, rxq);
5982
5983 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5984 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5985 if (rxq->tpa_info[i].mpf != NULL)
5986 m_freem(rxq->tpa_info[i].mpf);
5987 }
5988
5989 bzero((void *)&rxq->sw_rx_ring[0],
5990 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5991
5992 /* Free the real RQ ring used by FW */
5993 if (rxq->rx_bd_ring.p_virt_addr) {
5994 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5995 rxq->rx_bd_ring.p_virt_addr = NULL;
5996 }
5997
5998 /* Free the real completion ring used by FW */
5999 if (rxq->rx_comp_ring.p_virt_addr &&
6000 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6001 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6002 rxq->rx_comp_ring.p_virt_addr = NULL;
6003 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6004 }
6005
6006 #ifdef QLNX_SOFT_LRO
6007 {
6008 struct lro_ctrl *lro;
6009
6010 lro = &rxq->lro;
6011 tcp_lro_free(lro);
6012 }
6013 #endif /* #ifdef QLNX_SOFT_LRO */
6014
6015 return;
6016 }
6017
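/*
 * Allocate and map a single receive buffer and publish it to the firmware
 * BD ring: grab a cluster mbuf, DMA-map it, record it in the parallel
 * software ring at sw_rx_prod, then produce a BD carrying the 64-bit bus
 * address split into little-endian hi/lo halves. The software producer
 * index wraps with a mask, which assumes RX_RING_SIZE is a power of two.
 */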
static int
qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	register struct mbuf	*mp;
	uint16_t		rx_buf_size;
	struct sw_rx_data	*sw_rx_data;
	struct eth_rx_bd	*rx_bd;
	dma_addr_t		dma_addr;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs[1];
	int			nsegs;
	int			ret;

	rx_buf_size = rxq->rx_buf_size;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
			BUS_DMA_NOWAIT);
	dma_addr = segs[0].ds_addr;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			ret, (long long unsigned int)dma_addr, nsegs);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
	sw_rx_data->data = mp;
	sw_rx_data->dma_addr = dma_addr;
	sw_rx_data->map = map;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = htole32(U64_HI(dma_addr));
	rx_bd->addr.lo = htole32(U64_LO(dma_addr));
	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return 0;
}

static int
qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
	struct qlnx_agg_info *tpa)
{
	struct mbuf		*mp;
	dma_addr_t		dma_addr;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs[1];
	int			nsegs;
	int			ret;
	struct sw_rx_data	*rx_buf;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
			BUS_DMA_NOWAIT);
	dma_addr = segs[0].ds_addr;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			ret, (long long unsigned int)dma_addr, nsegs);
		return -ENOMEM;
	}

	rx_buf = &tpa->rx_buf;

	memset(rx_buf, 0, sizeof (struct sw_rx_data));

	rx_buf->data = mp;
	rx_buf->dma_addr = dma_addr;
	rx_buf->map = map;

	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	return (0);
}

static void
qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
{
	struct sw_rx_data	*rx_buf;

	rx_buf = &tpa->rx_buf;

	if (rx_buf->data != NULL) {
		if (rx_buf->map != NULL) {
			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
			rx_buf->map = NULL;
		}
		m_freem(rx_buf->data);
		rx_buf->data = NULL;
	}
	return;
}

/* This function allocates all memory needed per Rx queue */
static int
qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	int			i, rc, num_allocated;
	struct ecore_dev	*cdev;

	cdev = &ha->cdev;

	rxq->num_rx_buffers = RX_RING_SIZE;

	rxq->rx_buf_size = ha->rx_buf_size;

	/* Allocate the parallel driver ring for Rx buffers */
	bzero((void *)&rxq->sw_rx_ring[0],
		(sizeof (struct sw_rx_data) * RX_RING_SIZE));

	/* Allocate FW Rx ring */

	rc = ecore_chain_alloc(cdev,
			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
			ECORE_CHAIN_MODE_NEXT_PTR,
			ECORE_CHAIN_CNT_TYPE_U16,
			RX_RING_SIZE,
			sizeof(struct eth_rx_bd),
			&rxq->rx_bd_ring, NULL);

	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = ecore_chain_alloc(cdev,
			ECORE_CHAIN_USE_TO_CONSUME,
			ECORE_CHAIN_MODE_PBL,
			ECORE_CHAIN_CNT_TYPE_U16,
			RX_RING_SIZE,
			sizeof(union eth_rx_cqe),
			&rxq->rx_comp_ring, NULL);

	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
			&rxq->tpa_info[i]);
		if (rc)
			break;
	}

	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qlnx_alloc_rx_buffer(ha, rxq);
		if (rc)
			break;
	}
	num_allocated = i;
	if (!num_allocated) {
		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
		goto err;
	} else if (num_allocated < rxq->num_rx_buffers) {
		QL_DPRINT1(ha, "Allocated fewer buffers than"
			" desired (%d allocated)\n", num_allocated);
	}

#ifdef QLNX_SOFT_LRO
	{
		struct lro_ctrl *lro;

		lro = &rxq->lro;

		if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
				rxq->rxq_id);
			goto err;
		}

		lro->ifp = ha->ifp;
	}
#endif /* #ifdef QLNX_SOFT_LRO */
	return 0;

err:
	qlnx_free_mem_rxq(ha, rxq);
	return -ENOMEM;
}

static void
qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	struct ecore_dev	*cdev;

	cdev = &ha->cdev;

	bzero((void *)&txq->sw_tx_ring[0],
		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));

	/* Free the real Tx ring used by FW */
	if (txq->tx_pbl.p_virt_addr) {
		ecore_chain_free(cdev, &txq->tx_pbl);
		txq->tx_pbl.p_virt_addr = NULL;
	}
	return;
}

/* This function allocates all memory needed per Tx queue */
static int
qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	int			ret = ECORE_SUCCESS;
	union eth_tx_bd_types	*p_virt;
	struct ecore_dev	*cdev;

	cdev = &ha->cdev;

	bzero((void *)&txq->sw_tx_ring[0],
		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));

	/* Allocate the real Tx ring to be used by FW */
	ret = ecore_chain_alloc(cdev,
			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
			ECORE_CHAIN_MODE_PBL,
			ECORE_CHAIN_CNT_TYPE_U16,
			TX_RING_SIZE,
			sizeof(*p_virt),
			&txq->tx_pbl, NULL);

	if (ret != ECORE_SUCCESS) {
		goto err;
	}

	txq->num_tx_buffers = TX_RING_SIZE;

	return 0;

err:
	qlnx_free_mem_txq(ha, fp, txq);
	return -ENOMEM;
}

static void
qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	struct mbuf	*mp;
	if_t		ifp = ha->ifp;

	if (mtx_initialized(&fp->tx_mtx)) {
		if (fp->tx_br != NULL) {
			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				fp->tx_pkts_freed++;
				m_freem(mp);
			}

			mtx_unlock(&fp->tx_mtx);

			buf_ring_free(fp->tx_br, M_DEVBUF);
			fp->tx_br = NULL;
		}
		mtx_destroy(&fp->tx_mtx);
	}
	return;
}

static void
qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int	tc;

	qlnx_free_mem_sb(ha, fp->sb_info);

	qlnx_free_mem_rxq(ha, fp->rxq);

	for (tc = 0; tc < ha->num_tc; tc++)
		qlnx_free_mem_txq(ha, fp, fp->txq[tc]);

	return;
}

static int
qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);

	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

	fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
				M_NOWAIT, &fp->tx_mtx);
	if (fp->tx_br == NULL) {
		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
			ha->dev_unit, fp->rss_id);
		return -ENOMEM;
	}
	return 0;
}

static int
qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int	rc, tc;

	rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
	if (rc)
		goto err;

	if (ha->rx_jumbo_buf_eq_mtu) {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else if (ha->max_frame_size <= MJUMPAGESIZE)
			ha->rx_buf_size = MJUMPAGESIZE;
		else if (ha->max_frame_size <= MJUM9BYTES)
			ha->rx_buf_size = MJUM9BYTES;
		else if (ha->max_frame_size <= MJUM16BYTES)
			ha->rx_buf_size = MJUM16BYTES;
	} else {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else
			ha->rx_buf_size = MJUMPAGESIZE;
	}

	rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
	if (rc)
		goto err;

	for (tc = 0; tc < ha->num_tc; tc++) {
		rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
		if (rc)
			goto err;
	}

	return 0;

err:
	qlnx_free_mem_fp(ha, fp);
	return -ENOMEM;
}

static void
qlnx_free_mem_load(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		qlnx_free_mem_fp(ha, fp);
	}
	return;
}

static int
qlnx_alloc_mem_load(qlnx_host_t *ha)
{
	int	rc = 0, rss_id;

	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];

		rc = qlnx_alloc_mem_fp(ha, fp);
		if (rc)
			break;
	}
	return (rc);
}

static int
qlnx_start_vport(struct ecore_dev *cdev,
		u8 vport_id,
		u16 mtu,
		u8 drop_ttl0_flg,
		u8 inner_vlan_removal_en_flg,
		u8 tx_switching,
		u8 hw_lro_enable)
{
	int					rc, i;
	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
	qlnx_host_t				*ha __unused;

	ha = (qlnx_host_t *)cdev;

	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
	vport_start_params.tx_switching = 0;
	vport_start_params.handle_ptp_pkts = 0;
	vport_start_params.only_untagged = 0;
	vport_start_params.drop_ttl0 = drop_ttl0_flg;

	vport_start_params.tpa_mode =
		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

	vport_start_params.vport_id = vport_id;
	vport_start_params.mtu = mtu;

	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);

		if (rc) {
			QL_DPRINT1(ha, "Failed to start V-PORT %d"
				" with MTU %d\n", vport_id, mtu);
			return -ENOMEM;
		}

		ecore_hw_start_fastpath(p_hwfn);

		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
			vport_id, mtu);
	}
	return 0;
}

static int
qlnx_update_vport(struct ecore_dev *cdev,
	struct qlnx_update_vport_params *params)
{
	struct ecore_sp_vport_update_params	sp_params;
	int					rc, i, j, fp_index;
	struct ecore_hwfn			*p_hwfn;
	struct ecore_rss_params			*rss;
	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
	struct qlnx_fastpath			*fp;

	memset(&sp_params, 0, sizeof(sp_params));
	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;

	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_rx_flg;
	sp_params.vport_active_rx_flg = params->vport_active_rx_flg;

	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_tx_flg;
	sp_params.vport_active_tx_flg = params->vport_active_tx_flg;

	sp_params.update_inner_vlan_removal_flg =
		params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;

	sp_params.sge_tpa_params = params->sge_tpa_params;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (params->rss_params->update_rss_config)
		sp_params.rss_params = params->rss_params;
	else
		sp_params.rss_params = NULL;

	for_each_hwfn(cdev, i) {
		p_hwfn = &cdev->hwfns[i];

		if ((cdev->num_hwfns > 1) &&
			params->rss_params->update_rss_config &&
			params->rss_params->rss_enable) {
			rss = params->rss_params;

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
				fp_index = ((cdev->num_hwfns * j) + i) %
						ha->num_rss;

				fp = &ha->fp_array[fp_index];
				rss->rss_ind_table[j] = fp->rxq->handle;
			}

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p\n",
					rss->rss_ind_table[j],
					rss->rss_ind_table[j+1],
					rss->rss_ind_table[j+2],
					rss->rss_ind_table[j+3],
					rss->rss_ind_table[j+4],
					rss->rss_ind_table[j+5],
					rss->rss_ind_table[j+6],
					rss->rss_ind_table[j+7]);
				j += 8;
			}
		}

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);

		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			QL_DPRINT1(ha, "Failed to update VPORT\n");
			return rc;
		}

		QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
			params->vport_id, params->vport_active_tx_flg,
			params->vport_active_rx_flg,
			params->update_vport_active_tx_flg,
			params->update_vport_active_rx_flg);
	}

	return 0;
}

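/*
 * Recycle the Rx buffer at the current consumer slot back to the producer
 * slot without allocating a replacement: the mbuf pointer is carried over
 * in the software ring and the hardware BD is copied verbatim, so the
 * firmware sees the same DMA address again. This lets the Rx path repost a
 * slot when a replacement buffer cannot be obtained, keeping the ring from
 * running empty.
 */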
static void
qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
{
	struct eth_rx_bd	*rx_bd_cons =
					ecore_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd	*rx_bd_prod =
					ecore_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data	*sw_rx_data_cons =
					&rxq->sw_rx_ring[rxq->sw_rx_cons];
	struct sw_rx_data	*sw_rx_data_prod =
					&rxq->sw_rx_ring[rxq->sw_rx_prod];

	sw_rx_data_prod->data = sw_rx_data_cons->data;
	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));

	rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return;
}

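/*
 * Publish the Rx BD and CQE producer indices to the device. Both 16-bit
 * producers are packed into a single 32-bit internal-RAM write so the
 * firmware observes them together; the first wmb() orders the BD/SGE
 * stores ahead of the producer update, and the second orders this doorbell
 * against any subsequent doorbell issued from another CPU.
 */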
static void
qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
{
	uint16_t	bd_prod;
	uint16_t	cqe_prod;
	union {
		struct eth_rx_prod_data	rx_prod_data;
		uint32_t		data32;
	} rx_prods;

	bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);

	/* Update producers */
	rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
	rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

#ifdef ECORE_CONFIG_DIRECT_HWFN
	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), &rx_prods.data32);
#else
	internal_ram_wr(rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), &rx_prods.data32);
#endif

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qlnx_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	wmb();

	return;
}

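/*
 * Default 40-byte RSS hash key, stored as ten 32-bit words with the bytes
 * packed big-endian within each word. The byte sequence (6d 5a 56 da ...)
 * matches the widely used default Toeplitz key that many NIC drivers ship.
 */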
static uint32_t qlnx_hash_key[] = {
	((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
	((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
	((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
	((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
	((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
	((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
	((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
	((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
	((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
	((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};

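/*
 * Bring the L2 datapath up: start the vport, then for every RSS fastpath
 * start the Rx queue and one Tx queue per traffic class and program the Tx
 * doorbell template, then build the RSS indirection table and key (when
 * more than one queue exists), and finally send a vport-update ramrod that
 * flips the vport Rx/Tx active flags and, when hardware LRO is enabled,
 * the TPA aggregation parameters.
 */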
static int
qlnx_start_queues(qlnx_host_t *ha)
{
	int				rc, tc, i, vport_id = 0,
					drop_ttl0_flg = 1, vlan_removal_en = 1,
					tx_switching = 0, hw_lro_enable = 0;
	struct ecore_dev		*cdev = &ha->cdev;
	struct ecore_rss_params		*rss_params = &ha->rss_params;
	struct qlnx_update_vport_params	vport_update_params;
	if_t				ifp;
	struct ecore_hwfn		*p_hwfn;
	struct ecore_sge_tpa_params	tpa_params;
	struct ecore_queue_start_common_params qparams;
	struct qlnx_fastpath		*fp;

	ifp = ha->ifp;

	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);

	if (!ha->num_rss) {
		QL_DPRINT1(ha, "Cannot update V-PORT as active as there"
			" are no Rx queues\n");
		return -EINVAL;
	}

#ifndef QLNX_SOFT_LRO
	hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
#endif /* #ifndef QLNX_SOFT_LRO */

	rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
			vlan_removal_en, tx_switching, hw_lro_enable);

	if (rc) {
		QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	QL_DPRINT2(ha, "Start vport ramrod passed, "
		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);

	for_each_rss(i) {
		struct ecore_rxq_start_ret_params rx_ret_params;
		struct ecore_txq_start_ret_params tx_ret_params;

		fp = &ha->fp_array[i];
		p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];

		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
		bzero(&rx_ret_params,
			sizeof (struct ecore_rxq_start_ret_params));

		qparams.queue_id = i;
		qparams.vport_id = vport_id;
		qparams.stats_id = vport_id;
		qparams.p_sb = fp->sb_info;
		qparams.sb_idx = RX_PI;

		rc = ecore_eth_rx_queue_start(p_hwfn,
			p_hwfn->hw_info.opaque_fid,
			&qparams,
			fp->rxq->rx_buf_size,	/* bd_max_bytes */
			/* bd_chain_phys_addr */
			fp->rxq->rx_bd_ring.p_phys_addr,
			/* cqe_pbl_addr */
			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
			/* cqe_pbl_size */
			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
			&rx_ret_params);

		if (rc) {
			QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
			return rc;
		}

		fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
		fp->rxq->handle = rx_ret_params.p_handle;
		fp->rxq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[RX_PI];

		qlnx_update_rx_prod(p_hwfn, fp->rxq);

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			bzero(&qparams,
				sizeof(struct ecore_queue_start_common_params));
			bzero(&tx_ret_params,
				sizeof (struct ecore_txq_start_ret_params));

			qparams.queue_id = txq->index / cdev->num_hwfns;
			qparams.vport_id = vport_id;
			qparams.stats_id = vport_id;
			qparams.p_sb = fp->sb_info;
			qparams.sb_idx = TX_PI(tc);

			rc = ecore_eth_tx_queue_start(p_hwfn,
				p_hwfn->hw_info.opaque_fid,
				&qparams, tc,
				/* bd_chain_phys_addr */
				ecore_chain_get_pbl_phys(&txq->tx_pbl),
				ecore_chain_get_page_cnt(&txq->tx_pbl),
				&tx_ret_params);

			if (rc) {
				QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
					txq->index, rc);
				return rc;
			}

			txq->doorbell_addr = tx_ret_params.p_doorbell;
			txq->handle = tx_ret_params.p_handle;

			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				ETH_DB_DATA_AGG_VAL_SEL,
				DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Fill struct with RSS params */
	if (ha->num_rss > 1) {
		rss_params->update_rss_config = 1;
		rss_params->rss_enable = 1;
		rss_params->update_rss_capabilities = 1;
		rss_params->update_rss_ind_table = 1;
		rss_params->update_rss_key = 1;
		rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
				ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
		rss_params->rss_table_size_log = 7; /* 2^7 = 128 */

		for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
			fp = &ha->fp_array[(i % ha->num_rss)];
			rss_params->rss_ind_table[i] = fp->rxq->handle;
		}

		for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];

	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport_id;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 1;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 1;
	vport_update_params.rss_params = rss_params;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = 1;

	if (hw_lro_enable) {
		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));

		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

		tpa_params.update_tpa_en_flg = 1;
		tpa_params.tpa_ipv4_en_flg = 1;
		tpa_params.tpa_ipv6_en_flg = 1;

		tpa_params.update_tpa_param_flg = 1;
		tpa_params.tpa_pkt_split_flg = 0;
		tpa_params.tpa_hdr_data_split_flg = 0;
		tpa_params.tpa_gro_consistent_flg = 0;
		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		tpa_params.tpa_max_size = (uint16_t)(-1);
		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;

		vport_update_params.sge_tpa_params = &tpa_params;
	}

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

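/*
 * Busy-wait until the firmware's Tx consumer catches up with the driver's
 * chain consumer, reaping completed packets under the queue lock and
 * sleeping 2ms between polls. Note there is no timeout here; the caller
 * relies on the hardware eventually draining the queue.
 */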
static int
qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t	hw_bd_cons;
	uint16_t	ecore_cons_idx;

	QL_DPRINT2(ha, "enter\n");

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	while (hw_bd_cons !=
		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
		mtx_lock(&fp->tx_mtx);

		(void)qlnx_tx_int(ha, fp, txq);

		mtx_unlock(&fp->tx_mtx);

		qlnx_mdelay(__func__, 2);

		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
	}

	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);

	return 0;
}

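/*
 * Tear-down mirror of qlnx_start_queues(): first deactivate the vport so
 * no new traffic is classified to the queues, then drain every Tx queue,
 * stop all Tx/Rx queues in reverse creation order, and finally send the
 * vport-stop ramrod on each hwfn.
 */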
static int
qlnx_stop_queues(qlnx_host_t *ha)
{
	struct qlnx_update_vport_params	vport_update_params;
	struct ecore_dev		*cdev;
	struct qlnx_fastpath		*fp;
	int				rc, tc, i;

	cdev = &ha->cdev;

	/* Disable the vport */

	memset(&vport_update_params, 0, sizeof(vport_update_params));

	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 0;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 0;
	vport_update_params.rss_params = &ha->rss_params;
	vport_update_params.rss_params->update_rss_config = 0;
	vport_update_params.rss_params->rss_enable = 0;
	vport_update_params.update_inner_vlan_removal_flg = 0;
	vport_update_params.inner_vlan_removal_flg = 0;

	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_rss(i) {
		fp = &ha->fp_array[i];

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			rc = qlnx_drain_txq(ha, fp, txq);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = ha->num_rss - 1; i >= 0; i--) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];

		fp = &ha->fp_array[i];

		/* Stop the Tx Queue(s) */
		for (tc = 0; tc < ha->num_tc; tc++) {
			int tx_queue_id __unused;

			tx_queue_id = tc * ha->num_rss + i;
			rc = ecore_eth_tx_queue_stop(p_hwfn,
					fp->txq[tc]->handle);

			if (rc) {
				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
					tx_queue_id);
				return rc;
			}
		}

		/* Stop the Rx Queue */
		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
				false);
		if (rc) {
			QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
			return rc;
		}
	}

	/* Stop the vport */
	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);

		if (rc) {
			QL_DPRINT1(ha, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return rc;
}

static int
qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
	enum ecore_filter_opcode opcode,
	unsigned char mac[ETH_ALEN])
{
	struct ecore_filter_ucast	ucast;
	struct ecore_dev		*cdev;
	int				rc;

	cdev = &ha->cdev;

	bzero(&ucast, sizeof(struct ecore_filter_ucast));

	ucast.opcode = opcode;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;
	ucast.vport_to_add_to = 0;
	memcpy(&ucast.mac[0], mac, ETH_ALEN);

	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}

static int
qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
{
	struct ecore_filter_ucast	ucast;
	struct ecore_dev		*cdev;
	int				rc;

	bzero(&ucast, sizeof(struct ecore_filter_ucast));

	ucast.opcode = ECORE_FILTER_REPLACE;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;

	cdev = &ha->cdev;

	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}

static int
qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
{
	struct ecore_filter_mcast	*mcast;
	struct ecore_dev		*cdev;
	int				rc, i;

	cdev = &ha->cdev;

	mcast = &ha->ecore_mcast;
	bzero(mcast, sizeof(struct ecore_filter_mcast));

	mcast->opcode = ECORE_FILTER_REMOVE;

	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
			memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0],
				ETH_ALEN);
			mcast->num_mc_addrs++;
		}
	}

	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);

	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
	ha->nmcast = 0;

	return (rc);
}

static int
qlnx_clean_filters(qlnx_host_t *ha)
{
	int	rc = 0;

	/* Remove all unicast macs */
	rc = qlnx_remove_all_ucast_mac(ha);
	if (rc)
		return rc;

	/* Remove all multicast macs */
	rc = qlnx_remove_all_mcast_mac(ha);
	if (rc)
		return rc;

	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);

	return (rc);
}

static int
qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
{
	struct ecore_filter_accept_flags	accept;
	int					rc = 0;
	struct ecore_dev			*cdev;

	cdev = &ha->cdev;

	bzero(&accept, sizeof(struct ecore_filter_accept_flags));

	accept.update_rx_mode_config = 1;
	accept.rx_accept_filter = filter;

	accept.update_tx_mode_config = 1;
	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;

	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
			ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}

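/*
 * (Re)program the Rx classification for the current interface state:
 * re-install the primary unicast MAC, flush the multicast table, and build
 * the accept filter. Matched unicast/multicast plus broadcast is the
 * baseline; when qlnx_vf_device() reports 0 or IFF_PROMISC is set,
 * unmatched unicast/multicast are accepted as well, and IFF_ALLMULTI adds
 * unmatched multicast only.
 */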
static int
qlnx_set_rx_mode(qlnx_host_t *ha)
{
	int	rc = 0;
	uint8_t	filter;

	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
	if (rc)
		return rc;

	rc = qlnx_remove_all_mcast_mac(ha);
	if (rc)
		return rc;

	filter = ECORE_ACCEPT_UCAST_MATCHED |
			ECORE_ACCEPT_MCAST_MATCHED |
			ECORE_ACCEPT_BCAST;

	if (qlnx_vf_device(ha) == 0 ||
	    (if_getflags(ha->ifp) & IFF_PROMISC)) {
		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	}
	ha->filter = filter;

	rc = qlnx_set_rx_accept_filter(ha, filter);

	return (rc);
}

static int
qlnx_set_link(qlnx_host_t *ha, bool link_up)
{
	int			i, rc = 0;
	struct ecore_dev	*cdev;
	struct ecore_hwfn	*hwfn;
	struct ecore_ptt	*ptt;

	if (qlnx_vf_device(ha) == 0)
		return (0);

	cdev = &ha->cdev;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		ptt = ecore_ptt_acquire(hwfn);
		if (!ptt)
			return -EBUSY;

		rc = ecore_mcp_set_link(hwfn, ptt, link_up);

		ecore_ptt_release(hwfn, ptt);

		if (rc)
			return rc;
	}
	return (rc);
}

static uint64_t
qlnx_get_counter(if_t ifp, ift_counter cnt)
{
	qlnx_host_t	*ha;
	uint64_t	count;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		count = ha->hw_stats.common.rx_ucast_pkts +
			ha->hw_stats.common.rx_mcast_pkts +
			ha->hw_stats.common.rx_bcast_pkts;
		break;

	case IFCOUNTER_IERRORS:
		count = ha->hw_stats.common.rx_crc_errors +
			ha->hw_stats.common.rx_align_errors +
			ha->hw_stats.common.rx_oversize_packets +
			ha->hw_stats.common.rx_undersize_packets;
		break;

	case IFCOUNTER_OPACKETS:
		count = ha->hw_stats.common.tx_ucast_pkts +
			ha->hw_stats.common.tx_mcast_pkts +
			ha->hw_stats.common.tx_bcast_pkts;
		break;

	case IFCOUNTER_OERRORS:
		count = ha->hw_stats.common.tx_err_drop_pkts;
		break;

	case IFCOUNTER_COLLISIONS:
		return (0);

	case IFCOUNTER_IBYTES:
		count = ha->hw_stats.common.rx_ucast_bytes +
			ha->hw_stats.common.rx_mcast_bytes +
			ha->hw_stats.common.rx_bcast_bytes;
		break;

	case IFCOUNTER_OBYTES:
		count = ha->hw_stats.common.tx_ucast_bytes +
			ha->hw_stats.common.tx_mcast_bytes +
			ha->hw_stats.common.tx_bcast_bytes;
		break;

	case IFCOUNTER_IMCASTS:
		/* packet counter, not the byte counter */
		count = ha->hw_stats.common.rx_mcast_pkts;
		break;

	case IFCOUNTER_OMCASTS:
		/* packet counter, not the byte counter */
		count = ha->hw_stats.common.tx_mcast_pkts;
		break;

	case IFCOUNTER_IQDROPS:
	case IFCOUNTER_OQDROPS:
	case IFCOUNTER_NOPROTO:
	default:
		return (if_get_counter_default(ifp, cnt));
	}
	return (count);
}

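/*
 * One-second housekeeping callout: kicks the error-recovery task when
 * requested (and stops rescheduling itself until recovery restarts it),
 * otherwise refreshes the cached vport statistics, optionally samples the
 * storm-processor counters, and re-arms itself for hz ticks later.
 */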
static void
qlnx_timer(void *arg)
{
	qlnx_host_t	*ha;

	ha = (qlnx_host_t *)arg;

	if (ha->error_recovery) {
		ha->error_recovery = 0;
		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
		return;
	}

	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);

	if (ha->storm_stats_gather)
		qlnx_sample_storm_stats(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	return;
}

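/*
 * Load path invoked when the interface is brought up: allocate the
 * per-queue arrays and fastpath memory, hook and CPU-bind one fastpath
 * interrupt per RSS queue, start the vport and queues, program the Rx
 * filters, request link-up, and start the periodic timer. The
 * qlnx_load_exit{2,1,0} labels unwind in reverse allocation order.
 */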
static int
qlnx_load(qlnx_host_t *ha)
{
	int		i;
	int		rc = 0;
	device_t	dev;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	rc = qlnx_alloc_mem_arrays(ha);
	if (rc)
		goto qlnx_load_exit0;

	qlnx_init_fp(ha);

	rc = qlnx_alloc_mem_load(ha);
	if (rc)
		goto qlnx_load_exit1;

	QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
		ha->num_rss, ha->num_tc);

	for (i = 0; i < ha->num_rss; i++) {
		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
				(INTR_TYPE_NET | INTR_MPSAFE),
				NULL, qlnx_fp_isr, &ha->irq_vec[i],
				&ha->irq_vec[i].handle))) {
			QL_DPRINT1(ha, "could not setup interrupt\n");
			goto qlnx_load_exit2;
		}

		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
			irq %p handle %p\n", i,
			ha->irq_vec[i].irq_rid,
			ha->irq_vec[i].irq, ha->irq_vec[i].handle);

		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
	}

	rc = qlnx_start_queues(ha);
	if (rc)
		goto qlnx_load_exit2;

	QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	rc = qlnx_set_rx_mode(ha);
	if (rc)
		goto qlnx_load_exit2;

	/* Ask for link-up using current configuration */
	qlnx_set_link(ha, true);

	if (qlnx_vf_device(ha) == 0)
		qlnx_link_update(&ha->cdev.hwfns[0]);

	ha->state = QLNX_STATE_OPEN;

	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

	if (ha->flags.callout_init)
		callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	goto qlnx_load_exit0;

qlnx_load_exit2:
	qlnx_free_mem_load(ha);

qlnx_load_exit1:
	ha->num_rss = 0;

qlnx_load_exit0:
	QL_DPRINT2(ha, "exit [%d]\n", rc);
	return rc;
}

static void
qlnx_drain_soft_lro(qlnx_host_t *ha)
{
#ifdef QLNX_SOFT_LRO

	if_t	ifp;
	int	i;

	ifp = ha->ifp;

	if (if_getcapenable(ifp) & IFCAP_LRO) {
		for (i = 0; i < ha->num_rss; i++) {
			struct qlnx_fastpath *fp = &ha->fp_array[i];
			struct lro_ctrl *lro;

			lro = &fp->rxq->lro;

			tcp_lro_flush_all(lro);
		}
	}

#endif /* #ifdef QLNX_SOFT_LRO */

	return;
}

static void
qlnx_unload(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	device_t		dev;
	int			i;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");
	QL_DPRINT1(ha, " QLNX STATE = %d\n", ha->state);

	if (ha->state == QLNX_STATE_OPEN) {
		qlnx_set_link(ha, false);
		qlnx_clean_filters(ha);
		qlnx_stop_queues(ha);
		ecore_hw_stop_fastpath(cdev);

		for (i = 0; i < ha->num_rss; i++) {
			if (ha->irq_vec[i].handle) {
				(void)bus_teardown_intr(dev,
					ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
				ha->irq_vec[i].handle = NULL;
			}
		}

		qlnx_drain_fp_taskqueues(ha);
		qlnx_drain_soft_lro(ha);
		qlnx_free_mem_load(ha);
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	qlnx_mdelay(__func__, 1000);

	ha->state = QLNX_STATE_CLOSED;

	QL_DPRINT2(ha, "exit\n");
	return;
}

static int
qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	int			rval = -1;
	struct ecore_hwfn	*p_hwfn;
	struct ecore_ptt	*p_ptt;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (rval);
	}

	rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

	if (rval == DBG_STATUS_OK)
		rval = 0;
	else {
		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
			" [0x%x]\n", rval);
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

static int
qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	int			rval = -1;
	struct ecore_hwfn	*p_hwfn;
	struct ecore_ptt	*p_ptt;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (rval);
	}

	rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

	if (rval == DBG_STATUS_OK)
		rval = 0;
	else {
		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
			" [0x%x]\n", rval);
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

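/*
 * Capture one sample of the per-storm (X/Y/P/T/M/U) active, stall,
 * sleeping and inactive cycle counters from each hwfn's SEM fast memory.
 * Samples for hwfn i land at index
 * storm_stats_index + i * QLNX_STORM_STATS_SAMPLES_PER_HWFN, and gathering
 * auto-disables once the per-hwfn sample budget is exhausted.
 */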
static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
	int			i, index;
	struct ecore_dev	*cdev;
	qlnx_storm_stats_t	*s_stats;
	uint32_t		reg;
	struct ecore_ptt	*p_ptt;
	struct ecore_hwfn	*hwfn;

	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
		ha->storm_stats_gather = 0;
		return;
	}

	cdev = &ha->cdev;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		p_ptt = ecore_ptt_acquire(hwfn);
		if (!p_ptt)
			return;

		index = ha->storm_stats_index +
				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);

		s_stats = &ha->storm_stats[index];

		/* XSTORM */
		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* YSTORM */
		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* PSTORM */
		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* TSTORM */
		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* MSTORM */
		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* USTORM */
		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		ecore_ptt_release(hwfn, p_ptt);
	}

	ha->storm_stats_index++;

	return;
}

/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	while (len >= 16) {
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);
		i += 16;
		len -= 16;
		buf += 16;
	}
	switch (len) {
	case 1:
		device_printf(dev, "0x%08x: %02x\n", i, buf[0]);
		break;
	case 2:
		device_printf(dev, "0x%08x: %02x %02x\n", i, buf[0], buf[1]);
		break;
	case 3:
		device_printf(dev, "0x%08x: %02x %02x %02x\n",
			i, buf[0], buf[1], buf[2]);
		break;
	case 4:
		device_printf(dev, "0x%08x: %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3]);
		break;
	case 5:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4]);
		break;
	case 6:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
		break;
	case 7:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
		break;
	case 8:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7]);
		break;
	case 9:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8]);
		break;
	case 10:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9]);
		break;
	case 11:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10]);
		break;
	case 12:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11]);
		break;
	case 13:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
		break;
	case 14:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13]);
		break;
	case 15:
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13], buf[14]);
		break;
	default:
		break;
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}

7679 #ifdef CONFIG_ECORE_SRIOV
7680
7681 static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn * p_hwfn,uint8_t rel_vf_id)7682 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7683 {
7684 struct ecore_public_vf_info *vf_info;
7685
7686 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7687
7688 if (!vf_info)
7689 return;
7690
7691 /* Clear the VF mac */
7692 memset(vf_info->forced_mac, 0, ETH_ALEN);
7693
7694 vf_info->forced_vlan = 0;
7695
7696 return;
7697 }
7698
7699 void
qlnx_osal_iov_vf_cleanup(void * p_hwfn,uint8_t relative_vf_id)7700 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7701 {
7702 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7703 return;
7704 }
7705
7706 static int
__qlnx_iov_chk_ucast(struct ecore_hwfn * p_hwfn,int vfid,struct ecore_filter_ucast * params)7707 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7708 struct ecore_filter_ucast *params)
7709 {
7710 struct ecore_public_vf_info *vf;
7711
7712 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7713 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7714 "VF[%d] vport not initialized\n", vfid);
7715 return ECORE_INVAL;
7716 }
7717
7718 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7719 if (!vf)
7720 return -EINVAL;
7721
7722 /* No real decision to make; Store the configured MAC */
7723 if (params->type == ECORE_FILTER_MAC ||
7724 params->type == ECORE_FILTER_MAC_VLAN)
7725 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7726
7727 return 0;
7728 }
7729
7730 int
qlnx_iov_chk_ucast(void * p_hwfn,int vfid,void * params)7731 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7732 {
7733 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7734 }
7735
7736 static int
__qlnx_iov_update_vport(struct ecore_hwfn * hwfn,uint8_t vfid,struct ecore_sp_vport_update_params * params,uint16_t * tlvs)7737 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7738 struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
7739 {
7740 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7741 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7742 "VF[%d] vport not initialized\n", vfid);
7743 return ECORE_INVAL;
7744 }
7745
7746 /* Untrusted VFs can't even be trusted to know that fact.
7747 * Simply indicate everything is configured fine, and trace
7748 * configuration 'behind their back'.
7749 */
7750 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7751 return 0;
7752
7753 return 0;
7754
7755 }
7756 int
qlnx_iov_update_vport(void * hwfn,uint8_t vfid,void * params,uint16_t * tlvs)7757 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7758 {
7759 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7760 }
7761
7762 static int
qlnx_find_hwfn_index(struct ecore_hwfn * p_hwfn)7763 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7764 {
7765 int i;
7766 struct ecore_dev *cdev;
7767
7768 cdev = p_hwfn->p_dev;
7769
7770 for (i = 0; i < cdev->num_hwfns; i++) {
7771 if (&cdev->hwfns[i] == p_hwfn)
7772 break;
7773 }
7774
7775 if (i >= cdev->num_hwfns)
7776 return (-1);
7777
7778 return (i);
7779 }
7780
7781 static int
__qlnx_pf_vf_msg(struct ecore_hwfn * p_hwfn,uint16_t rel_vf_id)7782 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7783 {
7784 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7785 int i;
7786
7787 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7788 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7789
7790 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7791 return (-1);
7792
7793 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7794 atomic_testandset_32(&ha->sriov_task[i].flags,
7795 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7796
7797 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7798 &ha->sriov_task[i].pf_task);
7799 }
7800
7801 return (ECORE_SUCCESS);
7802 }
7803
7804 int
qlnx_pf_vf_msg(void * p_hwfn,uint16_t relative_vf_id)7805 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7806 {
7807 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7808 }
7809
7810 static void
__qlnx_vf_flr_update(struct ecore_hwfn * p_hwfn)7811 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7812 {
7813 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7814 int i;
7815
7816 if (!ha->sriov_initialized)
7817 return;
7818
7819 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7820 ha, p_hwfn->p_dev, p_hwfn);
7821
7822 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7823 return;
7824
7825 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7826 atomic_testandset_32(&ha->sriov_task[i].flags,
7827 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7828
7829 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7830 &ha->sriov_task[i].pf_task);
7831 }
7832
7833 return;
7834 }
7835
7836 void
qlnx_vf_flr_update(void * p_hwfn)7837 qlnx_vf_flr_update(void *p_hwfn)
7838 {
7839 __qlnx_vf_flr_update(p_hwfn);
7840
7841 return;
7842 }
7843
7844 #ifndef QLNX_VF
7845
7846 static void
qlnx_vf_bulleting_update(struct ecore_hwfn * p_hwfn)7847 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7848 {
7849 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7850 int i;
7851
7852 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7853 ha, p_hwfn->p_dev, p_hwfn);
7854
7855 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7856 return;
7857
7858 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7859 ha, p_hwfn->p_dev, p_hwfn, i);
7860
7861 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7862 atomic_testandset_32(&ha->sriov_task[i].flags,
7863 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7864
7865 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7866 &ha->sriov_task[i].pf_task);
7867 }
7868 }
7869
7870 static void
qlnx_initialize_sriov(qlnx_host_t * ha)7871 qlnx_initialize_sriov(qlnx_host_t *ha)
7872 {
7873 device_t dev;
7874 nvlist_t *pf_schema, *vf_schema;
7875 int iov_error;
7876
7877 dev = ha->pci_dev;
7878
7879 pf_schema = pci_iov_schema_alloc_node();
7880 vf_schema = pci_iov_schema_alloc_node();
7881
7882 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7883 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7884 IOV_SCHEMA_HASDEFAULT, FALSE);
7885 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7886 IOV_SCHEMA_HASDEFAULT, FALSE);
7887 pci_iov_schema_add_uint16(vf_schema, "num-queues",
7888 IOV_SCHEMA_HASDEFAULT, 1);
7889
7890 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7891
7892 if (iov_error != 0) {
7893 ha->sriov_initialized = 0;
7894 } else {
7895 device_printf(dev, "SRIOV initialized\n");
7896 ha->sriov_initialized = 1;
7897 }
7898
7899 return;
7900 }
7901
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	int i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			return;
		}
		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until the VF is disabled before
				 * releasing it.
				 */
				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
						qlnx_mdelay(__func__, 10);
					} else
						break;
				}
			}

			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
					ptt, j);
			else {
				QL_DPRINT1(ha,
					"Timeout waiting for VF's FLR to end\n");
			}
		}
		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}

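/*
 * qlnx_sriov_enable_qid_config
 *	Computes the queue, vport and RSS-engine indices a VF may use and
 *	fills them into the ecore VF-init parameters.
 */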
static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
	struct ecore_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * the PF has acquired the first ECORE_PF_L2_QUE queues, we start
	 * assigning VF queues sequentially from there.
	 */
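	/* For example (hypothetical values): with FEAT_NUM(hwfn,
	 * ECORE_PF_L2_QUE) == 16 and params->num_queues == 4, VF 0 would be
	 * assigned queues 16..19 and VF 1 queues 20..23.
	 */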
	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;

	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}

	/* The PF uses vport/RSS index 0 for itself; VF n gets index n + 1 */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;

	return;
}

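/*
 * qlnx_iov_init
 *	PCI SR-IOV "init" method: creates the PF taskqueues, validates the
 *	requested VF count against the available vports, allocates the
 *	per-VF attribute array and initializes the HW for each VF on every
 *	hardware function.
 */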
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t *ha;
	struct ecore_dev *cdev;
	struct ecore_iov_vf_init_params params;
	int ret, j, i;
	uint32_t max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(sizeof(qlnx_vf_attr_t) * num_vfs, M_QLNXBUF,
		M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
			(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
			16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}

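/*
 * qlnx_iov_uninit
 *	PCI SR-IOV "uninit" method: disables all VFs, tears down the PF
 *	taskqueues and frees the per-VF attribute array.
 */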
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t *ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha," dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;

	ha->num_vfs = 0;

	QL_DPRINT2(ha," dev = %p exit\n", dev);
	return;
}

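/*
 * qlnx_iov_add_vf
 *	PCI SR-IOV "add_vf" method: validates the VF index and, if a
 *	"mac-addr" was configured, stores it and publishes it to the VF via
 *	its bulletin board.
 */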
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t *ha;
	qlnx_vf_attr_t *vf_attr;
	const unsigned char *mac;
	size_t size;
	struct ecore_hwfn *p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);

	if (vfnum > (ha->num_vfs - 1)) {
		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
		return (-1);
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);
		p_hwfn = &ha->cdev.hwfns[0];
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
	return (0);
}

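/*
 * qlnx_handle_vf_msg
 *	Taskqueue handler for VF->PF mailbox messages: reads the pending
 *	event bitmap and processes the mailbox request of every VF whose
 *	bit is set. If no PTT can be acquired the task is re-armed.
 */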
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	QL_DPRINT2(ha, "Event mask of VF events:"
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy the VF's message to the PF's request buffer for that VF */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

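/*
 * qlnx_handle_vf_flr_update
 *	Taskqueue handler for VF FLR events; runs the ecore FLR cleanup and
 *	re-arms the task if the PTT cannot be acquired.
 */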
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int ret;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);

	if (ret) {
		QL_DPRINT1(ha,
			"ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
		/* Re-arm the FLR task so the cleanup is retried, matching
		 * the log message above.
		 */
		__qlnx_vf_flr_update(p_hwfn);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

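/*
 * qlnx_handle_bulletin_update
 *	Taskqueue handler that posts the current bulletin board to every VF
 *	of the given hardware function.
 */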
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

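/*
 * qlnx_pf_taskqueue
 *	PF task handler: atomically tests and clears each SR-IOV event flag
 *	(VF message, VF FLR, bulletin update) and dispatches to the
 *	corresponding handler.
 */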
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn *p_hwfn;
	qlnx_host_t *ha;
	int i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}

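/*
 * qlnx_create_pf_taskqueues
 *	Creates one single-threaded taskqueue ("ql_pf_tq_<n>") per hardware
 *	function to service SR-IOV events for that function.
 */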
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int i;
	char tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof(tq_name));
		snprintf(tq_name, sizeof(tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue,
			p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name,
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}

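/*
 * qlnx_destroy_pf_taskqueues
 *	Drains and frees the per-hardware-function PF taskqueues created by
 *	qlnx_create_pf_taskqueues().
 */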
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}
	return;
}

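/*
 * qlnx_inform_vf_link_state
 *	Propagates the PF's current link parameters, state and capabilities
 *	to the bulletin boards of all possible VFs, then schedules a
 *	bulletin update so the VFs pick up the change.
 */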
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update the bulletin of all possible future VFs with the link
	 * configuration.
	 */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */

		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set the speed according to the maximum supported
			 * by the HW: 40G for regular devices and 100G for
			 * CMT-mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
				100000 : link.speed;
		}
		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */
