1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qlnx_os.c
30  * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "qlnx_os.h"
37 #include "bcm_osal.h"
38 #include "reg_addr.h"
39 #include "ecore_gtt_reg_addr.h"
40 #include "ecore.h"
41 #include "ecore_chain.h"
42 #include "ecore_status.h"
43 #include "ecore_hw.h"
44 #include "ecore_rt_defs.h"
45 #include "ecore_init_ops.h"
46 #include "ecore_int.h"
47 #include "ecore_cxt.h"
48 #include "ecore_spq.h"
49 #include "ecore_init_fw_funcs.h"
50 #include "ecore_sp_commands.h"
51 #include "ecore_dev_api.h"
52 #include "ecore_l2_api.h"
53 #include "ecore_mcp.h"
54 #include "ecore_hw_defs.h"
55 #include "mcp_public.h"
56 #include "ecore_iro.h"
57 #include "nvm_cfg.h"
59 #include "ecore_dbg_fw_funcs.h"
60 #include "ecore_iov_api.h"
61 #include "ecore_vf_api.h"
62 
63 #include "qlnx_ioctl.h"
64 #include "qlnx_def.h"
65 #include "qlnx_ver.h"
66 
67 #ifdef QLNX_ENABLE_IWARP
68 #include "qlnx_rdma.h"
69 #endif /* #ifdef QLNX_ENABLE_IWARP */
70 
71 #include <sys/smp.h>
72 
73 /*
74  * static functions
75  */
76 /*
77  * ioctl related functions
78  */
79 static void qlnx_add_sysctls(qlnx_host_t *ha);
80 
81 /*
82  * main driver
83  */
84 static void qlnx_release(qlnx_host_t *ha);
85 static void qlnx_fp_isr(void *arg);
86 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
87 static void qlnx_init(void *arg);
88 static void qlnx_init_locked(qlnx_host_t *ha);
89 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
90 static int qlnx_set_promisc(qlnx_host_t *ha);
91 static int qlnx_set_allmulti(qlnx_host_t *ha);
92 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
93 static int qlnx_media_change(if_t ifp);
94 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
95 static void qlnx_stop(qlnx_host_t *ha);
96 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
97 		struct mbuf **m_headp);
98 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
99 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
100 			struct qlnx_link_output *if_link);
101 static int qlnx_transmit(if_t ifp, struct mbuf  *mp);
102 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
103 		struct mbuf *mp);
104 static void qlnx_qflush(if_t ifp);
105 
106 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
107 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
108 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
109 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
110 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
111 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
112 
113 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
114 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
115 
116 static int qlnx_nic_setup(struct ecore_dev *cdev,
117 		struct ecore_pf_params *func_params);
118 static int qlnx_nic_start(struct ecore_dev *cdev);
119 static int qlnx_slowpath_start(qlnx_host_t *ha);
120 static int qlnx_slowpath_stop(qlnx_host_t *ha);
121 static int qlnx_init_hw(qlnx_host_t *ha);
122 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
123 		char ver_str[VER_SIZE]);
124 static void qlnx_unload(qlnx_host_t *ha);
125 static int qlnx_load(qlnx_host_t *ha);
126 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
127 		uint32_t add_mac);
128 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
129 		uint32_t len);
130 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
131 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
132 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
133 		struct qlnx_rx_queue *rxq);
134 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
135 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
136 		int hwfn_index);
137 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
138 		int hwfn_index);
139 static void qlnx_timer(void *arg);
140 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
141 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
142 static void qlnx_trigger_dump(qlnx_host_t *ha);
143 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
144 			struct qlnx_tx_queue *txq);
145 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
146 		struct qlnx_tx_queue *txq);
147 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
148 		int lro_enable);
149 static void qlnx_fp_taskqueue(void *context, int pending);
150 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
151 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
152 		struct qlnx_agg_info *tpa);
153 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
154 
155 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
156 
157 /*
158  * Hooks to the Operating Systems
159  */
160 static int qlnx_pci_probe (device_t);
161 static int qlnx_pci_attach (device_t);
162 static int qlnx_pci_detach (device_t);
163 
164 #ifndef QLNX_VF
165 
166 #ifdef CONFIG_ECORE_SRIOV
167 
168 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
169 static void qlnx_iov_uninit(device_t dev);
170 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
171 static void qlnx_initialize_sriov(qlnx_host_t *ha);
172 static void qlnx_pf_taskqueue(void *context, int pending);
173 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
174 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
175 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
176 
177 #endif /* #ifdef CONFIG_ECORE_SRIOV */
178 
179 static device_method_t qlnx_pci_methods[] = {
180 	/* Device interface */
181 	DEVMETHOD(device_probe, qlnx_pci_probe),
182 	DEVMETHOD(device_attach, qlnx_pci_attach),
183 	DEVMETHOD(device_detach, qlnx_pci_detach),
184 
185 #ifdef CONFIG_ECORE_SRIOV
186 	DEVMETHOD(pci_iov_init, qlnx_iov_init),
187 	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
188 	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
189 #endif /* #ifdef CONFIG_ECORE_SRIOV */
190 	{ 0, 0 }
191 };
192 
193 static driver_t qlnx_pci_driver = {
194 	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
195 };
196 
197 MODULE_VERSION(if_qlnxe, 1);
198 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);
199 
200 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
201 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
202 
203 #else
204 
205 static device_method_t qlnxv_pci_methods[] = {
206 	/* Device interface */
207 	DEVMETHOD(device_probe, qlnx_pci_probe),
208 	DEVMETHOD(device_attach, qlnx_pci_attach),
209 	DEVMETHOD(device_detach, qlnx_pci_detach),
210 	{ 0, 0 }
211 };
212 
213 static driver_t qlnxv_pci_driver = {
214 	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
215 };
216 
217 MODULE_VERSION(if_qlnxev, 1);
218 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);
219 
220 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
221 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
222 
223 #endif /* #ifdef QLNX_VF */
224 
225 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
226 
227 char qlnx_dev_str[128];
228 char qlnx_ver_str[VER_SIZE];
229 char qlnx_name_str[NAME_SIZE];
230 
231 /*
232  * Some PCI Configuration Space Related Defines
233  */
234 
235 #ifndef PCI_VENDOR_QLOGIC
236 #define PCI_VENDOR_QLOGIC		0x1077
237 #endif
238 
239 /* 40G Adapter QLE45xxx */
240 #ifndef QLOGIC_PCI_DEVICE_ID_1634
241 #define QLOGIC_PCI_DEVICE_ID_1634	0x1634
242 #endif
243 
244 /* 100G Adapter QLE45xxx */
245 #ifndef QLOGIC_PCI_DEVICE_ID_1644
246 #define QLOGIC_PCI_DEVICE_ID_1644	0x1644
247 #endif
248 
249 /* 25G Adapter QLE45xxx */
250 #ifndef QLOGIC_PCI_DEVICE_ID_1656
251 #define QLOGIC_PCI_DEVICE_ID_1656	0x1656
252 #endif
253 
254 /* 50G Adapter QLE45xxx */
255 #ifndef QLOGIC_PCI_DEVICE_ID_1654
256 #define QLOGIC_PCI_DEVICE_ID_1654	0x1654
257 #endif
258 
259 /* 10G/25G/40G Adapter QLE41xxx */
260 #ifndef QLOGIC_PCI_DEVICE_ID_8070
261 #define QLOGIC_PCI_DEVICE_ID_8070	0x8070
262 #endif
263 
264 /* SRIOV Device (All Speeds) Adapter QLE41xxx */
265 #ifndef QLOGIC_PCI_DEVICE_ID_8090
266 #define QLOGIC_PCI_DEVICE_ID_8090	0x8090
267 #endif
268 
269 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
270     "qlnxe driver parameters");
271 
272 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
273 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
274 
275 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
276 		&qlnxe_queue_count, 0, "Multi-Queue queue count");
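
/*
 * queue_count is a loader tunable (CTLFLAG_RDTUN); as an illustrative
 * example (name derived from the sysctl declaration above) it can be set
 * from loader.conf(5):
 *
 *	hw.qlnxe.queue_count="4"
 */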
277 
278 /*
279  * Note on RDMA personality setting
280  *
281  * Read the personality configured in NVRAM.
282  * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
283  * the personality configured in sysctl is QLNX_PERSONALITY_DEFAULT,
284  * use the personality in NVRAM.
285  *
286  * Otherwise use the personality configured in sysctl.
287  *
288  */
289 #define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
290 #define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
291 #define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
292 #define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
293 #define QLNX_PERSONALITY_BITS_PER_FUNC	4
294 #define QLNX_PERSONALITY_MASK		0xF
295 
296 /* RDMA configuration; the 64-bit field allows a setting for each of 16 physical functions */
297 static uint64_t qlnxe_rdma_configuration = 0x22222222;
298 
299 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
300                 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
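
/*
 * Worked example of the encoding (a sketch using only the defines above):
 * each PCI function owns a 4-bit nibble of qlnxe_rdma_configuration. With
 * the default value 0x22222222, functions 0-7 each decode nibble 0x2
 * (QLNX_PERSONALITY_ETH_IWARP) while functions 8-15 decode 0x0
 * (QLNX_PERSONALITY_DEFAULT, i.e. use the personality in NVRAM), e.g.
 * for pci_func 3:
 *
 *	((0x22222222 >> (3 * QLNX_PERSONALITY_BITS_PER_FUNC)) &
 *	    QLNX_PERSONALITY_MASK) == 0x2	-> ETH_IWARP
 */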
301 
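/*
 * Return convention: qlnx_vf_device() returns 0 when the device is the
 * SRIOV virtual function (device id 0x8090) and -1 otherwise, so callers
 * test "!= 0" to mean "this is a physical function".
 */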
302 int
303 qlnx_vf_device(qlnx_host_t *ha)
304 {
305         uint16_t	device_id;
306 
307         device_id = ha->device_id;
308 
309         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
310                 return 0;
311 
312         return -1;
313 }
314 
315 static int
316 qlnx_valid_device(qlnx_host_t *ha)
317 {
318         uint16_t device_id;
319 
320         device_id = ha->device_id;
321 
322 #ifndef QLNX_VF
323         if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
324                 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
325                 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
326                 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
327                 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
328                 return 0;
329 #else
330         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
331 		return 0;
332 
333 #endif /* #ifndef QLNX_VF */
334         return -1;
335 }
336 
337 #ifdef QLNX_ENABLE_IWARP
338 static int
339 qlnx_rdma_supported(struct qlnx_host *ha)
340 {
341 	uint16_t device_id;
342 
343 	device_id = pci_get_device(ha->pci_dev);
344 
345 	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
346 		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
347 		(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
348 		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
349 		return (0);
350 
351 	return (-1);
352 }
353 #endif /* #ifdef QLNX_ENABLE_IWARP */
354 
355 /*
356  * Name:	qlnx_pci_probe
357  * Function:	Validate that the PCI device is a supported QLogic adapter
358  */
359 static int
360 qlnx_pci_probe(device_t dev)
361 {
362 	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
363 		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
364 	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
365 
366 	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
367                 return (ENXIO);
368 	}
369 
370         switch (pci_get_device(dev)) {
371 #ifndef QLNX_VF
372 
373         case QLOGIC_PCI_DEVICE_ID_1644:
374 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
375 			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
376 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
377 			QLNX_VERSION_BUILD);
378                 device_set_desc_copy(dev, qlnx_dev_str);
379 
380                 break;
381 
382         case QLOGIC_PCI_DEVICE_ID_1634:
383 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
384 			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
385 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
386 			QLNX_VERSION_BUILD);
387                 device_set_desc_copy(dev, qlnx_dev_str);
388 
389                 break;
390 
391         case QLOGIC_PCI_DEVICE_ID_1656:
392 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
393 			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
394 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
395 			QLNX_VERSION_BUILD);
396                 device_set_desc_copy(dev, qlnx_dev_str);
397 
398                 break;
399 
400         case QLOGIC_PCI_DEVICE_ID_1654:
401 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
402 			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
403 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
404 			QLNX_VERSION_BUILD);
405                 device_set_desc_copy(dev, qlnx_dev_str);
406 
407                 break;
408 
409 	case QLOGIC_PCI_DEVICE_ID_8070:
410 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
411 			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
412 			" Adapter-Ethernet Function",
413 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
414 			QLNX_VERSION_BUILD);
415 		device_set_desc_copy(dev, qlnx_dev_str);
416 
417 		break;
418 
419 #else
420 	case QLOGIC_PCI_DEVICE_ID_8090:
421 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
422 			"Qlogic SRIOV PCI CNA (AH) "
423 			"Adapter-Ethernet Function",
424 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
425 			QLNX_VERSION_BUILD);
426 		device_set_desc_copy(dev, qlnx_dev_str);
427 
428 		break;
429 
430 #endif /* #ifndef QLNX_VF */
431 
432         default:
433                 return (ENXIO);
434         }
435 
436 #ifdef QLNX_ENABLE_IWARP
437 	qlnx_rdma_init();
438 #endif /* #ifdef QLNX_ENABLE_IWARP */
439 
440         return (BUS_PROBE_DEFAULT);
441 }
442 
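/*
 * qlnx_num_tx_compl() counts pending Tx completions across 16-bit index
 * wrap-around. Illustrative example (not from the source): if the hardware
 * consumer has wrapped to hw_bd_cons = 0x0002 while the chain consumer is
 * still ecore_cons_idx = 0xFFFE, the pending count is
 * (1 << 16) - (0xFFFE - 0x0002) = 4 completions.
 */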
443 static uint16_t
444 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
445 	struct qlnx_tx_queue *txq)
446 {
447 	uint16_t hw_bd_cons;
448 	uint16_t ecore_cons_idx;
449 	uint16_t diff;
450 
451 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
452 
453 	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
454 	if (hw_bd_cons < ecore_cons_idx) {
455 		diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
456 	} else {
457 		diff = hw_bd_cons - ecore_cons_idx;
458 	}
459 	return diff;
460 }
461 
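/*
 * Slowpath interrupt handling: the ISR only identifies which hw function
 * raised the interrupt and defers the actual work to that hwfn's taskqueue
 * (qlnx_sp_taskqueue -> qlnx_sp_isr), keeping interrupt context short.
 */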
462 static void
463 qlnx_sp_intr(void *arg)
464 {
465 	struct ecore_hwfn	*p_hwfn;
466 	qlnx_host_t		*ha;
467 	int			i;
468 
469 	p_hwfn = arg;
470 
471 	if (p_hwfn == NULL) {
472 		printf("%s: spurious slowpath intr\n", __func__);
473 		return;
474 	}
475 
476 	ha = (qlnx_host_t *)p_hwfn->p_dev;
477 
478 	QL_DPRINT2(ha, "enter\n");
479 
480 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
481 		if (&ha->cdev.hwfns[i] == p_hwfn) {
482 			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
483 			break;
484 		}
485 	}
486 	QL_DPRINT2(ha, "exit\n");
487 
488 	return;
489 }
490 
491 static void
492 qlnx_sp_taskqueue(void *context, int pending)
493 {
494 	struct ecore_hwfn	*p_hwfn;
495 
496 	p_hwfn = context;
497 
498 	if (p_hwfn != NULL) {
499 		qlnx_sp_isr(p_hwfn);
500 	}
501 	return;
502 }
503 
504 static int
505 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
506 {
507 	int	i;
508 	char	tq_name[32];
509 
510 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
511                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
512 
513 		bzero(tq_name, sizeof (tq_name));
514 		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
515 
516 		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
517 
518 		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
519 			 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
520 
521 		if (ha->sp_taskqueue[i] == NULL)
522 			return (-1);
523 
524 		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
525 			tq_name);
526 
527 		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
528 	}
529 
530 	return (0);
531 }
532 
533 static void
534 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
535 {
536 	int	i;
537 
538 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
539 		if (ha->sp_taskqueue[i] != NULL) {
540 			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
541 			taskqueue_free(ha->sp_taskqueue[i]);
542 		}
543 	}
544 	return;
545 }
546 
547 static void
548 qlnx_fp_taskqueue(void *context, int pending)
549 {
550         struct qlnx_fastpath	*fp;
551         qlnx_host_t		*ha;
552         if_t			ifp;
#ifdef QLNX_TRACE_PERF_DATA
        uint64_t		tx_pkts = 0, tx_compl = 0;	/* snapshots used by the perf-data block below */
#endif
553 
554         fp = context;
555 
556         if (fp == NULL)
557                 return;
558 
559 	ha = (qlnx_host_t *)fp->edev;
560 
561 	ifp = ha->ifp;
562 
563         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
564                 if (!drbr_empty(ifp, fp->tx_br)) {
565                         if (mtx_trylock(&fp->tx_mtx)) {
566 #ifdef QLNX_TRACE_PERF_DATA
567                                 tx_pkts = fp->tx_pkts_transmitted;
568                                 tx_compl = fp->tx_pkts_completed;
569 #endif
570 
571                                 qlnx_transmit_locked(ifp, fp, NULL);
572 
573 #ifdef QLNX_TRACE_PERF_DATA
574                                 fp->tx_pkts_trans_fp +=
575 					(fp->tx_pkts_transmitted - tx_pkts);
576                                 fp->tx_pkts_compl_fp +=
577 					(fp->tx_pkts_completed - tx_compl);
578 #endif
579                                 mtx_unlock(&fp->tx_mtx);
580                         }
581                 }
582         }
583 
584         QL_DPRINT2(ha, "exit \n");
585         return;
586 }
587 
588 static int
589 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
590 {
591 	int	i;
592 	char	tq_name[32];
593 	struct qlnx_fastpath *fp;
594 
595 	for (i = 0; i < ha->num_rss; i++) {
596                 fp = &ha->fp_array[i];
597 
598 		bzero(tq_name, sizeof (tq_name));
599 		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
600 
601 		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
602 
603 		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
604 					taskqueue_thread_enqueue,
605 					&fp->fp_taskqueue);
606 
607 		if (fp->fp_taskqueue == NULL)
608 			return (-1);
609 
610 		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
611 			tq_name);
612 
613 		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
614 	}
615 
616 	return (0);
617 }
618 
619 static void
620 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
621 {
622 	int			i;
623 	struct qlnx_fastpath	*fp;
624 
625 	for (i = 0; i < ha->num_rss; i++) {
626                 fp = &ha->fp_array[i];
627 
628 		if (fp->fp_taskqueue != NULL) {
629 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
630 			taskqueue_free(fp->fp_taskqueue);
631 			fp->fp_taskqueue = NULL;
632 		}
633 	}
634 	return;
635 }
636 
637 static void
638 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
639 {
640 	int			i;
641 	struct qlnx_fastpath	*fp;
642 
643 	for (i = 0; i < ha->num_rss; i++) {
644                 fp = &ha->fp_array[i];
645 
646 		if (fp->fp_taskqueue != NULL) {
647 			QLNX_UNLOCK(ha);
648 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
649 			QLNX_LOCK(ha);
650 		}
651 	}
652 	return;
653 }
654 
655 static void
656 qlnx_get_params(qlnx_host_t *ha)
657 {
658 	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
659 		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
660 			qlnxe_queue_count);
661 		qlnxe_queue_count = 0;
662 	}
663 	return;
664 }
665 
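/*
 * Error recovery sequence, run from a dedicated taskqueue: stop the
 * interface, restart the slowpath (re-registering the RDMA device around
 * the restart when iWARP is enabled), re-initialize the interface and
 * re-arm the periodic timer.
 */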
666 static void
667 qlnx_error_recovery_taskqueue(void *context, int pending)
668 {
669         qlnx_host_t *ha;
670 
671         ha = context;
672 
673         QL_DPRINT2(ha, "enter\n");
674 
675         QLNX_LOCK(ha);
676         qlnx_stop(ha);
677         QLNX_UNLOCK(ha);
678 
679 #ifdef QLNX_ENABLE_IWARP
680 	qlnx_rdma_dev_remove(ha);
681 #endif /* #ifdef QLNX_ENABLE_IWARP */
682 
683         qlnx_slowpath_stop(ha);
684         qlnx_slowpath_start(ha);
685 
686 #ifdef QLNX_ENABLE_IWARP
687 	qlnx_rdma_dev_add(ha);
688 #endif /* #ifdef QLNX_ENABLE_IWARP */
689 
690         qlnx_init(ha);
691 
692         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
693 
694         QL_DPRINT2(ha, "exit\n");
695 
696         return;
697 }
698 
699 static int
700 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
701 {
702         char tq_name[32];
703 
704         bzero(tq_name, sizeof (tq_name));
705         snprintf(tq_name, sizeof (tq_name), "ql_err_tq");
706 
707         TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
708 
709         ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
710                                 taskqueue_thread_enqueue, &ha->err_taskqueue);
711 
712         if (ha->err_taskqueue == NULL)
713                 return (-1);
714 
715         taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
716 
717         QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);
718 
719         return (0);
720 }
721 
722 static void
723 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
724 {
725         if (ha->err_taskqueue != NULL) {
726                 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
727                 taskqueue_free(ha->err_taskqueue);
728         }
729 
730         ha->err_taskqueue = NULL;
731 
732         return;
733 }
734 
735 /*
736  * Name:	qlnx_pci_attach
737  * Function:	attaches the device to the operating system
738  */
739 static int
740 qlnx_pci_attach(device_t dev)
741 {
742 	qlnx_host_t	*ha = NULL;
743 	uint32_t	rsrc_len_reg __unused = 0;
744 	uint32_t	rsrc_len_dbells = 0;
745 	uint32_t	rsrc_len_msix __unused = 0;
746 	int		i;
747 	uint32_t	mfw_ver;
748 	uint32_t	num_sp_msix = 0;
749 	uint32_t	num_rdma_irqs = 0;
750 
751         if ((ha = device_get_softc(dev)) == NULL) {
752                 device_printf(dev, "cannot get softc\n");
753                 return (ENOMEM);
754         }
755 
756         memset(ha, 0, sizeof (qlnx_host_t));
757 
758         ha->device_id = pci_get_device(dev);
759 
760         if (qlnx_valid_device(ha) != 0) {
761                 device_printf(dev, "device is not a valid device\n");
762                 return (ENXIO);
763 	}
764         ha->pci_func = pci_get_function(dev);
765 
766         ha->pci_dev = dev;
767 
768 	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
769 
770         ha->flags.lock_init = 1;
771 
772         pci_enable_busmaster(dev);
773 
774 	/*
775 	 * map the PCI BARs
776 	 */
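	/*
	 * (BAR0 at PCIR_BAR(0) maps the device registers, BAR1 at
	 * PCIR_BAR(2) the doorbells, and BAR2 at PCIR_BAR(4) the MSI-X
	 * table, per the allocations below.)
	 */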
777 
778         ha->reg_rid = PCIR_BAR(0);
779         ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
780                                 RF_ACTIVE);
781 
782         if (ha->pci_reg == NULL) {
783                 device_printf(dev, "unable to map BAR0\n");
784                 goto qlnx_pci_attach_err;
785         }
786 
787         rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
788                                         ha->reg_rid);
789 
790 	ha->dbells_rid = PCIR_BAR(2);
791 	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
792 					SYS_RES_MEMORY,
793 					ha->dbells_rid);
794 	if (rsrc_len_dbells) {
795 		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
796 					&ha->dbells_rid, RF_ACTIVE);
797 
798 		if (ha->pci_dbells == NULL) {
799 			device_printf(dev, "unable to map BAR1\n");
800 			goto qlnx_pci_attach_err;
801 		}
802 		ha->dbells_phys_addr = (uint64_t)
803 			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
804 
805 		ha->dbells_size = rsrc_len_dbells;
806 	} else {
807 		if (qlnx_vf_device(ha) != 0) {
808 			device_printf(dev, "BAR1 size is zero\n");
809 			goto qlnx_pci_attach_err;
810 		}
811 	}
812 
813         ha->msix_rid = PCIR_BAR(4);
814         ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
815                         &ha->msix_rid, RF_ACTIVE);
816 
817         if (ha->msix_bar == NULL) {
818                 device_printf(dev, "unable to map BAR2\n");
819                 goto qlnx_pci_attach_err;
820 	}
821 
822         rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
823                                         ha->msix_rid);
824 
825 	ha->dbg_level = 0x0000;
826 
827 	QL_DPRINT1(ha, "\n\t\t\t"
828 		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
829 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
830 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
831 		" msix_avail = 0x%x "
832 		"\n\t\t\t[ncpus = %d]\n",
833 		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
834 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
835 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
836 		mp_ncpus);
837 	/*
838 	 * allocate dma tags
839 	 */
840 
841 	if (qlnx_alloc_parent_dma_tag(ha))
842                 goto qlnx_pci_attach_err;
843 
844 	if (qlnx_alloc_tx_dma_tag(ha))
845                 goto qlnx_pci_attach_err;
846 
847 	if (qlnx_alloc_rx_dma_tag(ha))
848                 goto qlnx_pci_attach_err;
849 
850 
851 	if (qlnx_init_hw(ha) != 0)
852 		goto qlnx_pci_attach_err;
853 
854         ha->flags.hw_init = 1;
855 
856 	qlnx_get_params(ha);
857 
858 	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
859 		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
860 		qlnxe_queue_count = QLNX_MAX_RSS;
861 	}
862 
863 	/*
864 	 * Allocate MSI-x vectors
865 	 */
866 	if (qlnx_vf_device(ha) != 0) {
867 		if (qlnxe_queue_count == 0)
868 			ha->num_rss = QLNX_DEFAULT_RSS;
869 		else
870 			ha->num_rss = qlnxe_queue_count;
871 
872 		num_sp_msix = ha->cdev.num_hwfns;
873 	} else {
874 		uint8_t max_rxq;
875 		uint8_t max_txq;
876 
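		/*
		 * NB: the rx queue count is read into both limits here; the
		 * VF is assumed to expose as many tx queues as rx queues.
		 */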
877 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
878 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
879 
880 		if (max_rxq < max_txq)
881 			ha->num_rss = max_rxq;
882 		else
883 			ha->num_rss = max_txq;
884 
885 		if (ha->num_rss > QLNX_MAX_VF_RSS)
886 			ha->num_rss = QLNX_MAX_VF_RSS;
887 
888 		num_sp_msix = 0;
889 	}
890 
891 	if (ha->num_rss > mp_ncpus)
892 		ha->num_rss = mp_ncpus;
893 
894 	ha->num_tc = QLNX_MAX_TC;
895 
896         ha->msix_count = pci_msix_count(dev);
897 
898 #ifdef QLNX_ENABLE_IWARP
899 
900 	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);
901 
902 #endif /* #ifdef QLNX_ENABLE_IWARP */
903 
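	/*
	 * MSI-X budget (illustrative arithmetic, values per the code below):
	 * num_sp_msix slowpath vectors, one fastpath vector per RSS queue,
	 * plus num_rdma_irqs. E.g. with 2 hwfns (num_sp_msix = 2), no RDMA
	 * and num_rss = 4, at least 3 vectors must be available and at most
	 * 6 are allocated.
	 */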
904         if (!ha->msix_count ||
905 		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
906                 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
907                         ha->msix_count);
908                 goto qlnx_pci_attach_err;
909         }
910 
911 	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
912 		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
913 	else
914 		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
915 
916 	QL_DPRINT1(ha, "\n\t\t\t"
917 		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
918 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
919 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
920 		" msix_avail = 0x%x msix_alloc = 0x%x"
921 		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
922 		 ha->pci_reg, rsrc_len_reg,
923 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
924 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
925 		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
926 
927         if (pci_alloc_msix(dev, &ha->msix_count)) {
928                 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
929                         ha->msix_count);
930                 ha->msix_count = 0;
931                 goto qlnx_pci_attach_err;
932         }
933 
934 	/*
935 	 * Initialize slow path interrupt and task queue
936 	 */
937 
938 	if (num_sp_msix) {
939 		if (qlnx_create_sp_taskqueues(ha) != 0)
940 			goto qlnx_pci_attach_err;
941 
942 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
943 			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
944 
945 			ha->sp_irq_rid[i] = i + 1;
946 			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
947 						&ha->sp_irq_rid[i],
948 						(RF_ACTIVE | RF_SHAREABLE));
949 			if (ha->sp_irq[i] == NULL) {
950                 		device_printf(dev,
951 					"could not allocate slowpath interrupt\n");
952 				goto qlnx_pci_attach_err;
953 			}
954 
955 			if (bus_setup_intr(dev, ha->sp_irq[i],
956 				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
957 				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
958 				device_printf(dev,
959 					"could not setup slow path interrupt\n");
960 				goto qlnx_pci_attach_err;
961 			}
962 
963 			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
964 				" sp_irq %p sp_handle %p\n", p_hwfn,
965 				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
966 		}
967 	}
968 
969 	/*
970 	 * initialize fast path interrupt
971 	 */
972 	if (qlnx_create_fp_taskqueues(ha) != 0)
973 		goto qlnx_pci_attach_err;
974 
975         for (i = 0; i < ha->num_rss; i++) {
976                 ha->irq_vec[i].rss_idx = i;
977                 ha->irq_vec[i].ha = ha;
978                 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
979 
980                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
981                                 &ha->irq_vec[i].irq_rid,
982                                 (RF_ACTIVE | RF_SHAREABLE));
983 
984                 if (ha->irq_vec[i].irq == NULL) {
985                         device_printf(dev,
986 				"could not allocate interrupt[%d] irq_rid = %d\n",
987 				i, ha->irq_vec[i].irq_rid);
988                         goto qlnx_pci_attach_err;
989                 }
990 
991 		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
992                         device_printf(dev, "could not allocate tx_br[%d]\n", i);
993                         goto qlnx_pci_attach_err;
994 		}
995 	}
996 
997 	if (qlnx_vf_device(ha) != 0) {
998 		callout_init(&ha->qlnx_callout, 1);
999 		ha->flags.callout_init = 1;
1000 
1001 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
1002 			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
1003 				goto qlnx_pci_attach_err;
1004 			if (ha->grcdump_size[i] == 0)
1005 				goto qlnx_pci_attach_err;
1006 
1007 			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
1008 			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
1009 				i, ha->grcdump_size[i]);
1010 
1011 			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
1012 			if (ha->grcdump[i] == NULL) {
1013 				device_printf(dev, "grcdump alloc[%d] failed\n", i);
1014 				goto qlnx_pci_attach_err;
1015 			}
1016 
1017 			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1018 				goto qlnx_pci_attach_err;
1019 			if (ha->idle_chk_size[i] == 0)
1020 				goto qlnx_pci_attach_err;
1021 
1022 			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1023 			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
1024 				i, ha->idle_chk_size[i]);
1025 
1026 			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1027 
1028 			if (ha->idle_chk[i] == NULL) {
1029 				device_printf(dev, "idle_chk alloc failed\n");
1030 				goto qlnx_pci_attach_err;
1031 			}
1032 		}
1033 
1034 		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
1035 			goto qlnx_pci_attach_err;
1036 	}
1037 
1038 	if (qlnx_slowpath_start(ha) != 0)
1039 		goto qlnx_pci_attach_err;
1040 	else
1041 		ha->flags.slowpath_start = 1;
1042 
1043 	if (qlnx_vf_device(ha) != 0) {
1044 		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1045 			qlnx_mdelay(__func__, 1000);
1046 			qlnx_trigger_dump(ha);
1047 
1048 			goto qlnx_pci_attach_err0;
1049 		}
1050 
1051 		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
1052 			qlnx_mdelay(__func__, 1000);
1053 			qlnx_trigger_dump(ha);
1054 
1055 			goto qlnx_pci_attach_err0;
1056 		}
1057 	} else {
1058 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1059 		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
1060 	}
1061 
1062 	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1063 		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
1064 		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
1065 	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1066 		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1067 		FW_ENGINEERING_VERSION);
1068 
1069 	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
1070 		 ha->stormfw_ver, ha->mfw_ver);
1071 
1072 	qlnx_init_ifnet(dev, ha);
1073 
1074 	/*
1075 	 * add sysctls
1076 	 */
1077 	qlnx_add_sysctls(ha);
1078 
1079 qlnx_pci_attach_err0:
1080         /*
1081 	 * create ioctl device interface
1082 	 */
1083 	if (qlnx_vf_device(ha) != 0) {
1084 		if (qlnx_make_cdev(ha)) {
1085 			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
1086 			goto qlnx_pci_attach_err;
1087 		}
1088 
1089 #ifdef QLNX_ENABLE_IWARP
1090 		qlnx_rdma_dev_add(ha);
1091 #endif /* #ifdef QLNX_ENABLE_IWARP */
1092 	}
1093 
1094 #ifndef QLNX_VF
1095 #ifdef CONFIG_ECORE_SRIOV
1096 
1097 	if (qlnx_vf_device(ha) != 0)
1098 		qlnx_initialize_sriov(ha);
1099 
1100 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1101 #endif /* #ifdef QLNX_VF */
1102 
1103 	QL_DPRINT2(ha, "success\n");
1104 
1105         return (0);
1106 
1107 qlnx_pci_attach_err:
1108 
1109 	qlnx_release(ha);
1110 
1111 	return (ENXIO);
1112 }
1113 
1114 /*
1115  * Name:	qlnx_pci_detach
1116  * Function:	Unhooks the device from the operating system
1117  */
1118 static int
1119 qlnx_pci_detach(device_t dev)
1120 {
1121 	qlnx_host_t	*ha = NULL;
1122 
1123         if ((ha = device_get_softc(dev)) == NULL) {
1124                 device_printf(dev, "%s: cannot get softc\n", __func__);
1125                 return (ENOMEM);
1126         }
1127 
1128 	if (qlnx_vf_device(ha) != 0) {
1129 #ifdef CONFIG_ECORE_SRIOV
1130 		int ret;
1131 
1132 		ret = pci_iov_detach(dev);
1133 		if (ret) {
1134                 	device_printf(dev, "%s: SRIOV in use\n", __func__);
1135 			return (ret);
1136 		}
1137 
1138 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1139 
1140 #ifdef QLNX_ENABLE_IWARP
1141 		if (qlnx_rdma_dev_remove(ha) != 0)
1142 			return (EBUSY);
1143 #endif /* #ifdef QLNX_ENABLE_IWARP */
1144 	}
1145 
1146 	QLNX_LOCK(ha);
1147 	qlnx_stop(ha);
1148 	QLNX_UNLOCK(ha);
1149 
1150 	qlnx_release(ha);
1151 
1152         return (0);
1153 }
1154 
1155 #ifdef QLNX_ENABLE_IWARP
1156 
1157 static uint8_t
1158 qlnx_get_personality(uint8_t pci_func)
1159 {
1160 	uint8_t personality;
1161 
1162 	personality = (qlnxe_rdma_configuration >>
1163 				(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
1164 				QLNX_PERSONALITY_MASK;
1165 	return (personality);
1166 }
1167 
1168 static void
1169 qlnx_set_personality(qlnx_host_t *ha)
1170 {
1171 	uint8_t personality;
1172 
1173 	personality = qlnx_get_personality(ha->pci_func);
1174 
1175 	switch (personality) {
1176 	case QLNX_PERSONALITY_DEFAULT:
1177                	device_printf(ha->pci_dev, "%s: DEFAULT\n",
1178 			__func__);
1179 		ha->personality = ECORE_PCI_DEFAULT;
1180 		break;
1181 
1182 	case QLNX_PERSONALITY_ETH_ONLY:
1183                	device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1184 			__func__);
1185 		ha->personality = ECORE_PCI_ETH;
1186 		break;
1187 
1188 	case QLNX_PERSONALITY_ETH_IWARP:
1189                	device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1190 			__func__);
1191 		ha->personality = ECORE_PCI_ETH_IWARP;
1192 		break;
1193 
1194 	case QLNX_PERSONALITY_ETH_ROCE:
1195                	device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1196 			__func__);
1197 		ha->personality = ECORE_PCI_ETH_ROCE;
1198 		break;
1199 	}
1200 
1201 	return;
1202 }
1203 
1204 #endif /* #ifdef QLNX_ENABLE_IWARP */
1205 
1206 static int
1207 qlnx_init_hw(qlnx_host_t *ha)
1208 {
1209 	int				rval = 0;
1210 	struct ecore_hw_prepare_params	params;
1211 
1212 	ecore_init_struct(&ha->cdev);
1213 
1214 	/* ha->dp_module = ECORE_MSG_PROBE |
1215 				ECORE_MSG_INTR |
1216 				ECORE_MSG_SP |
1217 				ECORE_MSG_LINK |
1218 				ECORE_MSG_SPQ |
1219 				ECORE_MSG_RDMA;
1220 	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1221 	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1222 	ha->dp_level = ECORE_LEVEL_NOTICE;
1223 	//ha->dp_level = ECORE_LEVEL_VERBOSE;
1224 
1225 	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1226 
1227 	ha->cdev.regview = ha->pci_reg;
1228 
1229 	ha->personality = ECORE_PCI_DEFAULT;
1230 
1231 	if (qlnx_vf_device(ha) == 0) {
1232 		ha->cdev.b_is_vf = true;
1233 
1234 		if (ha->pci_dbells != NULL) {
1235 			ha->cdev.doorbells = ha->pci_dbells;
1236 			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1237 			ha->cdev.db_size = ha->dbells_size;
1238 		} else {
1239 			ha->pci_dbells = ha->pci_reg;
1240 		}
1241 	} else {
1242 		ha->cdev.doorbells = ha->pci_dbells;
1243 		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1244 		ha->cdev.db_size = ha->dbells_size;
1245 
1246 #ifdef QLNX_ENABLE_IWARP
1247 
1248 		if (qlnx_rdma_supported(ha) == 0)
1249 			qlnx_set_personality(ha);
1250 
1251 #endif /* #ifdef QLNX_ENABLE_IWARP */
1252 	}
1253 	QL_DPRINT2(ha, "%s: %s\n", __func__,
1254 		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1255 
1256 	bzero(&params, sizeof (struct ecore_hw_prepare_params));
1257 
1258 	params.personality = ha->personality;
1259 
1260 	params.drv_resc_alloc = false;
1261 	params.chk_reg_fifo = false;
1262 	params.initiate_pf_flr = true;
1263 	params.epoch = 0;
1264 
1265 	ecore_hw_prepare(&ha->cdev, &params);
1266 
1267 	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1268 
1269 	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1270 		ha, &ha->cdev, &ha->cdev.hwfns[0]);
1271 
1272 	return (rval);
1273 }
1274 
1275 static void
1276 qlnx_release(qlnx_host_t *ha)
1277 {
1278         device_t	dev;
1279         int		i;
1280 
1281         dev = ha->pci_dev;
1282 
1283 	QL_DPRINT2(ha, "enter\n");
1284 
1285 	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1286 		if (ha->idle_chk[i] != NULL) {
1287 			free(ha->idle_chk[i], M_QLNXBUF);
1288 			ha->idle_chk[i] = NULL;
1289 		}
1290 
1291 		if (ha->grcdump[i] != NULL) {
1292 			free(ha->grcdump[i], M_QLNXBUF);
1293 			ha->grcdump[i] = NULL;
1294 		}
1295 	}
1296 
1297         if (ha->flags.callout_init)
1298                 callout_drain(&ha->qlnx_callout);
1299 
1300 	if (ha->flags.slowpath_start) {
1301 		qlnx_slowpath_stop(ha);
1302 	}
1303 
1304         if (ha->flags.hw_init)
1305 		ecore_hw_remove(&ha->cdev);
1306 
1307         qlnx_del_cdev(ha);
1308 
1309         if (ha->ifp != NULL)
1310                 ether_ifdetach(ha->ifp);
1311 
1312 	qlnx_free_tx_dma_tag(ha);
1313 
1314 	qlnx_free_rx_dma_tag(ha);
1315 
1316 	qlnx_free_parent_dma_tag(ha);
1317 
1318 	if (qlnx_vf_device(ha) != 0) {
1319 		qlnx_destroy_error_recovery_taskqueue(ha);
1320 	}
1321 
1322         for (i = 0; i < ha->num_rss; i++) {
1323 		struct qlnx_fastpath *fp = &ha->fp_array[i];
1324 
1325                 if (ha->irq_vec[i].handle) {
1326                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1327                                         ha->irq_vec[i].handle);
1328                 }
1329 
1330                 if (ha->irq_vec[i].irq) {
1331                         (void)bus_release_resource(dev, SYS_RES_IRQ,
1332                                 ha->irq_vec[i].irq_rid,
1333                                 ha->irq_vec[i].irq);
1334                 }
1335 
1336 		qlnx_free_tx_br(ha, fp);
1337         }
1338 	qlnx_destroy_fp_taskqueues(ha);
1339 
1340  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1341         	if (ha->sp_handle[i])
1342                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
1343 				ha->sp_handle[i]);
1344 
1345         	if (ha->sp_irq[i])
1346 			(void) bus_release_resource(dev, SYS_RES_IRQ,
1347 				ha->sp_irq_rid[i], ha->sp_irq[i]);
1348 	}
1349 
1350 	qlnx_destroy_sp_taskqueues(ha);
1351 
1352         if (ha->msix_count)
1353                 pci_release_msi(dev);
1354 
1355         if (ha->flags.lock_init) {
1356                 mtx_destroy(&ha->hw_lock);
1357         }
1358 
1359         if (ha->pci_reg)
1360                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1361                                 ha->pci_reg);
1362 
1363         if (ha->dbells_size && ha->pci_dbells)
1364                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1365                                 ha->pci_dbells);
1366 
1367         if (ha->msix_bar)
1368                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1369                                 ha->msix_bar);
1370 
1371 	QL_DPRINT2(ha, "exit\n");
1372 	return;
1373 }
1374 
1375 static void
1376 qlnx_trigger_dump(qlnx_host_t *ha)
1377 {
1378 	int	i;
1379 
1380 	if (ha->ifp != NULL)
1381 		if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
1382 
1383 	QL_DPRINT2(ha, "enter\n");
1384 
1385 	if (qlnx_vf_device(ha) == 0)
1386 		return;
1387 
1388 	ha->error_recovery = 1;
1389 
1390 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1391 		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1392 		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1393 	}
1394 
1395 	QL_DPRINT2(ha, "exit\n");
1396 
1397 	return;
1398 }
1399 
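/*
 * Sysctl handler: writing 1 forces a firmware dump via qlnx_trigger_dump()
 * above - the interface is marked down and, on physical function devices,
 * a GRC dump plus idle-check is captured for every hw function.
 */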
1400 static int
1401 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1402 {
1403         int		err, ret = 0;
1404         qlnx_host_t	*ha;
1405 
1406         err = sysctl_handle_int(oidp, &ret, 0, req);
1407 
1408         if (err || !req->newptr)
1409                 return (err);
1410 
1411         if (ret == 1) {
1412                 ha = (qlnx_host_t *)arg1;
1413                 qlnx_trigger_dump(ha);
1414         }
1415         return (err);
1416 }
1417 
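/*
 * Tx/Rx interrupt coalescing handlers: accepted values are 1..255
 * microseconds, applied to every fastpath queue via
 * ecore_set_queue_coalesce() and cached in ha->tx_coalesce_usecs /
 * ha->rx_coalesce_usecs on success.
 */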
1418 static int
1419 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1420 {
1421         int			err, i, ret = 0, usecs = 0;
1422         qlnx_host_t		*ha;
1423 	struct ecore_hwfn	*p_hwfn;
1424 	struct qlnx_fastpath	*fp;
1425 
1426         err = sysctl_handle_int(oidp, &usecs, 0, req);
1427 
1428         if (err || !req->newptr || !usecs || (usecs > 255))
1429                 return (err);
1430 
1431         ha = (qlnx_host_t *)arg1;
1432 
1433 	if (qlnx_vf_device(ha) == 0)
1434 		return (-1);
1435 
1436 	for (i = 0; i < ha->num_rss; i++) {
1437 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1438 
1439         	fp = &ha->fp_array[i];
1440 
1441 		if (fp->txq[0]->handle != NULL) {
1442 			ret = ecore_set_queue_coalesce(p_hwfn, 0,
1443 					(uint16_t)usecs, fp->txq[0]->handle);
1444 		}
1445         }
1446 
1447 	if (!ret)
1448 		ha->tx_coalesce_usecs = (uint8_t)usecs;
1449 
1450         return (err);
1451 }
1452 
1453 static int
1454 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1455 {
1456         int			err, i, ret = 0, usecs = 0;
1457         qlnx_host_t		*ha;
1458 	struct ecore_hwfn	*p_hwfn;
1459 	struct qlnx_fastpath	*fp;
1460 
1461         err = sysctl_handle_int(oidp, &usecs, 0, req);
1462 
1463         if (err || !req->newptr || !usecs || (usecs > 255))
1464                 return (err);
1465 
1466         ha = (qlnx_host_t *)arg1;
1467 
1468 	if (qlnx_vf_device(ha) == 0)
1469 		return (-1);
1470 
1471 	for (i = 0; i < ha->num_rss; i++) {
1472 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1473 
1474         	fp = &ha->fp_array[i];
1475 
1476 		if (fp->rxq->handle != NULL) {
1477 			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1478 					 0, fp->rxq->handle);
1479 		}
1480 	}
1481 
1482 	if (!ret)
1483 		ha->rx_coalesce_usecs = (uint8_t)usecs;
1484 
1485         return (err);
1486 }
1487 
1488 static void
1489 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1490 {
1491         struct sysctl_ctx_list	*ctx;
1492         struct sysctl_oid_list	*children;
1493 	struct sysctl_oid	*ctx_oid;
1494 
1495         ctx = device_get_sysctl_ctx(ha->pci_dev);
1496 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1497 
1498 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1499 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
1500         children = SYSCTL_CHILDREN(ctx_oid);
1501 
1502 	SYSCTL_ADD_QUAD(ctx, children,
1503                 OID_AUTO, "sp_interrupts",
1504                 CTLFLAG_RD, &ha->sp_interrupts,
1505                 "No. of slowpath interrupts");
1506 
1507 	return;
1508 }
1509 
1510 static void
1511 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1512 {
1513         struct sysctl_ctx_list	*ctx;
1514         struct sysctl_oid_list	*children;
1515         struct sysctl_oid_list	*node_children;
1516 	struct sysctl_oid	*ctx_oid;
1517 	int			i, j;
1518 	char			name_str[16];
1519 
1520         ctx = device_get_sysctl_ctx(ha->pci_dev);
1521 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1522 
1523 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1524 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
1525 	children = SYSCTL_CHILDREN(ctx_oid);
1526 
1527 	for (i = 0; i < ha->num_rss; i++) {
1528 		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1529 		snprintf(name_str, sizeof(name_str), "%d", i);
1530 
1531 		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1532 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
1533 		node_children = SYSCTL_CHILDREN(ctx_oid);
1534 
1535 		/* Tx Related */
1536 
1537 		SYSCTL_ADD_QUAD(ctx, node_children,
1538 			OID_AUTO, "tx_pkts_processed",
1539 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1540 			"No. of packets processed for transmission");
1541 
1542 		SYSCTL_ADD_QUAD(ctx, node_children,
1543 			OID_AUTO, "tx_pkts_freed",
1544 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1545 			"No. of freed packets");
1546 
1547 		SYSCTL_ADD_QUAD(ctx, node_children,
1548 			OID_AUTO, "tx_pkts_transmitted",
1549 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1550 			"No. of transmitted packets");
1551 
1552 		SYSCTL_ADD_QUAD(ctx, node_children,
1553 			OID_AUTO, "tx_pkts_completed",
1554 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1555 			"No. of transmit completions");
1556 
1557                 SYSCTL_ADD_QUAD(ctx, node_children,
1558                         OID_AUTO, "tx_non_tso_pkts",
1559                         CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1560                         "No. of non-LSO transmitted packets");
1561 
1562 #ifdef QLNX_TRACE_PERF_DATA
1563 
1564                 SYSCTL_ADD_QUAD(ctx, node_children,
1565                         OID_AUTO, "tx_pkts_trans_ctx",
1566                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1567                         "No. of transmitted packets in transmit context");
1568 
1569                 SYSCTL_ADD_QUAD(ctx, node_children,
1570                         OID_AUTO, "tx_pkts_compl_ctx",
1571                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1572                         "No. of transmit completions in transmit context");
1573 
1574                 SYSCTL_ADD_QUAD(ctx, node_children,
1575                         OID_AUTO, "tx_pkts_trans_fp",
1576                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1577                         "No. of transmitted packets in taskqueue");
1578 
1579                 SYSCTL_ADD_QUAD(ctx, node_children,
1580                         OID_AUTO, "tx_pkts_compl_fp",
1581                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1582                         "No. of transmit completions in taskqueue");
1583 
1584                 SYSCTL_ADD_QUAD(ctx, node_children,
1585                         OID_AUTO, "tx_pkts_compl_intr",
1586                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1587                         "No. of transmit completions in interrupt ctx");
1588 #endif
1589 
1590                 SYSCTL_ADD_QUAD(ctx, node_children,
1591                         OID_AUTO, "tx_tso_pkts",
1592                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1593                         "No. of LSO transmitted packets");
1594 
1595 		SYSCTL_ADD_QUAD(ctx, node_children,
1596 			OID_AUTO, "tx_lso_wnd_min_len",
1597 			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1598 			"tx_lso_wnd_min_len");
1599 
1600 		SYSCTL_ADD_QUAD(ctx, node_children,
1601 			OID_AUTO, "tx_defrag",
1602 			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1603 			"tx_defrag");
1604 
1605 		SYSCTL_ADD_QUAD(ctx, node_children,
1606 			OID_AUTO, "tx_nsegs_gt_elem_left",
1607 			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1608 			"tx_nsegs_gt_elem_left");
1609 
1610 		SYSCTL_ADD_UINT(ctx, node_children,
1611 			OID_AUTO, "tx_tso_max_nsegs",
1612 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1613 			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1614 
1615 		SYSCTL_ADD_UINT(ctx, node_children,
1616 			OID_AUTO, "tx_tso_min_nsegs",
1617 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1618 			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1619 
1620 		SYSCTL_ADD_UINT(ctx, node_children,
1621 			OID_AUTO, "tx_tso_max_pkt_len",
1622 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1623 			ha->fp_array[i].tx_tso_max_pkt_len,
1624 			"tx_tso_max_pkt_len");
1625 
1626 		SYSCTL_ADD_UINT(ctx, node_children,
1627 			OID_AUTO, "tx_tso_min_pkt_len",
1628 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1629 			ha->fp_array[i].tx_tso_min_pkt_len,
1630 			"tx_tso_min_pkt_len");
1631 
1632 		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1633 			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1634 			snprintf(name_str, sizeof(name_str),
1635 				"tx_pkts_nseg_%02d", (j+1));
1636 
1637 			SYSCTL_ADD_QUAD(ctx, node_children,
1638 				OID_AUTO, name_str, CTLFLAG_RD,
1639 				&ha->fp_array[i].tx_pkts[j], name_str);
1640 		}
1641 
1642 #ifdef QLNX_TRACE_PERF_DATA
1643                 for (j = 0; j < 18; j++) {
1644                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1645                         snprintf(name_str, sizeof(name_str),
1646                                 "tx_pkts_hist_%02d", (j+1));
1647 
1648                         SYSCTL_ADD_QUAD(ctx, node_children,
1649                                 OID_AUTO, name_str, CTLFLAG_RD,
1650                                 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1651                 }
1652                 for (j = 0; j < 5; j++) {
1653                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1654                         snprintf(name_str, sizeof(name_str),
1655                                 "tx_comInt_%02d", (j+1));
1656 
1657                         SYSCTL_ADD_QUAD(ctx, node_children,
1658                                 OID_AUTO, name_str, CTLFLAG_RD,
1659                                 &ha->fp_array[i].tx_comInt[j], name_str);
1660                 }
1661                 for (j = 0; j < 18; j++) {
1662                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1663                         snprintf(name_str, sizeof(name_str),
1664                                 "tx_pkts_q_%02d", (j+1));
1665 
1666                         SYSCTL_ADD_QUAD(ctx, node_children,
1667                                 OID_AUTO, name_str, CTLFLAG_RD,
1668                                 &ha->fp_array[i].tx_pkts_q[j], name_str);
1669                 }
1670 #endif
1671 
1672 		SYSCTL_ADD_QUAD(ctx, node_children,
1673 			OID_AUTO, "err_tx_nsegs_gt_elem_left",
1674 			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1675 			"err_tx_nsegs_gt_elem_left");
1676 
1677 		SYSCTL_ADD_QUAD(ctx, node_children,
1678 			OID_AUTO, "err_tx_dmamap_create",
1679 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1680 			"err_tx_dmamap_create");
1681 
1682 		SYSCTL_ADD_QUAD(ctx, node_children,
1683 			OID_AUTO, "err_tx_defrag_dmamap_load",
1684 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1685 			"err_tx_defrag_dmamap_load");
1686 
1687 		SYSCTL_ADD_QUAD(ctx, node_children,
1688 			OID_AUTO, "err_tx_non_tso_max_seg",
1689 			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1690 			"err_tx_non_tso_max_seg");
1691 
1692 		SYSCTL_ADD_QUAD(ctx, node_children,
1693 			OID_AUTO, "err_tx_dmamap_load",
1694 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1695 			"err_tx_dmamap_load");
1696 
1697 		SYSCTL_ADD_QUAD(ctx, node_children,
1698 			OID_AUTO, "err_tx_defrag",
1699 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1700 			"err_tx_defrag");
1701 
1702 		SYSCTL_ADD_QUAD(ctx, node_children,
1703 			OID_AUTO, "err_tx_free_pkt_null",
1704 			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1705 			"err_tx_free_pkt_null");
1706 
1707 		SYSCTL_ADD_QUAD(ctx, node_children,
1708 			OID_AUTO, "err_tx_cons_idx_conflict",
1709 			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1710 			"err_tx_cons_idx_conflict");
1711 
1712 		SYSCTL_ADD_QUAD(ctx, node_children,
1713 			OID_AUTO, "lro_cnt_64",
1714 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1715 			"lro_cnt_64");
1716 
1717 		SYSCTL_ADD_QUAD(ctx, node_children,
1718 			OID_AUTO, "lro_cnt_128",
1719 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1720 			"lro_cnt_128");
1721 
1722 		SYSCTL_ADD_QUAD(ctx, node_children,
1723 			OID_AUTO, "lro_cnt_256",
1724 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1725 			"lro_cnt_256");
1726 
1727 		SYSCTL_ADD_QUAD(ctx, node_children,
1728 			OID_AUTO, "lro_cnt_512",
1729 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1730 			"lro_cnt_512");
1731 
1732 		SYSCTL_ADD_QUAD(ctx, node_children,
1733 			OID_AUTO, "lro_cnt_1024",
1734 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1735 			"lro_cnt_1024");
1736 
1737 		/* Rx Related */
1738 
1739 		SYSCTL_ADD_QUAD(ctx, node_children,
1740 			OID_AUTO, "rx_pkts",
1741 			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1742 			"No. of received packets");
1743 
1744 		SYSCTL_ADD_QUAD(ctx, node_children,
1745 			OID_AUTO, "tpa_start",
1746 			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1747 			"No. of tpa_start packets");
1748 
1749 		SYSCTL_ADD_QUAD(ctx, node_children,
1750 			OID_AUTO, "tpa_cont",
1751 			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1752 			"No. of tpa_cont packets");
1753 
1754 		SYSCTL_ADD_QUAD(ctx, node_children,
1755 			OID_AUTO, "tpa_end",
1756 			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1757 			"No. of tpa_end packets");
1758 
1759 		SYSCTL_ADD_QUAD(ctx, node_children,
1760 			OID_AUTO, "err_m_getcl",
1761 			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1762 			"err_m_getcl");
1763 
1764 		SYSCTL_ADD_QUAD(ctx, node_children,
1765 			OID_AUTO, "err_m_getjcl",
1766 			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1767 			"err_m_getjcl");
1768 
1769 		SYSCTL_ADD_QUAD(ctx, node_children,
1770 			OID_AUTO, "err_rx_hw_errors",
1771 			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1772 			"err_rx_hw_errors");
1773 
1774 		SYSCTL_ADD_QUAD(ctx, node_children,
1775 			OID_AUTO, "err_rx_alloc_errors",
1776 			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1777 			"err_rx_alloc_errors");
1778 	}
1779 
1780 	return;
1781 }
1782 
1783 static void
1784 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1785 {
1786         struct sysctl_ctx_list	*ctx;
1787         struct sysctl_oid_list	*children;
1788 	struct sysctl_oid	*ctx_oid;
1789 
1790         ctx = device_get_sysctl_ctx(ha->pci_dev);
1791 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1792 
1793 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1794 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
1795         children = SYSCTL_CHILDREN(ctx_oid);
1796 
1797 	SYSCTL_ADD_QUAD(ctx, children,
1798                 OID_AUTO, "no_buff_discards",
1799                 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1800                 "No. of packets discarded due to lack of buffer");
1801 
1802 	SYSCTL_ADD_QUAD(ctx, children,
1803                 OID_AUTO, "packet_too_big_discard",
1804                 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1805                 "No. of packets discarded because packet was too big");
1806 
1807 	SYSCTL_ADD_QUAD(ctx, children,
1808                 OID_AUTO, "ttl0_discard",
1809                 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1810                 "ttl0_discard");
1811 
1812 	SYSCTL_ADD_QUAD(ctx, children,
1813                 OID_AUTO, "rx_ucast_bytes",
1814                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1815                 "rx_ucast_bytes");
1816 
1817 	SYSCTL_ADD_QUAD(ctx, children,
1818                 OID_AUTO, "rx_mcast_bytes",
1819                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1820                 "rx_mcast_bytes");
1821 
1822 	SYSCTL_ADD_QUAD(ctx, children,
1823                 OID_AUTO, "rx_bcast_bytes",
1824                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1825                 "rx_bcast_bytes");
1826 
1827 	SYSCTL_ADD_QUAD(ctx, children,
1828                 OID_AUTO, "rx_ucast_pkts",
1829                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1830                 "rx_ucast_pkts");
1831 
1832 	SYSCTL_ADD_QUAD(ctx, children,
1833                 OID_AUTO, "rx_mcast_pkts",
1834                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1835                 "rx_mcast_pkts");
1836 
1837 	SYSCTL_ADD_QUAD(ctx, children,
1838                 OID_AUTO, "rx_bcast_pkts",
1839                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1840                 "rx_bcast_pkts");
1841 
1842 	SYSCTL_ADD_QUAD(ctx, children,
1843                 OID_AUTO, "mftag_filter_discards",
1844                 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1845                 "mftag_filter_discards");
1846 
1847 	SYSCTL_ADD_QUAD(ctx, children,
1848                 OID_AUTO, "mac_filter_discards",
1849                 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1850                 "mac_filter_discards");
1851 
1852 	SYSCTL_ADD_QUAD(ctx, children,
1853                 OID_AUTO, "tx_ucast_bytes",
1854                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1855                 "tx_ucast_bytes");
1856 
1857 	SYSCTL_ADD_QUAD(ctx, children,
1858                 OID_AUTO, "tx_mcast_bytes",
1859                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1860                 "tx_mcast_bytes");
1861 
1862 	SYSCTL_ADD_QUAD(ctx, children,
1863                 OID_AUTO, "tx_bcast_bytes",
1864                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1865                 "tx_bcast_bytes");
1866 
1867 	SYSCTL_ADD_QUAD(ctx, children,
1868                 OID_AUTO, "tx_ucast_pkts",
1869                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1870                 "tx_ucast_pkts");
1871 
1872 	SYSCTL_ADD_QUAD(ctx, children,
1873                 OID_AUTO, "tx_mcast_pkts",
1874                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1875                 "tx_mcast_pkts");
1876 
1877 	SYSCTL_ADD_QUAD(ctx, children,
1878                 OID_AUTO, "tx_bcast_pkts",
1879                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1880                 "tx_bcast_pkts");
1881 
1882 	SYSCTL_ADD_QUAD(ctx, children,
1883                 OID_AUTO, "tx_err_drop_pkts",
1884                 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1885                 "tx_err_drop_pkts");
1886 
1887 	SYSCTL_ADD_QUAD(ctx, children,
1888                 OID_AUTO, "tpa_coalesced_pkts",
1889                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1890                 "tpa_coalesced_pkts");
1891 
1892 	SYSCTL_ADD_QUAD(ctx, children,
1893                 OID_AUTO, "tpa_coalesced_events",
1894                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1895                 "tpa_coalesced_events");
1896 
1897 	SYSCTL_ADD_QUAD(ctx, children,
1898                 OID_AUTO, "tpa_aborts_num",
1899                 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1900                 "tpa_aborts_num");
1901 
1902 	SYSCTL_ADD_QUAD(ctx, children,
1903                 OID_AUTO, "tpa_not_coalesced_pkts",
1904                 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1905                 "tpa_not_coalesced_pkts");
1906 
1907 	SYSCTL_ADD_QUAD(ctx, children,
1908                 OID_AUTO, "tpa_coalesced_bytes",
1909                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1910                 "tpa_coalesced_bytes");
1911 
1912 	SYSCTL_ADD_QUAD(ctx, children,
1913                 OID_AUTO, "rx_64_byte_packets",
1914                 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1915                 "rx_64_byte_packets");
1916 
1917 	SYSCTL_ADD_QUAD(ctx, children,
1918                 OID_AUTO, "rx_65_to_127_byte_packets",
1919                 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1920                 "rx_65_to_127_byte_packets");
1921 
1922 	SYSCTL_ADD_QUAD(ctx, children,
1923                 OID_AUTO, "rx_128_to_255_byte_packets",
1924                 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1925                 "rx_128_to_255_byte_packets");
1926 
1927 	SYSCTL_ADD_QUAD(ctx, children,
1928                 OID_AUTO, "rx_256_to_511_byte_packets",
1929                 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1930                 "rx_256_to_511_byte_packets");
1931 
1932 	SYSCTL_ADD_QUAD(ctx, children,
1933                 OID_AUTO, "rx_512_to_1023_byte_packets",
1934                 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1935                 "rx_512_to_1023_byte_packets");
1936 
1937 	SYSCTL_ADD_QUAD(ctx, children,
1938                 OID_AUTO, "rx_1024_to_1518_byte_packets",
1939                 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1940                 "rx_1024_to_1518_byte_packets");
1941 
1942 	SYSCTL_ADD_QUAD(ctx, children,
1943                 OID_AUTO, "rx_1519_to_1522_byte_packets",
1944                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1945                 "rx_1519_to_1522_byte_packets");
1946 
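	/*
	 * Note: the OID below is named for the 1523-2047 range, but the
	 * ecore bb stats block only provides rx_1519_to_2047_byte_packets,
	 * so frames of 1519-1522 bytes may be reflected both here and in
	 * the OID above.
	 */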
1947 	SYSCTL_ADD_QUAD(ctx, children,
1948                 OID_AUTO, "rx_1523_to_2047_byte_packets",
1949                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1950                 "rx_1523_to_2047_byte_packets");
1951 
1952 	SYSCTL_ADD_QUAD(ctx, children,
1953                 OID_AUTO, "rx_2048_to_4095_byte_packets",
1954                 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1955                 "rx_2048_to_4095_byte_packets");
1956 
1957 	SYSCTL_ADD_QUAD(ctx, children,
1958                 OID_AUTO, "rx_4096_to_9216_byte_packets",
1959                 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1960                 "rx_4096_to_9216_byte_packets");
1961 
1962 	SYSCTL_ADD_QUAD(ctx, children,
1963                 OID_AUTO, "rx_9217_to_16383_byte_packets",
1964                 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1965                 "rx_9217_to_16383_byte_packets");
1966 
1967 	SYSCTL_ADD_QUAD(ctx, children,
1968                 OID_AUTO, "rx_crc_errors",
1969                 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1970                 "rx_crc_errors");
1971 
1972 	SYSCTL_ADD_QUAD(ctx, children,
1973                 OID_AUTO, "rx_mac_crtl_frames",
1974                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1975                 "rx_mac_crtl_frames");
1976 
1977 	SYSCTL_ADD_QUAD(ctx, children,
1978                 OID_AUTO, "rx_pause_frames",
1979                 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1980                 "rx_pause_frames");
1981 
1982 	SYSCTL_ADD_QUAD(ctx, children,
1983                 OID_AUTO, "rx_pfc_frames",
1984                 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1985                 "rx_pfc_frames");
1986 
1987 	SYSCTL_ADD_QUAD(ctx, children,
1988                 OID_AUTO, "rx_align_errors",
1989                 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1990                 "rx_align_errors");
1991 
1992 	SYSCTL_ADD_QUAD(ctx, children,
1993                 OID_AUTO, "rx_carrier_errors",
1994                 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1995                 "rx_carrier_errors");
1996 
1997 	SYSCTL_ADD_QUAD(ctx, children,
1998                 OID_AUTO, "rx_oversize_packets",
1999                 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
2000                 "rx_oversize_packets");
2001 
2002 	SYSCTL_ADD_QUAD(ctx, children,
2003                 OID_AUTO, "rx_jabbers",
2004                 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
2005                 "rx_jabbers");
2006 
2007 	SYSCTL_ADD_QUAD(ctx, children,
2008                 OID_AUTO, "rx_undersize_packets",
2009                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
2010                 "rx_undersize_packets");
2011 
2012 	SYSCTL_ADD_QUAD(ctx, children,
2013                 OID_AUTO, "rx_fragments",
2014                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2015                 "rx_fragments");
2016 
2017 	SYSCTL_ADD_QUAD(ctx, children,
2018                 OID_AUTO, "tx_64_byte_packets",
2019                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2020                 "tx_64_byte_packets");
2021 
2022 	SYSCTL_ADD_QUAD(ctx, children,
2023                 OID_AUTO, "tx_65_to_127_byte_packets",
2024                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2025                 "tx_65_to_127_byte_packets");
2026 
2027 	SYSCTL_ADD_QUAD(ctx, children,
2028                 OID_AUTO, "tx_128_to_255_byte_packets",
2029                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2030                 "tx_128_to_255_byte_packets");
2031 
2032 	SYSCTL_ADD_QUAD(ctx, children,
2033                 OID_AUTO, "tx_256_to_511_byte_packets",
2034                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2035                 "tx_256_to_511_byte_packets");
2036 
2037 	SYSCTL_ADD_QUAD(ctx, children,
2038                 OID_AUTO, "tx_512_to_1023_byte_packets",
2039                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2040                 "tx_512_to_1023_byte_packets");
2041 
2042 	SYSCTL_ADD_QUAD(ctx, children,
2043                 OID_AUTO, "tx_1024_to_1518_byte_packets",
2044                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2045                 "tx_1024_to_1518_byte_packets");
2046 
2047 	SYSCTL_ADD_QUAD(ctx, children,
2048                 OID_AUTO, "tx_1519_to_2047_byte_packets",
2049                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2050                 "tx_1519_to_2047_byte_packets");
2051 
2052 	SYSCTL_ADD_QUAD(ctx, children,
2053                 OID_AUTO, "tx_2048_to_4095_byte_packets",
2054                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2055                 "tx_2048_to_4095_byte_packets");
2056 
2057 	SYSCTL_ADD_QUAD(ctx, children,
2058                 OID_AUTO, "tx_4096_to_9216_byte_packets",
2059                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2060                 "tx_4096_to_9216_byte_packets");
2061 
2062 	SYSCTL_ADD_QUAD(ctx, children,
2063                 OID_AUTO, "tx_9217_to_16383_byte_packets",
2064                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2065                 "tx_9217_to_16383_byte_packets");
2066 
2067 	SYSCTL_ADD_QUAD(ctx, children,
2068                 OID_AUTO, "tx_pause_frames",
2069                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2070                 "tx_pause_frames");
2071 
2072 	SYSCTL_ADD_QUAD(ctx, children,
2073                 OID_AUTO, "tx_pfc_frames",
2074                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2075                 "tx_pfc_frames");
2076 
2077 	SYSCTL_ADD_QUAD(ctx, children,
2078                 OID_AUTO, "tx_lpi_entry_count",
2079                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2080                 "tx_lpi_entry_count");
2081 
2082 	SYSCTL_ADD_QUAD(ctx, children,
2083                 OID_AUTO, "tx_total_collisions",
2084                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2085                 "tx_total_collisions");
2086 
2087 	SYSCTL_ADD_QUAD(ctx, children,
2088                 OID_AUTO, "brb_truncates",
2089                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2090                 "brb_truncates");
2091 
2092 	SYSCTL_ADD_QUAD(ctx, children,
2093                 OID_AUTO, "brb_discards",
2094                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2095                 "brb_discards");
2096 
2097 	SYSCTL_ADD_QUAD(ctx, children,
2098                 OID_AUTO, "rx_mac_bytes",
2099                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2100                 "rx_mac_bytes");
2101 
2102 	SYSCTL_ADD_QUAD(ctx, children,
2103                 OID_AUTO, "rx_mac_uc_packets",
2104                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2105                 "rx_mac_uc_packets");
2106 
2107 	SYSCTL_ADD_QUAD(ctx, children,
2108                 OID_AUTO, "rx_mac_mc_packets",
2109                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2110                 "rx_mac_mc_packets");
2111 
2112 	SYSCTL_ADD_QUAD(ctx, children,
2113                 OID_AUTO, "rx_mac_bc_packets",
2114                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2115                 "rx_mac_bc_packets");
2116 
2117 	SYSCTL_ADD_QUAD(ctx, children,
2118                 OID_AUTO, "rx_mac_frames_ok",
2119                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2120                 "rx_mac_frames_ok");
2121 
2122 	SYSCTL_ADD_QUAD(ctx, children,
2123                 OID_AUTO, "tx_mac_bytes",
2124                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2125                 "tx_mac_bytes");
2126 
2127 	SYSCTL_ADD_QUAD(ctx, children,
2128                 OID_AUTO, "tx_mac_uc_packets",
2129                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2130                 "tx_mac_uc_packets");
2131 
2132 	SYSCTL_ADD_QUAD(ctx, children,
2133                 OID_AUTO, "tx_mac_mc_packets",
2134                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2135                 "tx_mac_mc_packets");
2136 
2137 	SYSCTL_ADD_QUAD(ctx, children,
2138                 OID_AUTO, "tx_mac_bc_packets",
2139                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2140                 "tx_mac_bc_packets");
2141 
2142 	SYSCTL_ADD_QUAD(ctx, children,
2143                 OID_AUTO, "tx_mac_ctrl_frames",
2144                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2145                 "tx_mac_ctrl_frames");
2146 	return;
2147 }
2148 
2149 static void
2150 qlnx_add_sysctls(qlnx_host_t *ha)
2151 {
2152         device_t		dev = ha->pci_dev;
2153 	struct sysctl_ctx_list	*ctx;
2154 	struct sysctl_oid_list	*children;
2155 
2156 	ctx = device_get_sysctl_ctx(dev);
2157 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2158 
2159 	qlnx_add_fp_stats_sysctls(ha);
2160 	qlnx_add_sp_stats_sysctls(ha);
2161 
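	/*
	 * Hardware statistics are exposed only on the PF;
	 * qlnx_vf_device() returns 0 when the device is a VF.
	 */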
2162 	if (qlnx_vf_device(ha) != 0)
2163 		qlnx_add_hw_stats_sysctls(ha);
2164 
2165 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2166 		CTLFLAG_RD, qlnx_ver_str, 0,
2167 		"Driver Version");
2168 
2169 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2170 		CTLFLAG_RD, ha->stormfw_ver, 0,
2171 		"STORM Firmware Version");
2172 
2173 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2174 		CTLFLAG_RD, ha->mfw_ver, 0,
2175 		"Management Firmware Version");
2176 
2177         SYSCTL_ADD_UINT(ctx, children,
2178                 OID_AUTO, "personality", CTLFLAG_RD,
2179                 &ha->personality, ha->personality,
2180 		"\tpersonality = 0 => Ethernet Only\n"
2181 		"\tpersonality = 3 => Ethernet and RoCE\n"
2182 		"\tpersonality = 4 => Ethernet and iWARP\n"
2183 		"\tpersonality = 6 => Default in Shared Memory\n");
2184 
2185         ha->dbg_level = 0;
2186         SYSCTL_ADD_UINT(ctx, children,
2187                 OID_AUTO, "debug", CTLFLAG_RW,
2188                 &ha->dbg_level, ha->dbg_level, "Debug Level");
2189 
2190         ha->dp_level = 0x01;
2191         SYSCTL_ADD_UINT(ctx, children,
2192                 OID_AUTO, "dp_level", CTLFLAG_RW,
2193                 &ha->dp_level, ha->dp_level, "DP Level");
2194 
2195         ha->dbg_trace_lro_cnt = 0;
2196         SYSCTL_ADD_UINT(ctx, children,
2197                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2198                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2199 		"Trace LRO Counts");
2200 
2201         ha->dbg_trace_tso_pkt_len = 0;
2202         SYSCTL_ADD_UINT(ctx, children,
2203                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2204                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2205 		"Trace TSO packet lengths");
2206 
2207         ha->dp_module = 0;
2208         SYSCTL_ADD_UINT(ctx, children,
2209                 OID_AUTO, "dp_module", CTLFLAG_RW,
2210                 &ha->dp_module, ha->dp_module, "DP Module");
2211 
2212         ha->err_inject = 0;
2213 
2214         SYSCTL_ADD_UINT(ctx, children,
2215                 OID_AUTO, "err_inject", CTLFLAG_RW,
2216                 &ha->err_inject, ha->err_inject, "Error Inject");
2217 
2218 	ha->storm_stats_enable = 0;
2219 
2220 	SYSCTL_ADD_UINT(ctx, children,
2221 		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2222 		&ha->storm_stats_enable, ha->storm_stats_enable,
2223 		"Enable Storm Statistics Gathering");
2224 
2225 	ha->storm_stats_index = 0;
2226 
2227 	SYSCTL_ADD_UINT(ctx, children,
2228 		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2229 		&ha->storm_stats_index, ha->storm_stats_index,
2230 		"Storm Statistics Gathering Current Index");
2231 
2232 	ha->grcdump_taken = 0;
2233 	SYSCTL_ADD_UINT(ctx, children,
2234 		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2235 		&ha->grcdump_taken, ha->grcdump_taken,
2236 		"grcdump_taken");
2237 
2238 	ha->idle_chk_taken = 0;
2239 	SYSCTL_ADD_UINT(ctx, children,
2240 		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2241 		&ha->idle_chk_taken, ha->idle_chk_taken,
2242 		"idle_chk_taken");
2243 
2244 	SYSCTL_ADD_UINT(ctx, children,
2245 		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2246 		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2247 		"rx_coalesce_usecs");
2248 
2249 	SYSCTL_ADD_UINT(ctx, children,
2250 		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2251 		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2252 		"tx_coalesce_usecs");
2253 
2254 	SYSCTL_ADD_PROC(ctx, children,
2255 	    OID_AUTO, "trigger_dump",
2256 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2257 	    (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2258 
2259 	SYSCTL_ADD_PROC(ctx, children,
2260 	    OID_AUTO, "set_rx_coalesce_usecs",
2261 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2262 	    (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2263 	    "rx interrupt coalesce period microseconds");
2264 
2265 	SYSCTL_ADD_PROC(ctx, children,
2266 	    OID_AUTO, "set_tx_coalesce_usecs",
2267 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2268 	    (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2269 	    "tx interrupt coalesce period microseconds");
2270 
2271 	ha->rx_pkt_threshold = 128;
2272         SYSCTL_ADD_UINT(ctx, children,
2273                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2274                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2275 		"No. of Rx Pkts to process at a time");
2276 
2277 	ha->rx_jumbo_buf_eq_mtu = 0;
2278         SYSCTL_ADD_UINT(ctx, children,
2279                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2280                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2281 		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2282 		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
2283 
2284 	SYSCTL_ADD_QUAD(ctx, children,
2285                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2286 		&ha->err_illegal_intr, "err_illegal_intr");
2287 
2288 	SYSCTL_ADD_QUAD(ctx, children,
2289                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2290 		&ha->err_fp_null, "err_fp_null");
2291 
2292 	SYSCTL_ADD_QUAD(ctx, children,
2293                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2294 		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2295 	return;
2296 }
2297 
2298 /*****************************************************************************
2299  * Operating System Network Interface Functions
2300  *****************************************************************************/
2301 
2302 static void
2303 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2304 {
2305 	uint16_t	device_id;
2306         if_t		ifp;
2307 
2308         ifp = ha->ifp = if_alloc(IFT_ETHER);
2309 
2310         if (ifp == NULL)
2311                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2312 
2313         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2314 
2315 	device_id = pci_get_device(ha->pci_dev);
2316 
2317         if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2318 		if_setbaudrate(ifp, IF_Gbps(40));
2319         else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2320 			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
2321 		if_setbaudrate(ifp, IF_Gbps(25));
2322         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2323 		if_setbaudrate(ifp, IF_Gbps(50));
2324         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2325 		if_setbaudrate(ifp, IF_Gbps(100));
2326 
2327         if_setcapabilities(ifp, IFCAP_LINKSTATE);
2328 
2329         if_setinitfn(ifp, qlnx_init);
2330         if_setsoftc(ifp, ha);
2331         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2332         if_setioctlfn(ifp, qlnx_ioctl);
2333         if_settransmitfn(ifp, qlnx_transmit);
2334         if_setqflushfn(ifp, qlnx_qflush);
2335 
2336         if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2337         if_setsendqready(ifp);
2338 
2339 	if_setgetcounterfn(ifp, qlnx_get_counter);
2340 
2341         ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2342 
2343         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2344 
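	/*
	 * If NVRAM supplied an all-zero MAC address, synthesize one from
	 * what appears to be a QLogic OUI (00:0e:1e) plus three random
	 * octets.
	 */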
2345 	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2346 		!ha->primary_mac[2] && !ha->primary_mac[3] &&
2347 		!ha->primary_mac[4] && !ha->primary_mac[5]) {
2348 		uint32_t rnd;
2349 
2350 		rnd = arc4random();
2351 
2352 		ha->primary_mac[0] = 0x00;
2353 		ha->primary_mac[1] = 0x0e;
2354 		ha->primary_mac[2] = 0x1e;
2355 		ha->primary_mac[3] = rnd & 0xFF;
2356 		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2357 		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2358 	}
2359 
2360 	ether_ifattach(ifp, ha->primary_mac);
2361 	bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2362 
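	/*
	 * Use the set-bit form here so the IFCAP_LINKSTATE capability
	 * established earlier is not clobbered.
	 */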
2363 	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
2364 	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
2365 
2366 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
2367 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
2368 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2369 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
2370 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
2371 	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
2372 	if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
2373 	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
2374 
2375 	if_sethwtsomax(ifp,  QLNX_MAX_TSO_FRAME_SIZE -
2376 				(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2377 	if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2378 	if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);
2379 
2380         if_setcapenable(ifp, if_getcapabilities(ifp));
2381 
2382 	if_sethwassist(ifp, CSUM_IP);
2383 	if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
2384 	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
2385 	if_sethwassistbits(ifp, CSUM_TSO, 0);
2386 
2387 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2388 
2389         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
2390 		qlnx_media_status);
2391 
2392         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2393 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2394 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2395 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2396         } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2397 			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2398 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2399 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2400         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2401 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2402 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2403         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2404 		ifmedia_add(&ha->media,
2405 			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2406 		ifmedia_add(&ha->media,
2407 			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2408 		ifmedia_add(&ha->media,
2409 			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2410 	}
2411 
2412         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2413         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2414 
2415         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2416 
2417         QL_DPRINT2(ha, "exit\n");
2418 
2419         return;
2420 }
2421 
2422 static void
2423 qlnx_init_locked(qlnx_host_t *ha)
2424 {
2425 	if_t		ifp = ha->ifp;
2426 
2427 	QL_DPRINT1(ha, "Driver Initialization start\n");
2428 
2429 	qlnx_stop(ha);
2430 
2431 	if (qlnx_load(ha) == 0) {
2432 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2433 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2434 
2435 #ifdef QLNX_ENABLE_IWARP
2436 		if (qlnx_vf_device(ha) != 0) {
2437 			qlnx_rdma_dev_open(ha);
2438 		}
2439 #endif /* #ifdef QLNX_ENABLE_IWARP */
2440 	}
2441 
2442 	return;
2443 }
2444 
2445 static void
2446 qlnx_init(void *arg)
2447 {
2448 	qlnx_host_t	*ha;
2449 
2450 	ha = (qlnx_host_t *)arg;
2451 
2452 	QL_DPRINT2(ha, "enter\n");
2453 
2454 	QLNX_LOCK(ha);
2455 	qlnx_init_locked(ha);
2456 	QLNX_UNLOCK(ha);
2457 
2458 	QL_DPRINT2(ha, "exit\n");
2459 
2460 	return;
2461 }
2462 
2463 static int
2464 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2465 {
2466 	struct ecore_filter_mcast	*mcast;
2467 	struct ecore_dev		*cdev;
2468 	int				rc;
2469 
2470 	cdev = &ha->cdev;
2471 
2472 	mcast = &ha->ecore_mcast;
2473 	bzero(mcast, sizeof(struct ecore_filter_mcast));
2474 
2475 	if (add_mac)
2476 		mcast->opcode = ECORE_FILTER_ADD;
2477 	else
2478 		mcast->opcode = ECORE_FILTER_REMOVE;
2479 
2480 	mcast->num_mc_addrs = 1;
2481 	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2482 
2483 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2484 
2485 	return (rc);
2486 }
2487 
2488 static int
2489 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2490 {
2491         int	i;
2492 
2493         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2494                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2495                         return 0; /* it has already been added */
2496         }
2497 
2498         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2499                 if ((ha->mcast[i].addr[0] == 0) &&
2500                         (ha->mcast[i].addr[1] == 0) &&
2501                         (ha->mcast[i].addr[2] == 0) &&
2502                         (ha->mcast[i].addr[3] == 0) &&
2503                         (ha->mcast[i].addr[4] == 0) &&
2504                         (ha->mcast[i].addr[5] == 0)) {
2505                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2506                                 return (-1);
2507 
2508                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2509                         ha->nmcast++;
2510 
2511                         return 0;
2512                 }
2513         }
2514         return 0;
2515 }
2516 
2517 static int
2518 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2519 {
2520         int	i;
2521 
2522         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2523                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2524                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2525                                 return (-1);
2526 
2527                         ha->mcast[i].addr[0] = 0;
2528                         ha->mcast[i].addr[1] = 0;
2529                         ha->mcast[i].addr[2] = 0;
2530                         ha->mcast[i].addr[3] = 0;
2531                         ha->mcast[i].addr[4] = 0;
2532                         ha->mcast[i].addr[5] = 0;
2533 
2534                         ha->nmcast--;
2535 
2536                         return 0;
2537                 }
2538         }
2539         return 0;
2540 }
2541 
2542 /*
2543  * Name: qlnx_hw_set_multi
2544  * Function: Sets the multicast addresses provided by the host O.S. into
2545  *      the hardware (for the given interface)
2546  */
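/*
 * Note: the multicast scratch array is strided at ETHER_HDR_LEN (14)
 * bytes per entry rather than ETHER_ADDR_LEN; qlnx_set_multi and
 * qlnx_copy_maddr use the same stride, and only the first
 * ETHER_ADDR_LEN bytes of each slot are ever consumed.
 */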
2547 static void
2548 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2549 	uint32_t add_mac)
2550 {
2551         int	i;
2552 
2553         for (i = 0; i < mcnt; i++) {
2554                 if (add_mac) {
2555                         if (qlnx_hw_add_mcast(ha, mta))
2556                                 break;
2557                 } else {
2558                         if (qlnx_hw_del_mcast(ha, mta))
2559                                 break;
2560                 }
2561 
2562                 mta += ETHER_HDR_LEN;
2563         }
2564         return;
2565 }
2566 
2567 static u_int
2568 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2569 {
2570 	uint8_t *mta = arg;
2571 
2572 	if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2573 		return (0);
2574 
2575 	bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_ADDR_LEN);
2576 
2577 	return (1);
2578 }
2579 
2580 static int
2581 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2582 {
2583 	uint8_t		mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2584 	if_t		ifp = ha->ifp;
2585 	u_int		mcnt;
2586 
2587 	if (qlnx_vf_device(ha) == 0)
2588 		return (0);
2589 
2590 	mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2591 
2592 	QLNX_LOCK(ha);
2593 	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2594 	QLNX_UNLOCK(ha);
2595 
2596 	return (0);
2597 }
2598 
2599 static int
2600 qlnx_set_promisc(qlnx_host_t *ha)
2601 {
2602 	int	rc = 0;
2603 	uint8_t	filter;
2604 
2605 	if (qlnx_vf_device(ha) == 0)
2606 		return (0);
2607 
2608 	filter = ha->filter;
2609 	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2610 	filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2611 
2612 	rc = qlnx_set_rx_accept_filter(ha, filter);
2613 	return (rc);
2614 }
2615 
2616 static int
2617 qlnx_set_allmulti(qlnx_host_t *ha)
2618 {
2619 	int	rc = 0;
2620 	uint8_t	filter;
2621 
2622 	if (qlnx_vf_device(ha) == 0)
2623 		return (0);
2624 
2625 	filter = ha->filter;
2626 	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2627 	rc = qlnx_set_rx_accept_filter(ha, filter);
2628 
2629 	return (rc);
2630 }
2631 
2632 static int
2633 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
2634 {
2635 	int		ret = 0, mask;
2636 	struct ifreq	*ifr = (struct ifreq *)data;
2637 	struct ifaddr	*ifa = (struct ifaddr *)data;
2638 	qlnx_host_t	*ha;
2639 
2640 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2641 
2642 	switch (cmd) {
2643 	case SIOCSIFADDR:
2644 		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2645 
2646 		if (ifa->ifa_addr->sa_family == AF_INET) {
2647 			if_setflagbits(ifp, IFF_UP, 0);
2648 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2649 				QLNX_LOCK(ha);
2650 				qlnx_init_locked(ha);
2651 				QLNX_UNLOCK(ha);
2652 			}
2653 			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2654 				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2655 
2656 			arp_ifinit(ifp, ifa);
2657 		} else {
2658 			ether_ioctl(ifp, cmd, data);
2659 		}
2660 		break;
2661 
2662 	case SIOCSIFMTU:
2663 		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2664 
2665 		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2666 			ret = EINVAL;
2667 		} else {
2668 			QLNX_LOCK(ha);
2669 			if_setmtu(ifp, ifr->ifr_mtu);
2670 			ha->max_frame_size =
2671 				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2672 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2673 				qlnx_init_locked(ha);
2674 			}
2675 
2676 			QLNX_UNLOCK(ha);
2677 		}
2678 
2679 		break;
2680 
2681 	case SIOCSIFFLAGS:
2682 		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2683 
2684 		QLNX_LOCK(ha);
2685 
2686 		if (if_getflags(ifp) & IFF_UP) {
2687 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2688 				if ((if_getflags(ifp) ^ ha->if_flags) &
2689 					IFF_PROMISC) {
2690 					ret = qlnx_set_promisc(ha);
2691 				} else if ((if_getflags(ifp) ^ ha->if_flags) &
2692 					IFF_ALLMULTI) {
2693 					ret = qlnx_set_allmulti(ha);
2694 				}
2695 			} else {
2696 				ha->max_frame_size = if_getmtu(ifp) +
2697 					ETHER_HDR_LEN + ETHER_CRC_LEN;
2698 				qlnx_init_locked(ha);
2699 			}
2700 		} else {
2701 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2702 				qlnx_stop(ha);
2703 			ha->if_flags = if_getflags(ifp);
2704 		}
2705 
2706 		QLNX_UNLOCK(ha);
2707 		break;
2708 
2709 	case SIOCADDMULTI:
2710 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2711 
2712 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2713 			if (qlnx_set_multi(ha, 1))
2714 				ret = EINVAL;
2715 		}
2716 		break;
2717 
2718 	case SIOCDELMULTI:
2719 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2720 
2721 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2722 			if (qlnx_set_multi(ha, 0))
2723 				ret = EINVAL;
2724 		}
2725 		break;
2726 
2727 	case SIOCSIFMEDIA:
2728 	case SIOCGIFMEDIA:
2729 		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2730 
2731 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2732 		break;
2733 
2734 	case SIOCSIFCAP:
2735 
2736 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2737 
2738 		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2739 
2740 		if (mask & IFCAP_HWCSUM)
2741 			if_togglecapenable(ifp, IFCAP_HWCSUM);
2742 		if (mask & IFCAP_TSO4)
2743 			if_togglecapenable(ifp, IFCAP_TSO4);
2744 		if (mask & IFCAP_TSO6)
2745 			if_togglecapenable(ifp, IFCAP_TSO6);
2746 		if (mask & IFCAP_VLAN_HWTAGGING)
2747 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2748 		if (mask & IFCAP_VLAN_HWTSO)
2749 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2750 		if (mask & IFCAP_LRO)
2751 			if_togglecapenable(ifp, IFCAP_LRO);
2752 
2753 		QLNX_LOCK(ha);
2754 
2755 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2756 			qlnx_init_locked(ha);
2757 
2758 		QLNX_UNLOCK(ha);
2759 
2760 		VLAN_CAPABILITIES(ifp);
2761 		break;
2762 
2763 	case SIOCGI2C:
2764 	{
2765 		struct ifi2creq i2c;
2766 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2767 		struct ecore_ptt *p_ptt;
2768 
2769 		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2770 
2771 		if (ret)
2772 			break;
2773 
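		/*
		 * Accept only the standard SFP two-wire addresses:
		 * 0xA0 (serial ID EEPROM) and 0xA2 (diagnostics), per
		 * SFF-8472.
		 */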
2774 		if ((i2c.len > sizeof (i2c.data)) ||
2775 			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2776 			ret = EINVAL;
2777 			break;
2778 		}
2779 
2780 		p_ptt = ecore_ptt_acquire(p_hwfn);
2781 
2782 		if (!p_ptt) {
2783 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2784 			ret = -1;
2785 			break;
2786 		}
2787 
2788 		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2789 			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2790 			i2c.len, &i2c.data[0]);
2791 
2792 		ecore_ptt_release(p_hwfn, p_ptt);
2793 
2794 		if (ret) {
2795 			ret = -1;
2796 			break;
2797 		}
2798 
2799 		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2800 
2801 		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d"
2802 			" len = %d addr = 0x%02x offset = 0x%04x"
2803 			" data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
2804 			" 0x%02x 0x%02x 0x%02x\n",
2805 			ret, i2c.len, i2c.dev_addr, i2c.offset,
2806 			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2807 			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2808 		break;
2809 	}
2810 
2811 	default:
2812 		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2813 		ret = ether_ioctl(ifp, cmd, data);
2814 		break;
2815 	}
2816 
2817 	return (ret);
2818 }
2819 
2820 static int
2821 qlnx_media_change(if_t ifp)
2822 {
2823 	qlnx_host_t	*ha;
2824 	struct ifmedia	*ifm;
2825 	int		ret = 0;
2826 
2827 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2828 
2829 	QL_DPRINT2(ha, "enter\n");
2830 
2831 	ifm = &ha->media;
2832 
2833 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2834 		ret = EINVAL;
2835 
2836 	QL_DPRINT2(ha, "exit\n");
2837 
2838 	return (ret);
2839 }
2840 
2841 static void
2842 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2843 {
2844 	qlnx_host_t		*ha;
2845 
2846 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2847 
2848 	QL_DPRINT2(ha, "enter\n");
2849 
2850 	ifmr->ifm_status = IFM_AVALID;
2851 	ifmr->ifm_active = IFM_ETHER;
2852 
2853 	if (ha->link_up) {
2854 		ifmr->ifm_status |= IFM_ACTIVE;
2855 		ifmr->ifm_active |=
2856 			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2857 
2858 		if (ha->if_link.link_partner_caps &
2859 			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2860 			ifmr->ifm_active |=
2861 				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2862 	}
2863 
2864 	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2865 
2866 	return;
2867 }
2868 
2869 static void
2870 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2871 	struct qlnx_tx_queue *txq)
2872 {
2873 	u16			idx;
2874 	struct mbuf		*mp;
2875 	bus_dmamap_t		map;
2876 	int			i;
2877 //	struct eth_tx_bd	*tx_data_bd;
2878 	struct eth_tx_1st_bd	*first_bd;
2879 	int			nbds = 0;
2880 
2881 	idx = txq->sw_tx_cons;
2882 	mp = txq->sw_tx_ring[idx].mp;
2883 	map = txq->sw_tx_ring[idx].map;
2884 
2885 	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2886 		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2887 
2888 		QL_DPRINT1(ha, "(mp == NULL) "
2889 			" tx_idx = 0x%x"
2890 			" ecore_prod_idx = 0x%x"
2891 			" ecore_cons_idx = 0x%x"
2892 			" hw_bd_cons = 0x%x"
2893 			" txq_db_last = 0x%x"
2894 			" elem_left = 0x%x\n",
2895 			fp->rss_id,
2896 			ecore_chain_get_prod_idx(&txq->tx_pbl),
2897 			ecore_chain_get_cons_idx(&txq->tx_pbl),
2898 			le16toh(*txq->hw_cons_ptr),
2899 			txq->tx_db.raw,
2900 			ecore_chain_get_elem_left(&txq->tx_pbl));
2901 
2902 		fp->err_tx_free_pkt_null++;
2903 
2904 		//DEBUG
2905 		qlnx_trigger_dump(ha);
2906 
2907 		return;
2908 	} else {
2909 		QLNX_INC_OPACKETS((ha->ifp));
2910 		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2911 
2912 		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2913 		bus_dmamap_unload(ha->tx_tag, map);
2914 
2915 		fp->tx_pkts_freed++;
2916 		fp->tx_pkts_completed++;
2917 
2918 		m_freem(mp);
2919 	}
2920 
2921 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2922 	nbds = first_bd->data.nbds;
2923 
2924 //	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2925 
2926 	for (i = 1; i < nbds; i++) {
2927 		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2928 //		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2929 	}
2930 	txq->sw_tx_ring[idx].flags = 0;
2931 	txq->sw_tx_ring[idx].mp = NULL;
2932 	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2933 
2934 	return;
2935 }
2936 
2937 static void
2938 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2939 	struct qlnx_tx_queue *txq)
2940 {
2941 	u16 hw_bd_cons;
2942 	u16 ecore_cons_idx;
2943 	uint16_t diff;
2944 	uint16_t idx, idx2;
2945 
2946 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2947 
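	/*
	 * Both consumer indices are 16 bits wide; the diff computation
	 * below accounts for the hardware consumer wrapping ahead of the
	 * chain's consumer index.
	 */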
2948 	while (hw_bd_cons !=
2949 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2950 		if (hw_bd_cons < ecore_cons_idx) {
2951 			diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2952 		} else {
2953 			diff = hw_bd_cons - ecore_cons_idx;
2954 		}
2955 		if ((diff > TX_RING_SIZE) ||
2956 			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2957 			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2958 
2959 			QL_DPRINT1(ha, "(diff = 0x%x) "
2960 				" tx_idx = 0x%x"
2961 				" ecore_prod_idx = 0x%x"
2962 				" ecore_cons_idx = 0x%x"
2963 				" hw_bd_cons = 0x%x"
2964 				" txq_db_last = 0x%x"
2965 				" elem_left = 0x%x\n",
2966 				diff,
2967 				fp->rss_id,
2968 				ecore_chain_get_prod_idx(&txq->tx_pbl),
2969 				ecore_chain_get_cons_idx(&txq->tx_pbl),
2970 				le16toh(*txq->hw_cons_ptr),
2971 				txq->tx_db.raw,
2972 				ecore_chain_get_elem_left(&txq->tx_pbl));
2973 
2974 			fp->err_tx_cons_idx_conflict++;
2975 
2976 			//DEBUG
2977 			qlnx_trigger_dump(ha);
2978 		}
2979 
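		/*
		 * Compute the next two sw ring indices and prefetch their
		 * mbufs so they are warm in the cache when freed.
		 */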
2980 		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2981 		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2982 		prefetch(txq->sw_tx_ring[idx].mp);
2983 		prefetch(txq->sw_tx_ring[idx2].mp);
2984 
2985 		qlnx_free_tx_pkt(ha, fp, txq);
2986 
2987 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2988 	}
2989 	return;
2990 }
2991 
2992 static int
2993 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
2994 {
2995         int                     ret = 0;
2996         struct qlnx_tx_queue    *txq;
2997         qlnx_host_t *           ha;
2998         uint16_t elem_left;
2999 
3000         txq = fp->txq[0];
3001         ha = (qlnx_host_t *)fp->edev;
3002 
3003         if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3004                 if(mp != NULL)
3005                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3006                 return (ret);
3007         }
3008 
3009         if(mp != NULL)
3010                 ret  = drbr_enqueue(ifp, fp->tx_br, mp);
3011 
3012         mp = drbr_peek(ifp, fp->tx_br);
3013 
3014         while (mp != NULL) {
3015                 if (qlnx_send(ha, fp, &mp)) {
3016                         if (mp != NULL) {
3017                                 drbr_putback(ifp, fp->tx_br, mp);
3018                         } else {
3019                                 fp->tx_pkts_processed++;
3020                                 drbr_advance(ifp, fp->tx_br);
3021                         }
3022                         goto qlnx_transmit_locked_exit;
3023 
3024                 } else {
3025                         drbr_advance(ifp, fp->tx_br);
3026                         fp->tx_pkts_transmitted++;
3027                         fp->tx_pkts_processed++;
3028                 }
3029 
3030                 mp = drbr_peek(ifp, fp->tx_br);
3031         }
3032 
3033 qlnx_transmit_locked_exit:
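        /*
         * On the way out, reap TX completions if enough are pending or
         * the BD chain is running low on free elements.
         */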
3034         if ((qlnx_num_tx_compl(ha, fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3035                 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3036                                         < QLNX_TX_ELEM_MAX_THRESH))
3037                 qlnx_tx_int(ha, fp, fp->txq[0]);
3038 
3039         QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3040         return ret;
3041 }
3042 
3043 static int
3044 qlnx_transmit(if_t ifp, struct mbuf  *mp)
3045 {
3046         qlnx_host_t		*ha = (qlnx_host_t *)if_getsoftc(ifp);
3047         struct qlnx_fastpath	*fp;
3048         int			rss_id = 0, ret = 0;
3049 
3050 #ifdef QLNX_TRACEPERF_DATA
3051         uint64_t tx_pkts = 0, tx_compl = 0;
3052 #endif
3053 
3054         QL_DPRINT2(ha, "enter\n");
3055 
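        /*
         * Map the mbuf flowid to a fastpath: fold it into the RSS
         * indirection table range first, then into the number of RSS
         * queues actually configured.
         */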
3056         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3057                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3058 					ha->num_rss;
3059 
3060         fp = &ha->fp_array[rss_id];
3061 
3062         if (fp->tx_br == NULL) {
3063                 ret = EINVAL;
3064                 goto qlnx_transmit_exit;
3065         }
3066 
3067         if (mtx_trylock(&fp->tx_mtx)) {
3068 #ifdef QLNX_TRACEPERF_DATA
3069                         tx_pkts = fp->tx_pkts_transmitted;
3070                         tx_compl = fp->tx_pkts_completed;
3071 #endif
3072 
3073                         ret = qlnx_transmit_locked(ifp, fp, mp);
3074 
3075 #ifdef QLNX_TRACEPERF_DATA
3076                         fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3077                         fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3078 #endif
3079                         mtx_unlock(&fp->tx_mtx);
3080         } else {
3081                 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3082                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3083                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3084                 }
3085         }
3086 
3087 qlnx_transmit_exit:
3088 
3089         QL_DPRINT2(ha, "exit ret = %d\n", ret);
3090         return ret;
3091 }
3092 
3093 static void
3094 qlnx_qflush(if_t ifp)
3095 {
3096 	int			rss_id;
3097 	struct qlnx_fastpath	*fp;
3098 	struct mbuf		*mp;
3099 	qlnx_host_t		*ha;
3100 
3101 	ha = (qlnx_host_t *)if_getsoftc(ifp);
3102 
3103 	QL_DPRINT2(ha, "enter\n");
3104 
3105 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3106 		fp = &ha->fp_array[rss_id];
3107 
3108 		if (fp == NULL)
3109 			continue;
3110 
3111 		if (fp->tx_br) {
3112 			mtx_lock(&fp->tx_mtx);
3113 
3114 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3115 				fp->tx_pkts_freed++;
3116 				m_freem(mp);
3117 			}
3118 			mtx_unlock(&fp->tx_mtx);
3119 		}
3120 	}
3121 	QL_DPRINT2(ha, "exit\n");
3122 
3123 	return;
3124 }
3125 
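/*
 * Write a TX doorbell. The read barriers on both the register and
 * doorbell BARs appear intended to flush the posted write before the
 * caller proceeds.
 */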
3126 static void
3127 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3128 {
3129 	uint32_t		offset;
3130 
3131 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3132 
3133 	bus_write_4(ha->pci_dbells, offset, value);
3134 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
3135 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3136 
3137 	return;
3138 }
3139 
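/*
 * Return the offset of the end of the TCP header (L2 + L3 + L4 header
 * lengths) for a TSO frame. Note this assumes an IPv4 header without
 * options, since sizeof(struct ip) is used as the header length.
 */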
3140 static uint32_t
3141 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3142 {
3143         struct ether_vlan_header	*eh = NULL;
3144         struct ip			*ip = NULL;
3145         struct ip6_hdr			*ip6 = NULL;
3146         struct tcphdr			*th = NULL;
3147         uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
3148         uint16_t			etype = 0;
3149         uint8_t				buf[sizeof(struct ip6_hdr) + sizeof(struct tcphdr)];
3150 
3151         eh = mtod(mp, struct ether_vlan_header *);
3152 
3153         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3154                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3155                 etype = ntohs(eh->evl_proto);
3156         } else {
3157                 ehdrlen = ETHER_HDR_LEN;
3158                 etype = ntohs(eh->evl_encap_proto);
3159         }
3160 
3161         switch (etype) {
3162                 case ETHERTYPE_IP:
3163                         ip = (struct ip *)(mp->m_data + ehdrlen);
3164 
3165                         ip_hlen = sizeof (struct ip);
3166 
3167                         if (mp->m_len < (ehdrlen + ip_hlen + sizeof(*th))) {
3168                                 m_copydata(mp, ehdrlen, ip_hlen + sizeof(*th), buf);
3169                                 ip = (struct ip *)buf;
3170                         }
3171 
3172                         th = (struct tcphdr *)(ip + 1);
3173 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3174                 break;
3175 
3176                 case ETHERTYPE_IPV6:
3177                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3178 
3179                         ip_hlen = sizeof(struct ip6_hdr);
3180 
3181                         if (mp->m_len < (ehdrlen + ip_hlen + sizeof(*th))) {
3182                                 m_copydata(mp, ehdrlen, ip_hlen + sizeof(*th),
3183                                         buf);
3184                                 ip6 = (struct ip6_hdr *)buf;
3185                         }
3186                         th = (struct tcphdr *)(ip6 + 1);
3187 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3188                 break;
3189 
3190                 default:
3191                 break;
3192         }
3193 
3194         return (offset);
3195 }
3196 
3197 static __inline int
3198 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3199 	uint32_t offset)
3200 {
3201 	int			i;
3202 	uint32_t		sum, nbds_in_hdr = 1;
3203         uint32_t		window;
3204         bus_dma_segment_t	*s_seg;
3205 
3206         /* If the header spans multiple segments, skip those segments */
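        /*
         * The LSO engine appears to require that any window of
         * ETH_TX_LSO_WINDOW_BDS_NUM consecutive BDs cover at least
         * ETH_TX_LSO_WINDOW_MIN_LEN bytes; returning -1 makes the caller
         * m_defrag() the chain into fewer, larger segments.
         */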
3207 
3208         if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3209                 return (0);
3210 
3211         i = 0;
3212 
3213         while ((i < nsegs) && (offset >= segs->ds_len)) {
3214                 offset = offset - segs->ds_len;
3215                 segs++;
3216                 i++;
3217                 nbds_in_hdr++;
3218         }
3219 
3220         window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3221 
3222         nsegs = nsegs - i;
3223 
3224         while (nsegs >= window) {
3225                 sum = 0;
3226                 s_seg = segs;
3227 
3228                 for (i = 0; i < window; i++){
3229                         sum += s_seg->ds_len;
3230                         s_seg++;
3231                 }
3232 
3233                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3234                         fp->tx_lso_wnd_min_len++;
3235                         return (-1);
3236                 }
3237 
3238                 nsegs = nsegs - 1;
3239                 segs++;
3240         }
3241 
3242 	return (0);
3243 }
3244 
3245 static int
3246 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3247 {
3248 	bus_dma_segment_t	*segs;
3249 	bus_dmamap_t		map = 0;
3250 	uint32_t		nsegs = 0;
3251 	int			ret = -1;
3252 	struct mbuf		*m_head = *m_headp;
3253 	uint16_t		idx = 0;
3254 	uint16_t		elem_left;
3255 
3256 	uint8_t			nbd = 0;
3257 	struct qlnx_tx_queue    *txq;
3258 
3259 	struct eth_tx_1st_bd    *first_bd;
3260 	struct eth_tx_2nd_bd    *second_bd;
3261 	struct eth_tx_3rd_bd    *third_bd;
3262 	struct eth_tx_bd        *tx_data_bd;
3263 
3264 	int			seg_idx = 0;
3265 	uint32_t		nbds_in_hdr = 0;
3266 	uint32_t		offset = 0;
3267 
3268 #ifdef QLNX_TRACE_PERF_DATA
3269         uint16_t                bd_used;
3270 #endif
3271 
3272 	QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3273 
3274 	if (!ha->link_up)
3275 		return (-1);
3276 
3277 	first_bd	= NULL;
3278 	second_bd	= NULL;
3279 	third_bd	= NULL;
3280 	tx_data_bd	= NULL;
3281 
3282 	txq = fp->txq[0];
3283 
3284         if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3285 		QLNX_TX_ELEM_MIN_THRESH) {
3286                 fp->tx_nsegs_gt_elem_left++;
3287                 fp->err_tx_nsegs_gt_elem_left++;
3288 
3289                 return (ENOBUFS);
3290         }
3291 
3292 	idx = txq->sw_tx_prod;
3293 
3294 	map = txq->sw_tx_ring[idx].map;
3295 	segs = txq->segs;
3296 
3297 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3298 			BUS_DMA_NOWAIT);
3299 
3300 	if (ha->dbg_trace_tso_pkt_len) {
3301 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3302 			if (!fp->tx_tso_min_pkt_len) {
3303 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3304 				fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3305 			} else {
3306 				if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3307 					fp->tx_tso_min_pkt_len =
3308 						m_head->m_pkthdr.len;
3309 				if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3310 					fp->tx_tso_max_pkt_len =
3311 						m_head->m_pkthdr.len;
3312 			}
3313 		}
3314 	}
3315 
3316 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3317 		offset = qlnx_tcp_offset(ha, m_head);
3318 
3319 	if ((ret == EFBIG) ||
3320 		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3321 			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3322 		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3323 			qlnx_tso_check(fp, segs, nsegs, offset))))) {
3324 		struct mbuf *m;
3325 
3326 		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3327 
3328 		fp->tx_defrag++;
3329 
3330 		m = m_defrag(m_head, M_NOWAIT);
3331 		if (m == NULL) {
3332 			fp->err_tx_defrag++;
3333 			fp->tx_pkts_freed++;
3334 			m_freem(m_head);
3335 			*m_headp = NULL;
3336 			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3337 			return (ENOBUFS);
3338 		}
3339 
3340 		m_head = m;
3341 		*m_headp = m_head;
3342 
3343 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3344 				segs, &nsegs, BUS_DMA_NOWAIT))) {
3345 			fp->err_tx_defrag_dmamap_load++;
3346 
3347 			QL_DPRINT1(ha,
3348 				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3349 				ret, m_head->m_pkthdr.len);
3350 
3351 			fp->tx_pkts_freed++;
3352 			m_freem(m_head);
3353 			*m_headp = NULL;
3354 
3355 			return (ret);
3356 		}
3357 
3358 		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3359 			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3360 			fp->err_tx_non_tso_max_seg++;
3361 
3362 			QL_DPRINT1(ha,
3363 				"(%d) nsegs too many for non-TSO [%d, %d]\n",
3364 				ret, nsegs, m_head->m_pkthdr.len);
3365 
3366 			fp->tx_pkts_freed++;
3367 			m_freem(m_head);
3368 			*m_headp = NULL;
3369 
3370 			return (ret);
3371 		}
3372 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3373 			offset = qlnx_tcp_offset(ha, m_head);
3374 
3375 	} else if (ret) {
3376 		fp->err_tx_dmamap_load++;
3377 
3378 		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3379 			   ret, m_head->m_pkthdr.len);
3380 		fp->tx_pkts_freed++;
3381 		m_freem(m_head);
3382 		*m_headp = NULL;
3383 		return (ret);
3384 	}
3385 
3386 	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3387 
3388 	if (ha->dbg_trace_tso_pkt_len) {
3389 		if (nsegs < QLNX_FP_MAX_SEGS)
3390 			fp->tx_pkts[(nsegs - 1)]++;
3391 		else
3392 			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3393 	}
3394 
3395 #ifdef QLNX_TRACE_PERF_DATA
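        /* Histogram of TSO frame lengths in 4KB-wide buckets. */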
3396         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3397                 if(m_head->m_pkthdr.len <= 2048)
3398                         fp->tx_pkts_hist[0]++;
3399                 else if((m_head->m_pkthdr.len > 2048) &&
3400 				(m_head->m_pkthdr.len <= 4096))
3401                         fp->tx_pkts_hist[1]++;
3402                 else if((m_head->m_pkthdr.len > 4096) &&
3403 				(m_head->m_pkthdr.len <= 8192))
3404                         fp->tx_pkts_hist[2]++;
3405                 else if((m_head->m_pkthdr.len > 8192) &&
3406 				(m_head->m_pkthdr.len <= 12288 ))
3407                         fp->tx_pkts_hist[3]++;
3408                 else if((m_head->m_pkthdr.len > 12288) &&
3409 				(m_head->m_pkthdr.len <= 16384))
3410                         fp->tx_pkts_hist[4]++;
3411                 else if((m_head->m_pkthdr.len > 16384) &&
3412 				(m_head->m_pkthdr.len <= 20480))
3413                         fp->tx_pkts_hist[5]++;
3414                 else if((m_head->m_pkthdr.len > 20480) &&
3415 				(m_head->m_pkthdr.len <= 24576))
3416                         fp->tx_pkts_hist[6]++;
3417                 else if((m_head->m_pkthdr.len > 24576) &&
3418 				(m_head->m_pkthdr.len <= 28672))
3419                         fp->tx_pkts_hist[7]++;
3420                 else if((m_head->m_pkthdr.len > 28672) &&
3421 				(m_head->m_pkthdr.len <= 32768))
3422                         fp->tx_pkts_hist[8]++;
3423                 else if((m_head->m_pkthdr.len > 32768) &&
3424 				(m_head->m_pkthdr.len <= 36864))
3425                         fp->tx_pkts_hist[9]++;
3426                 else if((m_head->m_pkthdr.len > 36864) &&
3427 				(m_head->m_pkthdr.len <= 40960))
3428                         fp->tx_pkts_hist[10]++;
3429                 else if((m_head->m_pkthdr.len > 40960) &&
3430 				(m_head->m_pkthdr.len <= 45056))
3431                         fp->tx_pkts_hist[11]++;
3432                 else if((m_head->m_pkthdr.len > 45056) &&
3433 				(m_head->m_pkthdr.len <= 49152))
3434                         fp->tx_pkts_hist[12]++;
3435                 else if((m_head->m_pkthdr.len > 49152) &&
3436 				(m_head->m_pkthdr.len <= 53248))
3437                         fp->tx_pkts_hist[13]++;
3438                 else if((m_head->m_pkthdr.len > 53248) &&
3439 				(m_head->m_pkthdr.len <= 57344))
3440                         fp->tx_pkts_hist[14]++;
3441                 else if((m_head->m_pkthdr.len > 57344) &&
3442 				(m_head->m_pkthdr.len <= 61440))
3443                         fp->tx_pkts_hist[15]++;
3444                 else if((m_head->m_pkthdr.len > 61440) &&
3445 				(m_head->m_pkthdr.len <= 65536))
3446                         fp->tx_pkts_hist[16]++;
3447                 else
3448                         fp->tx_pkts_hist[17]++;
3449         }
3450 
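        /* Histogram of TX BD ring occupancy sampled at TSO enqueue time. */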
3451         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3452                 elem_left =  ecore_chain_get_elem_left(&txq->tx_pbl);
3453                 bd_used = TX_RING_SIZE - elem_left;
3454 
3455                 if(bd_used <= 100)
3456                         fp->tx_pkts_q[0]++;
3457                 else if((bd_used > 100) && (bd_used <= 500))
3458                         fp->tx_pkts_q[1]++;
3459                 else if((bd_used > 500) && (bd_used <= 1000))
3460                         fp->tx_pkts_q[2]++;
3461                 else if((bd_used > 1000) && (bd_used <= 2000))
3462                         fp->tx_pkts_q[3]++;
3463                 else if((bd_used > 2000) && (bd_used <= 4000))
3464                         fp->tx_pkts_q[4]++;
3465                 else if((bd_used > 4000) && (bd_used <= 5000))
3466                         fp->tx_pkts_q[5]++;
3467                 else if((bd_used > 5000) && (bd_used <= 7000))
3468                         fp->tx_pkts_q[6]++;
3469                 else if((bd_used > 7000) && (bd_used <= 8000))
3470                         fp->tx_pkts_q[7]++;
3471                 else if((bd_used > 8000) && (bd_used <= 9000))
3472                         fp->tx_pkts_q[8]++;
3473                 else if((bd_used > 9000) && (bd_used <= 10000))
3474                         fp->tx_pkts_q[9]++;
3475                 else if((bd_used > 10000) && (bd_used <= 11000))
3476                         fp->tx_pkts_q[10]++;
3477                 else if((bd_used > 11000) && (bd_used <= 12000))
3478                         fp->tx_pkts_q[11]++;
3479                 else if((bd_used > 12000) && (bd_used <= 13000))
3480                         fp->tx_pkts_q[12]++;
3481                 else if((bd_used > 13000) && (bd_used <= 14000))
3482                         fp->tx_pkts_q[13]++;
3483                 else if((bd_used > 14000) && (bd_used <= 15000))
3484                         fp->tx_pkts_q[14]++;
3485                 else if((bd_used > 15000) && (bd_used <= 16000))
3486                         fp->tx_pkts_q[15]++;
3487                 else
3488                         fp->tx_pkts_q[16]++;
3489         }
3490 
3491 #endif /* end of QLNX_TRACE_PERF_DATA */
3492 
3493 	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3494 		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3495 		QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
3496 			" in chain[%d] trying to free packets\n",
3497 			nsegs, elem_left, fp->rss_id);
3498 
3499 		fp->tx_nsegs_gt_elem_left++;
3500 
3501 		(void)qlnx_tx_int(ha, fp, txq);
3502 
3503 		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3504 			ecore_chain_get_elem_left(&txq->tx_pbl))) {
3505 			QL_DPRINT1(ha,
3506 				"(%d, 0x%x) insufficient BDs in chain[%d]\n",
3507 				nsegs, elem_left, fp->rss_id);
3508 
3509 			fp->err_tx_nsegs_gt_elem_left++;
3510 			fp->tx_ring_full = 1;
3511 			if (ha->storm_stats_enable)
3512 				ha->storm_stats_gather = 1;
3513 			return (ENOBUFS);
3514 		}
3515 	}
3516 
3517 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3518 
3519 	txq->sw_tx_ring[idx].mp = m_head;
3520 
3521 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3522 
3523 	memset(first_bd, 0, sizeof(*first_bd));
3524 
3525 	first_bd->data.bd_flags.bitfields =
3526 		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3527 
3528 	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3529 
3530 	nbd++;
3531 
3532 	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3533 		first_bd->data.bd_flags.bitfields |=
3534 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3535 	}
3536 
3537 	if (m_head->m_pkthdr.csum_flags &
3538 		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3539 		first_bd->data.bd_flags.bitfields |=
3540 			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3541 	}
3542 
3543         if (m_head->m_flags & M_VLANTAG) {
3544                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3545 		first_bd->data.bd_flags.bitfields |=
3546 			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3547         }
3548 
3549 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3550                 first_bd->data.bd_flags.bitfields |=
3551 			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3552 		first_bd->data.bd_flags.bitfields |=
3553 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3554 
3555 		nbds_in_hdr = 1;
3556 
3557 		if (offset == segs->ds_len) {
3558 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3559 			segs++;
3560 			seg_idx++;
3561 
3562 			second_bd = (struct eth_tx_2nd_bd *)
3563 					ecore_chain_produce(&txq->tx_pbl);
3564 			memset(second_bd, 0, sizeof(*second_bd));
3565 			nbd++;
3566 
3567 			if (seg_idx < nsegs) {
3568 				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3569 					(segs->ds_addr), (segs->ds_len));
3570 				segs++;
3571 				seg_idx++;
3572 			}
3573 
3574 			third_bd = (struct eth_tx_3rd_bd *)
3575 					ecore_chain_produce(&txq->tx_pbl);
3576 			memset(third_bd, 0, sizeof(*third_bd));
3577 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3578 			third_bd->data.bitfields |=
3579 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3580 			nbd++;
3581 
3582 			if (seg_idx < nsegs) {
3583 				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3584 					(segs->ds_addr), (segs->ds_len));
3585 				segs++;
3586 				seg_idx++;
3587 			}
3588 
3589 			for (; seg_idx < nsegs; seg_idx++) {
3590 				tx_data_bd = (struct eth_tx_bd *)
3591 					ecore_chain_produce(&txq->tx_pbl);
3592 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3593 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3594 					segs->ds_addr,\
3595 					segs->ds_len);
3596 				segs++;
3597 				nbd++;
3598 			}
3599 
3600 		} else if (offset < segs->ds_len) {
3601 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3602 
3603 			second_bd = (struct eth_tx_2nd_bd *)
3604 					ecore_chain_produce(&txq->tx_pbl);
3605 			memset(second_bd, 0, sizeof(*second_bd));
3606 			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3607 				(segs->ds_addr + offset),\
3608 				(segs->ds_len - offset));
3609 			nbd++;
3610 			segs++;
3611 
3612 			third_bd = (struct eth_tx_3rd_bd *)
3613 					ecore_chain_produce(&txq->tx_pbl);
3614 			memset(third_bd, 0, sizeof(*third_bd));
3615 
3616 			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3617 					segs->ds_addr,\
3618 					segs->ds_len);
3619 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3620 			third_bd->data.bitfields |=
3621 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3622 			segs++;
3623 			nbd++;
3624 
3625 			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3626 				tx_data_bd = (struct eth_tx_bd *)
3627 					ecore_chain_produce(&txq->tx_pbl);
3628 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3629 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3630 					segs->ds_addr,\
3631 					segs->ds_len);
3632 				segs++;
3633 				nbd++;
3634 			}
3635 
3636 		} else {
3637 			offset = offset - segs->ds_len;
3638 			segs++;
3639 
3640 			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3641 				if (offset)
3642 					nbds_in_hdr++;
3643 
3644 				tx_data_bd = (struct eth_tx_bd *)
3645 					ecore_chain_produce(&txq->tx_pbl);
3646 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3647 
3648 				if (second_bd == NULL) {
3649 					second_bd = (struct eth_tx_2nd_bd *)
3650 								tx_data_bd;
3651 				} else if (third_bd == NULL) {
3652 					third_bd = (struct eth_tx_3rd_bd *)
3653 								tx_data_bd;
3654 				}
3655 
3656 				if (offset && (offset < segs->ds_len)) {
3657 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3658 						segs->ds_addr, offset);
3659 
3660 					tx_data_bd = (struct eth_tx_bd *)
3661 					ecore_chain_produce(&txq->tx_pbl);
3662 
3663 					memset(tx_data_bd, 0,
3664 						sizeof(*tx_data_bd));
3665 
3666 					if (second_bd == NULL) {
3667 						second_bd =
3668 					(struct eth_tx_2nd_bd *)tx_data_bd;
3669 					} else if (third_bd == NULL) {
3670 						third_bd =
3671 					(struct eth_tx_3rd_bd *)tx_data_bd;
3672 					}
3673 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3674 						(segs->ds_addr + offset), \
3675 						(segs->ds_len - offset));
3676 					nbd++;
3677 					offset = 0;
3678 				} else {
3679 					if (offset)
3680 						offset = offset - segs->ds_len;
3681 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3682 						segs->ds_addr, segs->ds_len);
3683 				}
3684 				segs++;
3685 				nbd++;
3686 			}
3687 
3688 			if (third_bd == NULL) {
3689 				third_bd = (struct eth_tx_3rd_bd *)
3690 					ecore_chain_produce(&txq->tx_pbl);
3691 				memset(third_bd, 0, sizeof(*third_bd));
3692 			}
3693 
3694 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3695 			third_bd->data.bitfields |=
3696 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3697 		}
3698 		fp->tx_tso_pkts++;
3699 	} else {
3700 		segs++;
3701 		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3702 			tx_data_bd = (struct eth_tx_bd *)
3703 					ecore_chain_produce(&txq->tx_pbl);
3704 			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3705 			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3706 				segs->ds_len);
3707 			segs++;
3708 			nbd++;
3709 		}
3710 		first_bd->data.bitfields =
3711 			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3712 				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3713 		first_bd->data.bitfields =
3714 			htole16(first_bd->data.bitfields);
3715 		fp->tx_non_tso_pkts++;
3716 	}
3717 
3718 	first_bd->data.nbds = nbd;
3719 
3720 	if (ha->dbg_trace_tso_pkt_len) {
3721 		if (fp->tx_tso_max_nsegs < nsegs)
3722 			fp->tx_tso_max_nsegs = nsegs;
3723 
3724 		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3725 			fp->tx_tso_min_nsegs = nsegs;
3726 	}
3727 
3728 	txq->sw_tx_ring[idx].nsegs = nsegs;
3729 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3730 
3731 	txq->tx_db.data.bd_prod =
3732 		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3733 
3734 	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3735 
3736 	QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3737 	return (0);
3738 }
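
/*
 * Editorial summary of the BD layout built above (field names are those
 * of the eth_tx_*_bd structures used in this function):
 *
 *   first_bd  - START_BD flag, IP/L4 checksum and LSO flags, the VLAN
 *               tag, and (for TSO) only the packet-header bytes.
 *   second_bd/
 *   third_bd  - always produced for a TSO packet; third_bd carries
 *               lso_mss and the header BD count (nbds_in_hdr).
 *   eth_tx_bd - one per remaining DMA segment.
 *
 * first_bd->data.nbds totals every BD consumed. On ENOBUFS the caller
 * is expected to retry after qlnx_tx_int() has freed completed BDs.
 */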
3739 
3740 static void
3741 qlnx_stop(qlnx_host_t *ha)
3742 {
3743 	if_t		ifp = ha->ifp;
3744 	int		i;
3745 
3746 	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3747 
3748 	/*
3749 	 * We simply lock and unlock each fp->tx_mtx to propagate the
3750 	 * if_drv_flags state to every tx thread (see the sketch after
3751 	 * this function).
3752 	 */
3753         QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
3754 
3755 	if (ha->state == QLNX_STATE_OPEN) {
3756         	for (i = 0; i < ha->num_rss; i++) {
3757 			struct qlnx_fastpath *fp = &ha->fp_array[i];
3758 
3759 			mtx_lock(&fp->tx_mtx);
3760 			mtx_unlock(&fp->tx_mtx);
3761 
3762 			if (fp->fp_taskqueue != NULL)
3763 				taskqueue_enqueue(fp->fp_taskqueue,
3764 					&fp->fp_task);
3765 		}
3766 	}
3767 #ifdef QLNX_ENABLE_IWARP
3768 	if (qlnx_vf_device(ha) != 0) {
3769 		qlnx_rdma_dev_close(ha);
3770 	}
3771 #endif /* #ifdef QLNX_ENABLE_IWARP */
3772 
3773 	qlnx_unload(ha);
3774 
3775 	return;
3776 }
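
/*
 * The empty lock/unlock pass above is a handoff barrier rather than a
 * no-op: any tx thread still inside its fp->tx_mtx critical section
 * drains out before qlnx_stop() proceeds, and whoever takes the mutex
 * afterwards sees the cleared IFF_DRV_RUNNING. A minimal sketch of the
 * consumer side (illustrative only; the real transmit path lives
 * elsewhere in this file):
 */
#if 0
	mtx_lock(&fp->tx_mtx);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		/* interface is stopping; drop/requeue and bail out */
		mtx_unlock(&fp->tx_mtx);
		return (ENETDOWN);
	}
	/* ...queue the mbuf and ring the tx doorbell... */
	mtx_unlock(&fp->tx_mtx);
#endif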
3777 
3778 static int
3779 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3780 {
3781         return(TX_RING_SIZE - 1);
3782 }
3783 
3784 uint8_t *
3785 qlnx_get_mac_addr(qlnx_host_t *ha)
3786 {
3787 	struct ecore_hwfn	*p_hwfn;
3788 	unsigned char mac[ETHER_ADDR_LEN];
3789 	uint8_t			p_is_forced;
3790 
3791 	p_hwfn = &ha->cdev.hwfns[0];
3792 
3793 	if (qlnx_vf_device(ha) != 0)
3794 		return (p_hwfn->hw_info.hw_mac_addr);
3795 
3796 	ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3797 	if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3798 		true) {
3799 		device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3800 			" mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3801 			p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3802         	memcpy(ha->primary_mac, mac, ETH_ALEN);
3803 	}
3804 
3805 	return (ha->primary_mac);
3806 }
3807 
3808 static uint32_t
3809 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3810 {
3811 	uint32_t	ifm_type = 0;
3812 
3813 	switch (if_link->media_type) {
3814 	case MEDIA_MODULE_FIBER:
3815 	case MEDIA_UNSPECIFIED:
3816 		if (if_link->speed == (100 * 1000))
3817 			ifm_type = QLNX_IFM_100G_SR4;
3818 		else if (if_link->speed == (40 * 1000))
3819 			ifm_type = IFM_40G_SR4;
3820 		else if (if_link->speed == (25 * 1000))
3821 			ifm_type = QLNX_IFM_25G_SR;
3822 		else if (if_link->speed == (10 * 1000))
3823 			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3824 		else if (if_link->speed == (1 * 1000))
3825 			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3826 
3827 		break;
3828 
3829 	case MEDIA_DA_TWINAX:
3830 		if (if_link->speed == (100 * 1000))
3831 			ifm_type = QLNX_IFM_100G_CR4;
3832 		else if (if_link->speed == (40 * 1000))
3833 			ifm_type = IFM_40G_CR4;
3834 		else if (if_link->speed == (25 * 1000))
3835 			ifm_type = QLNX_IFM_25G_CR;
3836 		else if (if_link->speed == (10 * 1000))
3837 			ifm_type = IFM_10G_TWINAX;
3838 
3839 		break;
3840 
3841 	default:
3842 		ifm_type = IFM_UNKNOWN;
3843 		break;
3844 	}
3845 	return (ifm_type);
3846 }
3847 
3848 /*****************************************************************************
3849  * Interrupt Service Functions
3850  *****************************************************************************/
3851 
3852 static int
3853 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3854 	struct mbuf *mp_head, uint16_t len)
3855 {
3856 	struct mbuf		*mp, *mpf, *mpl;
3857 	struct sw_rx_data	*sw_rx_data;
3858 	struct qlnx_rx_queue	*rxq;
3859 	uint16_t 		len_in_buffer;
3860 
3861 	rxq = fp->rxq;
3862 	mpf = mpl = mp = NULL;
3863 
3864 	while (len) {
3865         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3866 
3867                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3868                 mp = sw_rx_data->data;
3869 
3870 		if (mp == NULL) {
3871                 	QL_DPRINT1(ha, "mp = NULL\n");
3872 			fp->err_rx_mp_null++;
3873         		rxq->sw_rx_cons  =
3874 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3875 
3876 			if (mpf != NULL)
3877 				m_freem(mpf);
3878 
3879 			return (-1);
3880 		}
3881 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3882 			BUS_DMASYNC_POSTREAD);
3883 
3884                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3885                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3886 				" incoming packet and reusing its buffer\n");
3887 
3888                         qlnx_reuse_rx_data(rxq);
3889                         fp->err_rx_alloc_errors++;
3890 
3891 			if (mpf != NULL)
3892 				m_freem(mpf);
3893 
3894 			return (-1);
3895 		}
3896                 ecore_chain_consume(&rxq->rx_bd_ring);
3897 
3898 		if (len > rxq->rx_buf_size)
3899 			len_in_buffer = rxq->rx_buf_size;
3900 		else
3901 			len_in_buffer = len;
3902 
3903 		len = len - len_in_buffer;
3904 
3905 		mp->m_flags &= ~M_PKTHDR;
3906 		mp->m_next = NULL;
3907 		mp->m_len = len_in_buffer;
3908 
3909 		if (mpf == NULL)
3910 			mpf = mpl = mp;
3911 		else {
3912 			mpl->m_next = mp;
3913 			mpl = mp;
3914 		}
3915 	}
3916 
3917 	if (mpf != NULL)
3918 		mp_head->m_next = mpf;
3919 
3920 	return (0);
3921 }
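
/*
 * Note: qlnx_rx_jumbo_chain() stitches the continuation BDs of an
 * oversized frame into an mbuf chain. Each continuation mbuf loses
 * M_PKTHDR, carries at most rxq->rx_buf_size bytes, and the finished
 * list is hung off mp_head->m_next; on any failure the partial chain
 * is freed and -1 tells the caller to drop the whole packet.
 */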
3922 
3923 static void
3924 qlnx_tpa_start(qlnx_host_t *ha,
3925 	struct qlnx_fastpath *fp,
3926 	struct qlnx_rx_queue *rxq,
3927 	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3928 {
3929 	uint32_t		agg_index;
3930         if_t ifp = ha->ifp;
3931 	struct mbuf		*mp;
3932 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3933 	struct sw_rx_data	*sw_rx_data;
3934 	dma_addr_t		addr;
3935 	bus_dmamap_t		map;
3936 	struct eth_rx_bd	*rx_bd;
3937 	int			i;
3938 	uint8_t			hash_type;
3939 
3940 	agg_index = cqe->tpa_agg_index;
3941 
3942         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3943                 \t type = 0x%x\n \
3944                 \t bitfields = 0x%x\n \
3945                 \t seg_len = 0x%x\n \
3946                 \t pars_flags = 0x%x\n \
3947                 \t vlan_tag = 0x%x\n \
3948                 \t rss_hash = 0x%x\n \
3949                 \t len_on_first_bd = 0x%x\n \
3950                 \t placement_offset = 0x%x\n \
3951                 \t tpa_agg_index = 0x%x\n \
3952                 \t header_len = 0x%x\n \
3953                 \t ext_bd_len_list[0] = 0x%x\n \
3954                 \t ext_bd_len_list[1] = 0x%x\n \
3955                 \t ext_bd_len_list[2] = 0x%x\n \
3956                 \t ext_bd_len_list[3] = 0x%x\n \
3957                 \t ext_bd_len_list[4] = 0x%x\n",
3958                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3959                 cqe->pars_flags.flags, cqe->vlan_tag,
3960                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3961                 cqe->tpa_agg_index, cqe->header_len,
3962                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3963                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3964                 cqe->ext_bd_len_list[4]);
3965 
3966 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3967 		fp->err_rx_tpa_invalid_agg_num++;
3968 		return;
3969 	}
3970 
3971 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3972 	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3973 	mp = sw_rx_data->data;
3974 
3975 	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3976 
3977 	if (mp == NULL) {
3978                	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3979 		fp->err_rx_mp_null++;
3980        		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3981 
3982 		return;
3983 	}
3984 
3985 	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3986 		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3987 			" flags = %x, dropping incoming packet\n", fp->rss_id,
3988 			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3989 
3990 		fp->err_rx_hw_errors++;
3991 
3992 		qlnx_reuse_rx_data(rxq);
3993 
3994 		QLNX_INC_IERRORS(ifp);
3995 
3996 		return;
3997 	}
3998 
3999 	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4000 		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4001 			" dropping incoming packet and reusing its buffer\n",
4002 			fp->rss_id);
4003 
4004 		fp->err_rx_alloc_errors++;
4005 		QLNX_INC_IQDROPS(ifp);
4006 
4007 		/*
4008 		 * Load the tpa mbuf into the rx ring and save the
4009 		 * posted mbuf
4010 		 */
4011 
4012 		map = sw_rx_data->map;
4013 		addr = sw_rx_data->dma_addr;
4014 
4015 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4016 
4017 		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4018 		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4019 		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4020 
4021 		rxq->tpa_info[agg_index].rx_buf.data = mp;
4022 		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4023 		rxq->tpa_info[agg_index].rx_buf.map = map;
4024 
4025 		rx_bd = (struct eth_rx_bd *)
4026 				ecore_chain_produce(&rxq->rx_bd_ring);
4027 
4028 		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4029 		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4030 
4031 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4032 			BUS_DMASYNC_PREREAD);
4033 
4034 		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4035 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4036 
4037 		ecore_chain_consume(&rxq->rx_bd_ring);
4038 
4039 		/* Now reuse any buffers posted in ext_bd_len_list */
4040 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4041 			if (cqe->ext_bd_len_list[i] == 0)
4042 				break;
4043 
4044 			qlnx_reuse_rx_data(rxq);
4045 		}
4046 
4047 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4048 		return;
4049 	}
4050 
4051 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4052 		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4053 			" dropping incoming packet and reusing its buffer\n",
4054 			fp->rss_id);
4055 
4056 		QLNX_INC_IQDROPS(ifp);
4057 
4058 		/* if we already have mbuf head in aggregation free it */
4059 		if (rxq->tpa_info[agg_index].mpf) {
4060 			m_freem(rxq->tpa_info[agg_index].mpf);
4061 			rxq->tpa_info[agg_index].mpl = NULL;
4062 		}
4063 		rxq->tpa_info[agg_index].mpf = mp;
4064 		rxq->tpa_info[agg_index].mpl = NULL;
4065 
4066 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4067 		ecore_chain_consume(&rxq->rx_bd_ring);
4068 
4069 		/* Now reuse any buffers posted in ext_bd_len_list */
4070 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4071 			if (cqe->ext_bd_len_list[i] == 0)
4072 				break;
4073 
4074 			qlnx_reuse_rx_data(rxq);
4075 		}
4076 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4077 
4078 		return;
4079 	}
4080 
4081 	/*
4082 	 * first process the ext_bd_len_list
4083 	 * if this fails then we simply drop the packet
4084 	 */
4085 	ecore_chain_consume(&rxq->rx_bd_ring);
4086 	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4087 
4088 	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4089 		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4090 
4091 		if (cqe->ext_bd_len_list[i] == 0)
4092 			break;
4093 
4094 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4095 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4096 			BUS_DMASYNC_POSTREAD);
4097 
4098 		mpc = sw_rx_data->data;
4099 
4100 		if (mpc == NULL) {
4101 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4102 			fp->err_rx_mp_null++;
4103 			if (mpf != NULL)
4104 				m_freem(mpf);
4105 			mpf = mpl = NULL;
4106 			rxq->tpa_info[agg_index].agg_state =
4107 						QLNX_AGG_STATE_ERROR;
4108 			ecore_chain_consume(&rxq->rx_bd_ring);
4109 			rxq->sw_rx_cons =
4110 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4111 			continue;
4112 		}
4113 
4114 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4115 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4116 				" dropping incoming packet and reusing its"
4117 				" buffer\n", fp->rss_id);
4118 
4119 			qlnx_reuse_rx_data(rxq);
4120 
4121 			if (mpf != NULL)
4122 				m_freem(mpf);
4123 			mpf = mpl = NULL;
4124 
4125 			rxq->tpa_info[agg_index].agg_state =
4126 						QLNX_AGG_STATE_ERROR;
4127 
4128 			ecore_chain_consume(&rxq->rx_bd_ring);
4129 			rxq->sw_rx_cons =
4130 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4131 
4132 			continue;
4133 		}
4134 
4135 		mpc->m_flags &= ~M_PKTHDR;
4136 		mpc->m_next = NULL;
4137 		mpc->m_len = cqe->ext_bd_len_list[i];
4138 
4139 		if (mpf == NULL) {
4140 			mpf = mpl = mpc;
4141 		} else {
4142 			mpl->m_len = ha->rx_buf_size;
4143 			mpl->m_next = mpc;
4144 			mpl = mpc;
4145 		}
4146 
4147 		ecore_chain_consume(&rxq->rx_bd_ring);
4148 		rxq->sw_rx_cons =
4149 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4150 	}
4151 
4152 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4153 		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4154 			" incoming packet and reusing its buffer\n",
4155 			fp->rss_id);
4156 
4157 		QLNX_INC_IQDROPS(ifp);
4158 
4159 		rxq->tpa_info[agg_index].mpf = mp;
4160 		rxq->tpa_info[agg_index].mpl = NULL;
4161 
4162 		return;
4163 	}
4164 
4165         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4166 
4167         if (mpf != NULL) {
4168                 mp->m_len = ha->rx_buf_size;
4169                 mp->m_next = mpf;
4170                 rxq->tpa_info[agg_index].mpf = mp;
4171                 rxq->tpa_info[agg_index].mpl = mpl;
4172         } else {
4173                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4174                 rxq->tpa_info[agg_index].mpf = mp;
4175                 rxq->tpa_info[agg_index].mpl = mp;
4176                 mp->m_next = NULL;
4177         }
4178 
4179 	mp->m_flags |= M_PKTHDR;
4180 
4181 	/* assign the packet to this interface */
4182 	mp->m_pkthdr.rcvif = ifp;
4183 
4184 	/* assume no hardware checksum has completed */
4185 	mp->m_pkthdr.csum_flags = 0;
4186 
4187 	//mp->m_pkthdr.flowid = fp->rss_id;
4188 	mp->m_pkthdr.flowid = cqe->rss_hash;
4189 
4190 	hash_type = cqe->bitfields &
4191 			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4192 			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4193 
4194 	switch (hash_type) {
4195 	case RSS_HASH_TYPE_IPV4:
4196 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4197 		break;
4198 
4199 	case RSS_HASH_TYPE_TCP_IPV4:
4200 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4201 		break;
4202 
4203 	case RSS_HASH_TYPE_IPV6:
4204 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4205 		break;
4206 
4207 	case RSS_HASH_TYPE_TCP_IPV6:
4208 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4209 		break;
4210 
4211 	default:
4212 		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4213 		break;
4214 	}
4215 
4216 	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4217 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4218 
4219 	mp->m_pkthdr.csum_data = 0xFFFF;
4220 
4221 	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4222 		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4223 		mp->m_flags |= M_VLANTAG;
4224 	}
4225 
4226 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4227 
4228         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4229 		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4230                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4231 
4232 	return;
4233 }
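
/*
 * Per-aggregation TPA state machine, as implemented by qlnx_tpa_start(),
 * qlnx_tpa_cont() and qlnx_tpa_end() (an editorial summary):
 *
 *   NONE --TPA_START--> START --TPA_CONT--> START --TPA_END--> NONE
 *
 * Any buffer-allocation or CQE error moves the aggregation to ERROR;
 * while in ERROR the CONT/END handlers only recycle ring buffers, and
 * TPA_END frees the partial mbuf chain and resets the state to NONE.
 */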
4234 
4235 static void
4236 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4237 	struct qlnx_rx_queue *rxq,
4238 	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4239 {
4240 	struct sw_rx_data	*sw_rx_data;
4241 	int			i;
4242 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4243 	struct mbuf		*mp;
4244 	uint32_t		agg_index;
4245 
4246         QL_DPRINT7(ha, "[%d]: enter\n \
4247                 \t type = 0x%x\n \
4248                 \t tpa_agg_index = 0x%x\n \
4249                 \t len_list[0] = 0x%x\n \
4250                 \t len_list[1] = 0x%x\n \
4251                 \t len_list[2] = 0x%x\n \
4252                 \t len_list[3] = 0x%x\n \
4253                 \t len_list[4] = 0x%x\n \
4254                 \t len_list[5] = 0x%x\n",
4255                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4256                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4257                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4258 
4259 	agg_index = cqe->tpa_agg_index;
4260 
4261 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4262 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4263 		fp->err_rx_tpa_invalid_agg_num++;
4264 		return;
4265 	}
4266 
4267 	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4268 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4269 
4270 		if (cqe->len_list[i] == 0)
4271 			break;
4272 
4273 		if (rxq->tpa_info[agg_index].agg_state !=
4274 			QLNX_AGG_STATE_START) {
4275 			qlnx_reuse_rx_data(rxq);
4276 			continue;
4277 		}
4278 
4279 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4280 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4281 			BUS_DMASYNC_POSTREAD);
4282 
4283 		mpc = sw_rx_data->data;
4284 
4285 		if (mpc == NULL) {
4286 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4287 
4288 			fp->err_rx_mp_null++;
4289 			if (mpf != NULL)
4290 				m_freem(mpf);
4291 			mpf = mpl = NULL;
4292 			rxq->tpa_info[agg_index].agg_state =
4293 						QLNX_AGG_STATE_ERROR;
4294 			ecore_chain_consume(&rxq->rx_bd_ring);
4295 			rxq->sw_rx_cons =
4296 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4297 			continue;
4298 		}
4299 
4300 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4301 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4302 				" dropping incoming packet and reusing its"
4303 				" buffer\n", fp->rss_id);
4304 
4305 			qlnx_reuse_rx_data(rxq);
4306 
4307 			if (mpf != NULL)
4308 				m_freem(mpf);
4309 			mpf = mpl = NULL;
4310 
4311 			rxq->tpa_info[agg_index].agg_state =
4312 						QLNX_AGG_STATE_ERROR;
4313 
4314 			ecore_chain_consume(&rxq->rx_bd_ring);
4315 			rxq->sw_rx_cons =
4316 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4317 
4318 			continue;
4319 		}
4320 
4321 		mpc->m_flags &= ~M_PKTHDR;
4322 		mpc->m_next = NULL;
4323 		mpc->m_len = cqe->len_list[i];
4324 
4325 		if (mpf == NULL) {
4326 			mpf = mpl = mpc;
4327 		} else {
4328 			mpl->m_len = ha->rx_buf_size;
4329 			mpl->m_next = mpc;
4330 			mpl = mpc;
4331 		}
4332 
4333 		ecore_chain_consume(&rxq->rx_bd_ring);
4334 		rxq->sw_rx_cons =
4335 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4336 	}
4337 
4338         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4339                   fp->rss_id, mpf, mpl);
4340 
4341 	if (mpf != NULL) {
4342 		mp = rxq->tpa_info[agg_index].mpl;
4343 		mp->m_len = ha->rx_buf_size;
4344 		mp->m_next = mpf;
4345 		rxq->tpa_info[agg_index].mpl = mpl;
4346 	}
4347 
4348 	return;
4349 }
4350 
4351 static int
4352 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4353 	struct qlnx_rx_queue *rxq,
4354 	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4355 {
4356 	struct sw_rx_data	*sw_rx_data;
4357 	int			i;
4358 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4359 	struct mbuf		*mp;
4360 	uint32_t		agg_index;
4361 	uint32_t		len = 0;
4362         if_t ifp = ha->ifp;
4363 
4364         QL_DPRINT7(ha, "[%d]: enter\n \
4365                 \t type = 0x%x\n \
4366                 \t tpa_agg_index = 0x%x\n \
4367                 \t total_packet_len = 0x%x\n \
4368                 \t num_of_bds = 0x%x\n \
4369                 \t end_reason = 0x%x\n \
4370                 \t num_of_coalesced_segs = 0x%x\n \
4371                 \t ts_delta = 0x%x\n \
4372                 \t len_list[0] = 0x%x\n \
4373                 \t len_list[1] = 0x%x\n \
4374                 \t len_list[2] = 0x%x\n \
4375                 \t len_list[3] = 0x%x\n",
4376                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
4377                 cqe->total_packet_len, cqe->num_of_bds,
4378                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4379                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4380                 cqe->len_list[3]);
4381 
4382 	agg_index = cqe->tpa_agg_index;
4383 
4384 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4385 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4386 
4387 		fp->err_rx_tpa_invalid_agg_num++;
4388 		return (0);
4389 	}
4390 
4391 	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4392 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4393 
4394 		if (cqe->len_list[i] == 0)
4395 			break;
4396 
4397 		if (rxq->tpa_info[agg_index].agg_state !=
4398 			QLNX_AGG_STATE_START) {
4399 			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4400 
4401 			qlnx_reuse_rx_data(rxq);
4402 			continue;
4403 		}
4404 
4405 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4406 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4407 			BUS_DMASYNC_POSTREAD);
4408 
4409 		mpc = sw_rx_data->data;
4410 
4411 		if (mpc == NULL) {
4412 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4413 
4414 			fp->err_rx_mp_null++;
4415 			if (mpf != NULL)
4416 				m_freem(mpf);
4417 			mpf = mpl = NULL;
4418 			rxq->tpa_info[agg_index].agg_state =
4419 						QLNX_AGG_STATE_ERROR;
4420 			ecore_chain_consume(&rxq->rx_bd_ring);
4421 			rxq->sw_rx_cons =
4422 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4423 			continue;
4424 		}
4425 
4426 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4427 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4428 				" dropping incoming packet and reusing its"
4429 				" buffer\n", fp->rss_id);
4430 
4431 			qlnx_reuse_rx_data(rxq);
4432 
4433 			if (mpf != NULL)
4434 				m_freem(mpf);
4435 			mpf = mpl = NULL;
4436 
4437 			rxq->tpa_info[agg_index].agg_state =
4438 						QLNX_AGG_STATE_ERROR;
4439 
4440 			ecore_chain_consume(&rxq->rx_bd_ring);
4441 			rxq->sw_rx_cons =
4442 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4443 
4444 			continue;
4445 		}
4446 
4447 		mpc->m_flags &= ~M_PKTHDR;
4448 		mpc->m_next = NULL;
4449 		mpc->m_len = cqe->len_list[i];
4450 
4451 		if (mpf == NULL) {
4452 			mpf = mpl = mpc;
4453 		} else {
4454 			mpl->m_len = ha->rx_buf_size;
4455 			mpl->m_next = mpc;
4456 			mpl = mpc;
4457 		}
4458 
4459 		ecore_chain_consume(&rxq->rx_bd_ring);
4460 		rxq->sw_rx_cons =
4461 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4462 	}
4463 
4464 	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4465 
4466 	if (mpf != NULL) {
4467 		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4468 
4469 		mp = rxq->tpa_info[agg_index].mpl;
4470 		mp->m_len = ha->rx_buf_size;
4471 		mp->m_next = mpf;
4472 	}
4473 
4474 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4475 		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4476 
4477 		if (rxq->tpa_info[agg_index].mpf != NULL)
4478 			m_freem(rxq->tpa_info[agg_index].mpf);
4479 		rxq->tpa_info[agg_index].mpf = NULL;
4480 		rxq->tpa_info[agg_index].mpl = NULL;
4481 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4482 		return (0);
4483 	}
4484 
4485 	mp = rxq->tpa_info[agg_index].mpf;
4486 	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4487 	mp->m_pkthdr.len = cqe->total_packet_len;
4488 
4489 	if (mp->m_next == NULL)
4490 		mp->m_len = mp->m_pkthdr.len;
4491 	else {
4492 		/* compute the total packet length */
4493 		mpf = mp;
4494 		while (mpf != NULL) {
4495 			len += mpf->m_len;
4496 			mpf = mpf->m_next;
4497 		}
4498 
4499 		if (cqe->total_packet_len > len) {
4500 			mpl = rxq->tpa_info[agg_index].mpl;
4501 			mpl->m_len += (cqe->total_packet_len - len);
4502 		}
4503 	}
4504 
4505 	QLNX_INC_IPACKETS(ifp);
4506 	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4507 
4508         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIx64 "\n \
4509 		m_len = 0x%x m_pkthdr_len = 0x%x\n",
4510                 fp->rss_id, mp->m_pkthdr.csum_data,
4511                 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4512 
4513 	if_input(ifp, mp);
4514 
4515 	rxq->tpa_info[agg_index].mpf = NULL;
4516 	rxq->tpa_info[agg_index].mpl = NULL;
4517 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4518 
4519 	return (cqe->num_of_coalesced_segs);
4520 }
4521 
4522 static int
4523 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4524 	int lro_enable)
4525 {
4526         uint16_t		hw_comp_cons, sw_comp_cons;
4527         int			rx_pkt = 0;
4528         struct qlnx_rx_queue	*rxq = fp->rxq;
4529         if_t ifp = ha->ifp;
4530 	struct ecore_dev	*cdev = &ha->cdev;
4531 	struct ecore_hwfn       *p_hwfn;
4532 
4533 #ifdef QLNX_SOFT_LRO
4534 	struct lro_ctrl		*lro;
4535 
4536 	lro = &rxq->lro;
4537 #endif /* #ifdef QLNX_SOFT_LRO */
4538 
4539         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4540         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4541 
4542 	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4543 
4544         /* Memory barrier to prevent the CPU from speculatively reading the
4545          * CQE/BD in the while-loop below before reading hw_comp_cons. If FW
4546          * writes the CQE and the SB between that speculative read and the
4547          * read of hw_comp_cons, the CPU would operate on a stale CQE.
4548          */
4549 
4550         /* Loop to complete all indicated BDs */
4551         while (sw_comp_cons != hw_comp_cons) {
4552                 union eth_rx_cqe		*cqe;
4553                 struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4554                 struct sw_rx_data		*sw_rx_data;
4555 		register struct mbuf		*mp;
4556                 enum eth_rx_cqe_type		cqe_type;
4557                 uint16_t			len, pad, len_on_first_bd;
4558                 uint8_t				*data;
4559 		uint8_t				hash_type;
4560 
4561                 /* Get the CQE from the completion ring */
4562                 cqe = (union eth_rx_cqe *)
4563                         ecore_chain_consume(&rxq->rx_comp_ring);
4564                 cqe_type = cqe->fast_path_regular.type;
4565 
4566                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4567                         QL_DPRINT3(ha, "Got a slowpath CQE\n");
4568 
4569                         ecore_eth_cqe_completion(p_hwfn,
4570                                         (struct eth_slow_path_rx_cqe *)cqe);
4571                         goto next_cqe;
4572                 }
4573 
4574 		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4575 			switch (cqe_type) {
4576 			case ETH_RX_CQE_TYPE_TPA_START:
4577 				qlnx_tpa_start(ha, fp, rxq,
4578 					&cqe->fast_path_tpa_start);
4579 				fp->tpa_start++;
4580 				break;
4581 
4582 			case ETH_RX_CQE_TYPE_TPA_CONT:
4583 				qlnx_tpa_cont(ha, fp, rxq,
4584 					&cqe->fast_path_tpa_cont);
4585 				fp->tpa_cont++;
4586 				break;
4587 
4588 			case ETH_RX_CQE_TYPE_TPA_END:
4589 				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4590 						&cqe->fast_path_tpa_end);
4591 				fp->tpa_end++;
4592 				break;
4593 
4594 			default:
4595 				break;
4596 			}
4597 
4598                         goto next_cqe;
4599 		}
4600 
4601                 /* Get the data from the SW ring */
4602                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4603                 mp = sw_rx_data->data;
4604 
4605 		if (mp == NULL) {
4606                 	QL_DPRINT1(ha, "mp = NULL\n");
4607 			fp->err_rx_mp_null++;
4608         		rxq->sw_rx_cons  =
4609 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4610 			goto next_cqe;
4611 		}
4612 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4613 			BUS_DMASYNC_POSTREAD);
4614 
4615                 /* non GRO */
4616                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4617                 len =  le16toh(fp_cqe->pkt_len);
4618                 pad = fp_cqe->placement_offset;
4619 #if 0
4620 		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4621 			" len %u, parsing flags = %d pad  = %d\n",
4622 			cqe_type, fp_cqe->bitfields,
4623 			le16toh(fp_cqe->vlan_tag),
4624 			len, le16toh(fp_cqe->pars_flags.flags), pad);
4625 #endif
4626 		data = mtod(mp, uint8_t *);
4627 		data = data + pad;
4628 
4629 		if (0)
4630 			qlnx_dump_buf8(ha, __func__, data, len);
4631 
4632                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4633                  * is always with a fixed size. If allocation fails, we take the
4634                  * consumed BD and return it to the ring in the PROD position.
4635                  * The packet that was received on that BD will be dropped (and
4636                  * not passed to the upper stack).
4637                  */
4638 		/* If this is an error packet then drop it */
4639 		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4640 			CQE_FLAGS_ERR) {
4641 			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4642 				" dropping incoming packet\n", sw_comp_cons,
4643 			le16toh(cqe->fast_path_regular.pars_flags.flags));
4644 			fp->err_rx_hw_errors++;
4645 
4646                         qlnx_reuse_rx_data(rxq);
4647 
4648 			QLNX_INC_IERRORS(ifp);
4649 
4650 			goto next_cqe;
4651 		}
4652 
4653                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4654                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4655 				" incoming packet and reusing its buffer\n");
4656                         qlnx_reuse_rx_data(rxq);
4657 
4658                         fp->err_rx_alloc_errors++;
4659 
4660 			QLNX_INC_IQDROPS(ifp);
4661 
4662                         goto next_cqe;
4663                 }
4664 
4665                 ecore_chain_consume(&rxq->rx_bd_ring);
4666 
4667 		len_on_first_bd = fp_cqe->len_on_first_bd;
4668 		m_adj(mp, pad);
4669 		mp->m_pkthdr.len = len;
4670 
4671 		if ((len > 60 ) && (len > len_on_first_bd)) {
4672 			mp->m_len = len_on_first_bd;
4673 
4674 			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4675 				(len - len_on_first_bd)) != 0) {
4676 				m_freem(mp);
4677 
4678 				QLNX_INC_IQDROPS(ifp);
4679 
4680                         	goto next_cqe;
4681 			}
4682 
4683 		} else if (len_on_first_bd < len) {
4684 			fp->err_rx_jumbo_chain_pkts++;
4685 		} else {
4686 			mp->m_len = len;
4687 		}
4688 
4689 		mp->m_flags |= M_PKTHDR;
4690 
4691 		/* assign the packet to this interface */
4692 		mp->m_pkthdr.rcvif = ifp;
4693 
4694 		/* assume no hardware checksum has completed */
4695 		mp->m_pkthdr.csum_flags = 0;
4696 
4697 		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4698 
4699 		hash_type = fp_cqe->bitfields &
4700 				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4701 				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4702 
4703 		switch (hash_type) {
4704 		case RSS_HASH_TYPE_IPV4:
4705 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4706 			break;
4707 
4708 		case RSS_HASH_TYPE_TCP_IPV4:
4709 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4710 			break;
4711 
4712 		case RSS_HASH_TYPE_IPV6:
4713 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4714 			break;
4715 
4716 		case RSS_HASH_TYPE_TCP_IPV6:
4717 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4718 			break;
4719 
4720 		default:
4721 			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4722 			break;
4723 		}
4724 
4725 		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4726 			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4727 		}
4728 
4729 		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4730 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4731 		}
4732 
4733 		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4734 			mp->m_pkthdr.csum_data = 0xFFFF;
4735 			mp->m_pkthdr.csum_flags |=
4736 				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4737 		}
4738 
4739 		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4740 			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4741 			mp->m_flags |= M_VLANTAG;
4742 		}
4743 
4744 		QLNX_INC_IPACKETS(ifp);
4745 		QLNX_INC_IBYTES(ifp, len);
4746 
4747 #ifdef QLNX_SOFT_LRO
4748 		if (lro_enable)
4749 			tcp_lro_queue_mbuf(lro, mp);
4750 		else
4751 			if_input(ifp, mp);
4752 #else
4753 
4754 		if_input(ifp, mp);
4755 
4756 #endif /* #ifdef QLNX_SOFT_LRO */
4757 
4758                 rx_pkt++;
4759 
4760         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4761 
4762 next_cqe:	/* don't consume bd rx buffer */
4763                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4764                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4765 
4766 		/* CR TPA - revisit how to handle budget in TPA perhaps
4767 		   increase on "end" */
4768                 if (rx_pkt == budget)
4769                         break;
4770         } /* repeat while sw_comp_cons != hw_comp_cons... */
4771 
4772         /* Update producers */
4773         qlnx_update_rx_prod(p_hwfn, rxq);
4774 
4775         return rx_pkt;
4776 }
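
/*
 * Budget note: each regular completion counts one against 'budget' and
 * TPA_END adds the number of coalesced segments, so qlnx_rx_int()
 * indicates at most roughly 'budget' packets per call; TPA_START and
 * TPA_CONT CQEs are processed for free (see the "CR TPA" remark above).
 * qlnx_fp_isr() keeps calling until 0 is returned.
 */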
4777 
4778 /*
4779  * fast path interrupt
4780  */
4781 
4782 static void
4783 qlnx_fp_isr(void *arg)
4784 {
4785         qlnx_ivec_t		*ivec = arg;
4786         qlnx_host_t		*ha;
4787         struct qlnx_fastpath	*fp = NULL;
4788         int			idx;
4789 
4790         ha = ivec->ha;
4791 
4792         if (ha->state != QLNX_STATE_OPEN) {
4793                 return;
4794         }
4795 
4796         idx = ivec->rss_idx;
4797 
4798         if (idx >= ha->num_rss) {
4799                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4800                 ha->err_illegal_intr++;
4801                 return;
4802         }
4803         fp = &ha->fp_array[idx];
4804 
4805         if (fp == NULL) {
4806                 ha->err_fp_null++;
4807         } else {
4808 		int			rx_int = 0;
4809 #ifdef QLNX_SOFT_LRO
4810 		int			total_rx_count = 0;
4811 #endif
4812 		int 			lro_enable, tc;
4813 		struct qlnx_tx_queue	*txq;
4814 		uint16_t		elem_left;
4815 
4816 		lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4817 
4818                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4819 
4820                 do {
4821                         for (tc = 0; tc < ha->num_tc; tc++) {
4822 				txq = fp->txq[tc];
4823 
4824 				if((int)(elem_left =
4825 					ecore_chain_get_elem_left(&txq->tx_pbl)) <
4826 						QLNX_TX_ELEM_THRESH)  {
4827                                 	if (mtx_trylock(&fp->tx_mtx)) {
4828 #ifdef QLNX_TRACE_PERF_DATA
4829 						uint64_t tx_compl = fp->tx_pkts_completed;
4830 #endif
4831 
4832 						qlnx_tx_int(ha, fp, fp->txq[tc]);
4833 #ifdef QLNX_TRACE_PERF_DATA
4834 						fp->tx_pkts_compl_intr +=
4835 							(fp->tx_pkts_completed - tx_compl);
4836 						if ((fp->tx_pkts_completed - tx_compl) <= 32)
4837 							fp->tx_comInt[0]++;
4838 						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4839 							((fp->tx_pkts_completed - tx_compl) <= 64))
4840 							fp->tx_comInt[1]++;
4841 						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4842 							((fp->tx_pkts_completed - tx_compl) <= 128))
4843 							fp->tx_comInt[2]++;
4844 						else if(((fp->tx_pkts_completed - tx_compl) > 128))
4845 							fp->tx_comInt[3]++;
4846 #endif
4847 						mtx_unlock(&fp->tx_mtx);
4848 					}
4849 				}
4850                         }
4851 
4852                         rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4853                                         lro_enable);
4854 
4855                         if (rx_int) {
4856                                 fp->rx_pkts += rx_int;
4857 #ifdef QLNX_SOFT_LRO
4858                                 total_rx_count += rx_int;
4859 #endif
4860                         }
4861 
4862                 } while (rx_int);
4863 
4864 #ifdef QLNX_SOFT_LRO
4865                 {
4866                         struct lro_ctrl *lro;
4867 
4868                         lro = &fp->rxq->lro;
4869 
4870                         if (lro_enable && total_rx_count) {
4871 
4872 #ifdef QLNX_TRACE_LRO_CNT
4873                                 if (lro->lro_mbuf_count & ~1023)
4874                                         fp->lro_cnt_1024++;
4875                                 else if (lro->lro_mbuf_count & ~511)
4876                                         fp->lro_cnt_512++;
4877                                 else if (lro->lro_mbuf_count & ~255)
4878                                         fp->lro_cnt_256++;
4879                                 else if (lro->lro_mbuf_count & ~127)
4880                                         fp->lro_cnt_128++;
4881                                 else if (lro->lro_mbuf_count & ~63)
4882                                         fp->lro_cnt_64++;
4883 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4884 
4885                                 tcp_lro_flush_all(lro);
4886                         }
4887                 }
4888 #endif /* #ifdef QLNX_SOFT_LRO */
4889 
4890                 ecore_sb_update_sb_idx(fp->sb_info);
4891                 rmb();
4892                 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4893         }
4894 
4895         return;
4896 }
4897 
4898 /*
4899  * slow path interrupt processing function
4900  * can be invoked in polled mode or in interrupt mode via taskqueue.
4901  */
4902 void
4903 qlnx_sp_isr(void *arg)
4904 {
4905 	struct ecore_hwfn	*p_hwfn;
4906 	qlnx_host_t		*ha;
4907 
4908 	p_hwfn = arg;
4909 
4910 	ha = (qlnx_host_t *)p_hwfn->p_dev;
4911 
4912 	ha->sp_interrupts++;
4913 
4914 	QL_DPRINT2(ha, "enter\n");
4915 
4916 	ecore_int_sp_dpc(p_hwfn);
4917 
4918 	QL_DPRINT2(ha, "exit\n");
4919 
4920 	return;
4921 }
4922 
4923 /*****************************************************************************
4924  * Support Functions for DMA'able Memory
4925  *****************************************************************************/
4926 
4927 static void
4928 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4929 {
4930         *((bus_addr_t *)arg) = 0;
4931 
4932         if (error) {
4933                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4934                 return;
4935         }
4936 
4937         *((bus_addr_t *)arg) = segs[0].ds_addr;
4938 
4939         return;
4940 }
4941 
4942 static int
4943 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4944 {
4945         int             ret = 0;
4946         bus_addr_t      b_addr;
4947 
4948         ret = bus_dma_tag_create(
4949                         ha->parent_tag,/* parent */
4950                         dma_buf->alignment,
4951                         ((bus_size_t)(1ULL << 32)),/* boundary */
4952                         BUS_SPACE_MAXADDR,      /* lowaddr */
4953                         BUS_SPACE_MAXADDR,      /* highaddr */
4954                         NULL, NULL,             /* filter, filterarg */
4955                         dma_buf->size,          /* maxsize */
4956                         1,                      /* nsegments */
4957                         dma_buf->size,          /* maxsegsize */
4958                         0,                      /* flags */
4959                         NULL, NULL,             /* lockfunc, lockarg */
4960                         &dma_buf->dma_tag);
4961 
4962         if (ret) {
4963                 QL_DPRINT1(ha, "could not create dma tag\n");
4964                 goto qlnx_alloc_dmabuf_exit;
4965         }
4966         ret = bus_dmamem_alloc(dma_buf->dma_tag,
4967                         (void **)&dma_buf->dma_b,
4968                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4969                         &dma_buf->dma_map);
4970         if (ret) {
4971                 bus_dma_tag_destroy(dma_buf->dma_tag);
4972                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4973                 goto qlnx_alloc_dmabuf_exit;
4974         }
4975 
4976         ret = bus_dmamap_load(dma_buf->dma_tag,
4977                         dma_buf->dma_map,
4978                         dma_buf->dma_b,
4979                         dma_buf->size,
4980                         qlnx_dmamap_callback,
4981                         &b_addr, BUS_DMA_NOWAIT);
4982 
4983         if (ret || !b_addr) {
4984                 bus_dma_tag_destroy(dma_buf->dma_tag);
4985                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4986                         dma_buf->dma_map);
4987                 ret = -1;
4988                 goto qlnx_alloc_dmabuf_exit;
4989         }
4990 
4991         dma_buf->dma_addr = b_addr;
4992 
4993 qlnx_alloc_dmabuf_exit:
4994 
4995         return ret;
4996 }
4997 
4998 static void
4999 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5000 {
5001 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5002         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5003         bus_dma_tag_destroy(dma_buf->dma_tag);
5004 	return;
5005 }
5006 
5007 void *
5008 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5009 {
5010 	qlnx_dma_t	dma_buf;
5011 	qlnx_dma_t	*dma_p;
5012 	qlnx_host_t	*ha __unused;
5013 
5014 	ha = (qlnx_host_t *)ecore_dev;
5015 
5016 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5017 
5018 	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5019 
5020 	dma_buf.size = size + PAGE_SIZE;
5021 	dma_buf.alignment = 8;
5022 
5023 	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5024 		return (NULL);
5025 	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5026 
5027 	*phys = dma_buf.dma_addr;
5028 
5029 	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5030 
5031 	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5032 
5033 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5034 		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5035 		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5036 
5037 	return (dma_buf.dma_b);
5038 }
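
/*
 * Layout of a coherent allocation: the bookkeeping needed to free the
 * buffer later (tag, map, bus address) is stashed in the extra page
 * allocated past the bytes handed back to the caller, so
 * qlnx_dma_free_coherent() can recover it from the virtual address and
 * size alone:
 *
 *   dma_b -----------> +----------------------------+
 *                      | size bytes, zeroed         | returned to caller
 *   dma_b + size ----> +----------------------------+
 *                      | qlnx_dma_t copy (within    | read back by
 *                      | the trailing PAGE_SIZE)    | qlnx_dma_free_coherent()
 *                      +----------------------------+
 */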
5039 
5040 void
5041 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5042 	uint32_t size)
5043 {
5044 	qlnx_dma_t dma_buf, *dma_p;
5045 	qlnx_host_t	*ha;
5046 
5047 	ha = (qlnx_host_t *)ecore_dev;
5048 
5049 	if (v_addr == NULL)
5050 		return;
5051 
5052 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5053 
5054 	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5055 
5056 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5057 		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5058 		dma_p->dma_b, (void *)dma_p->dma_addr, size);
5059 
5060 	dma_buf = *dma_p;
5061 
5062 	if (!ha->qlnxr_debug)
5063 		qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5064 	return;
5065 }
5066 
5067 static int
5068 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5069 {
5070         int             ret;
5071         device_t        dev;
5072 
5073         dev = ha->pci_dev;
5074 
5075         /*
5076          * Allocate parent DMA Tag
5077          */
5078         ret = bus_dma_tag_create(
5079                         bus_get_dma_tag(dev),   /* parent */
5080                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5081                         BUS_SPACE_MAXADDR,      /* lowaddr */
5082                         BUS_SPACE_MAXADDR,      /* highaddr */
5083                         NULL, NULL,             /* filter, filterarg */
5084                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5085                         0,                      /* nsegments */
5086                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5087                         0,                      /* flags */
5088                         NULL, NULL,             /* lockfunc, lockarg */
5089                         &ha->parent_tag);
5090 
5091         if (ret) {
5092                 QL_DPRINT1(ha, "could not create parent dma tag\n");
5093                 return (-1);
5094         }
5095 
5096         ha->flags.parent_tag = 1;
5097 
5098         return (0);
5099 }
5100 
5101 static void
5102 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5103 {
5104         if (ha->parent_tag != NULL) {
5105                 bus_dma_tag_destroy(ha->parent_tag);
5106 		ha->parent_tag = NULL;
5107         }
5108 	return;
5109 }
5110 
5111 static int
5112 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5113 {
5114         if (bus_dma_tag_create(NULL,    /* parent */
5115                 1, 0,    /* alignment, bounds */
5116                 BUS_SPACE_MAXADDR,       /* lowaddr */
5117                 BUS_SPACE_MAXADDR,       /* highaddr */
5118                 NULL, NULL,      /* filter, filterarg */
5119                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
5120                 QLNX_MAX_SEGMENTS,        /* nsegments */
5121                 QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
5122                 0,        /* flags */
5123                 NULL,    /* lockfunc */
5124                 NULL,    /* lockfuncarg */
5125                 &ha->tx_tag)) {
5126                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5127                 return (-1);
5128         }
5129 
5130 	return (0);
5131 }
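
/*
 * These tag parameters bound what the send path may hand to the DMA map
 * load: at most QLNX_MAX_SEGMENTS segments of up to QLNX_MAX_TX_MBUF_SIZE
 * bytes each, within a QLNX_MAX_TSO_FRAME_SIZE frame. Mbuf chains that
 * exceed the segment limit are presumably defragmented (m_defrag(9))
 * before the map is loaded.
 */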
5132 
5133 static void
5134 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5135 {
5136         if (ha->tx_tag != NULL) {
5137                 bus_dma_tag_destroy(ha->tx_tag);
5138 		ha->tx_tag = NULL;
5139         }
5140 	return;
5141 }
5142 
5143 static int
5144 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5145 {
5146         if (bus_dma_tag_create(NULL,    /* parent */
5147                         1, 0,    /* alignment, bounds */
5148                         BUS_SPACE_MAXADDR,       /* lowaddr */
5149                         BUS_SPACE_MAXADDR,       /* highaddr */
5150                         NULL, NULL,      /* filter, filterarg */
5151                         MJUM9BYTES,     /* maxsize */
5152                         1,        /* nsegments */
5153                         MJUM9BYTES,        /* maxsegsize */
5154                         0,        /* flags */
5155                         NULL,    /* lockfunc */
5156                         NULL,    /* lockfuncarg */
5157                         &ha->rx_tag)) {
5158                 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5159 
5160                 return (-1);
5161         }
5162 	return (0);
5163 }
5164 
5165 static void
5166 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5167 {
5168         if (ha->rx_tag != NULL) {
5169                 bus_dma_tag_destroy(ha->rx_tag);
5170 		ha->rx_tag = NULL;
5171         }
5172 	return;
5173 }
5174 
5175 /*********************************
5176  * Exported functions
5177  *********************************/
5178 uint32_t
5179 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5180 {
5181 	uint32_t bar_size;
5182 
5183 	bar_id = bar_id * 2;
5184 
5185 	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5186 				SYS_RES_MEMORY,
5187 				PCIR_BAR(bar_id));
5188 
5189 	return (bar_size);
5190 }
5191 
5192 uint32_t
5193 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5194 {
5195 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5196 				pci_reg, 1);
5197 	return 0;
5198 }
5199 
5200 uint32_t
5201 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5202 	uint16_t *reg_value)
5203 {
5204 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5205 				pci_reg, 2);
5206 	return 0;
5207 }
5208 
5209 uint32_t
5210 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5211 	uint32_t *reg_value)
5212 {
5213 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5214 				pci_reg, 4);
5215 	return 0;
5216 }
5217 
5218 void
5219 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5220 {
5221 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5222 		pci_reg, reg_value, 1);
5223 	return;
5224 }
5225 
5226 void
5227 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5228 	uint16_t reg_value)
5229 {
5230 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5231 		pci_reg, reg_value, 2);
5232 	return;
5233 }
5234 
5235 void
5236 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5237 	uint32_t reg_value)
5238 {
5239 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5240 		pci_reg, reg_value, 4);
5241 	return;
5242 }
5243 
5244 int
5245 qlnx_pci_find_capability(void *ecore_dev, int cap)
5246 {
5247 	int		reg;
5248 	qlnx_host_t	*ha;
5249 
5250 	ha = ecore_dev;
5251 
5252 	if (pci_find_cap(ha->pci_dev, cap, &reg) == 0)
5253 		return reg;
5254 	else {
5255 		QL_DPRINT1(ha, "pci_find_cap failed\n");
5256 		return 0;
5257 	}
5258 }
5259 
5260 int
5261 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5262 {
5263 	int		reg;
5264 	qlnx_host_t	*ha;
5265 
5266 	ha = ecore_dev;
5267 
5268 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5269 		return reg;
5270 	else {
5271 		QL_DPRINT1(ha, "pci_find_extcap failed\n");
5272 		return 0;
5273 	}
5274 }
5275 
5276 uint32_t
5277 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5278 {
5279 	uint32_t		data32;
5280 	struct ecore_hwfn	*p_hwfn;
5281 
5282 	p_hwfn = hwfn;
5283 
5284 	data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5285 			(bus_size_t)(p_hwfn->reg_offset + reg_addr));
5286 
5287 	return (data32);
5288 }
5289 
5290 void
5291 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5292 {
5293 	struct ecore_hwfn	*p_hwfn = hwfn;
5294 
5295 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5296 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5297 
5298 	return;
5299 }
5300 
5301 void
5302 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5303 {
5304 	struct ecore_hwfn	*p_hwfn = hwfn;
5305 
5306 	bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5307 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5308 	return;
5309 }
5310 
5311 void
5312 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5313 {
5314 	struct ecore_dev	*cdev;
5315 	struct ecore_hwfn	*p_hwfn;
5316 	uint32_t	offset;
5317 
5318 	p_hwfn = hwfn;
5319 
5320 	cdev = p_hwfn->p_dev;
5321 
5322 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5323 	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5324 
5325 	return;
5326 }
5327 
5328 void
5329 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5330 {
5331 	struct ecore_hwfn	*p_hwfn = hwfn;
5332 
5333 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5334 		(bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5335 
5336 	return;
5337 }
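
/*
 * Editor's note: the accessors above add the per-hwfn reg_offset/db_offset
 * so that, on CMT parts, both engines can share the single mapped register
 * and doorbell BARs. Worked example, assuming (hypothetically) that
 * hwfn 1's reg_offset is 0x800000:
 *
 *	qlnx_reg_rd32(hwfn1, 0x1234)
 *	    -> bus_read_4(ha->pci_reg, 0x800000 + 0x1234)
 */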
5338 
5339 uint32_t
5340 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5341 {
5342 	uint32_t		data32;
5343 	bus_size_t		offset;
5344 	struct ecore_dev	*cdev;
5345 
5346 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5347 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5348 
5349 	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5350 
5351 	return (data32);
5352 }
5353 
5354 void
5355 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5356 {
5357 	bus_size_t		offset;
5358 	struct ecore_dev	*cdev;
5359 
5360 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5361 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5362 
5363 	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5364 
5365 	return;
5366 }
5367 
5368 void
5369 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5370 {
5371 	bus_size_t		offset;
5372 	struct ecore_dev	*cdev;
5373 
5374 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5375 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5376 
5377 	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5378 	return;
5379 }
5380 
5381 void *
5382 qlnx_zalloc(uint32_t size)
5383 {
5384 	caddr_t	va;
5385 
5386 	/* M_ZERO: a separate bzero() would dereference NULL on failure */
5387 	va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
5388 	return ((void *)va);
5389 }
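
/*
 * Editor's note (illustrative): ecore is expected to reach this helper
 * through its OSAL allocation macros, so callers must tolerate a NULL
 * return -- the allocation is M_NOWAIT, presumably because it can be
 * reached from non-sleepable contexts. A hypothetical caller:
 */
#if 0
	struct ecore_sb_info *p_sb;

	p_sb = qlnx_zalloc(sizeof(struct ecore_sb_info));
	if (p_sb == NULL)
		return (ECORE_NOMEM);
#endif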
5390 
5391 void
5392 qlnx_barrier(void *p_hwfn)
5393 {
5394 	qlnx_host_t	*ha;
5395 
5396 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5397 	bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5398 }
5399 
5400 void
5401 qlnx_link_update(void *p_hwfn)
5402 {
5403 	qlnx_host_t	*ha;
5404 	int		prev_link_state;
5405 
5406 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5407 
5408 	qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5409 
5410 	prev_link_state = ha->link_up;
5411 	ha->link_up = ha->if_link.link_up;
5412 
5413         if (prev_link_state !=  ha->link_up) {
5414                 if (ha->link_up) {
5415                         if_link_state_change(ha->ifp, LINK_STATE_UP);
5416                 } else {
5417                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5418                 }
5419         }
5420 #ifndef QLNX_VF
5421 #ifdef CONFIG_ECORE_SRIOV
5422 
5423 	if (qlnx_vf_device(ha) != 0) {
5424 		if (ha->sriov_initialized)
5425 			qlnx_inform_vf_link_state(p_hwfn, ha);
5426 	}
5427 
5428 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5429 #endif /* #ifdef QLNX_VF */
5430 
5431         return;
5432 }
5433 
5434 static void
5435 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5436 	struct ecore_vf_acquire_sw_info *p_sw_info)
5437 {
5438 	p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5439 					(QLNX_VERSION_MINOR << 16) |
5440 					 QLNX_VERSION_BUILD;
5441 	p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5442 
5443 	return;
5444 }
5445 
5446 void
5447 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5448 	void *p_sw_info)
5449 {
5450 	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5451 
5452 	return;
5453 }
5454 
5455 void
5456 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5457 	struct qlnx_link_output *if_link)
5458 {
5459 	struct ecore_mcp_link_params    link_params;
5460 	struct ecore_mcp_link_state     link_state;
5461 	uint8_t				p_change;
5462 	struct ecore_ptt *p_ptt = NULL;
5463 
5464 	memset(if_link, 0, sizeof(*if_link));
5465 	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5466 	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5467 
5468 	ha = (qlnx_host_t *)hwfn->p_dev;
5469 
5470 	/* Prepare source inputs */
5471 	/* a PTT and MFW link info are only available on a physical function */
5472 	if (qlnx_vf_device(ha) != 0) {
5473         	p_ptt = ecore_ptt_acquire(hwfn);
5474 
5475 	        if (p_ptt == NULL) {
5476 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5477 			return;
5478 		}
5479 
5480 		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5481 		ecore_ptt_release(hwfn, p_ptt);
5482 
5483 		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5484 			sizeof(link_params));
5485 		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5486 			sizeof(link_state));
5487 	} else {
5488 		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5489 		ecore_vf_read_bulletin(hwfn, &p_change);
5490 		ecore_vf_get_link_params(hwfn, &link_params);
5491 		ecore_vf_get_link_state(hwfn, &link_state);
5492 	}
5493 
5494 	/* Set the link parameters to pass to protocol driver */
5495 	if (link_state.link_up) {
5496 		if_link->link_up = true;
5497 		if_link->speed = link_state.speed;
5498 	}
5499 
5500 	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5501 
5502 	if (link_params.speed.autoneg)
5503 		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5504 
5505 	if (link_params.pause.autoneg ||
5506 		(link_params.pause.forced_rx && link_params.pause.forced_tx))
5507 		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5508 
5509 	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5510 		link_params.pause.forced_tx)
5511 		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5512 
5513 	if (link_params.speed.advertised_speeds &
5514 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5515 		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5516                                            QLNX_LINK_CAP_1000baseT_Full;
5517 
5518 	if (link_params.speed.advertised_speeds &
5519 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5520 		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5521 
5522 	if (link_params.speed.advertised_speeds &
5523 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5524 		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5525 
5526 	if (link_params.speed.advertised_speeds &
5527 		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5528 		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5529 
5530 	if (link_params.speed.advertised_speeds &
5531 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5532 		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5533 
5534 	if (link_params.speed.advertised_speeds &
5535 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5536 		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5537 
5538 	if_link->advertised_caps = if_link->supported_caps;
5539 
5540 	if_link->autoneg = link_params.speed.autoneg;
5541 	if_link->duplex = QLNX_LINK_DUPLEX;
5542 
5543 	/* Link partner capabilities */
5544 
5545 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5546 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5547 
5548 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5549 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5550 
5551 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5552 		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5553 
5554 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5555 		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5556 
5557 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5558 		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5559 
5560 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5561 		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5562 
5563 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5564 		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5565 
5566 	if (link_state.an_complete)
5567 		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5568 
5569 	if (link_state.partner_adv_pause)
5570 		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5571 
5572 	if ((link_state.partner_adv_pause ==
5573 		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5574 		(link_state.partner_adv_pause ==
5575 			ECORE_LINK_PARTNER_BOTH_PAUSE))
5576 		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5577 
5578 	return;
5579 }
5580 
5581 void
5582 qlnx_schedule_recovery(void *p_hwfn)
5583 {
5584 	qlnx_host_t	*ha;
5585 
5586 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5587 
5588 	if (qlnx_vf_device(ha) != 0) {
5589 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5590 	}
5591 
5592 	return;
5593 }
5594 
5595 static int
5596 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5597 {
5598         int	rc, i;
5599 
5600         for (i = 0; i < cdev->num_hwfns; i++) {
5601                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5602                 p_hwfn->pf_params = *func_params;
5603 
5604 #ifdef QLNX_ENABLE_IWARP
5605 		if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5606 			p_hwfn->using_ll2 = true;
5607 		}
5608 #endif /* #ifdef QLNX_ENABLE_IWARP */
5609         }
5610 
5611         rc = ecore_resc_alloc(cdev);
5612         if (rc)
5613                 goto qlnx_nic_setup_exit;
5614 
5615         ecore_resc_setup(cdev);
5616 
5617 qlnx_nic_setup_exit:
5618 
5619         return rc;
5620 }
5621 
5622 static int
5623 qlnx_nic_start(struct ecore_dev *cdev)
5624 {
5625         int				rc;
5626 	struct ecore_hw_init_params	params;
5627 
5628 	bzero(&params, sizeof (struct ecore_hw_init_params));
5629 
5630 	params.p_tunn = NULL;
5631 	params.b_hw_start = true;
5632 	params.int_mode = cdev->int_mode;
5633 	params.allow_npar_tx_switch = true;
5634 	params.bin_fw_data = NULL;
5635 
5636         rc = ecore_hw_init(cdev, &params);
5637         if (rc) {
5638                 ecore_resc_free(cdev);
5639                 return rc;
5640         }
5641 
5642         return 0;
5643 }
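
/*
 * Editor's note: bring-up is strictly ordered -- qlnx_nic_setup() must run
 * first (ecore_resc_alloc() + ecore_resc_setup()), then qlnx_nic_start()
 * performs ecore_hw_init() and frees the resources itself on failure.
 * qlnx_slowpath_start() below drives exactly this sequence:
 *
 *	qlnx_nic_setup(cdev, &pf_params);	// allocate + set up resources
 *	qlnx_nic_start(cdev);			// ecore_hw_init()
 */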
5644 
5645 static int
5646 qlnx_slowpath_start(qlnx_host_t *ha)
5647 {
5648 	struct ecore_dev	*cdev;
5649 	struct ecore_pf_params	pf_params;
5650 	int			rc;
5651 
5652 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5653 	pf_params.eth_pf_params.num_cons  =
5654 		(ha->num_rss) * (ha->num_tc + 1);
5655 
5656 #ifdef QLNX_ENABLE_IWARP
5657 	if (qlnx_vf_device(ha) != 0) {
5658 		if(ha->personality == ECORE_PCI_ETH_IWARP) {
5659 			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5660 			pf_params.rdma_pf_params.num_qps = 1024;
5661 			pf_params.rdma_pf_params.num_srqs = 1024;
5662 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5663 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5664 		} else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5665 			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5666 			pf_params.rdma_pf_params.num_qps = 8192;
5667 			pf_params.rdma_pf_params.num_srqs = 8192;
5668 			//pf_params.rdma_pf_params.min_dpis = 0;
5669 			pf_params.rdma_pf_params.min_dpis = 8;
5670 			pf_params.rdma_pf_params.roce_edpm_mode = 0;
5671 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5672 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5673 		}
5674 	}
5675 #endif /* #ifdef QLNX_ENABLE_IWARP */
5676 
5677 	cdev = &ha->cdev;
5678 
5679 	rc = qlnx_nic_setup(cdev, &pf_params);
5680         if (rc)
5681                 goto qlnx_slowpath_start_exit;
5682 
5683         cdev->int_mode = ECORE_INT_MODE_MSIX;
5684         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5685 
5686 #ifdef QLNX_MAX_COALESCE
5687 	cdev->rx_coalesce_usecs = 255;
5688 	cdev->tx_coalesce_usecs = 255;
5689 #endif
5690 
5691 	rc = qlnx_nic_start(cdev);
5692 
5693 	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5694 	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5695 
5696 #ifdef QLNX_USER_LLDP
5697 	(void)qlnx_set_lldp_tlvx(ha, NULL);
5698 #endif /* #ifdef QLNX_USER_LLDP */
5699 
5700 qlnx_slowpath_start_exit:
5701 
5702 	return (rc);
5703 }
5704 
5705 static int
5706 qlnx_slowpath_stop(qlnx_host_t *ha)
5707 {
5708 	struct ecore_dev	*cdev;
5709 	device_t		dev = ha->pci_dev;
5710 	int			i;
5711 
5712 	cdev = &ha->cdev;
5713 
5714 	ecore_hw_stop(cdev);
5715 
5716  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5717         	if (ha->sp_handle[i])
5718                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5719 				ha->sp_handle[i]);
5720 
5721 		ha->sp_handle[i] = NULL;
5722 
5723         	if (ha->sp_irq[i])
5724 			(void) bus_release_resource(dev, SYS_RES_IRQ,
5725 				ha->sp_irq_rid[i], ha->sp_irq[i]);
5726 		ha->sp_irq[i] = NULL;
5727 	}
5728 
5729         ecore_resc_free(cdev);
5730 
5731         return 0;
5732 }
5733 
5734 static void
5735 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5736 	char ver_str[VER_SIZE])
5737 {
5738         int	i;
5739 
5740         memcpy(cdev->name, name, NAME_SIZE);
5741 
5742         for_each_hwfn(cdev, i) {
5743                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5744         }
5745 
5746         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5747 
5748 	return;
5749 }
5750 
5751 void
5752 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5753 {
5754 	enum ecore_mcp_protocol_type	type;
5755 	union ecore_mcp_protocol_stats	*stats;
5756 	struct ecore_eth_stats		eth_stats;
5757 	qlnx_host_t			*ha;
5758 
5759 	ha = cdev;
5760 	stats = proto_stats;
5761 	type = proto_type;
5762 
5763         switch (type) {
5764         case ECORE_MCP_LAN_STATS:
5765                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5766                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5767                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5768                 stats->lan_stats.fcs_err = -1;
5769                 break;
5770 
5771 	default:
5772 		ha->err_get_proto_invalid_type++;
5773 
5774 		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5775 		break;
5776 	}
5777 	return;
5778 }
5779 
5780 static int
5781 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5782 {
5783 	struct ecore_hwfn	*p_hwfn;
5784 	struct ecore_ptt	*p_ptt;
5785 
5786 	p_hwfn = &ha->cdev.hwfns[0];
5787 	p_ptt = ecore_ptt_acquire(p_hwfn);
5788 
5789 	if (p_ptt == NULL) {
5790                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5791                 return (-1);
5792 	}
5793 	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5794 
5795 	ecore_ptt_release(p_hwfn, p_ptt);
5796 
5797 	return (0);
5798 }
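
/*
 * Editor's addition (illustrative): a hypothetical pretty-printer for the
 * value returned above, assuming the usual major.minor.rev.eng packing of
 * the 32-bit MFW version word:
 */
#if 0
static void
qlnx_example_print_mfw_ver(qlnx_host_t *ha)
{
	uint32_t mfw_ver = 0;

	if (qlnx_get_mfw_version(ha, &mfw_ver) == 0)
		device_printf(ha->pci_dev, "MFW %d.%d.%d.%d\n",
			(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
			(mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
}
#endif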
5799 
5800 static int
5801 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5802 {
5803 	struct ecore_hwfn	*p_hwfn;
5804 	struct ecore_ptt	*p_ptt;
5805 
5806 	p_hwfn = &ha->cdev.hwfns[0];
5807 	p_ptt = ecore_ptt_acquire(p_hwfn);
5808 
5809 	if (p_ptt == NULL) {
5810                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5811                 return (-1);
5812 	}
5813 	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5814 
5815 	ecore_ptt_release(p_hwfn, p_ptt);
5816 
5817 	return (0);
5818 }
5819 
5820 static int
5821 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5822 {
5823 	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5824 	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5825 	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5826 
5827         return 0;
5828 }
5829 
5830 static void
5831 qlnx_init_fp(qlnx_host_t *ha)
5832 {
5833 	int rss_id, txq_array_index, tc;
5834 
5835 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5836 		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5837 
5838 		fp->rss_id = rss_id;
5839 		fp->edev = ha;
5840 		fp->sb_info = &ha->sb_array[rss_id];
5841 		fp->rxq = &ha->rxq_array[rss_id];
5842 		fp->rxq->rxq_id = rss_id;
5843 
5844 		for (tc = 0; tc < ha->num_tc; tc++) {
5845                         txq_array_index = tc * ha->num_rss + rss_id;
5846                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5847                         fp->txq[tc]->index = txq_array_index;
5848 		}
5849 
5850 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5851 			rss_id);
5852 
5853 		fp->tx_ring_full = 0;
5854 
5855 		/* reset all the statistics counters */
5856 
5857 		fp->tx_pkts_processed = 0;
5858 		fp->tx_pkts_freed = 0;
5859 		fp->tx_pkts_transmitted = 0;
5860 		fp->tx_pkts_completed = 0;
5861 
5862 #ifdef QLNX_TRACE_PERF_DATA
5863 		fp->tx_pkts_trans_ctx = 0;
5864 		fp->tx_pkts_compl_ctx = 0;
5865 		fp->tx_pkts_trans_fp = 0;
5866 		fp->tx_pkts_compl_fp = 0;
5867 		fp->tx_pkts_compl_intr = 0;
5868 #endif
5869 		fp->tx_lso_wnd_min_len = 0;
5870 		fp->tx_defrag = 0;
5871 		fp->tx_nsegs_gt_elem_left = 0;
5872 		fp->tx_tso_max_nsegs = 0;
5873 		fp->tx_tso_min_nsegs = 0;
5874 		fp->err_tx_nsegs_gt_elem_left = 0;
5875 		fp->err_tx_dmamap_create = 0;
5876 		fp->err_tx_defrag_dmamap_load = 0;
5877 		fp->err_tx_non_tso_max_seg = 0;
5878 		fp->err_tx_dmamap_load = 0;
5879 		fp->err_tx_defrag = 0;
5880 		fp->err_tx_free_pkt_null = 0;
5881 		fp->err_tx_cons_idx_conflict = 0;
5882 
5883 		fp->rx_pkts = 0;
5884 		fp->err_m_getcl = 0;
5885 		fp->err_m_getjcl = 0;
5886         }
5887 	return;
5888 }
5889 
5890 void
5891 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5892 {
5893 	struct ecore_dev	*cdev;
5894 
5895 	cdev = &ha->cdev;
5896 
5897         if (sb_info->sb_virt) {
5898                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5899 			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5900 		sb_info->sb_virt = NULL;
5901 	}
5902 }
5903 
5904 static int
5905 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5906 	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5907 {
5908         struct ecore_hwfn	*p_hwfn;
5909         int			hwfn_index, rc;
5910         u16			rel_sb_id;
5911 
5912         hwfn_index = sb_id % cdev->num_hwfns;
5913         p_hwfn = &cdev->hwfns[hwfn_index];
5914         rel_sb_id = sb_id / cdev->num_hwfns;
5915 
5916         QL_DPRINT2(((qlnx_host_t *)cdev),
5917                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x "
5918                 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5919                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5920                 sb_virt_addr, (void *)sb_phy_addr);
5921 
5922         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5923                              sb_virt_addr, sb_phy_addr, rel_sb_id);
5924 
5925         return rc;
5926 }
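
/*
 * Editor's note: on CMT parts (two hwfns behind one PCI function) status
 * blocks are striped across the engines by the arithmetic above, e.g.
 * with cdev->num_hwfns == 2:
 *
 *	sb_id 0 -> hwfn 0, rel_sb_id 0	sb_id 1 -> hwfn 1, rel_sb_id 0
 *	sb_id 2 -> hwfn 0, rel_sb_id 1	sb_id 3 -> hwfn 1, rel_sb_id 1
 */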
5927 
5928 /* This function allocates fast-path status block memory */
5929 int
5930 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5931 {
5932         struct status_block_e4	*sb_virt;
5933         bus_addr_t		sb_phys;
5934         int			rc;
5935 	uint32_t		size;
5936 	struct ecore_dev	*cdev;
5937 
5938 	cdev = &ha->cdev;
5939 
5940 	size = sizeof(*sb_virt);
5941 	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5942 
5943         if (!sb_virt) {
5944                 QL_DPRINT1(ha, "Status block allocation failed\n");
5945                 return -ENOMEM;
5946         }
5947 
5948         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5949         if (rc) {
5950                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5951         }
5952 
5953 	return rc;
5954 }
5955 
5956 static void
5957 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5958 {
5959         int			i;
5960 	struct sw_rx_data	*rx_buf;
5961 
5962         for (i = 0; i < rxq->num_rx_buffers; i++) {
5963                 rx_buf = &rxq->sw_rx_ring[i];
5964 
5965 		if (rx_buf->data != NULL) {
5966 			if (rx_buf->map != NULL) {
5967 				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5968 				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5969 				rx_buf->map = NULL;
5970 			}
5971 			m_freem(rx_buf->data);
5972 			rx_buf->data = NULL;
5973 		}
5974         }
5975 	return;
5976 }
5977 
5978 static void
5979 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5980 {
5981 	struct ecore_dev	*cdev;
5982 	int			i;
5983 
5984 	cdev = &ha->cdev;
5985 
5986 	qlnx_free_rx_buffers(ha, rxq);
5987 
5988 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5989 		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5990 		if (rxq->tpa_info[i].mpf != NULL)
5991 			m_freem(rxq->tpa_info[i].mpf);
5992 	}
5993 
5994 	bzero((void *)&rxq->sw_rx_ring[0],
5995 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5996 
5997         /* Free the real RQ ring used by FW */
5998 	if (rxq->rx_bd_ring.p_virt_addr) {
5999                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6000                 rxq->rx_bd_ring.p_virt_addr = NULL;
6001         }
6002 
6003         /* Free the real completion ring used by FW */
6004         if (rxq->rx_comp_ring.p_virt_addr &&
6005                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6006                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6007                 rxq->rx_comp_ring.p_virt_addr = NULL;
6008                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6009         }
6010 
6011 #ifdef QLNX_SOFT_LRO
6012 	{
6013 		struct lro_ctrl *lro;
6014 
6015 		lro = &rxq->lro;
6016 		tcp_lro_free(lro);
6017 	}
6018 #endif /* #ifdef QLNX_SOFT_LRO */
6019 
6020 	return;
6021 }
6022 
6023 static int
6024 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6025 {
6026         register struct mbuf	*mp;
6027         uint16_t		rx_buf_size;
6028         struct sw_rx_data	*sw_rx_data;
6029         struct eth_rx_bd	*rx_bd;
6030         dma_addr_t		dma_addr;
6031 	bus_dmamap_t		map;
6032 	bus_dma_segment_t       segs[1];
6033 	int			nsegs;
6034 	int			ret;
6035 
6036         rx_buf_size = rxq->rx_buf_size;
6037 
6038 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6039 
6040         if (mp == NULL) {
6041                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6042                 return -ENOMEM;
6043         }
6044 
6045 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6046 
6047 	map = (bus_dmamap_t)0;
6048 
6049 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6050 			BUS_DMA_NOWAIT);
6051 	dma_addr = segs[0].ds_addr;
6052 
6053 	if (ret || !dma_addr || (nsegs != 1)) {
6054 		m_freem(mp);
6055 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6056                            ret, (long long unsigned int)dma_addr, nsegs);
6057 		return -ENOMEM;
6058 	}
6059 
6060         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6061         sw_rx_data->data = mp;
6062         sw_rx_data->dma_addr = dma_addr;
6063         sw_rx_data->map = map;
6064 
6065         /* Advance PROD and get BD pointer */
6066         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6067         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6068         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6069 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6070 
6071         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6072 
6073         return 0;
6074 }
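
/*
 * Editor's addition (illustrative): the "& (RX_RING_SIZE - 1)" wrap above
 * is a masked increment, valid only for a power-of-two ring size. A
 * compile-time guard for that assumption could read:
 */
#if 0
_Static_assert((RX_RING_SIZE & (RX_RING_SIZE - 1)) == 0,
    "RX_RING_SIZE must be a power of two for index masking");
#endif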
6075 
6076 static int
6077 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6078 	struct qlnx_agg_info *tpa)
6079 {
6080 	struct mbuf		*mp;
6081         dma_addr_t		dma_addr;
6082 	bus_dmamap_t		map;
6083 	bus_dma_segment_t       segs[1];
6084 	int			nsegs;
6085 	int			ret;
6086         struct sw_rx_data	*rx_buf;
6087 
6088 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6089 
6090         if (mp == NULL) {
6091                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6092                 return -ENOMEM;
6093         }
6094 
6095 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6096 
6097 	map = (bus_dmamap_t)0;
6098 
6099 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6100 			BUS_DMA_NOWAIT);
6101 	dma_addr = segs[0].ds_addr;
6102 
6103 	if (ret || !dma_addr || (nsegs != 1)) {
6104 		m_freem(mp);
6105 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6106 			ret, (long long unsigned int)dma_addr, nsegs);
6107 		return -ENOMEM;
6108 	}
6109 
6110         rx_buf = &tpa->rx_buf;
6111 
6112 	memset(rx_buf, 0, sizeof (struct sw_rx_data));
6113 
6114         rx_buf->data = mp;
6115         rx_buf->dma_addr = dma_addr;
6116         rx_buf->map = map;
6117 
6118 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6119 
6120 	return (0);
6121 }
6122 
6123 static void
6124 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6125 {
6126         struct sw_rx_data	*rx_buf;
6127 
6128 	rx_buf = &tpa->rx_buf;
6129 
6130 	if (rx_buf->data != NULL) {
6131 		if (rx_buf->map != NULL) {
6132 			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6133 			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6134 			rx_buf->map = NULL;
6135 		}
6136 		m_freem(rx_buf->data);
6137 		rx_buf->data = NULL;
6138 	}
6139 	return;
6140 }
6141 
6142 /* This function allocates all memory needed per Rx queue */
6143 static int
6144 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6145 {
6146         int			i, rc, num_allocated;
6147 	struct ecore_dev	 *cdev;
6148 
6149 	cdev = &ha->cdev;
6150 
6151         rxq->num_rx_buffers = RX_RING_SIZE;
6152 
6153 	rxq->rx_buf_size = ha->rx_buf_size;
6154 
6155         /* Allocate the parallel driver ring for Rx buffers */
6156 	bzero((void *)&rxq->sw_rx_ring[0],
6157 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6158 
6159         /* Allocate FW Rx ring  */
6160 
6161         rc = ecore_chain_alloc(cdev,
6162 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6163 			ECORE_CHAIN_MODE_NEXT_PTR,
6164 			ECORE_CHAIN_CNT_TYPE_U16,
6165 			RX_RING_SIZE,
6166 			sizeof(struct eth_rx_bd),
6167 			&rxq->rx_bd_ring, NULL);
6168 
6169         if (rc)
6170                 goto err;
6171 
6172         /* Allocate FW completion ring */
6173         rc = ecore_chain_alloc(cdev,
6174                         ECORE_CHAIN_USE_TO_CONSUME,
6175                         ECORE_CHAIN_MODE_PBL,
6176 			ECORE_CHAIN_CNT_TYPE_U16,
6177                         RX_RING_SIZE,
6178                         sizeof(union eth_rx_cqe),
6179                         &rxq->rx_comp_ring, NULL);
6180 
6181         if (rc)
6182                 goto err;
6183 
6184         /* Allocate buffers for the Rx ring */
6185 
6186 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6187 		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6188 			&rxq->tpa_info[i]);
6189                 if (rc)
6190                         break;
6191 	}
6192 
6193         for (i = 0; i < rxq->num_rx_buffers; i++) {
6194                 rc = qlnx_alloc_rx_buffer(ha, rxq);
6195                 if (rc)
6196                         break;
6197         }
6198         num_allocated = i;
6199         if (!num_allocated) {
6200 		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6201                 goto err;
6202         } else if (num_allocated < rxq->num_rx_buffers) {
6203 		QL_DPRINT1(ha, "Allocated fewer buffers than"
6204 			" desired (%d allocated)\n", num_allocated);
6205         }
6206 
6207 #ifdef QLNX_SOFT_LRO
6208 
6209 	{
6210 		struct lro_ctrl *lro;
6211 
6212 		lro = &rxq->lro;
6213 
6214 		if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
6215 			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6216 				   rxq->rxq_id);
6217 			goto err;
6218 		}
6219 
6220 		lro->ifp = ha->ifp;
6221 	}
6222 #endif /* #ifdef QLNX_SOFT_LRO */
6223         return 0;
6224 
6225 err:
6226         qlnx_free_mem_rxq(ha, rxq);
6227         return -ENOMEM;
6228 }
6229 
6230 static void
6231 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6232 	struct qlnx_tx_queue *txq)
6233 {
6234 	struct ecore_dev	*cdev;
6235 
6236 	cdev = &ha->cdev;
6237 
6238 	bzero((void *)&txq->sw_tx_ring[0],
6239 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6240 
6241         /* Free the real RQ ring used by FW */
6242         if (txq->tx_pbl.p_virt_addr) {
6243                 ecore_chain_free(cdev, &txq->tx_pbl);
6244                 txq->tx_pbl.p_virt_addr = NULL;
6245         }
6246 	return;
6247 }
6248 
6249 /* This function allocates all memory needed per Tx queue */
6250 static int
6251 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6252 	struct qlnx_tx_queue *txq)
6253 {
6254         int			ret = ECORE_SUCCESS;
6255         union eth_tx_bd_types	*p_virt;
6256 	struct ecore_dev	*cdev;
6257 
6258 	cdev = &ha->cdev;
6259 
6260 	bzero((void *)&txq->sw_tx_ring[0],
6261 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6262 
6263         /* Allocate the real Tx ring to be used by FW */
6264         ret = ecore_chain_alloc(cdev,
6265                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6266                         ECORE_CHAIN_MODE_PBL,
6267 			ECORE_CHAIN_CNT_TYPE_U16,
6268                         TX_RING_SIZE,
6269                         sizeof(*p_virt),
6270                         &txq->tx_pbl, NULL);
6271 
6272         if (ret != ECORE_SUCCESS) {
6273                 goto err;
6274         }
6275 
6276 	txq->num_tx_buffers = TX_RING_SIZE;
6277 
6278         return 0;
6279 
6280 err:
6281         qlnx_free_mem_txq(ha, fp, txq);
6282         return -ENOMEM;
6283 }
6284 
6285 static void
6286 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6287 {
6288 	struct mbuf	*mp;
6289 	if_t		ifp = ha->ifp;
6290 
6291 	if (mtx_initialized(&fp->tx_mtx)) {
6292 		if (fp->tx_br != NULL) {
6293 			mtx_lock(&fp->tx_mtx);
6294 
6295 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6296 				fp->tx_pkts_freed++;
6297 				m_freem(mp);
6298 			}
6299 
6300 			mtx_unlock(&fp->tx_mtx);
6301 
6302 			buf_ring_free(fp->tx_br, M_DEVBUF);
6303 			fp->tx_br = NULL;
6304 		}
6305 		mtx_destroy(&fp->tx_mtx);
6306 	}
6307 	return;
6308 }
6309 
6310 static void
6311 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6312 {
6313         int	tc;
6314 
6315         qlnx_free_mem_sb(ha, fp->sb_info);
6316 
6317         qlnx_free_mem_rxq(ha, fp->rxq);
6318 
6319         for (tc = 0; tc < ha->num_tc; tc++)
6320                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6321 
6322 	return;
6323 }
6324 
6325 static int
6326 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6327 {
6328 	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6329 		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6330 
6331 	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6332 
6333         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6334                                    M_NOWAIT, &fp->tx_mtx);
6335         if (fp->tx_br == NULL) {
6336 		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6337 			ha->dev_unit, fp->rss_id);
6338 		return -ENOMEM;
6339         }
6340 	return 0;
6341 }
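
/*
 * Editor's addition (illustrative): a hypothetical drain loop over the
 * buf_ring created above, using the driver's own qlnx_send(); drbr
 * enqueueing and draining must both happen under tx_mtx:
 */
#if 0
static void
qlnx_example_drain_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	struct mbuf *mp;

	mtx_lock(&fp->tx_mtx);

	while ((mp = drbr_peek(ha->ifp, fp->tx_br)) != NULL) {
		if (qlnx_send(ha, fp, &mp)) {
			/* requeue a still-valid mbuf, else drop the slot */
			if (mp != NULL)
				drbr_putback(ha->ifp, fp->tx_br, mp);
			else
				drbr_advance(ha->ifp, fp->tx_br);
			break;
		}
		drbr_advance(ha->ifp, fp->tx_br);
	}

	mtx_unlock(&fp->tx_mtx);
}
#endif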
6342 
6343 static int
6344 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6345 {
6346         int	rc, tc;
6347 
6348         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6349         if (rc)
6350                 goto err;
6351 
6352 	if (ha->rx_jumbo_buf_eq_mtu) {
6353 		if (ha->max_frame_size <= MCLBYTES)
6354 			ha->rx_buf_size = MCLBYTES;
6355 		else if (ha->max_frame_size <= MJUMPAGESIZE)
6356 			ha->rx_buf_size = MJUMPAGESIZE;
6357 		else if (ha->max_frame_size <= MJUM9BYTES)
6358 			ha->rx_buf_size = MJUM9BYTES;
6359 		else if (ha->max_frame_size <= MJUM16BYTES)
6360 			ha->rx_buf_size = MJUM16BYTES;
6361 	} else {
6362 		if (ha->max_frame_size <= MCLBYTES)
6363 			ha->rx_buf_size = MCLBYTES;
6364 		else
6365 			ha->rx_buf_size = MJUMPAGESIZE;
6366 	}
6367 
6368         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6369         if (rc)
6370                 goto err;
6371 
6372         for (tc = 0; tc < ha->num_tc; tc++) {
6373                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6374                 if (rc)
6375                         goto err;
6376         }
6377 
6378         return 0;
6379 
6380 err:
6381         qlnx_free_mem_fp(ha, fp);
6382         return -ENOMEM;
6383 }
6384 
6385 static void
6386 qlnx_free_mem_load(qlnx_host_t *ha)
6387 {
6388         int			i;
6389 
6390         for (i = 0; i < ha->num_rss; i++) {
6391                 struct qlnx_fastpath *fp = &ha->fp_array[i];
6392 
6393                 qlnx_free_mem_fp(ha, fp);
6394         }
6395 	return;
6396 }
6397 
6398 static int
6399 qlnx_alloc_mem_load(qlnx_host_t *ha)
6400 {
6401         int	rc = 0, rss_id;
6402 
6403         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6404                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6405 
6406                 rc = qlnx_alloc_mem_fp(ha, fp);
6407                 if (rc)
6408                         break;
6409         }
6410 	return (rc);
6411 }
6412 
6413 static int
6414 qlnx_start_vport(struct ecore_dev *cdev,
6415                 u8 vport_id,
6416                 u16 mtu,
6417                 u8 drop_ttl0_flg,
6418                 u8 inner_vlan_removal_en_flg,
6419 		u8 tx_switching,
6420 		u8 hw_lro_enable)
6421 {
6422         int					rc, i;
6423 	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
6424 	qlnx_host_t				*ha __unused;
6425 
6426 	ha = (qlnx_host_t *)cdev;
6427 
6428 	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6429 	vport_start_params.tx_switching = 0;
6430 	vport_start_params.handle_ptp_pkts = 0;
6431 	vport_start_params.only_untagged = 0;
6432 	vport_start_params.drop_ttl0 = drop_ttl0_flg;
6433 
6434 	vport_start_params.tpa_mode =
6435 		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6436 	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6437 
6438 	vport_start_params.vport_id = vport_id;
6439 	vport_start_params.mtu = mtu;
6440 
6441 	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6442 
6443         for_each_hwfn(cdev, i) {
6444                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6445 
6446 		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6447 		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6448 
6449                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6450 
6451                 if (rc) {
6452 			QL_DPRINT1(ha, "Failed to start V-PORT %d"
6453 				" with MTU %d\n", vport_id, mtu);
6454                         return -ENOMEM;
6455                 }
6456 
6457                 ecore_hw_start_fastpath(p_hwfn);
6458 
6459 		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6460 			vport_id, mtu);
6461         }
6462         return 0;
6463 }
6464 
6465 static int
6466 qlnx_update_vport(struct ecore_dev *cdev,
6467 	struct qlnx_update_vport_params *params)
6468 {
6469         struct ecore_sp_vport_update_params	sp_params;
6470         int					rc, i, j, fp_index;
6471 	struct ecore_hwfn			*p_hwfn;
6472         struct ecore_rss_params			*rss;
6473 	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
6474         struct qlnx_fastpath			*fp;
6475 
6476         memset(&sp_params, 0, sizeof(sp_params));
6477         /* Translate protocol params into sp params */
6478         sp_params.vport_id = params->vport_id;
6479 
6480         sp_params.update_vport_active_rx_flg =
6481 		params->update_vport_active_rx_flg;
6482         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6483 
6484         sp_params.update_vport_active_tx_flg =
6485 		params->update_vport_active_tx_flg;
6486         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6487 
6488         sp_params.update_inner_vlan_removal_flg =
6489                 params->update_inner_vlan_removal_flg;
6490         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6491 
6492 	sp_params.sge_tpa_params = params->sge_tpa_params;
6493 
6494         /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6495          * We need to re-fix the rss values per engine for CMT.
6496          */
6497 	if (params->rss_params->update_rss_config)
6498 		sp_params.rss_params = params->rss_params;
6499 	else
6500 		sp_params.rss_params = NULL;
6501 
6502         for_each_hwfn(cdev, i) {
6503 		p_hwfn = &cdev->hwfns[i];
6504 
6505 		if ((cdev->num_hwfns > 1) &&
6506 			params->rss_params->update_rss_config &&
6507 			params->rss_params->rss_enable) {
6508 			rss = params->rss_params;
6509 
6510 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6511 				fp_index = ((cdev->num_hwfns * j) + i) %
6512 						ha->num_rss;
6513 
6514                 		fp = &ha->fp_array[fp_index];
6515                         	rss->rss_ind_table[j] = fp->rxq->handle;
6516 			}
6517 
6518 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6519 				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p\n",
6520 					rss->rss_ind_table[j],
6521 					rss->rss_ind_table[j+1],
6522 					rss->rss_ind_table[j+2],
6523 					rss->rss_ind_table[j+3],
6524 					rss->rss_ind_table[j+4],
6525 					rss->rss_ind_table[j+5],
6526 					rss->rss_ind_table[j+6],
6527 					rss->rss_ind_table[j+7]);
6528 				j += 8;
6529 			}
6530 		}
6531 
6532                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6533 
6534 		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6535 
6536                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6537                                            ECORE_SPQ_MODE_EBLOCK, NULL);
6538                 if (rc) {
6539 			QL_DPRINT1(ha, "Failed to update VPORT\n");
6540                         return rc;
6541                 }
6542 
6543                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, "
6544 			"rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6545 			params->vport_id, params->vport_active_tx_flg,
6546 			params->vport_active_rx_flg,
6547 			params->update_vport_active_tx_flg,
6548 			params->update_vport_active_rx_flg);
6549         }
6550 
6551         return 0;
6552 }
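
/*
 * Editor's note: the fp_index formula above interleaves the indirection
 * table so each engine references only its own queues. Worked example
 * with cdev->num_hwfns == 2 and ha->num_rss == 4:
 *
 *	hwfn 0 (i = 0): j = 0,1,2,3,... -> fp 0,2,0,2,...
 *	hwfn 1 (i = 1): j = 0,1,2,3,... -> fp 1,3,1,3,...
 */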
6553 
6554 static void
6555 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6556 {
6557         struct eth_rx_bd	*rx_bd_cons =
6558 					ecore_chain_consume(&rxq->rx_bd_ring);
6559         struct eth_rx_bd	*rx_bd_prod =
6560 					ecore_chain_produce(&rxq->rx_bd_ring);
6561         struct sw_rx_data	*sw_rx_data_cons =
6562 					&rxq->sw_rx_ring[rxq->sw_rx_cons];
6563         struct sw_rx_data	*sw_rx_data_prod =
6564 					&rxq->sw_rx_ring[rxq->sw_rx_prod];
6565 
6566         sw_rx_data_prod->data = sw_rx_data_cons->data;
6567         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6568 
6569         rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6570         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6571 
6572 	return;
6573 }
6574 
6575 static void
6576 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6577 {
6578 
6579         uint16_t	 	bd_prod;
6580         uint16_t		cqe_prod;
6581 	union {
6582 		struct eth_rx_prod_data rx_prod_data;
6583 		uint32_t		data32;
6584 	} rx_prods;
6585 
6586         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6587         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6588 
6589         /* Update producers */
6590         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6591         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6592 
6593         /* Make sure that the BD and SGE data is updated before updating the
6594          * producers since FW might read the BD/SGE right after the producer
6595          * is updated.
6596          */
6597 	wmb();
6598 
6599         internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6600 		sizeof(rx_prods), &rx_prods.data32);
6601 
6602         /* mmiowb is needed to synchronize doorbell writes from more than one
6603          * processor. It guarantees that the write arrives to the device before
6604          * the napi lock is released and another qlnx_poll is called (possibly
6605          * on another CPU). Without this barrier, the next doorbell can bypass
6606          * this doorbell. This is applicable to IA64/Altix systems.
6607          */
6608         wmb();
6609 
6610 	return;
6611 }
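
/*
 * Editor's note: rx_prods packs bd_prod and cqe_prod into a single 32-bit
 * value so internal_ram_wr() posts both producers in one write and the
 * firmware never observes them out of step; the wmb() calls order the
 * BD/SGE updates before that producer write.
 */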
6612 
6613 static uint32_t qlnx_hash_key[] = {
6614                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6615                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6616                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6617                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6618                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6619                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6620                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6621                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6622                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6623                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
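
/*
 * Editor's note: these constants spell out the default Toeplitz hash key
 * from the Microsoft RSS specification (6d 5a 56 da 25 5b 0e c2 ...),
 * packed four bytes per 32-bit word, most-significant byte first.
 */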
6624 
6625 static int
6626 qlnx_start_queues(qlnx_host_t *ha)
6627 {
6628         int				rc, tc, i, vport_id = 0,
6629 					drop_ttl0_flg = 1, vlan_removal_en = 1,
6630 					tx_switching = 0, hw_lro_enable = 0;
6631         struct ecore_dev		*cdev = &ha->cdev;
6632         struct ecore_rss_params		*rss_params = &ha->rss_params;
6633         struct qlnx_update_vport_params	vport_update_params;
6634         if_t				ifp;
6635         struct ecore_hwfn		*p_hwfn;
6636 	struct ecore_sge_tpa_params	tpa_params;
6637 	struct ecore_queue_start_common_params qparams;
6638         struct qlnx_fastpath		*fp;
6639 
6640 	ifp = ha->ifp;
6641 
6642 	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6643 
6644         if (!ha->num_rss) {
6645 		QL_DPRINT1(ha, "Cannot update V-PORT as active as there"
6646 			" are no Rx queues\n");
6647                 return -EINVAL;
6648         }
6649 
6650 #ifndef QLNX_SOFT_LRO
6651         hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
6652 #endif /* #ifndef QLNX_SOFT_LRO */
6653 
6654         rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
6655 			vlan_removal_en, tx_switching, hw_lro_enable);
6656 
6657         if (rc) {
6658                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6659                 return rc;
6660         }
6661 
6662 	QL_DPRINT2(ha, "Start vport ramrod passed, "
6663 		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6664 		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);
6665 
6666         for_each_rss(i) {
6667 		struct ecore_rxq_start_ret_params rx_ret_params;
6668 		struct ecore_txq_start_ret_params tx_ret_params;
6669 
6670                 fp = &ha->fp_array[i];
6671         	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6672 
6673 		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6674 		bzero(&rx_ret_params,
6675 			sizeof (struct ecore_rxq_start_ret_params));
6676 
6677 		qparams.queue_id = i;
6678 		qparams.vport_id = vport_id;
6679 		qparams.stats_id = vport_id;
6680 		qparams.p_sb = fp->sb_info;
6681 		qparams.sb_idx = RX_PI;
6682 
6684 		rc = ecore_eth_rx_queue_start(p_hwfn,
6685 			p_hwfn->hw_info.opaque_fid,
6686 			&qparams,
6687 			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6688 			/* bd_chain_phys_addr */
6689 			fp->rxq->rx_bd_ring.p_phys_addr,
6690 			/* cqe_pbl_addr */
6691 			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6692 			/* cqe_pbl_size */
6693 			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6694 			&rx_ret_params);
6695 
6696                 if (rc) {
6697                 	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6698                         return rc;
6699                 }
6700 
6701 		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6702 		fp->rxq->handle			= rx_ret_params.p_handle;
6703                 fp->rxq->hw_cons_ptr		=
6704 				&fp->sb_info->sb_virt->pi_array[RX_PI];
6705 
6706                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6707 
6708                 for (tc = 0; tc < ha->num_tc; tc++) {
6709                         struct qlnx_tx_queue *txq = fp->txq[tc];
6710 
6711 			bzero(&qparams,
6712 				sizeof(struct ecore_queue_start_common_params));
6713 			bzero(&tx_ret_params,
6714 				sizeof (struct ecore_txq_start_ret_params));
6715 
6716 			qparams.queue_id = txq->index / cdev->num_hwfns;
6717 			qparams.vport_id = vport_id;
6718 			qparams.stats_id = vport_id;
6719 			qparams.p_sb = fp->sb_info;
6720 			qparams.sb_idx = TX_PI(tc);
6721 
6722 			rc = ecore_eth_tx_queue_start(p_hwfn,
6723 				p_hwfn->hw_info.opaque_fid,
6724 				&qparams, tc,
6725 				/* bd_chain_phys_addr */
6726 				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6727 				ecore_chain_get_page_cnt(&txq->tx_pbl),
6728 				&tx_ret_params);
6729 
6730                         if (rc) {
6731                 		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6732 					   txq->index, rc);
6733                                 return rc;
6734                         }
6735 
6736 			txq->doorbell_addr = tx_ret_params.p_doorbell;
6737 			txq->handle = tx_ret_params.p_handle;
6738 
6739                         txq->hw_cons_ptr =
6740                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6741                         SET_FIELD(txq->tx_db.data.params,
6742                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6743                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6744                                   DB_AGG_CMD_SET);
6745                         SET_FIELD(txq->tx_db.data.params,
6746                                   ETH_DB_DATA_AGG_VAL_SEL,
6747                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6748 
6749                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6750                 }
6751         }
6752 
6753         /* Fill struct with RSS params */
6754         if (ha->num_rss > 1) {
6755                 rss_params->update_rss_config = 1;
6756                 rss_params->rss_enable = 1;
6757                 rss_params->update_rss_capabilities = 1;
6758                 rss_params->update_rss_ind_table = 1;
6759                 rss_params->update_rss_key = 1;
6760                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6761                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6762                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6763 
6764                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6765                 	fp = &ha->fp_array[(i % ha->num_rss)];
6766                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6767 		}
6768 
6769                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6770 			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6771 
6772         } else {
6773                 memset(rss_params, 0, sizeof(*rss_params));
6774         }
6775 
6776         /* Prepare and send the vport enable */
6777         memset(&vport_update_params, 0, sizeof(vport_update_params));
6778         vport_update_params.vport_id = vport_id;
6779         vport_update_params.update_vport_active_tx_flg = 1;
6780         vport_update_params.vport_active_tx_flg = 1;
6781         vport_update_params.update_vport_active_rx_flg = 1;
6782         vport_update_params.vport_active_rx_flg = 1;
6783         vport_update_params.rss_params = rss_params;
6784         vport_update_params.update_inner_vlan_removal_flg = 1;
6785         vport_update_params.inner_vlan_removal_flg = 1;
6786 
6787 	if (hw_lro_enable) {
6788 		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6789 
6790 		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6791 
6792 		tpa_params.update_tpa_en_flg = 1;
6793 		tpa_params.tpa_ipv4_en_flg = 1;
6794 		tpa_params.tpa_ipv6_en_flg = 1;
6795 
6796 		tpa_params.update_tpa_param_flg = 1;
6797 		tpa_params.tpa_pkt_split_flg = 0;
6798 		tpa_params.tpa_hdr_data_split_flg = 0;
6799 		tpa_params.tpa_gro_consistent_flg = 0;
6800 		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6801 		tpa_params.tpa_max_size = (uint16_t)(-1);
6802 		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
6803 		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;
6804 
6805 		vport_update_params.sge_tpa_params = &tpa_params;
6806 	}
6807 
6808         rc = qlnx_update_vport(cdev, &vport_update_params);
6809         if (rc) {
6810 		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6811                 return rc;
6812         }
6813 
6814         return 0;
6815 }
6816 
6817 static int
6818 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6819 	struct qlnx_tx_queue *txq)
6820 {
6821 	uint16_t	hw_bd_cons;
6822 	uint16_t	ecore_cons_idx;
6823 
6824 	QL_DPRINT2(ha, "enter\n");
6825 
6826 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6827 
6828 	while (hw_bd_cons !=
6829 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6830 		mtx_lock(&fp->tx_mtx);
6831 
6832 		(void)qlnx_tx_int(ha, fp, txq);
6833 
6834 		mtx_unlock(&fp->tx_mtx);
6835 
6836 		qlnx_mdelay(__func__, 2);
6837 
6838 		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6839 	}
6840 
6841 	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6842 
6843         return 0;
6844 }
6845 
6846 static int
6847 qlnx_stop_queues(qlnx_host_t *ha)
6848 {
6849         struct qlnx_update_vport_params	vport_update_params;
6850         struct ecore_dev		*cdev;
6851         struct qlnx_fastpath		*fp;
6852         int				rc, tc, i;
6853 
6854         cdev = &ha->cdev;
6855 
6856         /* Disable the vport */
6857 
6858         memset(&vport_update_params, 0, sizeof(vport_update_params));
6859 
6860         vport_update_params.vport_id = 0;
6861         vport_update_params.update_vport_active_tx_flg = 1;
6862         vport_update_params.vport_active_tx_flg = 0;
6863         vport_update_params.update_vport_active_rx_flg = 1;
6864         vport_update_params.vport_active_rx_flg = 0;
6865         vport_update_params.rss_params = &ha->rss_params;
6866         vport_update_params.rss_params->update_rss_config = 0;
6867         vport_update_params.rss_params->rss_enable = 0;
6868         vport_update_params.update_inner_vlan_removal_flg = 0;
6869         vport_update_params.inner_vlan_removal_flg = 0;
6870 
6871 	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6872 
6873         rc = qlnx_update_vport(cdev, &vport_update_params);
6874         if (rc) {
6875 		QL_DPRINT1(ha, "Failed to update vport\n");
6876                 return rc;
6877         }
6878 
6879         /* Flush Tx queues. If needed, request drain from MCP */
6880         for_each_rss(i) {
6881                 fp = &ha->fp_array[i];
6882 
6883                 for (tc = 0; tc < ha->num_tc; tc++) {
6884                         struct qlnx_tx_queue *txq = fp->txq[tc];
6885 
6886                         rc = qlnx_drain_txq(ha, fp, txq);
6887                         if (rc)
6888                                 return rc;
6889                 }
6890         }
6891 
6892         /* Stop all queues in reverse order */
6893         for (i = ha->num_rss - 1; i >= 0; i--) {
6894 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6895 
6896                 fp = &ha->fp_array[i];
6897 
6898                 /* Stop the Tx Queue(s)*/
6899                 for (tc = 0; tc < ha->num_tc; tc++) {
6900 			int tx_queue_id __unused;
6901 
6902 			tx_queue_id = tc * ha->num_rss + i;
6903 			rc = ecore_eth_tx_queue_stop(p_hwfn,
6904 					fp->txq[tc]->handle);
6905 
6906                         if (rc) {
6907 				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6908 					   tx_queue_id);
6909                                 return rc;
6910                         }
6911                 }
6912 
6913                 /* Stop the Rx Queue*/
6914 		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6915 				false);
6916                 if (rc) {
6917                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6918                         return rc;
6919                 }
6920         }
6921 
6922         /* Stop the vport */
6923 	for_each_hwfn(cdev, i) {
6924 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6925 
6926 		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6927 
6928 		if (rc) {
6929                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
6930 			return rc;
6931 		}
6932 	}
6933 
6934         return rc;
6935 }
6936 
6937 static int
6938 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6939 	enum ecore_filter_opcode opcode,
6940 	unsigned char mac[ETH_ALEN])
6941 {
6942 	struct ecore_filter_ucast	ucast;
6943 	struct ecore_dev		*cdev;
6944 	int				rc;
6945 
6946 	cdev = &ha->cdev;
6947 
6948 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6949 
6950         ucast.opcode = opcode;
6951         ucast.type = ECORE_FILTER_MAC;
6952         ucast.is_rx_filter = 1;
6953         ucast.vport_to_add_to = 0;
6954         memcpy(&ucast.mac[0], mac, ETH_ALEN);
6955 
6956 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6957 
6958         return (rc);
6959 }
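
/*
 * Editor's addition (illustrative): hypothetical use of the helper above
 * to re-program the station address after a MAC change:
 */
#if 0
	if (qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac))
		QL_DPRINT1(ha, "failed to program station MAC\n");
#endif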
6960 
6961 static int
6962 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6963 {
6964 	struct ecore_filter_ucast	ucast;
6965 	struct ecore_dev		*cdev;
6966 	int				rc;
6967 
6968 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6969 
6970 	ucast.opcode = ECORE_FILTER_REPLACE;
6971 	ucast.type = ECORE_FILTER_MAC;
6972 	ucast.is_rx_filter = 1;
6973 
6974 	cdev = &ha->cdev;
6975 
6976 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6977 
6978 	return (rc);
6979 }
6980 
6981 static int
6982 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6983 {
6984 	struct ecore_filter_mcast	*mcast;
6985 	struct ecore_dev		*cdev;
6986 	int				rc, i;
6987 
6988 	cdev = &ha->cdev;
6989 
6990 	mcast = &ha->ecore_mcast;
6991 	bzero(mcast, sizeof(struct ecore_filter_mcast));
6992 
6993 	mcast->opcode = ECORE_FILTER_REMOVE;
6994 
6995 	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6996 		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6997 			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6998 			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6999 			/* pack densely so the REMOVE list has no gaps */
7000 			memcpy(&mcast->mac[mcast->num_mc_addrs][0], &ha->mcast[i].addr[0], ETH_ALEN);
7000 			mcast->num_mc_addrs++;
7001 		}
7002 	}
7004 
7005 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7006 
7007 	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7008 	ha->nmcast = 0;
7009 
7010 	return (rc);
7011 }
7012 
7013 static int
7014 qlnx_clean_filters(qlnx_host_t *ha)
7015 {
7016         int	rc = 0;
7017 
7018 	/* Remove all unicast macs */
7019 	rc = qlnx_remove_all_ucast_mac(ha);
7020 	if (rc)
7021 		return rc;
7022 
7023 	/* Remove all multicast macs */
7024 	rc = qlnx_remove_all_mcast_mac(ha);
7025 	if (rc)
7026 		return rc;
7027 
7028         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7029 
7030         return (rc);
7031 }
7032 
7033 static int
7034 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7035 {
7036 	struct ecore_filter_accept_flags	accept;
7037 	int					rc = 0;
7038 	struct ecore_dev			*cdev;
7039 
7040 	cdev = &ha->cdev;
7041 
7042 	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7043 
7044 	accept.update_rx_mode_config = 1;
7045 	accept.rx_accept_filter = filter;
7046 
7047 	accept.update_tx_mode_config = 1;
7048 	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7049 		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7050 
7051 	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7052 			ECORE_SPQ_MODE_CB, NULL);
7053 
7054 	return (rc);
7055 }
7056 
7057 static int
7058 qlnx_set_rx_mode(qlnx_host_t *ha)
7059 {
7060 	int	rc = 0;
7061 	uint8_t	filter;
7062 
7063 	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7064         if (rc)
7065                 return rc;
7066 
7067 	rc = qlnx_remove_all_mcast_mac(ha);
7068         if (rc)
7069                 return rc;
7070 
7071 	filter = ECORE_ACCEPT_UCAST_MATCHED |
7072 			ECORE_ACCEPT_MCAST_MATCHED |
7073 			ECORE_ACCEPT_BCAST;
7074 
7075 	if (qlnx_vf_device(ha) == 0) {
7076 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7077 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7078 	}
7079 	ha->filter = filter;
7080 
7081 	rc = qlnx_set_rx_accept_filter(ha, filter);
7082 
7083 	return (rc);
7084 }
7085 
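/*
 * Name: qlnx_set_link
 * Function: asks the management FW to bring the link up or down on every
 *	hw-function, acquiring and releasing a PTT window around each
 *	ecore_mcp_set_link() call; a no-op when qlnx_vf_device() returns 0.
 */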
7086 static int
7087 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7088 {
7089         int			i, rc = 0;
7090 	struct ecore_dev	*cdev;
7091 	struct ecore_hwfn	*hwfn;
7092 	struct ecore_ptt	*ptt;
7093 
7094 	if (qlnx_vf_device(ha) == 0)
7095 		return (0);
7096 
7097 	cdev = &ha->cdev;
7098 
7099         for_each_hwfn(cdev, i) {
7100                 hwfn = &cdev->hwfns[i];
7101 
7102                 ptt = ecore_ptt_acquire(hwfn);
7103                 if (!ptt)
7104                         return (-EBUSY);
7105 
7106                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7107 
7108                 ecore_ptt_release(hwfn, ptt);
7109 
7110                 if (rc)
7111                         return rc;
7112         }
7113         return (rc);
7114 }
7115 
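/*
 * Name: qlnx_get_counter
 * Function: if_get_counter callback; maps the cached vport hardware
 *	statistics (refreshed by qlnx_timer()) onto the generic ifnet
 *	counters and falls back to if_get_counter_default() for anything
 *	the hardware does not track.
 */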
7116 static uint64_t
7117 qlnx_get_counter(if_t ifp, ift_counter cnt)
7118 {
7119 	qlnx_host_t *ha;
7120 	uint64_t count;
7121 
7122         ha = (qlnx_host_t *)if_getsoftc(ifp);
7123 
7124         switch (cnt) {
7125         case IFCOUNTER_IPACKETS:
7126 		count = ha->hw_stats.common.rx_ucast_pkts +
7127 			ha->hw_stats.common.rx_mcast_pkts +
7128 			ha->hw_stats.common.rx_bcast_pkts;
7129 		break;
7130 
7131         case IFCOUNTER_IERRORS:
7132 		count = ha->hw_stats.common.rx_crc_errors +
7133 			ha->hw_stats.common.rx_align_errors +
7134 			ha->hw_stats.common.rx_oversize_packets +
7135 			ha->hw_stats.common.rx_undersize_packets;
7136 		break;
7137 
7138         case IFCOUNTER_OPACKETS:
7139 		count = ha->hw_stats.common.tx_ucast_pkts +
7140 			ha->hw_stats.common.tx_mcast_pkts +
7141 			ha->hw_stats.common.tx_bcast_pkts;
7142 		break;
7143 
7144         case IFCOUNTER_OERRORS:
7145                 count = ha->hw_stats.common.tx_err_drop_pkts;
7146 		break;
7147 
7148         case IFCOUNTER_COLLISIONS:
7149                 return (0);
7150 
7151         case IFCOUNTER_IBYTES:
7152 		count = ha->hw_stats.common.rx_ucast_bytes +
7153 			ha->hw_stats.common.rx_mcast_bytes +
7154 			ha->hw_stats.common.rx_bcast_bytes;
7155 		break;
7156 
7157         case IFCOUNTER_OBYTES:
7158 		count = ha->hw_stats.common.tx_ucast_bytes +
7159 			ha->hw_stats.common.tx_mcast_bytes +
7160 			ha->hw_stats.common.tx_bcast_bytes;
7161 		break;
7162 
7163         case IFCOUNTER_IMCASTS:
7164 		count = ha->hw_stats.common.rx_mcast_pkts;
7165 		break;
7166 
7167         case IFCOUNTER_OMCASTS:
7168 		count = ha->hw_stats.common.tx_mcast_pkts;
7169 		break;
7170 
7171         case IFCOUNTER_IQDROPS:
7172         case IFCOUNTER_OQDROPS:
7173         case IFCOUNTER_NOPROTO:
7174 
7175         default:
7176                 return (if_get_counter_default(ifp, cnt));
7177         }
7178 	return (count);
7179 }
7180 
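/*
 * Name: qlnx_timer
 * Function: once-a-second callout; refreshes the cached vport statistics,
 *	optionally samples the storm processor counters, and when error
 *	recovery is pending kicks the error task instead of re-arming
 *	itself.
 */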
7181 static void
7182 qlnx_timer(void *arg)
7183 {
7184 	qlnx_host_t	*ha;
7185 
7186 	ha = (qlnx_host_t *)arg;
7187 
7188 	if (ha->error_recovery) {
7189 		ha->error_recovery = 0;
7190 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7191 		return;
7192 	}
7193 
7194 	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7195 
7196 	if (ha->storm_stats_gather)
7197 		qlnx_sample_storm_stats(ha);
7198 
7199 	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7200 
7201 	return;
7202 }
7203 
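/*
 * Name: qlnx_load
 * Function: bring-up path - allocates the fastpath arrays and load-time
 *	memory, sets up and CPU-binds the per-RSS interrupts, starts the
 *	vport and Rx/Tx queues, programs the Rx mode, requests link-up and
 *	arms the stats callout.
 */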
7204 static int
7205 qlnx_load(qlnx_host_t *ha)
7206 {
7207 	int			i;
7208 	int			rc = 0;
7209         device_t		dev;
7210 
7211         dev = ha->pci_dev;
7212 
7213 	QL_DPRINT2(ha, "enter\n");
7214 
7215         rc = qlnx_alloc_mem_arrays(ha);
7216         if (rc)
7217                 goto qlnx_load_exit0;
7218 
7219         qlnx_init_fp(ha);
7220 
7221         rc = qlnx_alloc_mem_load(ha);
7222         if (rc)
7223                 goto qlnx_load_exit1;
7224 
7225         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7226 		   ha->num_rss, ha->num_tc);
7227 
7228 	for (i = 0; i < ha->num_rss; i++) {
7229 		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7230                         (INTR_TYPE_NET | INTR_MPSAFE),
7231                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
7232                         &ha->irq_vec[i].handle))) {
7233                         QL_DPRINT1(ha, "could not setup interrupt\n");
7234                         goto qlnx_load_exit2;
7235 		}
7236 
7237 		QL_DPRINT2(ha, "rss_id = %d irq_rid %d"
7238 			" irq %p handle %p\n", i,
7239 			ha->irq_vec[i].irq_rid,
7240 			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7241 
7242 		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7243 	}
7244 
7245         rc = qlnx_start_queues(ha);
7246         if (rc)
7247                 goto qlnx_load_exit2;
7248 
7249         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7250 
7251         /* Add primary mac and set Rx filters */
7252         rc = qlnx_set_rx_mode(ha);
7253         if (rc)
7254                 goto qlnx_load_exit2;
7255 
7256         /* Ask for link-up using current configuration */
7257 	qlnx_set_link(ha, true);
7258 
7259 	if (qlnx_vf_device(ha) == 0)
7260 		qlnx_link_update(&ha->cdev.hwfns[0]);
7261 
7262         ha->state = QLNX_STATE_OPEN;
7263 
7264 	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7265 
7266 	if (ha->flags.callout_init)
7267         	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7268 
7269         goto qlnx_load_exit0;
7270 
7271 qlnx_load_exit2:
7272         qlnx_free_mem_load(ha);
7273 
7274 qlnx_load_exit1:
7275         ha->num_rss = 0;
7276 
7277 qlnx_load_exit0:
7278 	QL_DPRINT2(ha, "exit [%d]\n", rc);
7279         return rc;
7280 }
7281 
7282 static void
7283 qlnx_drain_soft_lro(qlnx_host_t *ha)
7284 {
7285 #ifdef QLNX_SOFT_LRO
7286 
7287 	if_t		ifp;
7288 	int		i;
7289 
7290 	ifp = ha->ifp;
7291 
7292 	if (if_getcapenable(ifp) & IFCAP_LRO) {
7293 	        for (i = 0; i < ha->num_rss; i++) {
7294 			struct qlnx_fastpath *fp = &ha->fp_array[i];
7295 			struct lro_ctrl *lro;
7296 
7297 			lro = &fp->rxq->lro;
7298 
7299 			tcp_lro_flush_all(lro);
7300                 }
7301 	}
7302 
7303 #endif /* #ifdef QLNX_SOFT_LRO */
7304 
7305 	return;
7306 }
7307 
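/*
 * Name: qlnx_unload
 * Function: teardown path - roughly the mirror image of qlnx_load():
 *	drops the link, flushes filters, stops the queues and fastpath,
 *	tears down the interrupts, drains the fastpath taskqueues and soft
 *	LRO, releases load-time memory and marks the device closed.
 */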
7308 static void
7309 qlnx_unload(qlnx_host_t *ha)
7310 {
7311 	struct ecore_dev	*cdev;
7312         device_t		dev;
7313 	int			i;
7314 
7315 	cdev = &ha->cdev;
7316         dev = ha->pci_dev;
7317 
7318 	QL_DPRINT2(ha, "enter\n");
7319         QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
7320 
7321 	if (ha->state == QLNX_STATE_OPEN) {
7322 		qlnx_set_link(ha, false);
7323 		qlnx_clean_filters(ha);
7324 		qlnx_stop_queues(ha);
7325 		ecore_hw_stop_fastpath(cdev);
7326 
7327 		for (i = 0; i < ha->num_rss; i++) {
7328 			if (ha->irq_vec[i].handle) {
7329 				(void)bus_teardown_intr(dev,
7330 					ha->irq_vec[i].irq,
7331 					ha->irq_vec[i].handle);
7332 				ha->irq_vec[i].handle = NULL;
7333 			}
7334 		}
7335 
7336 		qlnx_drain_fp_taskqueues(ha);
7337 		qlnx_drain_soft_lro(ha);
7338         	qlnx_free_mem_load(ha);
7339 	}
7340 
7341 	if (ha->flags.callout_init)
7342 		callout_drain(&ha->qlnx_callout);
7343 
7344 	qlnx_mdelay(__func__, 1000);
7345 
7346         ha->state = QLNX_STATE_CLOSED;
7347 
7348 	QL_DPRINT2(ha, "exit\n");
7349 	return;
7350 }
7351 
7352 static int
7353 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7354 {
7355 	int			rval = -1;
7356 	struct ecore_hwfn	*p_hwfn;
7357 	struct ecore_ptt	*p_ptt;
7358 
7359 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7360 
7361 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7362 	p_ptt = ecore_ptt_acquire(p_hwfn);
7363 
7364         if (!p_ptt) {
7365 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7366                 return (rval);
7367         }
7368 
7369         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7370 
7371 	if (rval == DBG_STATUS_OK)
7372                 rval = 0;
7373         else {
7374 		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7375 			" [0x%x]\n", rval);
7376 	}
7377 
7378         ecore_ptt_release(p_hwfn, p_ptt);
7379 
7380         return (rval);
7381 }
7382 
7383 static int
7384 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7385 {
7386 	int			rval = -1;
7387 	struct ecore_hwfn	*p_hwfn;
7388 	struct ecore_ptt	*p_ptt;
7389 
7390 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7391 
7392 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7393 	p_ptt = ecore_ptt_acquire(p_hwfn);
7394 
7395         if (!p_ptt) {
7396 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7397                 return (rval);
7398         }
7399 
7400         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7401 
7402 	if (rval == DBG_STATUS_OK)
7403                 rval = 0;
7404         else {
7405 		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7406 			" [0x%x]\n", rval);
7407 	}
7408 
7409         ecore_ptt_release(p_hwfn, p_ptt);
7410 
7411         return (rval);
7412 }
7413 
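/*
 * Name: qlnx_sample_storm_stats
 * Function: reads the active/stall/sleeping/inactive cycle counters of
 *	the X/Y/P/T/M/U storm processors from SEM fast memory into the
 *	next storm_stats slot; sampling disables itself after
 *	QLNX_STORM_STATS_SAMPLES_PER_HWFN rounds. (The *_BB_K2 register
 *	offsets suggest this applies to BB/K2 parts.)
 */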
7414 static void
7415 qlnx_sample_storm_stats(qlnx_host_t *ha)
7416 {
7417         int			i, index;
7418         struct ecore_dev	*cdev;
7419 	qlnx_storm_stats_t	*s_stats;
7420 	uint32_t		reg;
7421         struct ecore_ptt	*p_ptt;
7422         struct ecore_hwfn	*hwfn;
7423 
7424 	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7425 		ha->storm_stats_gather = 0;
7426 		return;
7427 	}
7428 
7429         cdev = &ha->cdev;
7430 
7431         for_each_hwfn(cdev, i) {
7432                 hwfn = &cdev->hwfns[i];
7433 
7434                 p_ptt = ecore_ptt_acquire(hwfn);
7435                 if (!p_ptt)
7436                         return;
7437 
7438 		index = ha->storm_stats_index +
7439 				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7440 
7441 		s_stats = &ha->storm_stats[index];
7442 
7443 		/* XSTORM */
7444 		reg = XSEM_REG_FAST_MEMORY +
7445 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7446 		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7447 
7448 		reg = XSEM_REG_FAST_MEMORY +
7449 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7450 		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7451 
7452 		reg = XSEM_REG_FAST_MEMORY +
7453 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7454 		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7455 
7456 		reg = XSEM_REG_FAST_MEMORY +
7457 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7458 		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7459 
7460 		/* YSTORM */
7461 		reg = YSEM_REG_FAST_MEMORY +
7462 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7463 		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7464 
7465 		reg = YSEM_REG_FAST_MEMORY +
7466 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7467 		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7468 
7469 		reg = YSEM_REG_FAST_MEMORY +
7470 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7471 		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7472 
7473 		reg = YSEM_REG_FAST_MEMORY +
7474 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7475 		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7476 
7477 		/* PSTORM */
7478 		reg = PSEM_REG_FAST_MEMORY +
7479 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7480 		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7481 
7482 		reg = PSEM_REG_FAST_MEMORY +
7483 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7484 		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7485 
7486 		reg = PSEM_REG_FAST_MEMORY +
7487 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7488 		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7489 
7490 		reg = PSEM_REG_FAST_MEMORY +
7491 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7492 		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7493 
7494 		/* TSTORM */
7495 		reg = TSEM_REG_FAST_MEMORY +
7496 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7497 		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7498 
7499 		reg = TSEM_REG_FAST_MEMORY +
7500 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7501 		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7502 
7503 		reg = TSEM_REG_FAST_MEMORY +
7504 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7505 		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7506 
7507 		reg = TSEM_REG_FAST_MEMORY +
7508 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7509 		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7510 
7511 		/* MSTORM */
7512 		reg = MSEM_REG_FAST_MEMORY +
7513 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7514 		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7515 
7516 		reg = MSEM_REG_FAST_MEMORY +
7517 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7518 		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7519 
7520 		reg = MSEM_REG_FAST_MEMORY +
7521 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7522 		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7523 
7524 		reg = MSEM_REG_FAST_MEMORY +
7525 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7526 		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7527 
7528 		/* USTORM */
7529 		reg = USEM_REG_FAST_MEMORY +
7530 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7531 		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7532 
7533 		reg = USEM_REG_FAST_MEMORY +
7534 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7535 		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7536 
7537 		reg = USEM_REG_FAST_MEMORY +
7538 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7539 		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7540 
7541 		reg = USEM_REG_FAST_MEMORY +
7542 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7543 		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7544 
7545                 ecore_ptt_release(hwfn, p_ptt);
7546         }
7547 
7548 	ha->storm_stats_index++;
7549 
7550         return;
7551 }
7552 
7553 /*
7554  * Name: qlnx_dump_buf8
7555  * Function: dumps a buffer as bytes
7556  */
7557 static void
7558 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7559 {
7560         device_t	dev;
7561         uint32_t	i = 0;
7562         uint8_t		*buf;
7563 
7564         dev = ha->pci_dev;
7565         buf = dbuf;
7566 
7567         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7568 
7569         while (len >= 16) {
7570                 device_printf(dev,"0x%08x:"
7571                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7572                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7573                         buf[0], buf[1], buf[2], buf[3],
7574                         buf[4], buf[5], buf[6], buf[7],
7575                         buf[8], buf[9], buf[10], buf[11],
7576                         buf[12], buf[13], buf[14], buf[15]);
7577                 i += 16;
7578                 len -= 16;
7579                 buf += 16;
7580         }
7581         /* Print the 1..15 trailing bytes, if any, on a single line. */
7582         if (len) {
7583                 char		line[64];
7584                 uint32_t	j, n;
7585 
7586                 n = snprintf(line, sizeof(line), "0x%08x:", i);
7587                 for (j = 0; j < len; j++)
7588                         n += snprintf(line + n, sizeof(line) - n,
7589                                 " %02x", buf[j]);
7590                 device_printf(dev, "%s\n", line);
7591         }
7671 
7672         device_printf(dev, "%s: %s dump end\n", __func__, msg);
7673 
7674         return;
7675 }
7676 
7677 #ifdef CONFIG_ECORE_SRIOV
7678 
7679 static void
7680 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7681 {
7682         struct ecore_public_vf_info *vf_info;
7683 
7684         vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7685 
7686         if (!vf_info)
7687                 return;
7688 
7689         /* Clear the VF mac */
7690         memset(vf_info->forced_mac, 0, ETH_ALEN);
7691 
7692         vf_info->forced_vlan = 0;
7693 
7694 	return;
7695 }
7696 
7697 void
7698 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7699 {
7700 	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7701 	return;
7702 }
7703 
7704 static int
7705 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7706 	struct ecore_filter_ucast *params)
7707 {
7708         struct ecore_public_vf_info *vf;
7709 
7710 	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7711 		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7712 			"VF[%d] vport not initialized\n", vfid);
7713 		return ECORE_INVAL;
7714 	}
7715 
7716         vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7717         if (!vf)
7718                 return -EINVAL;
7719 
7720         /* No real decision to make; Store the configured MAC */
7721         if (params->type == ECORE_FILTER_MAC ||
7722             params->type == ECORE_FILTER_MAC_VLAN)
7723                 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7724 
7725         return 0;
7726 }
7727 
7728 int
7729 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7730 {
7731 	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7732 }
7733 
7734 static int
7735 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7736         struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
7737 {
7738 	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7739 		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7740 			"VF[%d] vport not initialized\n", vfid);
7741 		return ECORE_INVAL;
7742 	}
7743 
7744         /* Untrusted VFs can't even be trusted to know that fact.
7745          * Simply indicate everything is configured fine, and trace
7746          * configuration 'behind their back'.
7747          */
7748         if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7749                 return 0;
7750 
7751         return 0;
7752 }
7753 
7754 int
7755 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7756 {
7757 	return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7758 }
7759 
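/*
 * Name: qlnx_find_hwfn_index
 * Function: returns the index of p_hwfn within its device's hwfns[]
 *	array, or -1 if the pointer does not belong to this device.
 */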
7760 static int
7761 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7762 {
7763 	int			i;
7764 	struct ecore_dev	*cdev;
7765 
7766 	cdev = p_hwfn->p_dev;
7767 
7768 	for (i = 0; i < cdev->num_hwfns; i++) {
7769 		if (&cdev->hwfns[i] == p_hwfn)
7770 			break;
7771 	}
7772 
7773 	if (i >= cdev->num_hwfns)
7774 		return (-1);
7775 
7776 	return (i);
7777 }
7778 
7779 static int
7780 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7781 {
7782 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7783 	int i;
7784 
7785 	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7786 		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7787 
7788 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7789 		return (-1);
7790 
7791 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7792 		atomic_testandset_32(&ha->sriov_task[i].flags,
7793 			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7794 
7795 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7796 			&ha->sriov_task[i].pf_task);
7797 	}
7798 
7799 	return (ECORE_SUCCESS);
7800 }
7801 
7802 int
7803 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7804 {
7805 	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7806 }
7807 
7808 static void
7809 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7810 {
7811 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7812 	int i;
7813 
7814 	if (!ha->sriov_initialized)
7815 		return;
7816 
7817 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7818 		ha, p_hwfn->p_dev, p_hwfn);
7819 
7820 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7821 		return;
7822 
7823 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7824 		atomic_testandset_32(&ha->sriov_task[i].flags,
7825 			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7826 
7827 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7828 			&ha->sriov_task[i].pf_task);
7829 	}
7830 
7831 	return;
7832 }
7833 
7834 void
7835 qlnx_vf_flr_update(void *p_hwfn)
7836 {
7837 	__qlnx_vf_flr_update(p_hwfn);
7838 
7839 	return;
7840 }
7841 
7842 #ifndef QLNX_VF
7843 
7844 static void
7845 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7846 {
7847 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7848 	int i;
7849 
7850 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7851 		ha, p_hwfn->p_dev, p_hwfn);
7852 
7853 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7854 		return;
7855 
7856 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7857 		ha, p_hwfn->p_dev, p_hwfn, i);
7858 
7859 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7860 		atomic_testandset_32(&ha->sriov_task[i].flags,
7861 			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7862 
7863 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7864 			&ha->sriov_task[i].pf_task);
7865 	}
7866 }
7867 
7868 static void
7869 qlnx_initialize_sriov(qlnx_host_t *ha)
7870 {
7871 	device_t	dev;
7872 	nvlist_t	*pf_schema, *vf_schema;
7873 	int		iov_error;
7874 
7875 	dev = ha->pci_dev;
7876 
7877 	pf_schema = pci_iov_schema_alloc_node();
7878 	vf_schema = pci_iov_schema_alloc_node();
7879 
7880 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7881 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7882 		IOV_SCHEMA_HASDEFAULT, FALSE);
7883 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7884 		IOV_SCHEMA_HASDEFAULT, FALSE);
7885 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
7886 		IOV_SCHEMA_HASDEFAULT, 1);
7887 
7888 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7889 
7890 	if (iov_error != 0) {
7891 		ha->sriov_initialized = 0;
7892 	} else {
7893 		device_printf(dev, "SRIOV initialized\n");
7894 		ha->sriov_initialized = 1;
7895 	}
7896 
7897 	return;
7898 }
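
/*
 * A minimal iovctl.conf(5) sketch that would exercise the schema
 * registered above; the device name and values are illustrative
 * assumptions, not taken from this driver:
 *
 *	PF { device : "ql0"; num_vfs : 2; }
 *	DEFAULT { allow-set-mac : true; num-queues : 2; }
 *	VF-0 { mac-addr : "02:00:00:00:00:01"; }
 */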
7899 
7900 static void
7901 qlnx_sriov_disable(qlnx_host_t *ha)
7902 {
7903 	struct ecore_dev *cdev;
7904 	int i, j;
7905 
7906 	cdev = &ha->cdev;
7907 
7908 	ecore_iov_set_vfs_to_disable(cdev, true);
7909 
7910 	for_each_hwfn(cdev, i) {
7911 		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
7912 		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
7913 
7914 		if (!ptt) {
7915 			QL_DPRINT1(ha, "Failed to acquire ptt\n");
7916 			return;
7917 		}
7918 		/* Clean WFQ db and configure equal weight for all vports */
7919 		ecore_clean_wfq_db(hwfn, ptt);
7920 
7921 		ecore_for_each_vf(hwfn, j) {
7922 			int k = 0;
7923 
7924 			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
7925 				continue;
7926 
7927 			if (ecore_iov_is_vf_started(hwfn, j)) {
7928 				/* Wait until VF is disabled before releasing */
7929 
7930 				for (k = 0; k < 100; k++) {
7931 					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
7932 						qlnx_mdelay(__func__, 10);
7933 					} else
7934 						break;
7935 				}
7936 			}
7937 
7938 			if (k < 100)
7939 				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
7940                                                           ptt, j);
7941 			else {
7942 				QL_DPRINT1(ha,
7943 					"Timeout waiting for VF's FLR to end\n");
7944 			}
7945 		}
7946 		ecore_ptt_release(hwfn, ptt);
7947 	}
7948 
7949 	ecore_iov_set_vfs_to_disable(cdev, false);
7950 
7951 	return;
7952 }
7953 
7954 static void
7955 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
7956 	struct ecore_iov_vf_init_params *params)
7957 {
7958         u16 base, i;
7959 
7960         /* Since we have an equal resource distribution per-VF, and we assume
7961          * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
7962          * sequentially from there.
7963          */
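        /*
         * Illustrative example (numbers are assumptions, not taken from
         * this file): with FEAT_NUM(hwfn, ECORE_PF_L2_QUE) == 8 and
         * params->num_queues == 4, VF0 is handed queues 8..11 and VF1
         * queues 12..15, with vport/RSS engine ids 1 and 2 respectively.
         */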
7964         base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7965 
7966         params->rel_vf_id = vfid;
7967 
7968         for (i = 0; i < params->num_queues; i++) {
7969                 params->req_rx_queue[i] = base + i;
7970                 params->req_tx_queue[i] = base + i;
7971         }
7972 
7973         /* PF uses indices 0 for itself; Set vport/RSS afterwards */
7974         params->vport_id = vfid + 1;
7975         params->rss_eng_id = vfid + 1;
7976 
7977 	return;
7978 }
7979 
7980 static int
7981 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
7982 {
7983 	qlnx_host_t		*ha;
7984 	struct ecore_dev	*cdev;
7985 	struct ecore_iov_vf_init_params params;
7986 	int ret, j, i;
7987 	uint32_t max_vfs;
7988 
7989 	if ((ha = device_get_softc(dev)) == NULL) {
7990 		device_printf(dev, "%s: cannot get softc\n", __func__);
7991 		return (-1);
7992 	}
7993 
7994 	if (qlnx_create_pf_taskqueues(ha) != 0)
7995 		goto qlnx_iov_init_err0;
7996 
7997 	cdev = &ha->cdev;
7998 
7999 	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8000 
8001 	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8002 		dev, num_vfs, max_vfs);
8003 
8004         if (num_vfs >= max_vfs) {
8005                 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8006                           (max_vfs - 1));
8007 		goto qlnx_iov_init_err0;
8008         }
8009 
8010 	ha->vf_attr = malloc((sizeof(qlnx_vf_attr_t) * num_vfs), M_QLNXBUF,
8011 				(M_NOWAIT | M_ZERO));
8012 
8013 	if (ha->vf_attr == NULL)
8014 		goto qlnx_iov_init_err0;
8015 
8016         memset(&params, 0, sizeof(params));
8017 
8018         /* Initialize HW for VF access */
8019         for_each_hwfn(cdev, j) {
8020                 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8021                 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8022 
8023                 /* Make sure not to use more than 16 queues per VF */
8024                 params.num_queues = min_t(int,
8025                                           (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8026                                           16);
8027 
8028                 if (!ptt) {
8029                         QL_DPRINT1(ha, "Failed to acquire ptt\n");
8030                         goto qlnx_iov_init_err1;
8031                 }
8032 
8033                 for (i = 0; i < num_vfs; i++) {
8034                         if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8035                                 continue;
8036 
8037                         qlnx_sriov_enable_qid_config(hwfn, i, &params);
8038 
8039                         ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8040 
8041                         if (ret) {
8042                                 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8043                                 ecore_ptt_release(hwfn, ptt);
8044                                 goto qlnx_iov_init_err1;
8045                         }
8046                 }
8047 
8048                 ecore_ptt_release(hwfn, ptt);
8049         }
8050 
8051 	ha->num_vfs = num_vfs;
8052 	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8053 
8054 	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8055 
8056 	return (0);
8057 
8058 qlnx_iov_init_err1:
8059 	qlnx_sriov_disable(ha);
8060 
8061 qlnx_iov_init_err0:
8062 	qlnx_destroy_pf_taskqueues(ha);
8063 	ha->num_vfs = 0;
8064 
8065 	return (-1);
8066 }
8067 
8068 static void
8069 qlnx_iov_uninit(device_t dev)
8070 {
8071 	qlnx_host_t	*ha;
8072 
8073 	if ((ha = device_get_softc(dev)) == NULL) {
8074 		device_printf(dev, "%s: cannot get softc\n", __func__);
8075 		return;
8076 	}
8077 
8078 	QL_DPRINT2(ha," dev = %p enter\n", dev);
8079 
8080 	qlnx_sriov_disable(ha);
8081 	qlnx_destroy_pf_taskqueues(ha);
8082 
8083 	free(ha->vf_attr, M_QLNXBUF);
8084 	ha->vf_attr = NULL;
8085 
8086 	ha->num_vfs = 0;
8087 
8088 	QL_DPRINT2(ha," dev = %p exit\n", dev);
8089 	return;
8090 }
8091 
8092 static int
8093 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8094 {
8095 	qlnx_host_t	*ha;
8096 	qlnx_vf_attr_t	*vf_attr;
8097 	unsigned const char *mac;
8098 	size_t size;
8099 	struct ecore_hwfn *p_hwfn;
8100 
8101 	if ((ha = device_get_softc(dev)) == NULL) {
8102 		device_printf(dev, "%s: cannot get softc\n", __func__);
8103 		return (-1);
8104 	}
8105 
8106 	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8107 
8108 	if (vfnum > (ha->num_vfs - 1)) {
8109 		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8110 			vfnum, (ha->num_vfs - 1));
8111 		return (-1); /* don't index ha->vf_attr[] out of bounds */
8112 	}
8112 
8113 	vf_attr = &ha->vf_attr[vfnum];
8114 
8115         if (nvlist_exists_binary(params, "mac-addr")) {
8116                 mac = nvlist_get_binary(params, "mac-addr", &size);
8117                 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8118 		device_printf(dev,
8119 			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
8120 			__func__, vf_attr->mac_addr[0],
8121 			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8122 			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8123 			vf_attr->mac_addr[5]);
8124 		p_hwfn = &ha->cdev.hwfns[0];
8125 		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8126 			vfnum);
8127 	}
8128 
8129 	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8130 	return (0);
8131 }
8132 
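/*
 * Name: qlnx_handle_vf_msg
 * Function: services pending VF->PF mailbox requests. events[] is a
 *	bitmap with one bit per relative VF id, i.e. VF i maps to bit
 *	(i % 64) of events[i / 64]; only VFs with their bit set have a
 *	message copied into the PF buffer and processed.
 */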
8133 static void
8134 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8135 {
8136         uint64_t events[ECORE_VF_ARRAY_LENGTH];
8137         struct ecore_ptt *ptt;
8138         int i;
8139 
8140         ptt = ecore_ptt_acquire(p_hwfn);
8141         if (!ptt) {
8142                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8143 		__qlnx_pf_vf_msg(p_hwfn, 0);
8144                 return;
8145         }
8146 
8147         ecore_iov_pf_get_pending_events(p_hwfn, events);
8148 
8149         QL_DPRINT2(ha, "Event mask of VF events: "
8150 		"0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
8151                    events[0], events[1], events[2]);
8152 
8153         ecore_for_each_vf(p_hwfn, i) {
8154                 /* Skip VFs with no pending messages */
8155                 if (!(events[i / 64] & (1ULL << (i % 64))))
8156                         continue;
8157 
8158 		QL_DPRINT2(ha,
8159                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8160                            i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8161 
8162                 /* Copy VF's message to PF's request buffer for that VF */
8163                 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8164                         continue;
8165 
8166                 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8167         }
8168 
8169         ecore_ptt_release(p_hwfn, ptt);
8170 
8171 	return;
8172 }
8173 
8174 static void
8175 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8176 {
8177         struct ecore_ptt *ptt;
8178 	int ret;
8179 
8180 	ptt = ecore_ptt_acquire(p_hwfn);
8181 
8182 	if (!ptt) {
8183                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8184 		__qlnx_vf_flr_update(p_hwfn);
8185                 return;
8186 	}
8187 
8188 	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8189 
8190 	if (ret) {
8191                 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8192 	}
8193 
8194 	ecore_ptt_release(p_hwfn, ptt);
8195 
8196 	return;
8197 }
8198 
8199 static void
8200 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8201 {
8202         struct ecore_ptt *ptt;
8203 	int i;
8204 
8205 	ptt = ecore_ptt_acquire(p_hwfn);
8206 
8207 	if (!ptt) {
8208                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8209 		qlnx_vf_bulleting_update(p_hwfn);
8210                 return;
8211 	}
8212 
8213 	ecore_for_each_vf(p_hwfn, i) {
8214 		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8215 			p_hwfn, i);
8216 		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
8217 	}
8218 
8219 	ecore_ptt_release(p_hwfn, ptt);
8220 
8221 	return;
8222 }
8223 
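/*
 * Name: qlnx_pf_taskqueue
 * Function: per-hwfn task handler; atomically tests-and-clears each
 *	QLNX_SRIOV_TASK_FLAGS_* bit and runs the corresponding VF
 *	message, FLR or bulletin service routine.
 */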
8224 static void
8225 qlnx_pf_taskqueue(void *context, int pending)
8226 {
8227 	struct ecore_hwfn	*p_hwfn;
8228 	qlnx_host_t		*ha;
8229 	int			i;
8230 
8231 	p_hwfn = context;
8232 
8233 	if (p_hwfn == NULL)
8234 		return;
8235 
8236 	ha = (qlnx_host_t *)(p_hwfn->p_dev);
8237 
8238 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8239 		return;
8240 
8241 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8242 		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8243 		qlnx_handle_vf_msg(ha, p_hwfn);
8244 
8245 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8246 		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8247 		qlnx_handle_vf_flr_update(ha, p_hwfn);
8248 
8249 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8250 		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8251 		qlnx_handle_bulletin_update(ha, p_hwfn);
8252 
8253 	return;
8254 }
8255 
8256 static int
8257 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8258 {
8259 	int	i;
8260 	char	tq_name[32];
8261 
8262 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8263                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8264 
8265 		bzero(tq_name, sizeof (tq_name));
8266 		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8267 
8268 		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8269 
8270 		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8271 			 taskqueue_thread_enqueue,
8272 			&ha->sriov_task[i].pf_taskqueue);
8273 
8274 		if (ha->sriov_task[i].pf_taskqueue == NULL)
8275 			return (-1);
8276 
8277 		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8278 			PI_NET, "%s", tq_name);
8279 
8280 		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8281 	}
8282 
8283 	return (0);
8284 }
8285 
8286 static void
8287 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8288 {
8289 	int	i;
8290 
8291 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8292 		if (ha->sriov_task[i].pf_taskqueue != NULL) {
8293 			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8294 				&ha->sriov_task[i].pf_task);
8295 			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8296 			ha->sriov_task[i].pf_taskqueue = NULL;
8297 		}
8298 	}
8299 	return;
8300 }
8301 
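/*
 * Name: qlnx_inform_vf_link_state
 * Function: copies the PF's current MCP link params/state/caps into the
 *	bulletin of every possible VF, forcing link down while the PF link
 *	is down and reporting 100G on CMT (dual-hwfn) devices, then posts
 *	the bulletin updates.
 */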
8302 static void
8303 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8304 {
8305 	struct ecore_mcp_link_capabilities caps;
8306 	struct ecore_mcp_link_params params;
8307 	struct ecore_mcp_link_state link;
8308 	int i;
8309 
8310 	if (!p_hwfn->pf_iov_info)
8311 		return;
8312 
8313 	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
8314 	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8315 	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8316 
8317 	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8318         memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8319         memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8320 
8321 	QL_DPRINT2(ha, "called\n");
8322 
8323         /* Update bulletin of all future possible VFs with link configuration */
8324         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8325                 /* Modify link according to the VF's configured link state */
8326 
8327                 link.link_up = false;
8328 
8329                 if (ha->link_up) {
8330                         link.link_up = true;
8331                         /* Set speed according to the maximum supported by
8332                          * the HW; that is 40G for regular devices and
8333                          * 100G for CMT mode devices.
8334                          */
8335                         link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8336 						100000 : link.speed;
8337 		}
8338 		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8339                 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
8340         }
8341 
8342 	qlnx_vf_bulleting_update(p_hwfn);
8343 
8344 	return;
8345 }
8346 #endif /* #ifndef QLNX_VF */
8347 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8348