xref: /freebsd/sys/dev/qlxgb/qla_os.c (revision a0ee8cc6)
/*
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_os.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8020
#define PCI_PRODUCT_QLOGIC_ISP8020	0x8020
#endif

#define PCI_QLOGIC_ISP8020 \
	((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla80xx_devclass;

DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);

MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
MODULE_DEPEND(qla80xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");

uint32_t std_replenish = 8;
uint32_t jumbo_replenish = 2;
uint32_t rcv_pkt_thres = 128;
uint32_t rcv_pkt_thres_d = 32;
uint32_t snd_pkt_thres = 16;
uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);

static char dev_str[64];
/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8020:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

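/*
 * Name:	qla_add_sysctls
 * Function:	Registers the driver's sysctl nodes (statistics, firmware
 *		version, debug level and the replenish/threshold tunables)
 *		under the device's sysctl tree.
 */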
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD,
                (void *)ha, 0,
                qla_sysctl_get_stats, "I", "Statistics");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &dbg_level, dbg_level, "Debug Level");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &std_replenish, std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
                &jumbo_replenish, jumbo_replenish,
                "Threshold for Replenishing Jumbo Frames");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_pkt_thres",  CTLFLAG_RW,
                &rcv_pkt_thres, rcv_pkt_thres,
                "Threshold for # of rcv pkts to trigger indication isr");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_pkt_thres_d",  CTLFLAG_RW,
                &rcv_pkt_thres_d, rcv_pkt_thres_d,
                "Threshold for # of rcv pkts to trigger indication deferred");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "snd_pkt_thres",  CTLFLAG_RW,
                &snd_pkt_thres, snd_pkt_thres,
                "Threshold for # of snd packets");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "free_pkt_thres",  CTLFLAG_RW,
                &free_pkt_thres, free_pkt_thres,
                "Threshold for # of packets to free at a time");

        return;
}

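/*
 * Name:	qla_watchdog
 * Function:	Periodic callout; when not paused, kicks the transmit
 *		taskqueue if the hardware has completed transmits or the
 *		send queue is non-empty, then re-arms itself.
 */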
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit)
		return;

	if (!ha->flags.qla_watchdog_pause) {
		if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}
	}
	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len, i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
                device_printf(dev, "device is not ISP8020\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qla_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

	ha->msix_count = qla_get_msix_count(ha);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

        for (i = 0; i < ha->msix_count; i++) {
                ha->irq_vec[i].irq_rid = i+1;
                ha->irq_vec[i].ha = ha;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                        &ha->irq_vec[i].irq_rid,
                                        (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qla_pci_attach_err;
                }

                if (bus_setup_intr(dev, ha->irq_vec[i].irq,
                        (INTR_TYPE_NET | INTR_MPSAFE),
                        NULL, qla_isr, &ha->irq_vec[i],
                        &ha->irq_vec[i].handle)) {
                        device_printf(dev, "could not setup interrupt\n");
                        goto qla_pci_attach_err;
                }

		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
			&ha->irq_vec[i]);

		ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->irq_vec[i].rcv_tq);

		taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
			"%s rcvq",
			device_get_nameunit(ha->pci_dev));
        }

	qla_add_sysctls(ha);

	/* add hardware specific sysctls */
	qla_hw_add_sysctls(ha);

	/* initialize hardware */
	if (qla_init_hw(ha)) {
		device_printf(dev, "%s: qla_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
			ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
			ha->fw_ver_build);

	//qla_get_hw_caps(ha);
	qla_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qla_alloc_dma(ha)) {
		device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	callout_init(&ha->tx_callout, 1);

	/* create ioctl device interface */
	if (qla_make_cdev(ha)) {
		device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	ifp = ha->ifp;

	QLA_LOCK(ha, __func__);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

        for (i = 0; i < ha->msix_count; i++) {
		taskqueue_drain(ha->irq_vec[i].rcv_tq,
			&ha->irq_vec[i].rcv_task);
		taskqueue_free(ha->irq_vec[i].rcv_tq);
	}

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err)
		return (err);

	ha = (qla_host_t *)arg1;
	//qla_get_stats(ha);
	QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
	return (err);
}

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	qla_del_cdev(ha);

	if (ha->flags.qla_watchdog_active)
		ha->flags.qla_watchdog_exit = 1;

	callout_stop(&ha->tx_callout);
	qla_mdelay(__func__, 100);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qla_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	for (i = 0; i < ha->msix_count; i++) {
		if (ha->irq_vec[i].handle)
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		if (ha->irq_vec[i].irq)
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
	}
	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->rx_lock);
		mtx_destroy(&ha->rxj_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);
}

/*
 * DMA Related Functions
 */

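/*
 * Name:	qla_dmamap_callback
 * Function:	bus_dmamap_load() callback; stores the bus address of the
 *		single DMA segment into the caller supplied argument, or
 *		0 on error.
 */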
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

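/*
 * Name:	qla_alloc_dmabuf
 * Function:	Creates a DMA tag for the buffer described by dma_buf,
 *		allocates DMA-able memory of dma_buf->size bytes and loads
 *		the map to obtain the buffer's bus address.
 */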
int
qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qla_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qla_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qla_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qla_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}

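/*
 * Name:	qla_free_dmabuf
 * Function:	Unloads, frees and destroys the DMA resources allocated by
 *		qla_alloc_dmabuf().
 */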
void
qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

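/*
 * Name:	qla_alloc_parent_dma_tag
 * Function:	Creates the parent bus_dma tag from which all of the
 *		driver's other DMA tags are derived.
 */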
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

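/*
 * Name:	qla_free_parent_dma_tag
 * Function:	Destroys the parent bus_dma tag if one was created.
 */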
static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;
	ifp->if_start = qla_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LINKSTATE;

#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}

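/*
 * Name:	qla_init_locked
 * Function:	Brings up the interface: stops the hardware, allocates the
 *		transmit and receive buffers, configures LRO, programs the
 *		MAC address and starts the hardware interface. Called with
 *		the QLA lock held.
 */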
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	if (qla_config_lro(ha))
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (qla_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

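/*
 * Name:	qla_init
 * Function:	if_init entry point; acquires the QLA lock and calls
 *		qla_init_locked().
 */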
static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	QLA_LOCK(ha, __func__);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

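/*
 * Name:	qla_set_multi
 * Function:	Collects up to Q8_MAX_NUM_MULTICAST_ADDRS link-level
 *		multicast addresses from the interface and programs them
 *		into the hardware; add_multi selects add versus delete.
 */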
static void
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	qla_hw_set_multi(ha, mta, mcnt, add_multi);

	return;
}

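/*
 * Name:	qla_ioctl
 * Function:	if_ioctl entry point; handles interface address, MTU,
 *		flags, multicast, media and capability requests.
 */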
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
				qla_config_ipv4_addr(ha,
					(IA_SIN(ifa)->sin_addr.s_addr));
			}
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			ret = EINVAL;
		} else {
			QLA_LOCK(ha, __func__);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
			}
			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					qla_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					qla_set_allmulti(ha);
				}
			} else {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
				QLA_UNLOCK(ha, __func__);
			}
		} else {
			QLA_LOCK(ha, __func__);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
			QLA_UNLOCK(ha, __func__);
		}
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

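/*
 * Name:	qla_media_change
 * Function:	ifmedia change callback; only IFM_ETHER media are accepted.
 */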
static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

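/*
 * Name:	qla_media_status
 * Function:	ifmedia status callback; refreshes the link state and
 *		reports the active media when the link is up.
 */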
static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qla_update_link_state(ha);
	if (ha->hw.flags.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.flags.link_up ? "link_up" : "link_down")));

	return;
}

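/*
 * Name:	qla_start
 * Function:	if_start entry point; while the transmit lock can be
 *		acquired and the link is up, drains the send queue, handing
 *		each frame to qla_send() and tapping BPF.
 */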
void
qla_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->watchdog_ticks)
		qla_update_link_state(ha);

	if (!ha->hw.flags.link_up) {
		QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}

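/*
 * Name:	qla_send
 * Function:	Maps the mbuf chain for DMA (defragmenting it on EFBIG)
 *		and hands the segments to the hardware; on success the
 *		mbuf and map are recorded against the returned transmit
 *		descriptor index so they can be reclaimed on completion.
 */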
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf *m_head = *m_headp;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
		ha->err_tx_dmamap_create++;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_create failed[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);
		return (ret);
	}

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			bus_dmamap_destroy(ha->tx_tag, map);
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}
	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		bus_dmamap_destroy(ha->tx_tag, map);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
		ha->tx_buf[tx_idx].m_head = m_head;
		ha->tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

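/*
 * Name:	qla_stop
 * Function:	Pauses the watchdog, stops the receive side, tears down
 *		the hardware interface and frees the LRO state and the
 *		transmit and receive buffers.
 */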
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ha->flags.qla_watchdog_pause = 1;
	qla_mdelay(__func__, 100);

	ha->flags.stop_rcv = 1;
	qla_hw_stop_rcv(ha);

	qla_del_hw_if(ha);

	qla_free_lro(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return 0;
}

/*
 * Release the mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {

		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

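/*
 * Name:	qla_free_xmt_bufs
 * Function:	Releases all transmit mbufs and maps and destroys the
 *		transmit DMA tag.
 */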
static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i;

	for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
		qla_clear_tx_buf(ha, &ha->tx_buf[i]);

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return;
}

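/*
 * Name:	qla_alloc_rcv_bufs
 * Function:	Creates the receive DMA tag and maps, initializes the
 *		hardware receive descriptors and fills both the standard
 *		and jumbo receive rings with mbufs.
 */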
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, j, ret = 0;
	qla_rx_buf_t	*rxb;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		rxb = &ha->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_buf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [standard(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {

		rxb = &ha->rx_jbuf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_jbuf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [jumbo(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	return (0);

qla_alloc_rcv_bufs_failed:
	qla_free_rcv_bufs(ha);
	return (ret);
}

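/*
 * Name:	qla_free_rcv_bufs
 * Function:	Releases all standard and jumbo receive mbufs and maps,
 *		destroys the receive DMA tag and resets the status
 *		descriptor ring state.
 */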
static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;
	qla_rx_buf_t	*rxb;

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	return;
}

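/*
 * Name:	qla_get_mbuf
 * Function:	Allocates (or reuses) a standard cluster or 9K jumbo mbuf,
 *		aligns its data pointer to an 8 byte boundary and loads it
 *		into the receive buffer's DMA map, recording the resulting
 *		bus address in rxb->paddr.
 */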
1399 
1400 int
1401 qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
1402 	uint32_t jumbo)
1403 {
1404 	register struct mbuf *mp = nmp;
1405 	struct ifnet   *ifp;
1406 	int             ret = 0;
1407 	uint32_t	offset;
1408 
1409 	QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));
1410 
1411 	ifp = ha->ifp;
1412 
1413 	if (mp == NULL) {
1414 
1415 		if (!jumbo) {
1416 			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1417 
1418 			if (mp == NULL) {
1419 				ha->err_m_getcl++;
1420 				ret = ENOBUFS;
1421 				device_printf(ha->pci_dev,
1422 					"%s: m_getcl failed\n", __func__);
1423 				goto exit_qla_get_mbuf;
1424 			}
1425 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1426 		} else {
1427 			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1428 				MJUM9BYTES);
1429 			if (mp == NULL) {
1430 				ha->err_m_getjcl++;
1431 				ret = ENOBUFS;
1432 				device_printf(ha->pci_dev,
1433 					"%s: m_getjcl failed\n", __func__);
1434 				goto exit_qla_get_mbuf;
1435 			}
1436 			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
1437 		}
1438 	} else {
1439 		if (!jumbo)
1440 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1441 		else
1442 			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
1443 
1444 		mp->m_data = mp->m_ext.ext_buf;
1445 		mp->m_next = NULL;
1446 	}
1447 
1448 
1449 	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1450 	if (offset) {
1451 		offset = 8 - offset;
1452 		m_adj(mp, offset);
1453 	}
1454 
1455 	/*
1456 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1457 	 * machinery to arrange the memory mapping.
1458 	 */
1459 	ret = bus_dmamap_load(ha->rx_tag, rxb->map,
1460 				mtod(mp, void *), mp->m_len,
1461 				qla_dmamap_callback, &rxb->paddr,
1462 				BUS_DMA_NOWAIT);
1463 	if (ret || !rxb->paddr) {
1464 		m_free(mp);
1465 		rxb->m_head = NULL;
1466 		device_printf(ha->pci_dev,
1467 			"%s: bus_dmamap_load failed\n", __func__);
1468                 ret = -1;
1469 		goto exit_qla_get_mbuf;
1470 	}
1471 	rxb->m_head = mp;
1472 	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1473 
1474 exit_qla_get_mbuf:
1475 	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1476 	return (ret);
1477 }
1478 
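/*
 * Name:	qla_tx_done
 * Function:	Transmit taskqueue handler; reclaims completed transmit
 *		descriptors and restarts transmission.
 */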
static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;

	qla_hw_tx_done(ha);
	qla_start(ha->ifp);
}
