xref: /dragonfly/sys/dev/netif/oce/oce_if.c (revision 7ce1da6a)
1 /*-
2  * Copyright (C) 2013 Emulex
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the Emulex Corporation nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Contact Information:
32  * freebsd-drivers@emulex.com
33  *
34  * Emulex
35  * 3333 Susan Street
36  * Costa Mesa, CA 92626
37  */
38 
39 
40 /* $FreeBSD: src/sys/dev/oce/oce_if.c,v 1.14 2013/07/07 00:30:13 svnexp Exp $ */
41 
42 #include "opt_inet6.h"
43 #include "opt_inet.h"
44 
45 #include "oce_if.h"
46 
47 
48 /* Driver entry point prototypes */
49 static int  oce_probe(device_t dev);
50 static int  oce_attach(device_t dev);
51 static int  oce_detach(device_t dev);
52 static int  oce_shutdown(device_t dev);
53 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr);
54 static void oce_init(void *xsc);
55 #if 0 /* XXX swildner: MULTIQUEUE */
56 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
57 static void oce_multiq_flush(struct ifnet *ifp);
58 #endif
59 
60 /* Driver interrupt routine prototypes */
61 static void oce_intr(void *arg, int pending);
62 static int  oce_setup_intr(POCE_SOFTC sc);
63 static void oce_fast_isr(void *arg);
64 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
65 			  void (*isr) (void *arg, int pending));
66 
67 /* Media callback prototypes */
68 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
69 static int  oce_media_change(struct ifnet *ifp);
70 
71 /* Transmit routine prototypes */
72 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
73 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
74 static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
75 					uint32_t status);
76 #if 0 /* XXX swildner: MULTIQUEUE */
77 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
78 				 struct oce_wq *wq);
79 #endif
80 
81 /* Receive routine prototypes */
82 static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
83 #if 0 /* XXX swildner: ETHER_VTAG */
84 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
85 #endif
86 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
87 static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
88 						struct oce_nic_rx_cqe *cqe);
89 
90 /* Helper function prototypes in this file */
91 static int  oce_attach_ifp(POCE_SOFTC sc);
92 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
93 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
94 static int  oce_vid_config(POCE_SOFTC sc);
95 static void oce_mac_addr_set(POCE_SOFTC sc);
96 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
97 static void oce_local_timer(void *arg);
98 static void oce_if_deactivate(POCE_SOFTC sc);
99 static void oce_if_activate(POCE_SOFTC sc);
100 static void setup_max_queues_want(POCE_SOFTC sc);
101 static void update_queues_got(POCE_SOFTC sc);
102 static void process_link_state(POCE_SOFTC sc,
103 		 struct oce_async_cqe_link_state *acqe);
104 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
105 static void oce_get_config(POCE_SOFTC sc);
106 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
107 
108 /* IP specific */
109 #if defined(INET6) || defined(INET)
110 #if 0 /* XXX swildner: LRO */
111 static int  oce_init_lro(POCE_SOFTC sc);
112 static void oce_rx_flush_lro(struct oce_rq *rq);
113 #endif
114 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
115 #endif
116 
117 static device_method_t oce_dispatch[] = {
118 	DEVMETHOD(device_probe, oce_probe),
119 	DEVMETHOD(device_attach, oce_attach),
120 	DEVMETHOD(device_detach, oce_detach),
121 	DEVMETHOD(device_shutdown, oce_shutdown),
122 
123 	DEVMETHOD_END
124 };
125 
126 static driver_t oce_driver = {
127 	"oce",
128 	oce_dispatch,
129 	sizeof(OCE_SOFTC)
130 };
131 static devclass_t oce_devclass;
132 
133 
134 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, NULL, NULL);
135 MODULE_DEPEND(oce, pci, 1, 1, 1);
136 MODULE_DEPEND(oce, ether, 1, 1, 1);
137 MODULE_VERSION(oce, 1);
138 
139 
140 /* global vars */
141 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
142 
143 /* Module capabilities and parameters */
144 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
145 #if 0 /* XXX swildner: RSS */
146 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
147 #else
148 uint32_t oce_enable_rss = 0;
149 #endif
150 
151 
152 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
153 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
154 
155 
156 /* Supported devices table */
157 static uint32_t supportedDevices[] =  {
158 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
159 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
160 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
161 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
162 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
163 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
164 };
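/*
 * Each entry packs the PCI vendor id into the upper 16 bits and the
 * device id into the lower 16 bits; oce_probe() below compares both
 * halves against the ids read from the candidate device.
 */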
165 
166 
167 
168 
169 /*****************************************************************************
170  *			Driver entry point functions                         *
171  *****************************************************************************/
172 
173 static int
174 oce_probe(device_t dev)
175 {
176 	uint16_t vendor = 0;
177 	uint16_t device = 0;
178 	int i = 0;
179 	char str[256] = {0};
180 	POCE_SOFTC sc;
181 
182 	sc = device_get_softc(dev);
183 	bzero(sc, sizeof(OCE_SOFTC));
184 	sc->dev = dev;
185 
186 	vendor = pci_get_vendor(dev);
187 	device = pci_get_device(dev);
188 
189 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
190 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
191 			if (device == (supportedDevices[i] & 0xffff)) {
192 				ksprintf(str, "%s:%s", "Emulex CNA NIC function",
193 					component_revision);
194 				device_set_desc_copy(dev, str);
195 
196 				switch (device) {
197 				case PCI_PRODUCT_BE2:
198 					sc->flags |= OCE_FLAGS_BE2;
199 					break;
200 				case PCI_PRODUCT_BE3:
201 					sc->flags |= OCE_FLAGS_BE3;
202 					break;
203 				case PCI_PRODUCT_XE201:
204 				case PCI_PRODUCT_XE201_VF:
205 					sc->flags |= OCE_FLAGS_XE201;
206 					break;
207 				case PCI_PRODUCT_SH:
208 					sc->flags |= OCE_FLAGS_SH;
209 					break;
210 				default:
211 					return ENXIO;
212 				}
213 				return BUS_PROBE_DEFAULT;
214 			}
215 		}
216 	}
217 
218 	return ENXIO;
219 }
220 
221 
222 static int
223 oce_attach(device_t dev)
224 {
225 	POCE_SOFTC sc;
226 	int rc = 0;
227 
228 	sc = device_get_softc(dev);
229 
230 	rc = oce_hw_pci_alloc(sc);
231 	if (rc)
232 		return rc;
233 
234 	sc->tx_ring_size = OCE_TX_RING_SIZE;
235 	sc->rx_ring_size = OCE_RX_RING_SIZE;
236 	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
237 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
238 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
239 
240 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
241 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
242 
243 	/* initialise the hardware */
244 	rc = oce_hw_init(sc);
245 	if (rc)
246 		goto pci_res_free;
247 
248 	oce_get_config(sc);
249 
250 	setup_max_queues_want(sc);
251 
252 	rc = oce_setup_intr(sc);
253 	if (rc)
254 		goto mbox_free;
255 
256 	rc = oce_queue_init_all(sc);
257 	if (rc)
258 		goto intr_free;
259 
260 	rc = oce_attach_ifp(sc);
261 	if (rc)
262 		goto queues_free;
263 
264 #if defined(INET6) || defined(INET)
265 #if 0 /* XXX swildner: LRO */
266 	rc = oce_init_lro(sc);
267 	if (rc)
268 		goto ifp_free;
269 #endif
270 #endif
271 
272 	rc = oce_hw_start(sc);
273 	if (rc)
274 		goto lro_free;
275 
276 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
277 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
278 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
279 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
280 
281 	rc = oce_stats_init(sc);
282 	if (rc)
283 		goto vlan_free;
284 
285 	oce_add_sysctls(sc);
286 
287 	callout_init_mp(&sc->timer);
288 	callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
289 
290 	return 0;
291 
292 vlan_free:
293 	if (sc->vlan_attach)
294 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
295 	if (sc->vlan_detach)
296 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
297 	oce_hw_intr_disable(sc);
298 lro_free:
299 #if defined(INET6) || defined(INET)
300 #if 0 /* XXX swildner: LRO */
301 	oce_free_lro(sc);
302 ifp_free:
303 #endif
304 #endif
305 	ether_ifdetach(sc->ifp);
306 	if_free(sc->ifp);
307 queues_free:
308 	oce_queue_release_all(sc);
309 intr_free:
310 	oce_intr_free(sc);
311 mbox_free:
312 	oce_dma_free(sc, &sc->bsmbx);
313 pci_res_free:
314 	oce_hw_pci_free(sc);
315 	LOCK_DESTROY(&sc->dev_lock);
316 	LOCK_DESTROY(&sc->bmbx_lock);
317 	return rc;
318 
319 }
320 
321 
322 static int
323 oce_detach(device_t dev)
324 {
325 	POCE_SOFTC sc = device_get_softc(dev);
326 
327 	LOCK(&sc->dev_lock);
328 	oce_if_deactivate(sc);
329 	UNLOCK(&sc->dev_lock);
330 
331 	callout_stop_sync(&sc->timer);
332 
333 	if (sc->vlan_attach != NULL)
334 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
335 	if (sc->vlan_detach != NULL)
336 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
337 
338 	ether_ifdetach(sc->ifp);
339 
340 	if_free(sc->ifp);
341 
342 	oce_hw_shutdown(sc);
343 
344 	bus_generic_detach(dev);
345 	return 0;
346 }
347 
348 
349 static int
350 oce_shutdown(device_t dev)
351 {
352 	int rc;
353 
354 	rc = oce_detach(dev);
355 
356 	return rc;
357 }
358 
359 
360 static int
361 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
362 {
363 	struct ifreq *ifr = (struct ifreq *)data;
364 	POCE_SOFTC sc = ifp->if_softc;
365 	int rc = 0;
366 	uint32_t u;
367 
368 	switch (command) {
369 
370 	case SIOCGIFMEDIA:
371 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
372 		break;
373 
374 	case SIOCSIFMTU:
375 		if (ifr->ifr_mtu > OCE_MAX_MTU)
376 			rc = EINVAL;
377 		else
378 			ifp->if_mtu = ifr->ifr_mtu;
379 		break;
380 
381 	case SIOCSIFFLAGS:
382 		if (ifp->if_flags & IFF_UP) {
383 			if (!(ifp->if_flags & IFF_RUNNING)) {
384 				sc->ifp->if_flags |= IFF_RUNNING;
385 				oce_init(sc);
386 			}
387 			device_printf(sc->dev, "Interface Up\n");
388 		} else {
389 			LOCK(&sc->dev_lock);
390 
391 			sc->ifp->if_flags &= ~IFF_RUNNING;
392 			ifq_clr_oactive(&ifp->if_snd);
393 			oce_if_deactivate(sc);
394 
395 			UNLOCK(&sc->dev_lock);
396 
397 			device_printf(sc->dev, "Interface Down\n");
398 		}
399 
400 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
401 			sc->promisc = TRUE;
402 			oce_rxf_set_promiscuous(sc, sc->promisc);
403 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
404 			sc->promisc = FALSE;
405 			oce_rxf_set_promiscuous(sc, sc->promisc);
406 		}
407 
408 		break;
409 
410 	case SIOCADDMULTI:
411 	case SIOCDELMULTI:
412 		rc = oce_hw_update_multicast(sc);
413 		if (rc)
414 			device_printf(sc->dev,
415 				"Update multicast address failed\n");
416 		break;
417 
418 	case SIOCSIFCAP:
419 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
420 
421 		if (u & IFCAP_TXCSUM) {
422 			ifp->if_capenable ^= IFCAP_TXCSUM;
423 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
424 
425 			if (IFCAP_TSO & ifp->if_capenable &&
426 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
427 				ifp->if_capenable &= ~IFCAP_TSO;
428 				ifp->if_hwassist &= ~CSUM_TSO;
429 				if_printf(ifp,
430 					 "TSO disabled due to -txcsum.\n");
431 			}
432 		}
433 
434 		if (u & IFCAP_RXCSUM)
435 			ifp->if_capenable ^= IFCAP_RXCSUM;
436 
437 		if (u & IFCAP_TSO4) {
438 			ifp->if_capenable ^= IFCAP_TSO4;
439 
440 			if (IFCAP_TSO & ifp->if_capenable) {
441 				if (IFCAP_TXCSUM & ifp->if_capenable)
442 					ifp->if_hwassist |= CSUM_TSO;
443 				else {
444 					ifp->if_capenable &= ~IFCAP_TSO;
445 					ifp->if_hwassist &= ~CSUM_TSO;
446 					if_printf(ifp,
447 					    "Enable txcsum first.\n");
448 					rc = EAGAIN;
449 				}
450 			} else
451 				ifp->if_hwassist &= ~CSUM_TSO;
452 		}
453 
454 		if (u & IFCAP_VLAN_HWTAGGING)
455 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
456 
457 #if 0 /* XXX swildner: VLAN_HWFILTER */
458 		if (u & IFCAP_VLAN_HWFILTER) {
459 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
460 			oce_vid_config(sc);
461 		}
462 #endif
463 #if defined(INET6) || defined(INET)
464 #if 0 /* XXX swildner: LRO */
465 		if (u & IFCAP_LRO)
466 			ifp->if_capenable ^= IFCAP_LRO;
467 #endif
468 #endif
469 
470 		break;
471 
472 	case SIOCGPRIVATE_0:
473 		rc = oce_handle_passthrough(ifp, data);
474 		break;
475 	default:
476 		rc = ether_ioctl(ifp, command, data);
477 		break;
478 	}
479 
480 	return rc;
481 }
482 
483 
484 static void
485 oce_init(void *arg)
486 {
487 	POCE_SOFTC sc = arg;
488 
489 	LOCK(&sc->dev_lock);
490 
491 	if (sc->ifp->if_flags & IFF_UP) {
492 		oce_if_deactivate(sc);
493 		oce_if_activate(sc);
494 	}
495 
496 	UNLOCK(&sc->dev_lock);
497 
498 }
499 
500 
501 #if 0 /* XXX swildner: MULTIQUEUE */
502 static int
503 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
504 {
505 	POCE_SOFTC sc = ifp->if_softc;
506 	struct oce_wq *wq = NULL;
507 	int queue_index = 0;
508 	int status = 0;
509 
510 	if (!sc->link_status) {
511 		ifq_purge(&ifp->if_snd);
512 		return ENXIO;
513 	}
514 
515 	if ((m->m_flags & M_FLOWID) != 0)
516 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
517 
518 	wq = sc->wq[queue_index];
519 
520 	LOCK(&wq->tx_lock);
521 	status = oce_multiq_transmit(ifp, m, wq);
522 	UNLOCK(&wq->tx_lock);
523 
524 	return status;
525 
526 }
527 
528 
529 static void
530 oce_multiq_flush(struct ifnet *ifp)
531 {
532 	POCE_SOFTC sc = ifp->if_softc;
533 	struct mbuf     *m;
534 	int i = 0;
535 
536 	for (i = 0; i < sc->nwqs; i++) {
537 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
538 			m_freem(m);
539 	}
540 	if_qflush(ifp);
541 }
542 #endif
543 
544 
545 
546 /*****************************************************************************
547  *                   Driver interrupt routines functions                     *
548  *****************************************************************************/
549 
550 static void
551 oce_intr(void *arg, int pending)
552 {
553 
554 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
555 	POCE_SOFTC sc = ii->sc;
556 	struct oce_eq *eq = ii->eq;
557 	struct oce_eqe *eqe;
558 	struct oce_cq *cq = NULL;
559 	int i, num_eqes = 0;
560 
561 
562 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
563 				 BUS_DMASYNC_POSTWRITE);
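	/*
	 * Harvest every valid event queue entry, clearing each one as it
	 * is consumed.  The count of consumed EQEs is acknowledged to the
	 * EQ below without re-arming, the attached CQs are serviced, and
	 * only then are the CQs and the EQ re-armed.
	 */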
564 	do {
565 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
566 		if (eqe->evnt == 0)
567 			break;
568 		eqe->evnt = 0;
569 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
570 					BUS_DMASYNC_POSTWRITE);
571 		RING_GET(eq->ring, 1);
572 		num_eqes++;
573 
574 	} while (TRUE);
575 
576 	if (!num_eqes)
577 		goto eq_arm; /* Spurious */
578 
579 	/* Clear EQ entries, but don't arm */
580 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
581 
582 	/* Process TX, RX and MCC; but don't arm the CQs */
583 	for (i = 0; i < eq->cq_valid; i++) {
584 		cq = eq->cq[i];
585 		(*cq->cq_handler)(cq->cb_arg);
586 	}
587 
588 	/* Arm all cqs connected to this EQ */
589 	for (i = 0; i < eq->cq_valid; i++) {
590 		cq = eq->cq[i];
591 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
592 	}
593 
594 eq_arm:
595 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
596 
597 	return;
598 }
599 
600 
601 static int
602 oce_setup_intr(POCE_SOFTC sc)
603 {
604 	int rc = 0, use_intx = 0;
605 	int vector = 0;
606 #if 0 /* XXX swildner: MSI-X */
607 	int req_vectors = 0;
608 
609 	if (is_rss_enabled(sc))
610 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
611 	else
612 		req_vectors = 1;
613 
614 	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
615 		sc->intr_count = req_vectors;
616 		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
617 		if (rc != 0) {
618 			use_intx = 1;
619 			pci_release_msi(sc->dev);
620 		} else
621 			sc->flags |= OCE_FLAGS_USING_MSIX;
622 	} else
623 #endif
624 		use_intx = 1;
625 
626 	if (use_intx)
627 		sc->intr_count = 1;
628 
629 	/* Scale the number of queues based on the interrupts we got */
630 	update_queues_got(sc);
631 
632 	if (use_intx) {
633 		device_printf(sc->dev, "Using legacy interrupt\n");
634 		rc = oce_alloc_intr(sc, vector, oce_intr);
635 		if (rc)
636 			goto error;
637 #if 0 /* XXX swildner: MSI-X */
638 	} else {
639 		for (; vector < sc->intr_count; vector++) {
640 			rc = oce_alloc_intr(sc, vector, oce_intr);
641 			if (rc)
642 				goto error;
643 		}
644 #endif
645 	}
646 
647 	return 0;
648 error:
649 	oce_intr_free(sc);
650 	return rc;
651 }
652 
653 
654 void
655 oce_fast_isr(void *arg)
656 {
657 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
658 	POCE_SOFTC sc = ii->sc;
659 
660 	if (ii->eq == NULL)
661 		return;
662 
663 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
664 
665 	taskqueue_enqueue(ii->tq, &ii->task);
666 
667 	ii->eq->intr++;
668 }
669 
670 
671 static int
672 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
673 {
674 	POCE_INTR_INFO ii = &sc->intrs[vector];
675 	int rc = 0, rr;
676 	u_int irq_flags;
677 
678 	if (vector >= OCE_MAX_EQ)
679 		return (EINVAL);
680 
681 #if 0 /* XXX swildner: MSI-X */
682 	/* Set the resource id for the interrupt.
683 	 * MSIx is vector + 1 for the resource id,
684 	 * INTx is 0 for the resource id.
685 	 */
686 	if (sc->flags & OCE_FLAGS_USING_MSIX)
687 		rr = vector + 1;
688 	else
689 #endif
690 		rr = 0;
691 	ii->irq_type = pci_alloc_1intr(sc->dev,
692 	    sc->flags & OCE_FLAGS_USING_MSI, &rr, &irq_flags);
693 	ii->intr_res = bus_alloc_resource_any(sc->dev,
694 					      SYS_RES_IRQ,
695 					      &rr, irq_flags);
696 	ii->irq_rr = rr;
697 	if (ii->intr_res == NULL) {
698 		device_printf(sc->dev,
699 			  "Could not allocate interrupt\n");
700 		rc = ENXIO;
701 		return rc;
702 	}
703 
704 	TASK_INIT(&ii->task, 0, isr, ii);
705 	ii->vector = vector;
706 	ksprintf(ii->task_name, "oce_task[%d]", ii->vector);
707 	ii->tq = taskqueue_create(ii->task_name,
708 			M_NOWAIT,
709 			taskqueue_thread_enqueue,
710 			&ii->tq);
711 	taskqueue_start_threads(&ii->tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq",
712 			device_get_nameunit(sc->dev));
713 
714 	ii->sc = sc;
715 	rc = bus_setup_intr(sc->dev,
716 			ii->intr_res,
717 			0,
718 			oce_fast_isr, ii, &ii->tag, NULL);
719 	return rc;
720 
721 }
722 
723 
724 void
725 oce_intr_free(POCE_SOFTC sc)
726 {
727 	int i = 0;
728 
729 	for (i = 0; i < sc->intr_count; i++) {
730 
731 		if (sc->intrs[i].tag != NULL)
732 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
733 						sc->intrs[i].tag);
734 		if (sc->intrs[i].tq != NULL)
735 			taskqueue_free(sc->intrs[i].tq);
736 
737 		if (sc->intrs[i].intr_res != NULL)
738 			bus_release_resource(sc->dev, SYS_RES_IRQ,
739 						sc->intrs[i].irq_rr,
740 						sc->intrs[i].intr_res);
741 		sc->intrs[i].tag = NULL;
742 		sc->intrs[i].intr_res = NULL;
743 	}
744 
745 	if (sc->flags & OCE_FLAGS_USING_MSIX ||
746 	    sc->flags & OCE_FLAGS_USING_MSI)
747 		pci_release_msi(sc->dev);
748 
749 }
750 
751 
752 
753 /******************************************************************************
754 *			  Media callback functions 			      *
755 ******************************************************************************/
756 
757 static void
758 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
759 {
760 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
761 
762 
763 	req->ifm_status = IFM_AVALID;
764 	req->ifm_active = IFM_ETHER;
765 
766 	if (sc->link_status == 1)
767 		req->ifm_status |= IFM_ACTIVE;
768 	else
769 		return;
770 
771 	switch (sc->link_speed) {
772 	case 1: /* 10 Mbps */
773 		req->ifm_active |= IFM_10_T | IFM_FDX;
774 		sc->speed = 10;
775 		break;
776 	case 2: /* 100 Mbps */
777 		req->ifm_active |= IFM_100_TX | IFM_FDX;
778 		sc->speed = 100;
779 		break;
780 	case 3: /* 1 Gbps */
781 		req->ifm_active |= IFM_1000_T | IFM_FDX;
782 		sc->speed = 1000;
783 		break;
784 	case 4: /* 10 Gbps */
785 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
786 		sc->speed = 10000;
787 		break;
788 	}
789 
790 	return;
791 }
792 
793 
794 int
795 oce_media_change(struct ifnet *ifp)
796 {
797 	return 0;
798 }
799 
800 
801 
802 
803 /*****************************************************************************
804  *			  Transmit routines				     *
805  *****************************************************************************/
806 
807 static int
808 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
809 {
810 	int rc = 0, i, retry_cnt = 0;
811 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
812 	struct mbuf *m, *m_temp;
813 	struct oce_wq *wq = sc->wq[wq_index];
814 	struct oce_packet_desc *pd;
815 	struct oce_nic_hdr_wqe *nichdr;
816 	struct oce_nic_frag_wqe *nicfrag;
817 	int num_wqes;
818 	uint32_t reg_value;
819 	boolean_t complete = TRUE;
820 
821 	m = *mpp;
822 	if (!m)
823 		return EINVAL;
824 
825 	if (!(m->m_flags & M_PKTHDR)) {
826 		rc = ENXIO;
827 		goto free_ret;
828 	}
829 
830 	if(oce_tx_asic_stall_verify(sc, m)) {
831 		m = oce_insert_vlan_tag(sc, m, &complete);
832 		if(!m) {
833 			device_printf(sc->dev, "VLAN tag insertion unsuccessful\n");
834 			return 0;
835 		}
836 
837 	}
838 
839 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
840 		/* consolidate packet buffers for TSO/LSO segment offload */
841 #if defined(INET6) || defined(INET)
842 		m = oce_tso_setup(sc, mpp);
843 #else
844 		m = NULL;
845 #endif
846 		if (m == NULL) {
847 			rc = ENXIO;
848 			goto free_ret;
849 		}
850 	}
851 
852 	pd = &wq->pckts[wq->pkt_desc_head];
853 retry:
854 	rc = bus_dmamap_load_mbuf_defrag(wq->tag,
855 				     pd->map,
856 				     mpp, segs, OCE_MAX_TX_ELEMENTS,
857 				     &pd->nsegs, BUS_DMA_NOWAIT);
858 	if (rc == 0) {
859 		num_wqes = pd->nsegs + 1;
860 		if (IS_BE(sc) || IS_SH(sc)) {
861 			/* Dummy WQE required only for BE3. */
862 			if (num_wqes & 1)
863 				num_wqes++;
864 		}
865 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
866 			bus_dmamap_unload(wq->tag, pd->map);
867 			return EBUSY;
868 		}
869 		atomic_store_rel_int(&wq->pkt_desc_head,
870 				     (wq->pkt_desc_head + 1) % \
871 				      OCE_WQ_PACKET_ARRAY_SIZE);
872 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
873 		pd->mbuf = m;
874 
875 		nichdr =
876 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
877 		nichdr->u0.dw[0] = 0;
878 		nichdr->u0.dw[1] = 0;
879 		nichdr->u0.dw[2] = 0;
880 		nichdr->u0.dw[3] = 0;
881 
882 		nichdr->u0.s.complete = complete;
883 		nichdr->u0.s.event = 1;
884 		nichdr->u0.s.crc = 1;
885 		nichdr->u0.s.forward = 0;
886 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
887 		nichdr->u0.s.udpcs =
888 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
889 		nichdr->u0.s.tcpcs =
890 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
891 		nichdr->u0.s.num_wqe = num_wqes;
892 		nichdr->u0.s.total_length = m->m_pkthdr.len;
893 #if 0 /* XXX swildner: ETHER_VTAG */
894 		if (m->m_flags & M_VLANTAG) {
895 			nichdr->u0.s.vlan = 1; /*Vlan present*/
896 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
897 		}
898 #endif
899 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
900 			if (m->m_pkthdr.tso_segsz) {
901 				nichdr->u0.s.lso = 1;
902 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
903 			}
904 			if (!IS_BE(sc) && !IS_SH(sc))
905 				nichdr->u0.s.ipcs = 1;
906 		}
907 
908 		RING_PUT(wq->ring, 1);
909 		atomic_add_int(&wq->ring->num_used, 1);
910 
911 		for (i = 0; i < pd->nsegs; i++) {
912 			nicfrag =
913 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
914 						      struct oce_nic_frag_wqe);
915 			nicfrag->u0.s.rsvd0 = 0;
916 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
917 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
918 			nicfrag->u0.s.frag_len = segs[i].ds_len;
919 			pd->wqe_idx = wq->ring->pidx;
920 			RING_PUT(wq->ring, 1);
921 			atomic_add_int(&wq->ring->num_used, 1);
922 		}
923 		if (num_wqes > (pd->nsegs + 1)) {
924 			nicfrag =
925 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
926 						      struct oce_nic_frag_wqe);
927 			nicfrag->u0.dw[0] = 0;
928 			nicfrag->u0.dw[1] = 0;
929 			nicfrag->u0.dw[2] = 0;
930 			nicfrag->u0.dw[3] = 0;
931 			pd->wqe_idx = wq->ring->pidx;
932 			RING_PUT(wq->ring, 1);
933 			atomic_add_int(&wq->ring->num_used, 1);
934 			pd->nsegs++;
935 		}
936 
937 		sc->ifp->if_opackets++;
938 		wq->tx_stats.tx_reqs++;
939 		wq->tx_stats.tx_wrbs += num_wqes;
940 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
941 		wq->tx_stats.tx_pkts++;
942 
943 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
944 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
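		/*
		 * Ring the TX doorbell; the register takes the number of
		 * WQEs just posted in the upper 16 bits and the WQ id in
		 * the lower 16 bits.
		 */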
945 		reg_value = (num_wqes << 16) | wq->wq_id;
946 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
947 
948 	} else if (rc == EFBIG)	{
949 		if (retry_cnt == 0) {
950 			m_temp = m_defrag(m, M_NOWAIT);
951 			if (m_temp == NULL)
952 				goto free_ret;
953 			m = m_temp;
954 			*mpp = m_temp;
955 			retry_cnt = retry_cnt + 1;
956 			goto retry;
957 		} else
958 			goto free_ret;
959 	} else if (rc == ENOMEM)
960 		return rc;
961 	else
962 		goto free_ret;
963 
964 	return 0;
965 
966 free_ret:
967 	m_freem(*mpp);
968 	*mpp = NULL;
969 	return rc;
970 }
971 
972 
973 static void
974 oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
975 {
976 	struct oce_packet_desc *pd;
977 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
978 	struct mbuf *m;
979 
980 	pd = &wq->pckts[wq->pkt_desc_tail];
981 	atomic_store_rel_int(&wq->pkt_desc_tail,
982 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
983 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
984 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
985 	bus_dmamap_unload(wq->tag, pd->map);
986 
987 	m = pd->mbuf;
988 	m_freem(m);
989 	pd->mbuf = NULL;
990 
991 	if (ifq_is_oactive(&sc->ifp->if_snd)) {
992 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
993 			ifq_clr_oactive(&sc->ifp->if_snd);
994 			oce_tx_restart(sc, wq);
995 		}
996 	}
997 }
998 
999 
1000 static void
1001 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1002 {
1003 
1004 	if ((sc->ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
1005 		return;
1006 
1007 #if 0 /* __FreeBSD_version >= 800000 */
1008 	if (!drbr_empty(sc->ifp, wq->br))
1009 #else
1010 	if (!ifq_is_empty(&sc->ifp->if_snd))
1011 #endif
1012 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1013 
1014 }
1015 
1016 
1017 #if defined(INET6) || defined(INET)
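/*
 * oce_tso_setup() consolidates the Ethernet/IP/TCP headers of a TSO
 * packet into one contiguous, writable buffer: it duplicates the chain
 * if it is shared, works out the total header length for the detected
 * ethertype, and m_pullup()s that many bytes.  Non-TCP payloads
 * return NULL.
 */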
1018 static struct mbuf *
1019 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1020 {
1021 	struct mbuf *m;
1022 #ifdef INET
1023 	struct ip *ip;
1024 #endif
1025 #ifdef INET6
1026 	struct ip6_hdr *ip6;
1027 #endif
1028 	struct ether_vlan_header *eh;
1029 	struct tcphdr *th;
1030 	uint16_t etype;
1031 	int total_len = 0, ehdrlen = 0;
1032 
1033 	m = *mpp;
1034 
1035 	if (M_WRITABLE(m) == 0) {
1036 		m = m_dup(*mpp, M_NOWAIT);
1037 		if (!m)
1038 			return NULL;
1039 		m_freem(*mpp);
1040 		*mpp = m;
1041 	}
1042 
1043 	eh = mtod(m, struct ether_vlan_header *);
1044 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1045 		etype = ntohs(eh->evl_proto);
1046 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1047 	} else {
1048 		etype = ntohs(eh->evl_encap_proto);
1049 		ehdrlen = ETHER_HDR_LEN;
1050 	}
1051 
1052 	switch (etype) {
1053 #ifdef INET
1054 	case ETHERTYPE_IP:
1055 		ip = (struct ip *)(m->m_data + ehdrlen);
1056 		if (ip->ip_p != IPPROTO_TCP)
1057 			return NULL;
1058 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1059 
1060 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1061 		break;
1062 #endif
1063 #ifdef INET6
1064 	case ETHERTYPE_IPV6:
1065 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1066 		if (ip6->ip6_nxt != IPPROTO_TCP)
1067 			return NULL;
1068 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1069 
1070 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1071 		break;
1072 #endif
1073 	default:
1074 		return NULL;
1075 	}
1076 
1077 	m = m_pullup(m, total_len);
1078 	if (!m)
1079 		return NULL;
1080 	*mpp = m;
1081 	return m;
1082 
1083 }
1084 #endif /* INET6 || INET */
1085 
1086 void
1087 oce_tx_task(void *arg, int npending)
1088 {
1089 	struct oce_wq *wq = arg;
1090 	POCE_SOFTC sc = wq->parent;
1091 	struct ifnet *ifp = sc->ifp;
1092 #if 0 /* XXX swildner: MULTIQUEUE */
1093 	int rc = 0;
1094 
1095 	LOCK(&wq->tx_lock);
1096 	rc = oce_multiq_transmit(ifp, NULL, wq);
1097 	if (rc) {
1098 		device_printf(sc->dev,
1099 				"TX[%d] restart failed\n", wq->queue_index);
1100 	}
1101 	UNLOCK(&wq->tx_lock);
1102 #else
1103 	lwkt_serialize_enter(ifp->if_serializer);
1104 	oce_start_locked(ifp);
1105 	lwkt_serialize_exit(ifp->if_serializer);
1106 #endif
1107 }
1108 
1109 
1110 void
1111 oce_start_locked(struct ifnet *ifp)
1112 {
1113 	POCE_SOFTC sc = ifp->if_softc;
1114 	struct mbuf *m;
1115 	int rc = 0;
1116 	int def_q = 0; /* Default tx queue is 0 */
1117 
1118 	if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd)))
1119 		return;
1120 
1121 	if (!sc->link_status) {
1122 		ifq_purge(&ifp->if_snd);
1123 		return;
1124 	}
1125 
1126 	do {
1127 		m = ifq_dequeue(&sc->ifp->if_snd);
1128 		if (m == NULL)
1129 			break;
1130 
1131 		rc = oce_tx(sc, &m, def_q);
1132 		if (rc) {
1133 			if (m != NULL) {
1134 				sc->wq[def_q]->tx_stats.tx_stops ++;
1135 				ifq_set_oactive(&ifp->if_snd);
1136 				ifq_prepend(&ifp->if_snd, m);
1137 				m = NULL;
1138 			}
1139 			break;
1140 		}
1141 		if (m != NULL)
1142 			ETHER_BPF_MTAP(ifp, m);
1143 
1144 	} while (TRUE);
1145 
1146 	return;
1147 }
1148 
1149 void
1150 oce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1151 {
1152 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1153 	oce_start_locked(ifp);
1154 }
1155 
1156 
1157 /* Handle the Completion Queue for transmit */
1158 uint16_t
1159 oce_wq_handler(void *arg)
1160 {
1161 	struct oce_wq *wq = (struct oce_wq *)arg;
1162 	POCE_SOFTC sc = wq->parent;
1163 	struct oce_cq *cq = wq->cq;
1164 	struct oce_nic_tx_cqe *cqe;
1165 	int num_cqes = 0;
1166 
1167 	bus_dmamap_sync(cq->ring->dma.tag,
1168 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1169 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1170 	while (cqe->u0.dw[3]) {
1171 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1172 
1173 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1174 		if (wq->ring->cidx >= wq->ring->num_items)
1175 			wq->ring->cidx -= wq->ring->num_items;
1176 
1177 		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1178 		wq->tx_stats.tx_compl++;
1179 		cqe->u0.dw[3] = 0;
1180 		RING_GET(cq->ring, 1);
1181 		bus_dmamap_sync(cq->ring->dma.tag,
1182 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1183 		cqe =
1184 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1185 		num_cqes++;
1186 	}
1187 
1188 	if (num_cqes)
1189 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1190 
1191 	return 0;
1192 }
1193 
1194 
1195 #if 0 /* XXX swildner: MULTIQUEUE */
1196 static int
1197 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1198 {
1199 	POCE_SOFTC sc = ifp->if_softc;
1200 	int status = 0, queue_index = 0;
1201 	struct mbuf *next = NULL;
1202 	struct buf_ring *br = NULL;
1203 
1204 	br  = wq->br;
1205 	queue_index = wq->queue_index;
1206 
1207 	if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd))) {
1208 		if (m != NULL)
1209 			status = drbr_enqueue(ifp, br, m);
1210 		return status;
1211 	}
1212 
1213 	if (m != NULL) {
1214 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1215 			return status;
1216 	}
1217 	while ((next = drbr_peek(ifp, br)) != NULL) {
1218 		if (oce_tx(sc, &next, queue_index)) {
1219 			if (next == NULL) {
1220 				drbr_advance(ifp, br);
1221 			} else {
1222 				drbr_putback(ifp, br, next);
1223 				wq->tx_stats.tx_stops ++;
1224 				ifq_set_oactive(&ifp->if_snd);
1225 				status = drbr_enqueue(ifp, br, next);
1226 			}
1227 			break;
1228 		}
1229 		drbr_advance(ifp, br);
1230 		ifp->if_obytes += next->m_pkthdr.len;
1231 		if (next->m_flags & M_MCAST)
1232 			ifp->if_omcasts++;
1233 		ETHER_BPF_MTAP(ifp, next);
1234 	}
1235 
1236 	return status;
1237 }
1238 #endif
1239 
1240 
1241 
1242 
1243 /*****************************************************************************
1244  *			    Receive routines				     *
1245  *****************************************************************************/
1246 
1247 static void
1248 oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1249 {
1250 	uint32_t out;
1251 	struct oce_packet_desc *pd;
1252 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1253 	int i, len, frag_len;
1254 	struct mbuf *m = NULL, *tail = NULL;
1255 	uint16_t vtag;
1256 
1257 	len = cqe->u0.s.pkt_size;
1258 	if (!len) {
1259 		/* partial DMA workaround for Lancer */
1260 		oce_discard_rx_comp(rq, cqe);
1261 		goto exit;
1262 	}
1263 
1264 	/* Get the vlan_tag value */
1265 	if(IS_BE(sc) || IS_SH(sc))
1266 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1267 	else
1268 		vtag = cqe->u0.s.vlan_tag;
1269 
1270 
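	/*
	 * A frame may span several receive buffers.  Walk the fragments
	 * in completion order and chain each buffer's mbuf onto the tail;
	 * the first fragment carries the packet-header fields (total
	 * length and checksum state) for the whole chain.
	 */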
1271 	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1272 
1273 		if (rq->packets_out == rq->packets_in) {
1274 			device_printf(sc->dev,
1275 				  "RQ transmit descriptor missing\n");
1276 		}
1277 		out = rq->packets_out + 1;
1278 		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1279 			out = 0;
1280 		pd = &rq->pckts[rq->packets_out];
1281 		rq->packets_out = out;
1282 
1283 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1284 		bus_dmamap_unload(rq->tag, pd->map);
1285 		rq->pending--;
1286 
1287 		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1288 		pd->mbuf->m_len = frag_len;
1289 
1290 		if (tail != NULL) {
1291 			/* additional fragments */
1292 			tail->m_next = pd->mbuf;
1293 			tail = pd->mbuf;
1294 		} else {
1295 			/* first fragment, fill out much of the packet header */
1296 			pd->mbuf->m_pkthdr.len = len;
1297 			pd->mbuf->m_pkthdr.csum_flags = 0;
1298 			if (IF_CSUM_ENABLED(sc)) {
1299 				if (cqe->u0.s.l4_cksum_pass) {
1300 					pd->mbuf->m_pkthdr.csum_flags |=
1301 					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1302 					pd->mbuf->m_pkthdr.csum_data = 0xffff;
1303 				}
1304 				if (cqe->u0.s.ip_cksum_pass) {
1305 					if (!cqe->u0.s.ip_ver) { /* IPV4 */
1306 						pd->mbuf->m_pkthdr.csum_flags |=
1307 						(CSUM_IP_CHECKED|CSUM_IP_VALID);
1308 					}
1309 				}
1310 			}
1311 			m = tail = pd->mbuf;
1312 		}
1313 		pd->mbuf = NULL;
1314 		len -= frag_len;
1315 	}
1316 
1317 	if (m) {
1318 		if (!oce_cqe_portid_valid(sc, cqe)) {
1319 			 m_freem(m);
1320 			 goto exit;
1321 		}
1322 
1323 		m->m_pkthdr.rcvif = sc->ifp;
1324 #if 0 /* __FreeBSD_version >= 800000 */
1325 		if (rq->queue_index)
1326 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1327 		else
1328 			m->m_pkthdr.flowid = rq->queue_index;
1329 		m->m_flags |= M_FLOWID;
1330 #endif
1331 #if 0 /* XXX swildner: ETHER_VTAG */
1332 		/* This determines if the vlan tag is valid */
1333 		if (oce_cqe_vtp_valid(sc, cqe)) {
1334 			if (sc->function_mode & FNM_FLEX10_MODE) {
1335 				/* FLEX10. If QnQ is not set, neglect VLAN */
1336 				if (cqe->u0.s.qnq) {
1337 					m->m_pkthdr.ether_vtag = vtag;
1338 					m->m_flags |= M_VLANTAG;
1339 				}
1340 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1341 				/* In UMC mode the pvid is generally stripped by
1342 				   hw, but in some cases we have seen it arrive
1343 				   with the pvid. So if pvid == vlan, ignore the vlan.
1344 				*/
1345 				m->m_pkthdr.ether_vtag = vtag;
1346 				m->m_flags |= M_VLANTAG;
1347 			}
1348 		}
1349 #endif
1350 
1351 		sc->ifp->if_ipackets++;
1352 #if defined(INET6) || defined(INET)
1353 #if 0 /* XXX swildner: LRO */
1354 		/* Try to queue to LRO */
1355 		if (IF_LRO_ENABLED(sc) &&
1356 		    (cqe->u0.s.ip_cksum_pass) &&
1357 		    (cqe->u0.s.l4_cksum_pass) &&
1358 		    (!cqe->u0.s.ip_ver)       &&
1359 		    (rq->lro.lro_cnt != 0)) {
1360 
1361 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1362 				rq->lro_pkts_queued ++;
1363 				goto post_done;
1364 			}
1365 			/* If LRO posting fails then try to post to STACK */
1366 		}
1367 #endif
1368 #endif
1369 
1370 		sc->ifp->if_input(sc->ifp, m, NULL, -1);
1371 #if defined(INET6) || defined(INET)
1372 #if 0 /* XXX swildner: LRO */
1373 post_done:
1374 #endif
1375 #endif
1376 		/* Update rx stats per queue */
1377 		rq->rx_stats.rx_pkts++;
1378 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1379 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1380 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1381 			rq->rx_stats.rx_mcast_pkts++;
1382 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1383 			rq->rx_stats.rx_ucast_pkts++;
1384 	}
1385 exit:
1386 	return;
1387 }
1388 
1389 
1390 static void
1391 oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1392 {
1393 	uint32_t out, i = 0;
1394 	struct oce_packet_desc *pd;
1395 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1396 	int num_frags = cqe->u0.s.num_fragments;
1397 
1398 	for (i = 0; i < num_frags; i++) {
1399 		if (rq->packets_out == rq->packets_in) {
1400 			device_printf(sc->dev,
1401 				"RQ transmit descriptor missing\n");
1402 		}
1403 		out = rq->packets_out + 1;
1404 		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1405 			out = 0;
1406 		pd = &rq->pckts[rq->packets_out];
1407 		rq->packets_out = out;
1408 
1409 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1410 		bus_dmamap_unload(rq->tag, pd->map);
1411 		rq->pending--;
1412 		m_freem(pd->mbuf);
1413 	}
1414 
1415 }
1416 
1417 
1418 #if 0 /* XXX swildner: ETHER_VTAG */
1419 static int
1420 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1421 {
1422 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1423 	int vtp = 0;
1424 
1425 	if (sc->be3_native) {
1426 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1427 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1428 	} else
1429 		vtp = cqe->u0.s.vlan_tag_present;
1430 
1431 	return vtp;
1432 
1433 }
1434 #endif
1435 
1436 
1437 static int
1438 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1439 {
1440 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1441 	int port_id = 0;
1442 
1443 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1444 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1445 		port_id =  cqe_v1->u0.s.port;
1446 		if (sc->port_id != port_id)
1447 			return 0;
1448 	} else
1449 		;/* For BE3 legacy and Lancer this is a dummy check */
1450 
1451 	return 1;
1452 
1453 }
1454 
1455 #if defined(INET6) || defined(INET)
1456 #if 0 /* XXX swildner: LRO */
1457 static void
1458 oce_rx_flush_lro(struct oce_rq *rq)
1459 {
1460 	struct lro_ctrl	*lro = &rq->lro;
1461 	struct lro_entry *queued;
1462 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1463 
1464 	if (!IF_LRO_ENABLED(sc))
1465 		return;
1466 
1467 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1468 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1469 		tcp_lro_flush(lro, queued);
1470 	}
1471 	rq->lro_pkts_queued = 0;
1472 
1473 	return;
1474 }
1475 
1476 
1477 static int
1478 oce_init_lro(POCE_SOFTC sc)
1479 {
1480 	struct lro_ctrl *lro = NULL;
1481 	int i = 0, rc = 0;
1482 
1483 	for (i = 0; i < sc->nrqs; i++) {
1484 		lro = &sc->rq[i]->lro;
1485 		rc = tcp_lro_init(lro);
1486 		if (rc != 0) {
1487 			device_printf(sc->dev, "LRO init failed\n");
1488 			return rc;
1489 		}
1490 		lro->ifp = sc->ifp;
1491 	}
1492 
1493 	return rc;
1494 }
1495 
1496 
1497 void
1498 oce_free_lro(POCE_SOFTC sc)
1499 {
1500 	struct lro_ctrl *lro = NULL;
1501 	int i = 0;
1502 
1503 	for (i = 0; i < sc->nrqs; i++) {
1504 		lro = &sc->rq[i]->lro;
1505 		if (lro)
1506 			tcp_lro_free(lro);
1507 	}
1508 }
1509 #endif
1510 #endif
1511 
1512 int
1513 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1514 {
1515 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1516 	int i, in, rc;
1517 	struct oce_packet_desc *pd;
1518 	bus_dma_segment_t segs[6];
1519 	int nsegs, added = 0;
1520 	struct oce_nic_rqe *rqe;
1521 	pd_rxulp_db_t rxdb_reg;
1522 
1523 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1524 	for (i = 0; i < count; i++) {
1525 		in = rq->packets_in + 1;
1526 		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1527 			in = 0;
1528 		if (in == rq->packets_out)
1529 			break;	/* no more room */
1530 
1531 		pd = &rq->pckts[rq->packets_in];
1532 		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1533 		if (pd->mbuf == NULL)
1534 			break;
1535 
1536 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1537 		rc = bus_dmamap_load_mbuf_segment(rq->tag,
1538 					     pd->map,
1539 					     pd->mbuf,
1540 					     segs, 1,
1541 					     &nsegs, BUS_DMA_NOWAIT);
1542 		if (rc) {
1543 			m_free(pd->mbuf);
1544 			break;
1545 		}
1546 
1547 		if (nsegs != 1) {
1548 			i--;
1549 			continue;
1550 		}
1551 
1552 		rq->packets_in = in;
1553 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1554 
1555 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1556 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1557 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1558 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1559 		RING_PUT(rq->ring, 1);
1560 		added++;
1561 		rq->pending++;
1562 	}
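	/*
	 * Publish the refilled buffers through the RX ULP doorbell.  A
	 * single write can post at most OCE_MAX_RQ_POSTS buffers, so a
	 * large batch is flushed in chunks with the remainder last.
	 */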
1563 	if (added != 0) {
1564 		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1565 			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1566 			rxdb_reg.bits.qid = rq->rq_id;
1567 			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1568 			added -= OCE_MAX_RQ_POSTS;
1569 		}
1570 		if (added > 0) {
1571 			rxdb_reg.bits.qid = rq->rq_id;
1572 			rxdb_reg.bits.num_posted = added;
1573 			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1574 		}
1575 	}
1576 
1577 	return 0;
1578 }
1579 
1580 
1581 /* Handle the Completion Queue for receive */
1582 uint16_t
1583 oce_rq_handler(void *arg)
1584 {
1585 	struct oce_rq *rq = (struct oce_rq *)arg;
1586 	struct oce_cq *cq = rq->cq;
1587 	POCE_SOFTC sc = rq->parent;
1588 	struct oce_nic_rx_cqe *cqe;
1589 	int num_cqes = 0, rq_buffers_used = 0;
1590 
1591 	bus_dmamap_sync(cq->ring->dma.tag,
1592 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1593 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1594 	while (cqe->u0.dw[2]) {
1595 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1596 
1597 		RING_GET(rq->ring, 1);
1598 		if (cqe->u0.s.error == 0) {
1599 			oce_rx(rq, cqe->u0.s.frag_index, cqe);
1600 		} else {
1601 			rq->rx_stats.rxcp_err++;
1602 			sc->ifp->if_ierrors++;
1603 			/* Post L3/L4 errors to the stack. */
1604 			oce_rx(rq, cqe->u0.s.frag_index, cqe);
1605 		}
1606 		rq->rx_stats.rx_compl++;
1607 		cqe->u0.dw[2] = 0;
1608 
1609 #if defined(INET6) || defined(INET)
1610 #if 0 /* XXX swildner: LRO */
1611 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1612 			oce_rx_flush_lro(rq);
1613 		}
1614 #endif
1615 #endif
1616 
1617 		RING_GET(cq->ring, 1);
1618 		bus_dmamap_sync(cq->ring->dma.tag,
1619 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1620 		cqe =
1621 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1622 		num_cqes++;
1623 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1624 			break;
1625 	}
1626 
1627 #if defined(INET6) || defined(INET)
1628 #if 0 /* XXX swildner: LRO */
1629 	if (IF_LRO_ENABLED(sc))
1630 		oce_rx_flush_lro(rq);
1631 #endif
1632 #endif
1633 
1634 	if (num_cqes) {
1635 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1636 		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1637 		if (rq_buffers_used > 1)
1638 			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1639 	}
1640 
1641 	return 0;
1642 
1643 }
1644 
1645 
1646 
1647 
1648 /*****************************************************************************
1649  *			   Helper functions				     *
1650  *****************************************************************************/
1651 
1652 static int
1653 oce_attach_ifp(POCE_SOFTC sc)
1654 {
1655 
1656 	sc->ifp = if_alloc(IFT_ETHER);
1657 	if (!sc->ifp)
1658 		return ENOMEM;
1659 
1660 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1661 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1662 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1663 
1664 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1665 	sc->ifp->if_ioctl = oce_ioctl;
1666 	sc->ifp->if_start = oce_start;
1667 	sc->ifp->if_init = oce_init;
1668 	sc->ifp->if_mtu = ETHERMTU;
1669 	sc->ifp->if_softc = sc;
1670 #if 0 /* XXX swildner: MULTIQUEUE */
1671 	sc->ifp->if_transmit = oce_multiq_start;
1672 	sc->ifp->if_qflush = oce_multiq_flush;
1673 #endif
1674 
1675 	if_initname(sc->ifp,
1676 		    device_get_name(sc->dev), device_get_unit(sc->dev));
1677 
1678 	sc->ifp->if_nmbclusters = sc->nrqs * sc->rq[0]->cfg.q_len;
1679 
1680 	ifq_set_maxlen(&sc->ifp->if_snd, OCE_MAX_TX_DESC - 1);
1681 	ifq_set_ready(&sc->ifp->if_snd);
1682 
1683 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1684 	sc->ifp->if_hwassist |= CSUM_TSO;
1685 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1686 
1687 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1688 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1689 #if 0 /* XXX swildner: VLAN_HWFILTER */
1690 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1691 #endif
1692 
1693 #if defined(INET6) || defined(INET)
1694 	sc->ifp->if_capabilities |= IFCAP_TSO;
1695 #if 0 /* XXX swildner: LRO */
1696 	sc->ifp->if_capabilities |= IFCAP_LRO;
1697 #endif
1698 #if 0 /* XXX swildner: VLAN_HWTSO */
1699 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1700 #endif
1701 #endif
1702 
1703 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
1704 	sc->ifp->if_baudrate = IF_Gbps(10UL);
1705 
1706 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr, NULL);
1707 
1708 	return 0;
1709 }
1710 
1711 
1712 static void
1713 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1714 {
1715 	POCE_SOFTC sc = ifp->if_softc;
1716 
1717 	if (ifp->if_softc !=  arg)
1718 		return;
1719 	if ((vtag == 0) || (vtag > 4095))
1720 		return;
1721 
1722 	sc->vlan_tag[vtag] = 1;
1723 	sc->vlans_added++;
1724 	oce_vid_config(sc);
1725 }
1726 
1727 
1728 static void
1729 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1730 {
1731 	POCE_SOFTC sc = ifp->if_softc;
1732 
1733 	if (ifp->if_softc !=  arg)
1734 		return;
1735 	if ((vtag == 0) || (vtag > 4095))
1736 		return;
1737 
1738 	sc->vlan_tag[vtag] = 0;
1739 	sc->vlans_added--;
1740 	oce_vid_config(sc);
1741 }
1742 
1743 
1744 /*
1745  * A max of 64 vlans can be configured in BE. If the user configures
1746  * more, place the card in vlan promiscuous mode.
1747  */
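/*
 * Note: the trailing arguments to oce_config_vlan() are taken here to
 * select untagged traffic and vlan-promiscuous mode respectively, so
 * the fallback call (NULL, 0, 1, 1) is read as a request for
 * vlan-promiscuous reception with untagged traffic allowed.
 */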
1748 static int
1749 oce_vid_config(POCE_SOFTC sc)
1750 {
1751 #if 0 /* XXX swildner: VLAN_HWFILTER */
1752 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1753 	uint16_t ntags = 0, i;
1754 #endif
1755 	int status = 0;
1756 
1757 #if 0 /* XXX swildner: VLAN_HWFILTER */
1758 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1759 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1760 		for (i = 0; i < MAX_VLANS; i++) {
1761 			if (sc->vlan_tag[i]) {
1762 				vtags[ntags].vtag = i;
1763 				ntags++;
1764 			}
1765 		}
1766 		if (ntags)
1767 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1768 						vtags, ntags, 1, 0);
1769 	} else
1770 #endif
1771 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1772 						NULL, 0, 1, 1);
1773 	return status;
1774 }
1775 
1776 
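/*
 * Keep the hardware MAC filter in sync with the interface lladdr: when
 * they differ, the new address is added first and the old pmac entry
 * deleted only afterwards, so the port always has a valid filter.
 */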
1777 static void
1778 oce_mac_addr_set(POCE_SOFTC sc)
1779 {
1780 	uint32_t old_pmac_id = sc->pmac_id;
1781 	int status = 0;
1782 
1783 
1784 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1785 			 sc->macaddr.size_of_struct);
1786 	if (!status)
1787 		return;
1788 
1789 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1790 					sc->if_id, &sc->pmac_id);
1791 	if (!status) {
1792 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1793 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1794 				 sc->macaddr.size_of_struct);
1795 	}
1796 	if (status)
1797 		device_printf(sc->dev, "Failed to update MAC address\n");
1798 
1799 }
1800 
1801 
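/*
 * SIOCGPRIVATE_0 passthrough: the user buffer begins with IOCTL_COOKIE
 * followed by a firmware mailbox request.  The cookie is validated, the
 * request bounced through DMA-able memory to the firmware, and the
 * response copied back into the same user buffer.
 */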
1802 static int
1803 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1804 {
1805 	POCE_SOFTC sc = ifp->if_softc;
1806 	struct ifreq *ifr = (struct ifreq *)data;
1807 	int rc = ENXIO;
1808 	char cookie[32] = {0};
1809 	void *priv_data = (void *)ifr->ifr_data;
1810 	void *ioctl_ptr;
1811 	uint32_t req_size;
1812 	struct mbx_hdr req;
1813 	OCE_DMA_MEM dma_mem;
1814 	struct mbx_common_get_cntl_attr *fw_cmd;
1815 
1816 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1817 		return EFAULT;
1818 
1819 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1820 		return EINVAL;
1821 
1822 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1823 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1824 		return EFAULT;
1825 
1826 	req_size = le32toh(req.u0.req.request_length);
1827 	if (req_size > 65536)
1828 		return EINVAL;
1829 
1830 	req_size += sizeof(struct mbx_hdr);
1831 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1832 	if (rc)
1833 		return ENOMEM;
1834 
1835 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1836 		rc = EFAULT;
1837 		goto dma_free;
1838 	}
1839 
1840 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1841 	if (rc) {
1842 		rc = EIO;
1843 		goto dma_free;
1844 	}
1845 
1846 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1847 		rc =  EFAULT;
1848 
1849 	/*
1850 	   The firmware fills in all the attributes for this ioctl except
1851 	   the driver version, so fill it in here.
1852 	 */
1853 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1854 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1855 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1856 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1857 	}
1858 
1859 dma_free:
1860 	oce_dma_free(sc, &dma_mem);
1861 	return rc;
1862 
1863 }
1864 
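/*
 * Adaptive interrupt coalescing, driven from oce_local_timer(): compute
 * each EQ's interrupt rate from the change in eqo->intr over the elapsed
 * ticks, bump the eq delay when the rate exceeds INTR_RATE_HWM, halve it
 * below INTR_RATE_LWM, and clamp the result to [min_eqd, max_eqd].  The
 * hardware multiplier is programmed as eqd * 65 / 100.
 */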
1865 static void
1866 oce_eqd_set_periodic(POCE_SOFTC sc)
1867 {
1868 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1869 	struct oce_aic_obj *aic;
1870 	struct oce_eq *eqo;
1871 	uint64_t now = 0, delta;
1872 	int eqd, i, num = 0;
1873 	uint32_t ips = 0;
1874 	int tps;
1875 
1876 	for (i = 0 ; i < sc->neqs; i++) {
1877 		eqo = sc->eq[i];
1878 		aic = &sc->aic_obj[i];
1879 		/* Use the static eq delay set from userspace */
1880 		if (!aic->enable) {
1881 			eqd = aic->et_eqd;
1882 			goto modify_eqd;
1883 		}
1884 
1885 		now = ticks;
1886 
1887 		/* Overflow check */
1888 		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1889 			goto done;
1890 
1891 		delta = now - aic->ticks;
1892 		tps = delta/hz;
1893 
1894 		/* Interrupt rate based on elapsed ticks */
1895 		if(tps)
1896 			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1897 
1898 		if (ips > INTR_RATE_HWM)
1899 			eqd = aic->cur_eqd + 20;
1900 		else if (ips < INTR_RATE_LWM)
1901 			eqd = aic->cur_eqd / 2;
1902 		else
1903 			goto done;
1904 
1905 		if (eqd < 10)
1906 			eqd = 0;
1907 
1908 		/* Make sure that the eq delay is in the known range */
1909 		eqd = min(eqd, aic->max_eqd);
1910 		eqd = max(eqd, aic->min_eqd);
1911 
1912 modify_eqd:
1913 		if (eqd != aic->cur_eqd) {
1914 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
1915 			set_eqd[num].eq_id = eqo->eq_id;
1916 			aic->cur_eqd = eqd;
1917 			num++;
1918 		}
1919 done:
1920 		aic->intr_prev = eqo->intr;
1921 		aic->ticks = now;
1922 	}
1923 
1924 	/* Is there at least one eq that needs to be modified? */
1925 	if(num)
1926 		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1927 
1928 }
1929 
1930 static void
1931 oce_local_timer(void *arg)
1932 {
1933 	POCE_SOFTC sc = arg;
1934 	int i = 0;
1935 
1936 	lwkt_serialize_enter(sc->ifp->if_serializer);
1937 	oce_refresh_nic_stats(sc);
1938 	oce_refresh_queue_stats(sc);
1939 	oce_mac_addr_set(sc);
1940 
1941 	/* TX watchdog */
1942 	for (i = 0; i < sc->nwqs; i++)
1943 		oce_tx_restart(sc, sc->wq[i]);
1944 
1945 	/* calculate and set the eq delay for optimal interrupt rate */
1946 	if (IS_BE(sc) || IS_SH(sc))
1947 		oce_eqd_set_periodic(sc);
1948 
1949 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
1950 	lwkt_serialize_exit(sc->ifp->if_serializer);
1951 }
1952 
1953 
1954 /* NOTE: This should only be called while holding
1955  *       DEVICE_LOCK.
1956  */
1957 static void
1958 oce_if_deactivate(POCE_SOFTC sc)
1959 {
1960 	int i, mtime = 0;
1961 	int wait_req = 0;
1962 	struct oce_rq *rq;
1963 	struct oce_wq *wq;
1964 	struct oce_eq *eq;
1965 
1966 	sc->ifp->if_flags &= ~IFF_RUNNING;
1967 	ifq_clr_oactive(&sc->ifp->if_snd);
1968 
1969 	/* Wait a maximum of 400ms for TX completions to finish */
1970 	while (mtime < 400) {
1971 		wait_req = 0;
1972 		for_all_wq_queues(sc, wq, i) {
1973 			if (wq->ring->num_used) {
1974 				wait_req = 1;
1975 				DELAY(1);
1976 				break;
1977 			}
1978 		}
1979 		mtime += 1;
1980 		if (!wait_req)
1981 			break;
1982 	}
1983 
1984 	/* Stop intrs and finish any bottom halves pending */
1985 	oce_hw_intr_disable(sc);
1986 
1987 	/* Since taskqueue_drain takes the Giant Lock, we should not hold
1988 	   any other lock. So unlock the device lock and reacquire it after
1989 	   taskqueue_drain completes.
1990 	*/
1991 	UNLOCK(&sc->dev_lock);
1992 	for (i = 0; i < sc->intr_count; i++) {
1993 		if (sc->intrs[i].tq != NULL) {
1994 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
1995 		}
1996 	}
1997 	LOCK(&sc->dev_lock);
1998 
1999 	/* Delete RX queue in card with flush param */
2000 	oce_stop_rx(sc);
2001 
2002 	/* Invalidate any pending cq and eq entries */
2003 	for_all_evnt_queues(sc, eq, i)
2004 		oce_drain_eq(eq);
2005 	for_all_rq_queues(sc, rq, i)
2006 		oce_drain_rq_cq(rq);
2007 	for_all_wq_queues(sc, wq, i)
2008 		oce_drain_wq_cq(wq);
2009 
2010 	/* We still need to receive MCC async events,
2011 	   so enable interrupts and also arm the first EQ.
2012 	*/
2013 	oce_hw_intr_enable(sc);
2014 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2015 
2016 	DELAY(10);
2017 }
2018 
2019 
2020 static void
2021 oce_if_activate(POCE_SOFTC sc)
2022 {
2023 	struct oce_eq *eq;
2024 	struct oce_rq *rq;
2025 	struct oce_wq *wq;
2026 	int i, rc = 0;
2027 
2028 	sc->ifp->if_flags |= IFF_RUNNING;
2029 
2030 	oce_hw_intr_disable(sc);
2031 
2032 	oce_start_rx(sc);
2033 
2034 	for_all_rq_queues(sc, rq, i) {
2035 		rc = oce_start_rq(rq);
2036 		if (rc)
2037 			device_printf(sc->dev, "Unable to start RX\n");
2038 	}
2039 
2040 	for_all_wq_queues(sc, wq, i) {
2041 		rc = oce_start_wq(wq);
2042 		if (rc)
2043 			device_printf(sc->dev, "Unable to start TX\n");
2044 	}
2045 
2046 
2047 	for_all_evnt_queues(sc, eq, i)
2048 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2049 
2050 	oce_hw_intr_enable(sc);
2051 
2052 }
2053 
2054 static void
2055 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2056 {
2057 	/* Update Link status */
2058 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2059 	     ASYNC_EVENT_LINK_UP) {
2060 		sc->link_status = ASYNC_EVENT_LINK_UP;
2061 		if_link_state_change(sc->ifp);
2062 	} else {
2063 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2064 		if_link_state_change(sc->ifp);
2065 	}
2066 
2067 	/* Update speed */
2068 	sc->link_speed = acqe->u0.s.speed;
2069 	sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
2070 
2071 }
2072 
2073 
2074 /* Handle the Completion Queue for the Mailbox/Async notifications */
2075 uint16_t
2076 oce_mq_handler(void *arg)
2077 {
2078 	struct oce_mq *mq = (struct oce_mq *)arg;
2079 	POCE_SOFTC sc = mq->parent;
2080 	struct oce_cq *cq = mq->cq;
2081 	int num_cqes = 0, evt_type = 0, optype = 0;
2082 	struct oce_mq_cqe *cqe;
2083 	struct oce_async_cqe_link_state *acqe;
2084 	struct oce_async_event_grp5_pvid_state *gcqe;
2085 	struct oce_async_event_qnq *dbgcqe;
2086 
2087 
2088 	bus_dmamap_sync(cq->ring->dma.tag,
2089 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2090 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2091 
2092 	while (cqe->u0.dw[3]) {
2093 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2094 		if (cqe->u0.s.async_event) {
2095 			evt_type = cqe->u0.s.event_type;
2096 			optype = cqe->u0.s.async_type;
2097 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2098 				/* Link status evt */
2099 				acqe = (struct oce_async_cqe_link_state *)cqe;
2100 				process_link_state(sc, acqe);
2101 			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
2102 				   (optype == ASYNC_EVENT_PVID_STATE)) {
2103 				/* GRP5 PVID */
2104 				gcqe =
2105 				(struct oce_async_event_grp5_pvid_state *)cqe;
2106 				if (gcqe->enabled)
2107 					sc->pvid = gcqe->tag & VLAN_VID_MASK;
2108 				else
2109 					sc->pvid = 0;
2110 
2111 			}
2112 			else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2113 				optype == ASYNC_EVENT_DEBUG_QNQ) {
2114 				dbgcqe =
2115 				(struct oce_async_event_qnq *)cqe;
2116 				if(dbgcqe->valid)
2117 					sc->qnqid = dbgcqe->vlan_tag;
2118 				sc->qnq_debug_event = TRUE;
2119 			}
2120 		}
2121 		cqe->u0.dw[3] = 0;
2122 		RING_GET(cq->ring, 1);
2123 		bus_dmamap_sync(cq->ring->dma.tag,
2124 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2125 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2126 		num_cqes++;
2127 	}
2128 
2129 	if (num_cqes)
2130 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2131 
2132 	return 0;
2133 }
2134 
2135 
2136 static void
2137 setup_max_queues_want(POCE_SOFTC sc)
2138 {
2139 	/* Check if it is a FLEX machine. If so, don't use RSS */
2140 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2141 	    (sc->function_mode & FNM_UMC_MODE)    ||
2142 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2143 	    (!is_rss_enabled(sc))		  ||
2144 	    (sc->flags & OCE_FLAGS_BE2)) {
2145 		sc->nrqs = 1;
2146 		sc->nwqs = 1;
2147 	}
2148 }
2149 
2150 
2151 static void
2152 update_queues_got(POCE_SOFTC sc)
2153 {
2154 	if (is_rss_enabled(sc)) {
2155 		sc->nrqs = sc->intr_count + 1;
2156 		sc->nwqs = sc->intr_count;
2157 	} else {
2158 		sc->nrqs = 1;
2159 		sc->nwqs = 1;
2160 	}
2161 }
2162 
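/*
 * BE3 A1 ASIC stall workaround: in QnQ/UMC mode an IPv6 packet with
 * extension headers can reportedly stall the ASIC.  oce_tx() uses
 * oce_tx_asic_stall_verify() to spot such packets and re-tags them via
 * oce_insert_vlan_tag() with the completion bit cleared.
 */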
2163 static int
2164 oce_check_ipv6_ext_hdr(struct mbuf *m)
2165 {
2166 	struct ether_header *eh = mtod(m, struct ether_header *);
2167 	caddr_t m_datatemp = m->m_data;
2168 
2169 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2170 		m->m_data += sizeof(struct ether_header);
2171 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2172 
2173 		if((ip6->ip6_nxt != IPPROTO_TCP) && \
2174 				(ip6->ip6_nxt != IPPROTO_UDP)){
2175 			struct ip6_ext *ip6e = NULL;
2176 			m->m_data += sizeof(struct ip6_hdr);
2177 
2178 			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2179 			if(ip6e->ip6e_len == 0xff) {
2180 				m->m_data = m_datatemp;
2181 				return TRUE;
2182 			}
2183 		}
2184 		m->m_data = m_datatemp;
2185 	}
2186 	return FALSE;
2187 }
2188 
2189 static int
2190 is_be3_a1(POCE_SOFTC sc)
2191 {
2192 	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
2193 		return TRUE;
2194 	}
2195 	return FALSE;
2196 }
2197 
2198 static struct mbuf *
2199 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2200 {
2201 	uint16_t vlan_tag = 0;
2202 
2203 	if(!M_WRITABLE(m))
2204 		return NULL;
2205 
2206 #if 0 /* XXX swildner: ETHER_VTAG */
2207 	/* Embed vlan tag in the packet if it is not part of it */
2208 	if(m->m_flags & M_VLANTAG) {
2209 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2210 		m->m_flags &= ~M_VLANTAG;
2211 	}
2212 #endif
2213 
2214 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2215 	if(sc->pvid) {
2216 		if(!vlan_tag)
2217 			vlan_tag = sc->pvid;
2218 		*complete = FALSE;
2219 	}
2220 
2221 #if 0 /* XXX swildner: ETHER_VTAG */
2222 	if(vlan_tag) {
2223 		m = ether_vlanencap(m, vlan_tag);
2224 	}
2225 
2226 	if(sc->qnqid) {
2227 		m = ether_vlanencap(m, sc->qnqid);
2228 		*complete = FALSE;
2229 	}
2230 #endif
2231 	return m;
2232 }
2233 
2234 static int
2235 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2236 {
2237 	if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2238 			oce_check_ipv6_ext_hdr(m)) {
2239 		return TRUE;
2240 	}
2241 	return FALSE;
2242 }
2243 
2244 static void
2245 oce_get_config(POCE_SOFTC sc)
2246 {
2247 	int rc = 0;
2248 	uint32_t max_rss = 0;
2249 
2250 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2251 		max_rss = OCE_LEGACY_MODE_RSS;
2252 	else
2253 		max_rss = OCE_MAX_RSS;
2254 
2255 	if (!IS_BE(sc)) {
2256 		rc = oce_get_func_config(sc);
2257 		if (rc) {
2258 			sc->nwqs = OCE_MAX_WQ;
2259 			sc->nrssqs = max_rss;
2260 			sc->nrqs = sc->nrssqs + 1;
2261 		}
2262 	}
2263 	else {
2264 		rc = oce_get_profile_config(sc);
2265 		sc->nrssqs = max_rss;
2266 		sc->nrqs = sc->nrssqs + 1;
2267 		if (rc)
2268 			sc->nwqs = OCE_MAX_WQ;
2269 	}
2270 }
2271