/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */


/* $FreeBSD: src/sys/dev/oce/oce_if.c,v 1.14 2013/07/07 00:30:13 svnexp Exp $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr);
static void oce_init(void *xsc);
#if 0 /* XXX swildner: MULTIQUEUE */
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);
#endif

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static void oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
#if 0 /* XXX swildner: MULTIQUEUE */
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);
#endif

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
#if 0 /* XXX swildner: ETHER_VTAG */
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
#endif
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
#endif
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, NULL, NULL);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
#if 0 /* XXX swildner: RSS */
uint32_t oce_enable_rss = OCE_MODCAP_RSS;
#else
uint32_t oce_enable_rss = 0;
#endif


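/*
 * Loader tunables: hw.oce.max_rsp_handled caps how many RX completions are
 * processed per interrupt (see oce_rq_handler); hw.oce.enable_rss is forced
 * to 0 above while RSS support is compiled out.
 */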
TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

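	/*
	 * Each supportedDevices entry packs the PCI vendor ID into the upper
	 * 16 bits and the device ID into the lower 16; match both halves.
	 */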
	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				ksprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}


static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init_mp(&sc->timer);
	callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);

	return 0;

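/*
 * Error unwind: each label below releases everything set up before the
 * corresponding failure point, in reverse order of allocation.
 */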
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
	oce_free_lro(sc);
ifp_free:
#endif
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_terminate(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);
	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}


static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				sc->ifp->if_flags |= IFF_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_flags &= ~IFF_RUNNING;
			ifq_clr_oactive(&ifp->if_snd);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			sc->promisc = TRUE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			sc->promisc = FALSE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

#if 0 /* XXX swildner: VLAN_HWFILTER */
		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#endif
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
		if (rc == 0)
			rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}


static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);

}


#if 0 /* XXX swildner: MULTIQUEUE */
static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (!sc->link_status) {
		ifq_purge(&ifp->if_snd);
		return ENXIO;
	}

	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf     *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}
#endif



/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
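	/* Harvest all pending EQ entries, clearing each valid bit as we consume it */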
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC, but don't arm the CQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0;
#if 0 /* XXX swildner: MSI-X */
	int req_vectors = 0;

	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
#endif
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on intr we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
#if 0 /* XXX swildner: MSI-X */
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
#endif
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


void
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue(ii->tq, &ii->task);

	ii->eq->intr++;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;
	u_int irq_flags;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

#if 0 /* XXX swildner: MSI-X */
	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
#endif
		rr = 0;
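	/*
	 * DragonFly: pci_alloc_1intr() selects MSI or a legacy INTx interrupt
	 * (depending on OCE_FLAGS_USING_MSI) and returns the rid and flags to
	 * use for the resource allocation below.
	 */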
	ii->irq_type = pci_alloc_1intr(sc->dev,
	    sc->flags & OCE_FLAGS_USING_MSI, &rr, &irq_flags);
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, irq_flags);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	ksprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			0,
			oce_fast_isr, ii, &ii->tag, NULL);
	return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX ||
	    sc->flags & OCE_FLAGS_USING_MSI)
		pci_release_msi(sc->dev);

}



/******************************************************************************
*			  Media callbacks functions			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}




/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if (oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if (!m) {
			device_printf(sc->dev, "Insertion unsuccessful\n");
			return 0;
		}

	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];
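	/*
	 * Map the mbuf chain for DMA; on EFBIG the chain is defragmented once
	 * (see the EFBIG case below) and the load is retried from here.
	 */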
retry:
	rc = bus_dmamap_load_mbuf_defrag(wq->tag,
				     pd->map,
				     mpp, segs, OCE_MAX_TX_ELEMENTS,
				     &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy WQE required only for BE3 */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) %
				      OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;
#if 0 /* XXX swildner: ETHER_VTAG */
		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* Vlan present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}
#endif
		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			if (!IS_BE(sc) || !IS_SH(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
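		/* Ring the TX doorbell: WQE count in the upper 16 bits, WQ id in the lower */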
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG)	{
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}


static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;

	if (ifq_is_oactive(&sc->ifp->if_snd)) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			ifq_clr_oactive(&sc->ifp->if_snd);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
		return;

#if 0 /* __FreeBSD_version >= 800000 */
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!ifq_is_empty(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

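	/* Make the whole Ethernet/IP/TCP header contiguous in the first mbuf */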
	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;

}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
#if 0 /* XXX swildner: MULTIQUEUE */
	int rc = 0;

	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	lwkt_serialize_enter(ifp->if_serializer);
	oce_start_locked(ifp);
	lwkt_serialize_exit(ifp->if_serializer);
#endif
}


void
oce_start_locked(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default tx queue is 0 */

	if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd)))
		return;

	if (!sc->link_status) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	do {
		m = ifq_dequeue(&sc->ifp->if_snd);
		if (m == NULL)
			break;

		rc = oce_tx(sc, &m, def_q);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops++;
				ifq_set_oactive(&ifp->if_snd);
				ifq_prepend(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}

void
oce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	oce_start_locked(ifp);
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
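	/* A non-zero dw[3] marks a valid, not-yet-consumed TX completion */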
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


#if 0 /* XXX swildner: MULTIQUEUE */
static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd))) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops++;
				ifq_set_oactive(&ifp->if_snd);
				status = drbr_enqueue(ifp, br, next);
			}
			break;
		}
		drbr_advance(ifp, br);
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
	}

	return status;
}
#endif




/*****************************************************************************
 *			    Receive routines functions			     *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get vlan_tag value */
	if (IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

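	/* Chain the RX fragments into one mbuf; the first fragment carries the pkthdr */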
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if 0 /* __FreeBSD_version >= 800000 */
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
#if 0 /* XXX swildner: ETHER_VTAG */
		/* This determines if the vlan tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
				/* In UMC mode the pvid is generally stripped
				   by hw, but in some cases frames still arrive
				   with it. So if pvid == vlan, ignore the vlan.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
#endif

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif
#endif

		sc->ifp->if_input(sc->ifp, m, NULL, -1);
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
post_done:
#endif
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}

}


#if 0 /* XXX swildner: ETHER_VTAG */
static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp =  cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;

}
#endif


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id =  cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else {
		;/* For BE3 legacy and Lancer this is dummy */
	}

	return 1;

}

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif
#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_segment(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, 1,
					     &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
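	/* Publish the new buffers to hardware, at most OCE_MAX_RQ_POSTS per doorbell write */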
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}


/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			/* Post L3/L4 errors to stack */
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;

}



/*****************************************************************************
 *			Helper functions used in this file		     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if 0 /* XXX swildner: MULTIQUEUE */
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_nmbclusters = sc->nrqs * sc->rq[0]->cfg.q_len;

	ifq_set_maxlen(&sc->ifp->if_snd, OCE_MAX_TX_DESC - 1);
	ifq_set_ready(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
#if 0 /* XXX swildner: VLAN_HWFILTER */
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
#endif

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
#if 0 /* XXX swildner: LRO */
	sc->ifp->if_capabilities |= IFCAP_LRO;
#endif
#if 0 /* XXX swildner: VLAN_HWTSO */
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	sc->ifp->if_baudrate = IF_Gbps(10UL);

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr, NULL);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
#if 0 /* XXX swildner: VLAN_HWFILTER */
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
#endif
	int status = 0;

#if 0 /* XXX swildner: VLAN_HWFILTER */
	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
#endif
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						NULL, 0, 1, 1);
	return status;
}


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");

}


static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
	struct mbx_common_get_cntl_attr *fw_cmd;

	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc =  EFAULT;

	/*
	 * The firmware fills in all the attributes for this ioctl except
	 * the driver version, so fill that in here.
	 */
	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}

static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;
	int tps;

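	/*
	 * Adaptive interrupt coalescing: compare each EQ's interrupt rate with
	 * INTR_RATE_HWM/INTR_RATE_LWM and grow or shrink its delay accordingly.
	 */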
	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from the user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Overflow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta / hz;

		/* Interrupt rate based on elapsed ticks */
		if (tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there at least one eq that needs to be modified? */
	if (num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);

}

static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	lwkt_serialize_enter(sc->ifp->if_serializer);
	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
	lwkt_serialize_exit(sc->ifp->if_serializer);
}


/* NOTE : This should only be called holding
 *        DEVICE_LOCK.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&sc->ifp->if_snd);

	/* Wait for a max of 400ms for TX completions to be done */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes the Giant Lock, we should not hold
	   any other lock. So unlock the device lock and reacquire it after
	   taskqueue_drain completes.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to receive MCC async events,
	   so enable intrs and also arm the first EQ
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_flags |= IFF_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp);
	}

	/* Update speed */
	sc->link_speed = acqe->u0.s.speed;
	sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;

}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

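	/* Consume MCC completions; async events report link state, GRP5 PVID and QnQ debug state */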
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			}
			else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
				optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe =
				(struct oce_async_event_qnq *)cqe;
				if (dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!is_rss_enabled(sc))		  ||
	    (sc->flags & OCE_FLAGS_BE2)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}


static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}

static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
				(ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}

static int
is_be3_a1(POCE_SOFTC sc)
{
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}

static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if (!M_WRITABLE(m))
		return NULL;

#if 0 /* XXX swildner: ETHER_VTAG */
	/* Embed vlan tag in the packet if it is not part of it */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}
#endif

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

#if 0 /* XXX swildner: ETHER_VTAG */
	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
	}

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		*complete = FALSE;
	}
#endif
	return m;
}

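/*
 * The BE3-A1 TX stall workaround applies when QnQ or UMC is active and the
 * packet carries IPv6 extension headers; oce_tx() then re-tags such frames
 * via oce_insert_vlan_tag().
 */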
static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
			oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}

static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_func_config(sc);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else {
		rc = oce_get_profile_config(sc);
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		if (rc)
			sc->nwqs = OCE_MAX_WQ;
	}
}