xref: /freebsd/sys/dev/oce/oce_if.c (revision 0957b409)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
49 #define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
50 
51 /* UE Status Low CSR */
52 static char *ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 
87 /* UE Status High CSR */
88 static char *ue_status_hi_desc[] = {
89         "LPCMEMHOST",
90         "MGMT_MAC",
91         "PCS0ONLINE",
92         "MPU_IRAM",
93         "PCS1ONLINE",
94         "PCTL0",
95         "PCTL1",
96         "PMEM",
97         "RR",
98         "TXPB",
99         "RXPP",
100         "XAUI",
101         "TXP",
102         "ARM",
103         "IPC",
104         "HOST2",
105         "HOST3",
106         "HOST4",
107         "HOST5",
108         "HOST6",
109         "HOST7",
110         "HOST8",
111         "HOST9",
112         "NETC",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown"
121 };
122 
123 struct oce_common_cqe_info {
124         uint8_t vtp:1;
125         uint8_t l4_cksum_pass:1;
126         uint8_t ip_cksum_pass:1;
127         uint8_t ipv6_frame:1;
128         uint8_t qnq:1;
129         uint8_t rsvd:3;
130         uint8_t num_frags;
131         uint16_t pkt_size;
132         uint16_t vtag;
133 };
134 
135 
136 /* Driver entry points prototypes */
137 static int  oce_probe(device_t dev);
138 static int  oce_attach(device_t dev);
139 static int  oce_detach(device_t dev);
140 static int  oce_shutdown(device_t dev);
141 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
142 static void oce_init(void *xsc);
143 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
144 static void oce_multiq_flush(struct ifnet *ifp);
145 
146 /* Driver interrupt routines prototypes */
147 static void oce_intr(void *arg, int pending);
148 static int  oce_setup_intr(POCE_SOFTC sc);
149 static int  oce_fast_isr(void *arg);
150 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
151 			  void (*isr) (void *arg, int pending));
152 
153 /* Media callbacks prototypes */
154 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
155 static int  oce_media_change(struct ifnet *ifp);
156 
157 /* Transmit routines prototypes */
158 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
159 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
160 static void oce_process_tx_completion(struct oce_wq *wq);
161 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
162 				 struct oce_wq *wq);
163 
164 /* Receive routines prototypes */
165 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
167 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
168 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static uint16_t oce_rq_handler_lro(void *arg);
170 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
172 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
173 
174 /* Helper function prototypes in this file */
175 static int  oce_attach_ifp(POCE_SOFTC sc);
176 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
177 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
178 static int  oce_vid_config(POCE_SOFTC sc);
179 static void oce_mac_addr_set(POCE_SOFTC sc);
180 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
181 static void oce_local_timer(void *arg);
182 static void oce_if_deactivate(POCE_SOFTC sc);
183 static void oce_if_activate(POCE_SOFTC sc);
184 static void setup_max_queues_want(POCE_SOFTC sc);
185 static void update_queues_got(POCE_SOFTC sc);
186 static void process_link_state(POCE_SOFTC sc,
187 		 struct oce_async_cqe_link_state *acqe);
188 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
189 static void oce_get_config(POCE_SOFTC sc);
190 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
191 static void oce_read_env_variables(POCE_SOFTC sc);
192 
193 
194 /* IP specific */
195 #if defined(INET6) || defined(INET)
196 static int  oce_init_lro(POCE_SOFTC sc);
197 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
198 #endif
199 
200 static device_method_t oce_dispatch[] = {
201 	DEVMETHOD(device_probe, oce_probe),
202 	DEVMETHOD(device_attach, oce_attach),
203 	DEVMETHOD(device_detach, oce_detach),
204 	DEVMETHOD(device_shutdown, oce_shutdown),
205 
206 	DEVMETHOD_END
207 };
208 
209 static driver_t oce_driver = {
210 	"oce",
211 	oce_dispatch,
212 	sizeof(OCE_SOFTC)
213 };
214 static devclass_t oce_devclass;
215 
216 
217 /* global vars */
218 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
219 
220 /* Module capabilities and parameters */
221 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
222 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
223 uint32_t oce_rq_buf_size = 2048;
224 
225 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
226 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
227 
228 
229 /* Supported devices table */
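/* Each entry packs the PCI vendor id in the upper 16 bits and the device
 * id in the lower 16 bits, matching the "W32:vendor/device" PNP
 * descriptor registered below. */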
230 static uint32_t supportedDevices[] =  {
231 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
232 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
233 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
234 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
235 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
236 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
237 };
238 
239 
240 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
241 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
242     nitems(supportedDevices));
243 MODULE_DEPEND(oce, pci, 1, 1, 1);
244 MODULE_DEPEND(oce, ether, 1, 1, 1);
245 MODULE_VERSION(oce, 1);
246 
247 
248 POCE_SOFTC softc_head = NULL;
249 POCE_SOFTC softc_tail = NULL;
250 
251 struct oce_rdma_if *oce_rdma_if = NULL;
252 
253 /*****************************************************************************
254  *			Driver entry points functions                        *
255  *****************************************************************************/
256 
257 static int
258 oce_probe(device_t dev)
259 {
260 	uint16_t vendor = 0;
261 	uint16_t device = 0;
262 	int i = 0;
263 	char str[256] = {0};
264 	POCE_SOFTC sc;
265 
266 	sc = device_get_softc(dev);
267 	bzero(sc, sizeof(OCE_SOFTC));
268 	sc->dev = dev;
269 
270 	vendor = pci_get_vendor(dev);
271 	device = pci_get_device(dev);
272 
273 	for (i = 0; i < nitems(supportedDevices); i++) {
274 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
275 			if (device == (supportedDevices[i] & 0xffff)) {
276 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
277 					component_revision);
278 				device_set_desc_copy(dev, str);
279 
280 				switch (device) {
281 				case PCI_PRODUCT_BE2:
282 					sc->flags |= OCE_FLAGS_BE2;
283 					break;
284 				case PCI_PRODUCT_BE3:
285 					sc->flags |= OCE_FLAGS_BE3;
286 					break;
287 				case PCI_PRODUCT_XE201:
288 				case PCI_PRODUCT_XE201_VF:
289 					sc->flags |= OCE_FLAGS_XE201;
290 					break;
291 				case PCI_PRODUCT_SH:
292 					sc->flags |= OCE_FLAGS_SH;
293 					break;
294 				default:
295 					return ENXIO;
296 				}
297 				return BUS_PROBE_DEFAULT;
298 			}
299 		}
300 	}
301 
302 	return ENXIO;
303 }
304 
305 
306 static int
307 oce_attach(device_t dev)
308 {
309 	POCE_SOFTC sc;
310 	int rc = 0;
311 
312 	sc = device_get_softc(dev);
313 
314 	rc = oce_hw_pci_alloc(sc);
315 	if (rc)
316 		return rc;
317 
318 	sc->tx_ring_size = OCE_TX_RING_SIZE;
319 	sc->rx_ring_size = OCE_RX_RING_SIZE;
320 	/* receive fragment size should be multiple of 2K */
321 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
322 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
323 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
324 
325 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
326 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
327 
328 	/* initialise the hardware */
329 	rc = oce_hw_init(sc);
330 	if (rc)
331 		goto pci_res_free;
332 
333 	oce_read_env_variables(sc);
334 
335 	oce_get_config(sc);
336 
337 	setup_max_queues_want(sc);
338 
339 	rc = oce_setup_intr(sc);
340 	if (rc)
341 		goto mbox_free;
342 
343 	rc = oce_queue_init_all(sc);
344 	if (rc)
345 		goto intr_free;
346 
347 	rc = oce_attach_ifp(sc);
348 	if (rc)
349 		goto queues_free;
350 
351 #if defined(INET6) || defined(INET)
352 	rc = oce_init_lro(sc);
353 	if (rc)
354 		goto ifp_free;
355 #endif
356 
357 	rc = oce_hw_start(sc);
358 	if (rc)
359 		goto lro_free;
360 
361 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
362 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
363 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
364 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
365 
366 	rc = oce_stats_init(sc);
367 	if (rc)
368 		goto vlan_free;
369 
370 	oce_add_sysctls(sc);
371 
372 	callout_init(&sc->timer, CALLOUT_MPSAFE);
373 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
374 	if (rc)
375 		goto stats_free;
376 
377 	sc->next = NULL;
378 	if (softc_tail != NULL) {
379 	  softc_tail->next = sc;
380 	} else {
381 	  softc_head = sc;
382 	}
383 	softc_tail = sc;
384 
385 	return 0;
386 
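/*
 * Error unwind: control falls through the labels below in reverse order
 * of acquisition, so each failure point releases exactly what had been
 * set up before it failed.
 */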
387 stats_free:
388 	callout_drain(&sc->timer);
389 	oce_stats_free(sc);
390 vlan_free:
391 	if (sc->vlan_attach)
392 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
393 	if (sc->vlan_detach)
394 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
395 	oce_hw_intr_disable(sc);
396 lro_free:
397 #if defined(INET6) || defined(INET)
398 	oce_free_lro(sc);
399 ifp_free:
400 #endif
401 	ether_ifdetach(sc->ifp);
402 	if_free(sc->ifp);
403 queues_free:
404 	oce_queue_release_all(sc);
405 intr_free:
406 	oce_intr_free(sc);
407 mbox_free:
408 	oce_dma_free(sc, &sc->bsmbx);
409 pci_res_free:
410 	oce_hw_pci_free(sc);
411 	LOCK_DESTROY(&sc->dev_lock);
412 	LOCK_DESTROY(&sc->bmbx_lock);
413 	return rc;
414 
415 }
416 
417 
418 static int
419 oce_detach(device_t dev)
420 {
421 	POCE_SOFTC sc = device_get_softc(dev);
422 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
423 
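	/* Unlink this softc from the driver-global list (softc_head /
	   softc_tail); the list is presumably kept for consumers such as
	   the RDMA interface that need to enumerate all oce ports. */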
424         poce_sc_tmp = softc_head;
425         ppoce_sc_tmp1 = &softc_head;
426         while (poce_sc_tmp != NULL) {
427           if (poce_sc_tmp == sc) {
428             *ppoce_sc_tmp1 = sc->next;
429             if (sc->next == NULL) {
430               softc_tail = poce_sc_tmp2;
431             }
432             break;
433           }
434           poce_sc_tmp2 = poce_sc_tmp;
435           ppoce_sc_tmp1 = &poce_sc_tmp->next;
436           poce_sc_tmp = poce_sc_tmp->next;
437         }
438 
439 	LOCK(&sc->dev_lock);
440 	oce_if_deactivate(sc);
441 	UNLOCK(&sc->dev_lock);
442 
443 	callout_drain(&sc->timer);
444 
445 	if (sc->vlan_attach != NULL)
446 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
447 	if (sc->vlan_detach != NULL)
448 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
449 
450 	ether_ifdetach(sc->ifp);
451 
452 	if_free(sc->ifp);
453 
454 	oce_hw_shutdown(sc);
455 
456 	bus_generic_detach(dev);
457 
458 	return 0;
459 }
460 
461 
462 static int
463 oce_shutdown(device_t dev)
464 {
465 	int rc;
466 
467 	rc = oce_detach(dev);
468 
469 	return rc;
470 }
471 
472 
473 static int
474 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
475 {
476 	struct ifreq *ifr = (struct ifreq *)data;
477 	POCE_SOFTC sc = ifp->if_softc;
478 	struct ifi2creq i2c;
479 	uint8_t	offset = 0;
480 	int rc = 0;
481 	uint32_t u;
482 
483 	switch (command) {
484 
485 	case SIOCGIFMEDIA:
486 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
487 		break;
488 
489 	case SIOCSIFMTU:
490 		if (ifr->ifr_mtu > OCE_MAX_MTU)
491 			rc = EINVAL;
492 		else
493 			ifp->if_mtu = ifr->ifr_mtu;
494 		break;
495 
496 	case SIOCSIFFLAGS:
497 		if (ifp->if_flags & IFF_UP) {
498 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
499 				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
500 				oce_init(sc);
501 			}
502 			device_printf(sc->dev, "Interface Up\n");
503 		} else {
504 			LOCK(&sc->dev_lock);
505 
506 			sc->ifp->if_drv_flags &=
507 			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
508 			oce_if_deactivate(sc);
509 
510 			UNLOCK(&sc->dev_lock);
511 
512 			device_printf(sc->dev, "Interface Down\n");
513 		}
514 
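		/* The value (1 | (1 << 1)) passed below appears to request
		   both MAC and VLAN promiscuous mode from the firmware. */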
515 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
516 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
517 				sc->promisc = TRUE;
518 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
519 			if (!oce_rxf_set_promiscuous(sc, 0))
520 				sc->promisc = FALSE;
521 		}
522 
523 		break;
524 
525 	case SIOCADDMULTI:
526 	case SIOCDELMULTI:
527 		rc = oce_hw_update_multicast(sc);
528 		if (rc)
529 			device_printf(sc->dev,
530 				"Update multicast address failed\n");
531 		break;
532 
533 	case SIOCSIFCAP:
534 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
535 
536 		if (u & IFCAP_TXCSUM) {
537 			ifp->if_capenable ^= IFCAP_TXCSUM;
538 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
539 
540 			if (IFCAP_TSO & ifp->if_capenable &&
541 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
542 				ifp->if_capenable &= ~IFCAP_TSO;
543 				ifp->if_hwassist &= ~CSUM_TSO;
544 				if_printf(ifp,
545 					 "TSO disabled due to -txcsum.\n");
546 			}
547 		}
548 
549 		if (u & IFCAP_RXCSUM)
550 			ifp->if_capenable ^= IFCAP_RXCSUM;
551 
552 		if (u & IFCAP_TSO4) {
553 			ifp->if_capenable ^= IFCAP_TSO4;
554 
555 			if (IFCAP_TSO & ifp->if_capenable) {
556 				if (IFCAP_TXCSUM & ifp->if_capenable)
557 					ifp->if_hwassist |= CSUM_TSO;
558 				else {
559 					ifp->if_capenable &= ~IFCAP_TSO;
560 					ifp->if_hwassist &= ~CSUM_TSO;
561 					if_printf(ifp,
562 					    "Enable txcsum first.\n");
563 					rc = EAGAIN;
564 				}
565 			} else
566 				ifp->if_hwassist &= ~CSUM_TSO;
567 		}
568 
569 		if (u & IFCAP_VLAN_HWTAGGING)
570 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
571 
572 		if (u & IFCAP_VLAN_HWFILTER) {
573 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
574 			oce_vid_config(sc);
575 		}
576 #if defined(INET6) || defined(INET)
577 		if (u & IFCAP_LRO) {
578 			ifp->if_capenable ^= IFCAP_LRO;
579 			if(sc->enable_hwlro) {
580 				if(ifp->if_capenable & IFCAP_LRO) {
581 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
582 				}else {
583 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
584 				}
585 			}
586 		}
587 #endif
588 
589 		break;
590 
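	/* SIOCGI2C: satisfy userland SFP/QSFP EEPROM reads from a firmware
	 * dump of the transceiver pages; page A2 data is stored after the
	 * A0 page in sfp_vpd_dump_buffer, hence the offset adjustment.
	 */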
591 	case SIOCGI2C:
592 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
593 		if (rc)
594 			break;
595 
596 		if (i2c.dev_addr != PAGE_NUM_A0 &&
597 		    i2c.dev_addr != PAGE_NUM_A2) {
598 			rc = EINVAL;
599 			break;
600 		}
601 
602 		if (i2c.len > sizeof(i2c.data)) {
603 			rc = EINVAL;
604 			break;
605 		}
606 
607 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
608 		if(rc) {
609 			rc = -rc;
610 			break;
611 		}
612 
613 		if (i2c.dev_addr == PAGE_NUM_A0)
614 			offset = i2c.offset;
615 		else
616 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
617 
618 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
619 
620 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
621 		break;
622 
623 	case SIOCGPRIVATE_0:
624 		rc = oce_handle_passthrough(ifp, data);
625 		break;
626 	default:
627 		rc = ether_ioctl(ifp, command, data);
628 		break;
629 	}
630 
631 	return rc;
632 }
633 
634 
635 static void
636 oce_init(void *arg)
637 {
638 	POCE_SOFTC sc = arg;
639 
640 	LOCK(&sc->dev_lock);
641 
642 	if (sc->ifp->if_flags & IFF_UP) {
643 		oce_if_deactivate(sc);
644 		oce_if_activate(sc);
645 	}
646 
647 	UNLOCK(&sc->dev_lock);
648 
649 }
650 
651 
652 static int
653 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
654 {
655 	POCE_SOFTC sc = ifp->if_softc;
656 	struct oce_wq *wq = NULL;
657 	int queue_index = 0;
658 	int status = 0;
659 
660 	if (!sc->link_status)
661 		return ENXIO;
662 
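	/* Spread transmit load across WQs using the RSS hash recorded in
	 * the mbuf: packets of the same flow always map to the same WQ,
	 * preserving per-flow ordering.
	 */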
663 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
664 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
665 
666 	wq = sc->wq[queue_index];
667 
668 	LOCK(&wq->tx_lock);
669 	status = oce_multiq_transmit(ifp, m, wq);
670 	UNLOCK(&wq->tx_lock);
671 
672 	return status;
673 
674 }
675 
676 
677 static void
678 oce_multiq_flush(struct ifnet *ifp)
679 {
680 	POCE_SOFTC sc = ifp->if_softc;
681 	struct mbuf     *m;
682 	int i = 0;
683 
684 	for (i = 0; i < sc->nwqs; i++) {
685 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
686 			m_freem(m);
687 	}
688 	if_qflush(ifp);
689 }
690 
691 
692 
693 /*****************************************************************************
694  *                   Driver interrupt routines functions                     *
695  *****************************************************************************/
696 
697 static void
698 oce_intr(void *arg, int pending)
699 {
700 
701 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
702 	POCE_SOFTC sc = ii->sc;
703 	struct oce_eq *eq = ii->eq;
704 	struct oce_eqe *eqe;
705 	struct oce_cq *cq = NULL;
706 	int i, num_eqes = 0;
707 
708 
709 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
710 				 BUS_DMASYNC_POSTWRITE);
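	/*
	 * Pass 1: consume and clear all pending EQ entries without
	 * re-arming, so the hardware does not raise another interrupt
	 * while the CQs attached to this EQ are serviced below.
	 */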
711 	do {
712 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
713 		if (eqe->evnt == 0)
714 			break;
715 		eqe->evnt = 0;
716 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
717 					BUS_DMASYNC_POSTWRITE);
718 		RING_GET(eq->ring, 1);
719 		num_eqes++;
720 
721 	} while (TRUE);
722 
723 	if (!num_eqes)
724 		goto eq_arm; /* Spurious */
725 
726 	/* Clear EQ entries, but don't arm */
727 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
728 
729 	/* Process TX, RX and MCC, but don't arm the CQs yet */
730 	for (i = 0; i < eq->cq_valid; i++) {
731 		cq = eq->cq[i];
732 		(*cq->cq_handler)(cq->cb_arg);
733 	}
734 
735 	/* Arm all cqs connected to this EQ */
736 	for (i = 0; i < eq->cq_valid; i++) {
737 		cq = eq->cq[i];
738 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
739 	}
740 
741 eq_arm:
742 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
743 
744 	return;
745 }
746 
747 
748 static int
749 oce_setup_intr(POCE_SOFTC sc)
750 {
751 	int rc = 0, use_intx = 0;
752 	int vector = 0, req_vectors = 0;
753 	int tot_req_vectors, tot_vectors;
754 
755 	if (is_rss_enabled(sc))
756 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
757 	else
758 		req_vectors = 1;
759 
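	/* When the RDMA function is present, request OCE_RDMA_VECTORS extra
	 * MSI-X vectors for RoCE on top of the NIC vectors; if the PCI
	 * layer grants fewer than requested, the shortfall is split between
	 * the NIC and RoCE below.
	 */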
760 	tot_req_vectors = req_vectors;
761 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
762 	  if (req_vectors > 1) {
763 	    tot_req_vectors += OCE_RDMA_VECTORS;
764 	    sc->roce_intr_count = OCE_RDMA_VECTORS;
765 	  }
766 	}
767 
768         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
769 		sc->intr_count = req_vectors;
770                 tot_vectors = tot_req_vectors;
771 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
772 		if (rc != 0) {
773 			use_intx = 1;
774 			pci_release_msi(sc->dev);
775 		} else {
776 		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
777 		    if (tot_vectors < tot_req_vectors) {
778 		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
779 			sc->roce_intr_count = (tot_vectors / 2);
780 		      }
781 		      sc->intr_count = tot_vectors - sc->roce_intr_count;
782 		    }
783 		  } else {
784 		    sc->intr_count = tot_vectors;
785 		  }
786     		  sc->flags |= OCE_FLAGS_USING_MSIX;
787 		}
788 	} else
789 		use_intx = 1;
790 
791 	if (use_intx)
792 		sc->intr_count = 1;
793 
794 	/* Scale number of queues based on intr we got */
795 	update_queues_got(sc);
796 
797 	if (use_intx) {
798 		device_printf(sc->dev, "Using legacy interrupt\n");
799 		rc = oce_alloc_intr(sc, vector, oce_intr);
800 		if (rc)
801 			goto error;
802 	} else {
803 		for (; vector < sc->intr_count; vector++) {
804 			rc = oce_alloc_intr(sc, vector, oce_intr);
805 			if (rc)
806 				goto error;
807 		}
808 	}
809 
810 	return 0;
811 error:
812 	oce_intr_free(sc);
813 	return rc;
814 }
815 
816 
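/*
 * Fast interrupt filter: runs in primary interrupt context, so it only
 * quiets the EQ (no re-arm) and defers the real completion processing to
 * the per-vector taskqueue, which runs oce_intr() and re-arms the EQ.
 */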
817 static int
818 oce_fast_isr(void *arg)
819 {
820 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
821 	POCE_SOFTC sc = ii->sc;
822 
823 	if (ii->eq == NULL)
824 		return FILTER_STRAY;
825 
826 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
827 
828 	taskqueue_enqueue(ii->tq, &ii->task);
829 
830  	ii->eq->intr++;
831 
832 	return FILTER_HANDLED;
833 }
834 
835 
836 static int
837 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
838 {
839 	POCE_INTR_INFO ii = &sc->intrs[vector];
840 	int rc = 0, rr;
841 
842 	if (vector >= OCE_MAX_EQ)
843 		return (EINVAL);
844 
845 	/* Set the resource id for the interrupt:
846 	 * MSI-X uses vector + 1,
847 	 * INTx uses 0.
848 	 */
849 	if (sc->flags & OCE_FLAGS_USING_MSIX)
850 		rr = vector + 1;
851 	else
852 		rr = 0;
853 	ii->intr_res = bus_alloc_resource_any(sc->dev,
854 					      SYS_RES_IRQ,
855 					      &rr, RF_ACTIVE|RF_SHAREABLE);
856 	ii->irq_rr = rr;
857 	if (ii->intr_res == NULL) {
858 		device_printf(sc->dev,
859 			  "Could not allocate interrupt\n");
860 		rc = ENXIO;
861 		return rc;
862 	}
863 
864 	TASK_INIT(&ii->task, 0, isr, ii);
865 	ii->vector = vector;
866 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
867 	ii->tq = taskqueue_create_fast(ii->task_name,
868 			M_NOWAIT,
869 			taskqueue_thread_enqueue,
870 			&ii->tq);
871 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
872 			device_get_nameunit(sc->dev));
873 
874 	ii->sc = sc;
875 	rc = bus_setup_intr(sc->dev,
876 			ii->intr_res,
877 			INTR_TYPE_NET,
878 			oce_fast_isr, NULL, ii, &ii->tag);
879 	return rc;
880 
881 }
882 
883 
884 void
885 oce_intr_free(POCE_SOFTC sc)
886 {
887 	int i = 0;
888 
889 	for (i = 0; i < sc->intr_count; i++) {
890 
891 		if (sc->intrs[i].tag != NULL)
892 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
893 						sc->intrs[i].tag);
894 		if (sc->intrs[i].tq != NULL)
895 			taskqueue_free(sc->intrs[i].tq);
896 
897 		if (sc->intrs[i].intr_res != NULL)
898 			bus_release_resource(sc->dev, SYS_RES_IRQ,
899 						sc->intrs[i].irq_rr,
900 						sc->intrs[i].intr_res);
901 		sc->intrs[i].tag = NULL;
902 		sc->intrs[i].intr_res = NULL;
903 	}
904 
905 	if (sc->flags & OCE_FLAGS_USING_MSIX)
906 		pci_release_msi(sc->dev);
907 
908 }
909 
910 
911 
912 /******************************************************************************
913 *			  Media callbacks functions 			      *
914 ******************************************************************************/
915 
916 static void
917 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
918 {
919 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
920 
921 
922 	req->ifm_status = IFM_AVALID;
923 	req->ifm_active = IFM_ETHER;
924 
925 	if (sc->link_status == 1)
926 		req->ifm_status |= IFM_ACTIVE;
927 	else
928 		return;
929 
930 	switch (sc->link_speed) {
931 	case 1: /* 10 Mbps */
932 		req->ifm_active |= IFM_10_T | IFM_FDX;
933 		sc->speed = 10;
934 		break;
935 	case 2: /* 100 Mbps */
936 		req->ifm_active |= IFM_100_TX | IFM_FDX;
937 		sc->speed = 100;
938 		break;
939 	case 3: /* 1 Gbps */
940 		req->ifm_active |= IFM_1000_T | IFM_FDX;
941 		sc->speed = 1000;
942 		break;
943 	case 4: /* 10 Gbps */
944 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
945 		sc->speed = 10000;
946 		break;
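	/* Note: 20G and 25G link speeds are also reported as IFM_10G_SR
	 * below; only sc->speed distinguishes them. */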
947 	case 5: /* 20 Gbps */
948 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
949 		sc->speed = 20000;
950 		break;
951 	case 6: /* 25 Gbps */
952 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
953 		sc->speed = 25000;
954 		break;
955 	case 7: /* 40 Gbps */
956 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
957 		sc->speed = 40000;
958 		break;
959 	default:
960 		sc->speed = 0;
961 		break;
962 	}
963 
964 	return;
965 }
966 
967 
968 int
969 oce_media_change(struct ifnet *ifp)
970 {
971 	return 0;
972 }
973 
974 
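/*
 * Decide whether a copy of this transmit packet should also be sent to
 * the on-board BMC (OS-to-BMC pass-through).  Only selected multicast
 * control traffic (ARP, ND, DHCP, NetBIOS, ...) qualifies, each class
 * gated by its own firmware-configured filter; on a match, *m_new gets
 * a duplicate of the packet for the BMC.
 */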
975 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
976 				struct mbuf *m, boolean_t *os2bmc,
977 				struct mbuf **m_new)
978 {
979 	struct ether_header *eh = NULL;
980 
981 	eh = mtod(m, struct ether_header *);
982 
983 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
984 		*os2bmc = FALSE;
985 		goto done;
986 	}
987 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
988 		goto done;
989 
990 	if (is_mc_allowed_on_bmc(sc, eh) ||
991 	    is_bc_allowed_on_bmc(sc, eh) ||
992 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
993 		*os2bmc = TRUE;
994 		goto done;
995 	}
996 
997 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
998 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
999 		uint8_t nexthdr = ip6->ip6_nxt;
1000 		if (nexthdr == IPPROTO_ICMPV6) {
1001 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
1002 			switch (icmp6->icmp6_type) {
1003 			case ND_ROUTER_ADVERT:
1004 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
1005 				goto done;
1006 			case ND_NEIGHBOR_ADVERT:
1007 				*os2bmc = is_ipv6_na_filt_enabled(sc);
1008 				goto done;
1009 			default:
1010 				break;
1011 			}
1012 		}
1013 	}
1014 
1015 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
1016 		struct ip *ip = mtod(m, struct ip *);
1017 		int iphlen = ip->ip_hl << 2;
1018 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
1019 		switch (uh->uh_dport) {
1020 		case DHCP_CLIENT_PORT:
1021 			*os2bmc = is_dhcp_client_filt_enabled(sc);
1022 			goto done;
1023 		case DHCP_SERVER_PORT:
1024 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
1025 			goto done;
1026 		case NET_BIOS_PORT1:
1027 		case NET_BIOS_PORT2:
1028 			*os2bmc = is_nbios_filt_enabled(sc);
1029 			goto done;
1030 		case DHCPV6_RAS_PORT:
1031 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1032 			goto done;
1033 		default:
1034 			break;
1035 		}
1036 	}
1037 done:
1038 	if (*os2bmc) {
1039 		*m_new = m_dup(m, M_NOWAIT);
1040 		if (!*m_new) {
1041 			*os2bmc = FALSE;
1042 			return;
1043 		}
1044 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1045 	}
1046 }
1047 
1048 
1049 
1050 /*****************************************************************************
1051  *			  Transmit routines functions			     *
1052  *****************************************************************************/
1053 
1054 static int
1055 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1056 {
1057 	int rc = 0, i, retry_cnt = 0;
1058 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1059 	struct mbuf *m, *m_temp, *m_new = NULL;
1060 	struct oce_wq *wq = sc->wq[wq_index];
1061 	struct oce_packet_desc *pd;
1062 	struct oce_nic_hdr_wqe *nichdr;
1063 	struct oce_nic_frag_wqe *nicfrag;
1064 	struct ether_header *eh = NULL;
1065 	int num_wqes;
1066 	uint32_t reg_value;
1067 	boolean_t complete = TRUE;
1068 	boolean_t os2bmc = FALSE;
1069 
1070 	m = *mpp;
1071 	if (!m)
1072 		return EINVAL;
1073 
1074 	if (!(m->m_flags & M_PKTHDR)) {
1075 		rc = ENXIO;
1076 		goto free_ret;
1077 	}
1078 
1079 	/* Don't allow non-TSO packets longer than MTU */
1080 	if (!is_tso_pkt(m)) {
1081 		eh = mtod(m, struct ether_header *);
1082 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1083 			 goto free_ret;
1084 	}
1085 
1086 	if(oce_tx_asic_stall_verify(sc, m)) {
1087 		m = oce_insert_vlan_tag(sc, m, &complete);
1088 		if(!m) {
1089 			device_printf(sc->dev, "VLAN tag insertion unsuccessful\n");
1090 			return 0;
1091 		}
1092 
1093 	}
1094 
1095 	/* The Lancer and SH ASICs have a bug wherein packets of 32 bytes or
1096 	 * less may cause a transmit stall on that port. The work-around is
1097 	 * to pad such packets to a 36-byte length.
1098 	 */
1099 	if(IS_SH(sc) || IS_XE201(sc) ) {
1100 		if(m->m_pkthdr.len <= 32) {
1101 			char buf[36];
1102 			bzero((void *)buf, 36);
1103 			m_append(m, (36 - m->m_pkthdr.len), buf);
1104 		}
1105 	}
1106 
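	/* The path below may run twice: once for the wire copy and, when
	 * oce_is_pkt_dest_bmc() flags the frame, once more (via the
	 * "goto tx_start" near the bottom) for the duplicate destined to
	 * the BMC with the mgmt bit set in the header WQE.
	 */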
1107 tx_start:
1108 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1109 		/* consolidate packet buffers for TSO/LSO segment offload */
1110 #if defined(INET6) || defined(INET)
1111 		m = oce_tso_setup(sc, mpp);
1112 #else
1113 		m = NULL;
1114 #endif
1115 		if (m == NULL) {
1116 			rc = ENXIO;
1117 			goto free_ret;
1118 		}
1119 	}
1120 
1121 
1122 	pd = &wq->pckts[wq->pkt_desc_head];
1123 
1124 retry:
1125 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1126 				     pd->map,
1127 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1128 	if (rc == 0) {
1129 		num_wqes = pd->nsegs + 1;
1130 		if (IS_BE(sc) || IS_SH(sc)) {
1131 			/* BE3/SH need an even WQE count; pad with a dummy WQE if odd. */
1132 			if (num_wqes & 1)
1133 				num_wqes++;
1134 		}
1135 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1136 			bus_dmamap_unload(wq->tag, pd->map);
1137 			return EBUSY;
1138 		}
1139 		atomic_store_rel_int(&wq->pkt_desc_head,
1140 				     (wq->pkt_desc_head + 1) % \
1141 				      OCE_WQ_PACKET_ARRAY_SIZE);
1142 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1143 		pd->mbuf = m;
1144 
1145 		nichdr =
1146 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1147 		nichdr->u0.dw[0] = 0;
1148 		nichdr->u0.dw[1] = 0;
1149 		nichdr->u0.dw[2] = 0;
1150 		nichdr->u0.dw[3] = 0;
1151 
1152 		nichdr->u0.s.complete = complete;
1153 		nichdr->u0.s.mgmt = os2bmc;
1154 		nichdr->u0.s.event = 1;
1155 		nichdr->u0.s.crc = 1;
1156 		nichdr->u0.s.forward = 0;
1157 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1158 		nichdr->u0.s.udpcs =
1159 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1160 		nichdr->u0.s.tcpcs =
1161 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1162 		nichdr->u0.s.num_wqe = num_wqes;
1163 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1164 
1165 		if (m->m_flags & M_VLANTAG) {
1166 			nichdr->u0.s.vlan = 1; /*Vlan present*/
1167 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1168 		}
1169 
1170 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1171 			if (m->m_pkthdr.tso_segsz) {
1172 				nichdr->u0.s.lso = 1;
1173 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1174 			}
1175 			if (!IS_BE(sc) || !IS_SH(sc))
1176 				nichdr->u0.s.ipcs = 1;
1177 		}
1178 
1179 		RING_PUT(wq->ring, 1);
1180 		atomic_add_int(&wq->ring->num_used, 1);
1181 
1182 		for (i = 0; i < pd->nsegs; i++) {
1183 			nicfrag =
1184 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1185 						      struct oce_nic_frag_wqe);
1186 			nicfrag->u0.s.rsvd0 = 0;
1187 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1188 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1189 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1190 			pd->wqe_idx = wq->ring->pidx;
1191 			RING_PUT(wq->ring, 1);
1192 			atomic_add_int(&wq->ring->num_used, 1);
1193 		}
1194 		if (num_wqes > (pd->nsegs + 1)) {
1195 			nicfrag =
1196 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1197 						      struct oce_nic_frag_wqe);
1198 			nicfrag->u0.dw[0] = 0;
1199 			nicfrag->u0.dw[1] = 0;
1200 			nicfrag->u0.dw[2] = 0;
1201 			nicfrag->u0.dw[3] = 0;
1202 			pd->wqe_idx = wq->ring->pidx;
1203 			RING_PUT(wq->ring, 1);
1204 			atomic_add_int(&wq->ring->num_used, 1);
1205 			pd->nsegs++;
1206 		}
1207 
1208 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1209 		wq->tx_stats.tx_reqs++;
1210 		wq->tx_stats.tx_wrbs += num_wqes;
1211 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1212 		wq->tx_stats.tx_pkts++;
1213 
1214 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1215 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1216 		reg_value = (num_wqes << 16) | wq->wq_id;
1217 
1218 		/* Decide whether a copy of this packet should also go to
1219 		   the BMC; this is a no-op if OS2BMC is disabled or the
1220 		   packet is already tagged for the BMC. */
1221 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1222 
1223 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1224 
1225 	} else if (rc == EFBIG)	{
1226 		if (retry_cnt == 0) {
1227 			m_temp = m_defrag(m, M_NOWAIT);
1228 			if (m_temp == NULL)
1229 				goto free_ret;
1230 			m = m_temp;
1231 			*mpp = m_temp;
1232 			retry_cnt = retry_cnt + 1;
1233 			goto retry;
1234 		} else
1235 			goto free_ret;
1236 	} else if (rc == ENOMEM)
1237 		return rc;
1238 	else
1239 		goto free_ret;
1240 
1241 	if (os2bmc) {
1242 		m = m_new;
1243 		goto tx_start;
1244 	}
1245 
1246 	return 0;
1247 
1248 free_ret:
1249 	m_freem(*mpp);
1250 	*mpp = NULL;
1251 	return rc;
1252 }
1253 
1254 
1255 static void
1256 oce_process_tx_completion(struct oce_wq *wq)
1257 {
1258 	struct oce_packet_desc *pd;
1259 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1260 	struct mbuf *m;
1261 
1262 	pd = &wq->pckts[wq->pkt_desc_tail];
1263 	atomic_store_rel_int(&wq->pkt_desc_tail,
1264 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1265 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1266 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1267 	bus_dmamap_unload(wq->tag, pd->map);
1268 
1269 	m = pd->mbuf;
1270 	m_freem(m);
1271 	pd->mbuf = NULL;
1272 
1273 
1274 	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1275 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1276 			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1277 			oce_tx_restart(sc, wq);
1278 		}
1279 	}
1280 }
1281 
1282 
1283 static void
1284 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1285 {
1286 
1287 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1288 		return;
1289 
1290 #if __FreeBSD_version >= 800000
1291 	if (!drbr_empty(sc->ifp, wq->br))
1292 #else
1293 	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1294 #endif
1295 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1296 
1297 }
1298 
1299 
1300 #if defined(INET6) || defined(INET)
1301 static struct mbuf *
1302 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1303 {
1304 	struct mbuf *m;
1305 #ifdef INET
1306 	struct ip *ip;
1307 #endif
1308 #ifdef INET6
1309 	struct ip6_hdr *ip6;
1310 #endif
1311 	struct ether_vlan_header *eh;
1312 	struct tcphdr *th;
1313 	uint16_t etype;
1314 	int total_len = 0, ehdrlen = 0;
1315 
1316 	m = *mpp;
1317 
1318 	if (M_WRITABLE(m) == 0) {
1319 		m = m_dup(*mpp, M_NOWAIT);
1320 		if (!m)
1321 			return NULL;
1322 		m_freem(*mpp);
1323 		*mpp = m;
1324 	}
1325 
1326 	eh = mtod(m, struct ether_vlan_header *);
1327 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1328 		etype = ntohs(eh->evl_proto);
1329 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1330 	} else {
1331 		etype = ntohs(eh->evl_encap_proto);
1332 		ehdrlen = ETHER_HDR_LEN;
1333 	}
1334 
1335 	switch (etype) {
1336 #ifdef INET
1337 	case ETHERTYPE_IP:
1338 		ip = (struct ip *)(m->m_data + ehdrlen);
1339 		if (ip->ip_p != IPPROTO_TCP)
1340 			return NULL;
1341 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1342 
1343 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1344 		break;
1345 #endif
1346 #ifdef INET6
1347 	case ETHERTYPE_IPV6:
1348 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1349 		if (ip6->ip6_nxt != IPPROTO_TCP)
1350 			return NULL;
1351 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1352 
1353 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1354 		break;
1355 #endif
1356 	default:
1357 		return NULL;
1358 	}
1359 
1360 	m = m_pullup(m, total_len);
1361 	if (!m)
1362 		return NULL;
1363 	*mpp = m;
1364 	return m;
1365 
1366 }
1367 #endif /* INET6 || INET */
1368 
1369 void
1370 oce_tx_task(void *arg, int npending)
1371 {
1372 	struct oce_wq *wq = arg;
1373 	POCE_SOFTC sc = wq->parent;
1374 	struct ifnet *ifp = sc->ifp;
1375 	int rc = 0;
1376 
1377 #if __FreeBSD_version >= 800000
1378 	LOCK(&wq->tx_lock);
1379 	rc = oce_multiq_transmit(ifp, NULL, wq);
1380 	if (rc) {
1381 		device_printf(sc->dev,
1382 				"TX[%d] restart failed\n", wq->queue_index);
1383 	}
1384 	UNLOCK(&wq->tx_lock);
1385 #else
1386 	oce_start(ifp);
1387 #endif
1388 
1389 }
1390 
1391 
1392 void
1393 oce_start(struct ifnet *ifp)
1394 {
1395 	POCE_SOFTC sc = ifp->if_softc;
1396 	struct mbuf *m;
1397 	int rc = 0;
1398 	int def_q = 0; /* Default tx queue is 0 */
1399 
1400 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1401 			IFF_DRV_RUNNING)
1402 		return;
1403 
1404 	if (!sc->link_status)
1405 		return;
1406 
1407 	do {
1408 		IF_DEQUEUE(&sc->ifp->if_snd, m);
1409 		if (m == NULL)
1410 			break;
1411 
1412 		LOCK(&sc->wq[def_q]->tx_lock);
1413 		rc = oce_tx(sc, &m, def_q);
1414 		UNLOCK(&sc->wq[def_q]->tx_lock);
1415 		if (rc) {
1416 			if (m != NULL) {
1417 				sc->wq[def_q]->tx_stats.tx_stops ++;
1418 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1419 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
1420 				m = NULL;
1421 			}
1422 			break;
1423 		}
1424 		if (m != NULL)
1425 			ETHER_BPF_MTAP(ifp, m);
1426 
1427 	} while (TRUE);
1428 
1429 	return;
1430 }
1431 
1432 
1433 /* Handle the Completion Queue for transmit */
1434 uint16_t
1435 oce_wq_handler(void *arg)
1436 {
1437 	struct oce_wq *wq = (struct oce_wq *)arg;
1438 	POCE_SOFTC sc = wq->parent;
1439 	struct oce_cq *cq = wq->cq;
1440 	struct oce_nic_tx_cqe *cqe;
1441 	int num_cqes = 0;
1442 
1443 	LOCK(&wq->tx_compl_lock);
1444 	bus_dmamap_sync(cq->ring->dma.tag,
1445 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1446 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1447 	while (cqe->u0.dw[3]) {
1448 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1449 
1450 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1451 		if (wq->ring->cidx >= wq->ring->num_items)
1452 			wq->ring->cidx -= wq->ring->num_items;
1453 
1454 		oce_process_tx_completion(wq);
1455 		wq->tx_stats.tx_compl++;
1456 		cqe->u0.dw[3] = 0;
1457 		RING_GET(cq->ring, 1);
1458 		bus_dmamap_sync(cq->ring->dma.tag,
1459 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1460 		cqe =
1461 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1462 		num_cqes++;
1463 	}
1464 
1465 	if (num_cqes)
1466 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1467 
1468 	UNLOCK(&wq->tx_compl_lock);
1469 	return num_cqes;
1470 }
1471 
1472 
1473 static int
1474 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1475 {
1476 	POCE_SOFTC sc = ifp->if_softc;
1477 	int status = 0, queue_index = 0;
1478 	struct mbuf *next = NULL;
1479 	struct buf_ring *br = NULL;
1480 
1481 	br  = wq->br;
1482 	queue_index = wq->queue_index;
1483 
1484 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1485 		IFF_DRV_RUNNING) {
1486 		if (m != NULL)
1487 			status = drbr_enqueue(ifp, br, m);
1488 		return status;
1489 	}
1490 
1491 	if (m != NULL) {
1492 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1493 			return status;
1494 	}
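	/* Drain the buf_ring: on success the entry is consumed
	 * (drbr_advance); on failure the mbuf is either already freed
	 * (advance past it) or put back, and the queue is marked OACTIVE
	 * for a later restart from the TX completion path.
	 */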
1495 	while ((next = drbr_peek(ifp, br)) != NULL) {
1496 		if (oce_tx(sc, &next, queue_index)) {
1497 			if (next == NULL) {
1498 				drbr_advance(ifp, br);
1499 			} else {
1500 				drbr_putback(ifp, br, next);
1501 				wq->tx_stats.tx_stops ++;
1502 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1503 			}
1504 			break;
1505 		}
1506 		drbr_advance(ifp, br);
1507 		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1508 		if (next->m_flags & M_MCAST)
1509 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1510 		ETHER_BPF_MTAP(ifp, next);
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 
1517 
1518 
1519 /*****************************************************************************
1520  *			    Receive  routines functions 		     *
1521  *****************************************************************************/
1522 
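/*
 * After hardware LRO coalesces several TCP segments into one frame, the
 * headers of the first segment are stale; patch the IP length/TTL, the
 * TCP ACK, window and (optionally) timestamp fields so the merged packet
 * looks self-consistent to the stack.
 */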
1523 static void
1524 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1525 {
1526 	uint32_t *p;
1527         struct ether_header *eh = NULL;
1528         struct tcphdr *tcp_hdr = NULL;
1529         struct ip *ip4_hdr = NULL;
1530         struct ip6_hdr *ip6 = NULL;
1531         uint32_t payload_len = 0;
1532 
1533         eh = mtod(m, struct ether_header *);
1534         /* correct IP header */
1535         if(!cqe2->ipv6_frame) {
1536 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1537                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1538                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1539                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1540         }else {
1541         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1542                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1543                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1544                                                 - sizeof(struct ip6_hdr);
1545                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1546                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1547         }
1548 
1549         /* correct tcp header */
1550         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1551         if(cqe2->push) {
1552         	tcp_hdr->th_flags |= TH_PUSH;
1553         }
1554         tcp_hdr->th_win = htons(cqe2->tcp_window);
1555         tcp_hdr->th_sum = 0xffff;
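        /* The timestamp rewrite assumes the option leads the TCP option
         * list; the +2 below skips its kind and length bytes. */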
1556         if(cqe2->ts_opt) {
1557                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1558                 *p = cqe1->tcp_timestamp_val;
1559                 *(p+1) = cqe1->tcp_timestamp_ecr;
1560         }
1561 
1562 	return;
1563 }
1564 
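/*
 * Reassemble one received packet from its ring fragments into an mbuf
 * chain: the first fragment carries the pkthdr (length and checksum
 * offload results), later fragments are linked via m_next.
 */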
1565 static void
1566 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1567 {
1568 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1569         uint32_t i = 0, frag_len = 0;
1570 	uint32_t len = cqe_info->pkt_size;
1571         struct oce_packet_desc *pd;
1572         struct mbuf *tail = NULL;
1573 
1574         for (i = 0; i < cqe_info->num_frags; i++) {
1575                 if (rq->ring->cidx == rq->ring->pidx) {
1576                         device_printf(sc->dev,
1577                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1578                         return;
1579                 }
1580                 pd = &rq->pckts[rq->ring->cidx];
1581 
1582                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1583                 bus_dmamap_unload(rq->tag, pd->map);
1584 		RING_GET(rq->ring, 1);
1585                 rq->pending--;
1586 
1587                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1588                 pd->mbuf->m_len = frag_len;
1589 
1590                 if (tail != NULL) {
1591                         /* additional fragments */
1592                         pd->mbuf->m_flags &= ~M_PKTHDR;
1593                         tail->m_next = pd->mbuf;
1594 			if(rq->islro)
1595                         	tail->m_nextpkt = NULL;
1596                         tail = pd->mbuf;
1597                 } else {
1598                         /* first fragment, fill out much of the packet header */
1599                         pd->mbuf->m_pkthdr.len = len;
1600 			if(rq->islro)
1601                         	pd->mbuf->m_nextpkt = NULL;
1602                         pd->mbuf->m_pkthdr.csum_flags = 0;
1603                         if (IF_CSUM_ENABLED(sc)) {
1604                                 if (cqe_info->l4_cksum_pass) {
1605                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1606                                                 pd->mbuf->m_pkthdr.csum_flags |=
1607                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1608                                         }else { /* IPV6 frame */
1609 						if(rq->islro) {
1610                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1611                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1612 						}
1613                                         }
1614                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1615                                 }
1616                                 if (cqe_info->ip_cksum_pass) {
1617                                         pd->mbuf->m_pkthdr.csum_flags |=
1618                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1619                                 }
1620                         }
1621                         *m = tail = pd->mbuf;
1622                }
1623                 pd->mbuf = NULL;
1624                 len -= frag_len;
1625         }
1626 
1627         return;
1628 }
1629 
1630 static void
1631 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1632 {
1633         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1634         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1635         struct mbuf *m = NULL;
1636 	struct oce_common_cqe_info cq_info;
1637 
1638 	/* parse cqe */
1639         if(cqe2 == NULL) {
1640                 cq_info.pkt_size =  cqe->pkt_size;
1641                 cq_info.vtag = cqe->vlan_tag;
1642                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1643                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1644                 cq_info.ipv6_frame = cqe->ipv6_frame;
1645                 cq_info.vtp = cqe->vtp;
1646                 cq_info.qnq = cqe->qnq;
1647         }else {
1648                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1649                 cq_info.pkt_size =  cqe2->coalesced_size;
1650                 cq_info.vtag = cqe2->vlan_tag;
1651                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1652                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1653                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1654                 cq_info.vtp = cqe2->vtp;
1655                 cq_info.qnq = cqe1->qnq;
1656         }
1657 
1658 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1659 
1660         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1661         if(cq_info.pkt_size % rq->cfg.frag_size)
1662                 cq_info.num_frags++;
1663 
1664 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1665 
1666 	if (m) {
1667 		if(cqe2) {
1668 			//assert(cqe2->valid != 0);
1669 
1670 			//assert(cqe2->cqe_type != 2);
1671 			oce_correct_header(m, cqe1, cqe2);
1672 		}
1673 
1674 		m->m_pkthdr.rcvif = sc->ifp;
1675 #if __FreeBSD_version >= 800000
1676 		if (rq->queue_index)
1677 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1678 		else
1679 			m->m_pkthdr.flowid = rq->queue_index;
1680 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1681 #endif
1682 		/* This determines if the vlan tag is valid */
1683 		if (cq_info.vtp) {
1684 			if (sc->function_mode & FNM_FLEX10_MODE) {
1685 				/* FLEX10. If QnQ is not set, neglect VLAN */
1686 				if (cq_info.qnq) {
1687 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1688 					m->m_flags |= M_VLANTAG;
1689 				}
1690 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1691 				/* In UMC mode the pvid is generally stripped
1692 				   by hw, but in some cases frames still arrive
1693 				   with it. So if pvid == vlan, ignore the vlan.
1694 				 */
1695 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1696 				m->m_flags |= M_VLANTAG;
1697 			}
1698 		}
1699 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1700 
1701 		(*sc->ifp->if_input) (sc->ifp, m);
1702 
1703 		/* Update rx stats per queue */
1704 		rq->rx_stats.rx_pkts++;
1705 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1706 		rq->rx_stats.rx_frags += cq_info.num_frags;
1707 		rq->rx_stats.rx_ucast_pkts++;
1708 	}
1709         return;
1710 }
1711 
1712 static void
1713 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1714 {
1715 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1716 	int len;
1717 	struct mbuf *m = NULL;
1718 	struct oce_common_cqe_info cq_info;
1719 	uint16_t vtag = 0;
1720 
1721 	/* Is it a flush compl that has no data */
1722 	if(!cqe->u0.s.num_fragments)
1723 		goto exit;
1724 
1725 	len = cqe->u0.s.pkt_size;
1726 	if (!len) {
1727 		/* Partial DMA workaround for Lancer */
1728 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1729 		goto exit;
1730 	}
1731 
1732 	if (!oce_cqe_portid_valid(sc, cqe)) {
1733 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1734 		goto exit;
1735 	}
1736 
1737 	 /* Get vlan_tag value */
1738 	if(IS_BE(sc) || IS_SH(sc))
1739 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1740 	else
1741 		vtag = cqe->u0.s.vlan_tag;
1742 
1743 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1744 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1745 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1746 	cq_info.num_frags = cqe->u0.s.num_fragments;
1747 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1748 
1749 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1750 
1751 	if (m) {
1752 		m->m_pkthdr.rcvif = sc->ifp;
1753 #if __FreeBSD_version >= 800000
1754 		if (rq->queue_index)
1755 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1756 		else
1757 			m->m_pkthdr.flowid = rq->queue_index;
1758 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1759 #endif
1760 		/* This determines if the vlan tag is valid */
1761 		if (oce_cqe_vtp_valid(sc, cqe)) {
1762 			if (sc->function_mode & FNM_FLEX10_MODE) {
1763 				/* FLEX10. If QnQ is not set, neglect VLAN */
1764 				if (cqe->u0.s.qnq) {
1765 					m->m_pkthdr.ether_vtag = vtag;
1766 					m->m_flags |= M_VLANTAG;
1767 				}
1768 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1769 				/* In UMC mode the pvid is generally stripped
1770 				   by hw, but in some cases frames still arrive
1771 				   with it. So if pvid == vlan, ignore the vlan.
1772 				*/
1773 				m->m_pkthdr.ether_vtag = vtag;
1774 				m->m_flags |= M_VLANTAG;
1775 			}
1776 		}
1777 
1778 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1779 #if defined(INET6) || defined(INET)
1780 		/* Try to queue to LRO */
1781 		if (IF_LRO_ENABLED(sc) &&
1782 		    (cqe->u0.s.ip_cksum_pass) &&
1783 		    (cqe->u0.s.l4_cksum_pass) &&
1784 		    (!cqe->u0.s.ip_ver)       &&
1785 		    (rq->lro.lro_cnt != 0)) {
1786 
1787 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1788 				rq->lro_pkts_queued ++;
1789 				goto post_done;
1790 			}
1791 			/* If LRO posting fails then try to post to STACK */
1792 		}
1793 #endif
1794 
1795 		(*sc->ifp->if_input) (sc->ifp, m);
1796 #if defined(INET6) || defined(INET)
1797 post_done:
1798 #endif
1799 		/* Update rx stats per queue */
1800 		rq->rx_stats.rx_pkts++;
1801 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1802 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1803 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1804 			rq->rx_stats.rx_mcast_pkts++;
1805 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1806 			rq->rx_stats.rx_ucast_pkts++;
1807 	}
1808 exit:
1809 	return;
1810 }
1811 
1812 
1813 void
1814 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1815 {
1816 	uint32_t i = 0;
1817 	struct oce_packet_desc *pd;
1818 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1819 
1820 	for (i = 0; i < num_frags; i++) {
1821                 if (rq->ring->cidx == rq->ring->pidx) {
1822                         device_printf(sc->dev,
1823                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1824                         return;
1825                 }
1826                 pd = &rq->pckts[rq->ring->cidx];
1827                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1828                 bus_dmamap_unload(rq->tag, pd->map);
1829                 if (pd->mbuf != NULL) {
1830                         m_freem(pd->mbuf);
1831                         pd->mbuf = NULL;
1832                 }
1833 
1834 		RING_GET(rq->ring, 1);
1835                 rq->pending--;
1836 	}
1837 }
1838 
1839 
1840 static int
1841 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1842 {
1843 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1844 	int vtp = 0;
1845 
1846 	if (sc->be3_native) {
1847 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1848 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1849 	} else
1850 		vtp = cqe->u0.s.vlan_tag_present;
1851 
1852 	return vtp;
1853 
1854 }
1855 
1856 
1857 static int
1858 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1859 {
1860 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1861 	int port_id = 0;
1862 
1863 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1864 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1865 		port_id =  cqe_v1->u0.s.port;
1866 		if (sc->port_id != port_id)
1867 			return 0;
1868 	} else
1869 		;/* For BE3 legacy and Lancer this check is a no-op */
1870 
1871 	return 1;
1872 
1873 }
1874 
1875 #if defined(INET6) || defined(INET)
1876 void
1877 oce_rx_flush_lro(struct oce_rq *rq)
1878 {
1879 	struct lro_ctrl	*lro = &rq->lro;
1880 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1881 
1882 	if (!IF_LRO_ENABLED(sc))
1883 		return;
1884 
1885 	tcp_lro_flush_all(lro);
1886 	rq->lro_pkts_queued = 0;
1887 
1888 	return;
1889 }
1890 
1891 
1892 static int
1893 oce_init_lro(POCE_SOFTC sc)
1894 {
1895 	struct lro_ctrl *lro = NULL;
1896 	int i = 0, rc = 0;
1897 
1898 	for (i = 0; i < sc->nrqs; i++) {
1899 		lro = &sc->rq[i]->lro;
1900 		rc = tcp_lro_init(lro);
1901 		if (rc != 0) {
1902 			device_printf(sc->dev, "LRO init failed\n");
1903 			return rc;
1904 		}
1905 		lro->ifp = sc->ifp;
1906 	}
1907 
1908 	return rc;
1909 }
1910 
1911 
1912 void
1913 oce_free_lro(POCE_SOFTC sc)
1914 {
1915 	struct lro_ctrl *lro = NULL;
1916 	int i = 0;
1917 
1918 	for (i = 0; i < sc->nrqs; i++) {
1919 		lro = &sc->rq[i]->lro;
1920 		if (lro)
1921 			tcp_lro_free(lro);
1922 	}
1923 }
1924 #endif
1925 
1926 int
1927 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1928 {
1929 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1930 	int i, in, rc;
1931 	struct oce_packet_desc *pd;
1932 	bus_dma_segment_t segs[6];
1933 	int nsegs, added = 0;
1934 	struct oce_nic_rqe *rqe;
1935 	pd_rxulp_db_t rxdb_reg;
1936 	uint32_t val = 0;
1937 	uint32_t oce_max_rq_posts = 64;
1938 
1939 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1940 	for (i = 0; i < count; i++) {
1941 		in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
1942 
1943 		pd = &rq->pckts[rq->ring->pidx];
1944 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1945 		if (pd->mbuf == NULL) {
1946 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1947 			break;
1948 		}
1949 		pd->mbuf->m_nextpkt = NULL;
1950 
1951 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1952 
1953 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1954 					     pd->map,
1955 					     pd->mbuf,
1956 					     segs, &nsegs, BUS_DMA_NOWAIT);
1957 		if (rc) {
1958 			m_free(pd->mbuf);
1959 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1960 			break;
1961 		}
1962 
1963 		if (nsegs != 1) {
1964 			i--;
1965 			continue;
1966 		}
1967 
1968 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1969 
1970 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1971 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1972 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1973 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1974 		RING_PUT(rq->ring, 1);
1975 		added++;
1976 		rq->pending++;
1977 	}
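	/*
	 * Advertise the new buffers to the adapter through the RQ doorbell,
	 * in chunks of at most oce_max_rq_posts per write; e.g. with a
	 * 64-post limit, added == 150 becomes three writes of 64, 64 and 22.
	 * In the HW LRO doorbell format the queue id occupies the low bits
	 * and the post count is shifted into bits 16 and up.
	 */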
1978 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
1979 	if (added != 0) {
1980 		for (i = added / oce_max_rq_posts; i > 0; i--) {
1981 			rxdb_reg.bits.num_posted = oce_max_rq_posts;
1982 			rxdb_reg.bits.qid = rq->rq_id;
			if (rq->islro) {
				/* Rebuild val each time; the posted count must not accumulate. */
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= oce_max_rq_posts << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
1990 			added -= oce_max_rq_posts;
1991 		}
1992 		if (added > 0) {
1993 			rxdb_reg.bits.qid = rq->rq_id;
1994 			rxdb_reg.bits.num_posted = added;
			if (rq->islro) {
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= added << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
2002 		}
2003 	}
2004 
2005 	return 0;
2006 }
2007 
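/*
 * Re-arm the RX CQ and top up the RQ.  Without HW LRO the ring is refilled
 * to one below capacity so the producer never catches up with the consumer;
 * with HW LRO at most 64 buffers are replenished per call.
 */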
2008 static void
2009 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
2010 {
	if (num_cqes) {
		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
		if (!sc->enable_hwlro) {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
				oce_alloc_rx_bufs(rq,
				    (OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1);
		} else {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
				oce_alloc_rx_bufs(rq, 64);
		}
	}
}
2024 
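/*
 * HW LRO completions arrive either as a singleton CQE (type 0) describing
 * a whole aggregated frame, or as a part1/part2 pair (types 0x1 and 0x2)
 * that together describe one frame.  part1 is parked in rq->cqe_firstpart
 * until its matching part2 arrives; any other ordering is a protocol
 * violation and aborts the handler.
 */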
2025 uint16_t
2026 oce_rq_handler_lro(void *arg)
2027 {
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int num_cqes = 0;

	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
	    BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	while (cqe->valid) {
		if (cqe->cqe_type == 0) {	/* singleton cqe */
			/* we should not get singleton cqe after cqe1 on same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
				goto exit_rq_handler_lro;
			}
			if (cqe->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			oce_rx_lro(rq, cqe, NULL);
			rq->rx_stats.rx_compl++;
			cqe->valid = 0;
			RING_GET(cq->ring, 1);
			num_cqes++;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		} else if (cqe->cqe_type == 0x1) {	/* first part */
			/* we should not get cqe1 after cqe1 on same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got cqe1 after cqe1\n");
				goto exit_rq_handler_lro;
			}
			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
			RING_GET(cq->ring, 1);
		} else if (cqe->cqe_type == 0x2) {	/* second part */
			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
			if (cqe2->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			/* we should not get cqe2 without cqe1 */
			if (rq->cqe_firstpart == NULL) {
				device_printf(sc->dev, "Got cqe2 without cqe1\n");
				goto exit_rq_handler_lro;
			}
			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);

			rq->rx_stats.rx_compl++;
			rq->cqe_firstpart->valid = 0;
			cqe2->valid = 0;
			rq->cqe_firstpart = NULL;

			RING_GET(cq->ring, 1);
			num_cqes += 2;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		}

		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	}
2091 	oce_check_rx_bufs(sc, num_cqes, rq);
2092 exit_rq_handler_lro:
2093 	UNLOCK(&rq->rx_lock);
2094 	return 0;
2095 }
2096 
2097 /* Handle the Completion Queue for receive */
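/*
 * A CQE is valid while its dw[2] word is non-zero.  Entries are consumed
 * until the ring is empty or the per-invocation budget is hit (8 on XE201,
 * otherwise oce_max_rsp_handled); oce_check_rx_bufs() then re-arms the CQ
 * and replenishes the RQ.
 */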
2098 uint16_t
2099 oce_rq_handler(void *arg)
2100 {
2101 	struct oce_rq *rq = (struct oce_rq *)arg;
2102 	struct oce_cq *cq = rq->cq;
2103 	POCE_SOFTC sc = rq->parent;
2104 	struct oce_nic_rx_cqe *cqe;
2105 	int num_cqes = 0;
2106 
2107 	if(rq->islro) {
2108 		oce_rq_handler_lro(arg);
2109 		return 0;
2110 	}
2111 	LOCK(&rq->rx_lock);
2112 	bus_dmamap_sync(cq->ring->dma.tag,
2113 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2114 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2115 	while (cqe->u0.dw[2]) {
2116 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2117 
2118 		if (cqe->u0.s.error == 0) {
2119 			oce_rx(rq, cqe);
2120 		} else {
2121 			rq->rx_stats.rxcp_err++;
2122 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2123 			/* Post L3/L4 errors to stack.*/
2124 			oce_rx(rq, cqe);
2125 		}
2126 		rq->rx_stats.rx_compl++;
2127 		cqe->u0.dw[2] = 0;
2128 
2129 #if defined(INET6) || defined(INET)
2130 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2131 			oce_rx_flush_lro(rq);
2132 		}
2133 #endif
2134 
2135 		RING_GET(cq->ring, 1);
2136 		bus_dmamap_sync(cq->ring->dma.tag,
2137 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2138 		cqe =
2139 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2140 		num_cqes++;
2141 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2142 			break;
2143 	}
2144 
2145 #if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
2148 #endif
2149 
2150 	oce_check_rx_bufs(sc, num_cqes, rq);
2151 	UNLOCK(&rq->rx_lock);
2152 	return 0;
}

/*****************************************************************************
 *		   Helper functions used in this file			     *
 *****************************************************************************/
2162 
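/*
 * Create and attach the ifnet: legacy if_start plus multiqueue if_transmit
 * dispatch, checksum/TSO/LRO capabilities enabled by default, and TSO
 * limits sized to what a single TX WQE can map.
 */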
2163 static int
2164 oce_attach_ifp(POCE_SOFTC sc)
2165 {
2166 
2167 	sc->ifp = if_alloc(IFT_ETHER);
2168 	if (!sc->ifp)
2169 		return ENOMEM;
2170 
2171 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2172 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2173 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2174 
2175 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
2176 	sc->ifp->if_ioctl = oce_ioctl;
2177 	sc->ifp->if_start = oce_start;
2178 	sc->ifp->if_init = oce_init;
2179 	sc->ifp->if_mtu = ETHERMTU;
2180 	sc->ifp->if_softc = sc;
2181 #if __FreeBSD_version >= 800000
2182 	sc->ifp->if_transmit = oce_multiq_start;
2183 	sc->ifp->if_qflush = oce_multiq_flush;
2184 #endif
2185 
2186 	if_initname(sc->ifp,
2187 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2188 
2189 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
2190 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
2191 	IFQ_SET_READY(&sc->ifp->if_snd);
2192 
2193 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
2194 	sc->ifp->if_hwassist |= CSUM_TSO;
2195 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
2196 
2197 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
2198 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
2199 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2200 
2201 #if defined(INET6) || defined(INET)
2202 	sc->ifp->if_capabilities |= IFCAP_TSO;
2203 	sc->ifp->if_capabilities |= IFCAP_LRO;
2204 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2205 #endif
2206 
2207 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
2208 	sc->ifp->if_baudrate = IF_Gbps(10);
2209 
2210 #if __FreeBSD_version >= 1000000
2211 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2212 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
2213 	sc->ifp->if_hw_tsomaxsegsize = 4096;
2214 #endif
2215 
2216 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2217 
2218 	return 0;
2219 }
2220 
2221 
2222 static void
2223 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2224 {
2225 	POCE_SOFTC sc = ifp->if_softc;
2226 
2227 	if (ifp->if_softc !=  arg)
2228 		return;
2229 	if ((vtag == 0) || (vtag > 4095))
2230 		return;
2231 
2232 	sc->vlan_tag[vtag] = 1;
2233 	sc->vlans_added++;
2234 	if (sc->vlans_added <= (sc->max_vlans + 1))
2235 		oce_vid_config(sc);
2236 }
2237 
2238 
2239 static void
2240 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2241 {
2242 	POCE_SOFTC sc = ifp->if_softc;
2243 
2244 	if (ifp->if_softc !=  arg)
2245 		return;
2246 	if ((vtag == 0) || (vtag > 4095))
2247 		return;
2248 
2249 	sc->vlan_tag[vtag] = 0;
2250 	sc->vlans_added--;
2251 	oce_vid_config(sc);
2252 }
2253 
2254 
2255 /*
2256  * A max of 64 vlans can be configured in BE. If the user configures
2257  * more, place the card in vlan promiscuous mode.
2258  */
2259 static int
2260 oce_vid_config(POCE_SOFTC sc)
2261 {
2262 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2263 	uint16_t ntags = 0, i;
2264 	int status = 0;
2265 
2266 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2267 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
2268 		for (i = 0; i < MAX_VLANS; i++) {
2269 			if (sc->vlan_tag[i]) {
2270 				vtags[ntags].vtag = i;
2271 				ntags++;
2272 			}
2273 		}
2274 		if (ntags)
2275 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2276 						vtags, ntags, 1, 0);
2277 	} else
2278 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2279 					 	NULL, 0, 1, 1);
2280 	return status;
2281 }
2282 
2283 
2284 static void
2285 oce_mac_addr_set(POCE_SOFTC sc)
2286 {
2287 	uint32_t old_pmac_id = sc->pmac_id;
2288 	int status = 0;
2289 
2290 
2291 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2292 			 sc->macaddr.size_of_struct);
2293 	if (!status)
2294 		return;
2295 
2296 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
2297 					sc->if_id, &sc->pmac_id);
2298 	if (!status) {
2299 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2300 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2301 				 sc->macaddr.size_of_struct);
2302 	}
2303 	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
2305 
2306 }
2307 
2308 
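/*
 * Pass-through mailbox ioctl.  The user buffer is expected to be laid out
 * roughly as:
 *
 *	+--------------+----------------+-----------------------------+
 *	| IOCTL_COOKIE | struct mbx_hdr | request payload             |
 *	+--------------+----------------+-----------------------------+
 *
 * The header's request_length (capped at 64KB) plus the header itself is
 * copied into DMA-able memory, handed to the firmware, and the firmware's
 * response is copied back out over the same user region.
 */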
2309 static int
2310 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2311 {
2312 	POCE_SOFTC sc = ifp->if_softc;
2313 	struct ifreq *ifr = (struct ifreq *)data;
2314 	int rc = ENXIO;
2315 	char cookie[32] = {0};
2316 	void *priv_data = ifr_data_get_ptr(ifr);
2317 	void *ioctl_ptr;
2318 	uint32_t req_size;
2319 	struct mbx_hdr req;
2320 	OCE_DMA_MEM dma_mem;
2321 	struct mbx_common_get_cntl_attr *fw_cmd;
2322 
2323 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2324 		return EFAULT;
2325 
2326 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2327 		return EINVAL;
2328 
2329 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2330 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2331 		return EFAULT;
2332 
2333 	req_size = le32toh(req.u0.req.request_length);
2334 	if (req_size > 65536)
2335 		return EINVAL;
2336 
2337 	req_size += sizeof(struct mbx_hdr);
2338 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2339 	if (rc)
2340 		return ENOMEM;
2341 
2342 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2343 		rc = EFAULT;
2344 		goto dma_free;
2345 	}
2346 
2347 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2348 	if (rc) {
2349 		rc = EIO;
2350 		goto dma_free;
2351 	}
2352 
2353 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
2354 		rc =  EFAULT;
2355 
	/*
	 * The firmware fills in all attributes for this ioctl except the
	 * driver version, so fill that in here.
	 */
2360 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2361 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
2362 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2363 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
2364 	}
2365 
2366 dma_free:
2367 	oce_dma_free(sc, &dma_mem);
2368 	return rc;
2369 
2370 }
2371 
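/*
 * Adaptive interrupt coalescing: roughly once a second the combined RX/TX
 * packet rate per EQ is estimated and mapped to an EQ delay,
 *
 *	pps = (rx_delta + tx_delta) * 1000 / delta_msecs
 *	eqd = (pps / 15000) << 2, clamped to [min_eqd, max_eqd]
 *
 * with rates below ~30 kpps (eqd < 8) disabling the delay entirely.  The
 * hardware is programmed with delay_multiplier = eqd * 65 / 100.
 */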
2372 static void
2373 oce_eqd_set_periodic(POCE_SOFTC sc)
2374 {
2375 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2376 	struct oce_aic_obj *aic;
2377 	struct oce_eq *eqo;
2378 	uint64_t now = 0, delta;
2379 	int eqd, i, num = 0;
2380 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2381 	struct oce_wq *wq;
2382 	struct oce_rq *rq;
2383 
2384 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2385 
2386 	for (i = 0 ; i < sc->neqs; i++) {
2387 		eqo = sc->eq[i];
2388 		aic = &sc->aic_obj[i];
		/* A static EQ delay was set from user space; apply it as-is. */
2390 		if (!aic->enable) {
2391 			if (aic->ticks)
2392 				aic->ticks = 0;
2393 			eqd = aic->et_eqd;
2394 			goto modify_eqd;
2395 		}
2396 
2397 		rq = sc->rq[i];
2398 		rxpkts = rq->rx_stats.rx_pkts;
2399 		wq = sc->wq[i];
2400 		tx_reqs = wq->tx_stats.tx_reqs;
2401 		now = ticks;
2402 
2403 		if (!aic->ticks || now < aic->ticks ||
2404 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2405 			aic->prev_rxpkts = rxpkts;
2406 			aic->prev_txreqs = tx_reqs;
2407 			aic->ticks = now;
2408 			continue;
2409 		}
2410 
2411 		delta = ticks_to_msecs(now - aic->ticks);
2412 
2413 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2414 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2415 		eqd = (pps / 15000) << 2;
2416 		if (eqd < 8)
2417 			eqd = 0;
2418 
2419 		/* Make sure that the eq delay is in the known range */
2420 		eqd = min(eqd, aic->max_eqd);
2421 		eqd = max(eqd, aic->min_eqd);
2422 
2423 		aic->prev_rxpkts = rxpkts;
2424 		aic->prev_txreqs = tx_reqs;
2425 		aic->ticks = now;
2426 
2427 modify_eqd:
2428 		if (eqd != aic->cur_eqd) {
2429 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2430 			set_eqd[num].eq_id = eqo->eq_id;
2431 			aic->cur_eqd = eqd;
2432 			num++;
2433 		}
2434 	}
2435 
	/* Push any modified EQ delays to the hardware, at most 8 per mailbox call. */
	for (i = 0; i < num; i += 8) {
		if ((num - i) >= 8)
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
		else
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
	}
2443 
2444 }
2445 
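/*
 * Poll for unrecoverable errors.  Lancer (XE201) exposes SLIPORT status
 * registers; the BE family exposes UE status words whose set bits, after
 * masking, are decoded through ue_status_low_desc[]/ue_status_hi_desc[].
 */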
2446 static void oce_detect_hw_error(POCE_SOFTC sc)
2447 {
2448 
2449 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2450 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2451 	uint32_t i;
2452 
2453 	if (sc->hw_error)
2454 		return;
2455 
2456 	if (IS_XE201(sc)) {
2457 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2458 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2459 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2460 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2461 		}
2462 	} else {
2463 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2464 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2465 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2466 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2467 
2468 		ue_low = (ue_low & ~ue_low_mask);
2469 		ue_high = (ue_high & ~ue_high_mask);
2470 	}
2471 
	/* On certain platforms BE hardware can indicate spurious UEs.
	 * A real UE will make the hardware stop working on its own, so
	 * hw_error is deliberately not set on UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}
2489 
2490 	if (ue_low) {
2491 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2492 			if (ue_low & 1)
2493 				device_printf(sc->dev, "UE: %s bit set\n",
2494 							ue_status_low_desc[i]);
2495 		}
2496 	}
2497 
2498 	if (ue_high) {
2499 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2500 			if (ue_high & 1)
2501 				device_printf(sc->dev, "UE: %s bit set\n",
2502 							ue_status_hi_desc[i]);
2503 		}
2504 	}
2505 
2506 }
2507 
2508 
2509 static void
2510 oce_local_timer(void *arg)
2511 {
2512 	POCE_SOFTC sc = arg;
2513 	int i = 0;
2514 
2515 	oce_detect_hw_error(sc);
2516 	oce_refresh_nic_stats(sc);
2517 	oce_refresh_queue_stats(sc);
2518 	oce_mac_addr_set(sc);
2519 
	/* TX watchdog */
2521 	for (i = 0; i < sc->nwqs; i++)
2522 		oce_tx_restart(sc, sc->wq[i]);
2523 
2524 	/* calculate and set the eq delay for optimal interrupt rate */
2525 	if (IS_BE(sc) || IS_SH(sc))
2526 		oce_eqd_set_periodic(sc);
2527 
2528 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2529 }
2530 
2531 static void
2532 oce_tx_compl_clean(POCE_SOFTC sc)
2533 {
2534 	struct oce_wq *wq;
2535 	int i = 0, timeo = 0, num_wqes = 0;
2536 	int pending_txqs = sc->nwqs;
2537 
	/* Stop polling for completions when the hardware has been silent for
	 * 10ms, a hardware error has been detected, or no outstanding
	 * completions are expected.
	 */
2541 	do {
2542 		pending_txqs = sc->nwqs;
2543 
2544 		for_all_wq_queues(sc, wq, i) {
2545 			num_wqes = oce_wq_handler(wq);
2546 
2547 			if(num_wqes)
2548 				timeo = 0;
2549 
2550 			if(!wq->ring->num_used)
2551 				pending_txqs--;
2552 		}
2553 
2554 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2555 			break;
2556 
2557 		DELAY(1000);
2558 	} while (TRUE);
2559 
2560 	for_all_wq_queues(sc, wq, i) {
2561 		while(wq->ring->num_used) {
2562 			LOCK(&wq->tx_compl_lock);
2563 			oce_process_tx_completion(wq);
2564 			UNLOCK(&wq->tx_compl_lock);
2565 		}
2566 	}
2567 
2568 }
2569 
/* NOTE: This should only be called while holding DEVICE_LOCK. */
2573 static void
2574 oce_if_deactivate(POCE_SOFTC sc)
2575 {
2576 	int i;
2577 	struct oce_rq *rq;
2578 	struct oce_wq *wq;
2579 	struct oce_eq *eq;
2580 
2581 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2582 
2583 	oce_tx_compl_clean(sc);
2584 
2585 	/* Stop intrs and finish any bottom halves pending */
2586 	oce_hw_intr_disable(sc);
2587 
	/* Since taskqueue_drain takes a Giant lock, we should not hold
	 * any other lock. So unlock the device lock and reacquire it
	 * after taskqueue_drain completes.
	 */
2592 	UNLOCK(&sc->dev_lock);
2593 	for (i = 0; i < sc->intr_count; i++) {
2594 		if (sc->intrs[i].tq != NULL) {
2595 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2596 		}
2597 	}
2598 	LOCK(&sc->dev_lock);
2599 
2600 	/* Delete RX queue in card with flush param */
2601 	oce_stop_rx(sc);
2602 
2603 	/* Invalidate any pending cq and eq entries*/
2604 	for_all_evnt_queues(sc, eq, i)
2605 		oce_drain_eq(eq);
2606 	for_all_rq_queues(sc, rq, i)
2607 		oce_drain_rq_cq(rq);
2608 	for_all_wq_queues(sc, wq, i)
2609 		oce_drain_wq_cq(wq);
2610 
	/* We still need to receive MCC async events,
	 * so enable interrupts and also arm the first EQ.
	 */
2614 	oce_hw_intr_enable(sc);
2615 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2616 
2617 	DELAY(10);
2618 }
2619 
2620 
2621 static void
2622 oce_if_activate(POCE_SOFTC sc)
2623 {
2624 	struct oce_eq *eq;
2625 	struct oce_rq *rq;
2626 	struct oce_wq *wq;
2627 	int i, rc = 0;
2628 
2629 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2630 
2631 	oce_hw_intr_disable(sc);
2632 
2633 	oce_start_rx(sc);
2634 
2635 	for_all_rq_queues(sc, rq, i) {
2636 		rc = oce_start_rq(rq);
2637 		if (rc)
2638 			device_printf(sc->dev, "Unable to start RX\n");
2639 	}
2640 
2641 	for_all_wq_queues(sc, wq, i) {
2642 		rc = oce_start_wq(wq);
2643 		if (rc)
2644 			device_printf(sc->dev, "Unable to start TX\n");
2645 	}
2646 
2647 
2648 	for_all_evnt_queues(sc, eq, i)
2649 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2650 
2651 	oce_hw_intr_enable(sc);
2652 
2653 }
2654 
2655 static void
2656 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2657 {
2658 	/* Update Link status */
2659 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2660 	     ASYNC_EVENT_LINK_UP) {
2661 		sc->link_status = ASYNC_EVENT_LINK_UP;
2662 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2663 	} else {
2664 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2665 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2666 	}
2667 }
2668 
2669 
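/*
 * OS2BMC async event: records which traffic types the BMC wants filtered.
 * The bits are packed into sc->bmc_filt_mask in the order of the shifts
 * below: ARP (bit 0), DHCP client/server, NetBIOS, broadcast, IPv6
 * neighbor/RA/RAS, and multicast (bit 8).
 */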
2670 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2671 					 struct oce_async_evt_grp5_os2bmc *evt)
2672 {
2673 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2674 	if (evt->u.s.mgmt_enable)
2675 		sc->flags |= OCE_FLAGS_OS2BMC;
2676 	else
2677 		return;
2678 
2679 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2680 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2681 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2682 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2683 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2684 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2685 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2686 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2687 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2688 }
2689 
2690 
2691 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2692 {
2693 	struct oce_async_event_grp5_pvid_state *gcqe;
2694 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2695 
2696 	switch (cqe->u0.s.async_type) {
2697 	case ASYNC_EVENT_PVID_STATE:
2698 		/* GRP5 PVID */
2699 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2700 		if (gcqe->enabled)
2701 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2702 		else
2703 			sc->pvid = 0;
2704 		break;
2705 	case ASYNC_EVENT_OS2BMC:
2706 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2707 		oce_async_grp5_osbmc_process(sc, bmccqe);
2708 		break;
2709 	default:
2710 		break;
2711 	}
2712 }
2713 
2714 /* Handle the Completion Queue for the Mailbox/Async notifications */
2715 uint16_t
2716 oce_mq_handler(void *arg)
2717 {
2718 	struct oce_mq *mq = (struct oce_mq *)arg;
2719 	POCE_SOFTC sc = mq->parent;
2720 	struct oce_cq *cq = mq->cq;
2721 	int num_cqes = 0, evt_type = 0, optype = 0;
2722 	struct oce_mq_cqe *cqe;
2723 	struct oce_async_cqe_link_state *acqe;
2724 	struct oce_async_event_qnq *dbgcqe;
2725 
2726 
2727 	bus_dmamap_sync(cq->ring->dma.tag,
2728 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2729 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2730 
2731 	while (cqe->u0.dw[3]) {
2732 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2733 		if (cqe->u0.s.async_event) {
2734 			evt_type = cqe->u0.s.event_type;
2735 			optype = cqe->u0.s.async_type;
2736 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2737 				/* Link status evt */
2738 				acqe = (struct oce_async_cqe_link_state *)cqe;
2739 				process_link_state(sc, acqe);
2740 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2741 				oce_process_grp5_events(sc, cqe);
2742 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2743 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2744 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2745 				if(dbgcqe->valid)
2746 					sc->qnqid = dbgcqe->vlan_tag;
2747 				sc->qnq_debug_event = TRUE;
2748 			}
2749 		}
2750 		cqe->u0.dw[3] = 0;
2751 		RING_GET(cq->ring, 1);
2752 		bus_dmamap_sync(cq->ring->dma.tag,
2753 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2754 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2755 		num_cqes++;
2756 	}
2757 
2758 	if (num_cqes)
2759 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2760 
2761 	return 0;
2762 }
2763 
2764 
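/*
 * Queue sizing: with RSS enabled, one WQ per CPU (bounded by the available
 * RSS queues) plus one extra RQ for default, non-RSS traffic; otherwise a
 * single RQ/WQ pair.  update_queues_got() later trims these to what the
 * interrupt allocation actually delivered.
 */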
2765 static void
2766 setup_max_queues_want(POCE_SOFTC sc)
2767 {
	/* Check if it is a Flex10/UMC/VNIC function; if so, don't use RSS. */
2769 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2770 	    (sc->function_mode & FNM_UMC_MODE)    ||
2771 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2772 	    (!is_rss_enabled(sc))		  ||
2773 	    IS_BE2(sc)) {
2774 		sc->nrqs = 1;
2775 		sc->nwqs = 1;
2776 	} else {
2777 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2778 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2779 	}
2780 
2781 	if (IS_BE2(sc) && is_rss_enabled(sc))
2782 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2783 }
2784 
2785 
2786 static void
2787 update_queues_got(POCE_SOFTC sc)
2788 {
2789 	if (is_rss_enabled(sc)) {
2790 		sc->nrqs = sc->intr_count + 1;
2791 		sc->nwqs = sc->intr_count;
2792 	} else {
2793 		sc->nrqs = 1;
2794 		sc->nwqs = 1;
2795 	}
2796 
2797 	if (IS_BE2(sc))
2798 		sc->nwqs = 1;
2799 }
2800 
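/*
 * Peek into an IPv6 packet to see whether an extension header follows the
 * fixed header (next-header neither TCP nor UDP, and the first extension
 * reporting a length of 0xff).  m_data is restored before returning, so
 * the mbuf is left exactly as the caller handed it in.
 */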
2801 static int
2802 oce_check_ipv6_ext_hdr(struct mbuf *m)
2803 {
2804 	struct ether_header *eh = mtod(m, struct ether_header *);
2805 	caddr_t m_datatemp = m->m_data;
2806 
2807 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2808 		m->m_data += sizeof(struct ether_header);
2809 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2810 
		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
		    (ip6->ip6_nxt != IPPROTO_UDP)) {
2813 			struct ip6_ext *ip6e = NULL;
2814 			m->m_data += sizeof(struct ip6_hdr);
2815 
			ip6e = mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
2818 				m->m_data = m_datatemp;
2819 				return TRUE;
2820 			}
2821 		}
2822 		m->m_data = m_datatemp;
2823 	}
2824 	return FALSE;
2825 }
2826 
2827 static int
2828 is_be3_a1(POCE_SOFTC sc)
2829 {
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2831 		return TRUE;
2832 	}
2833 	return FALSE;
2834 }
2835 
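/*
 * Software VLAN/QinQ insertion for paths that cannot use the hardware tag.
 * The frame's own tag (or the port PVID when untagged) is encapsulated
 * first and the QnQ outer id, when configured, is encapsulated on top, so
 * the resulting frame carries the QnQ id outermost:
 *
 *	[dst|src] [0x8100 qnqid] [0x8100 vtag] [type|payload]
 *
 * *complete is cleared in the PVID/QnQ cases so the caller builds the TX
 * WQE accordingly.
 */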
2836 static struct mbuf *
2837 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2838 {
2839 	uint16_t vlan_tag = 0;
2840 
2841 	if(!M_WRITABLE(m))
2842 		return NULL;
2843 
2844 	/* Embed vlan tag in the packet if it is not part of it */
2845 	if(m->m_flags & M_VLANTAG) {
2846 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2847 		m->m_flags &= ~M_VLANTAG;
2848 	}
2849 
2850 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2851 	if(sc->pvid) {
2852 		if(!vlan_tag)
2853 			vlan_tag = sc->pvid;
2854 		if (complete)
2855 			*complete = FALSE;
2856 	}
2857 
2858 	if(vlan_tag) {
2859 		m = ether_vlanencap(m, vlan_tag);
2860 	}
2861 
2862 	if(sc->qnqid) {
2863 		m = ether_vlanencap(m, sc->qnqid);
2864 
2865 		if (complete)
2866 			*complete = FALSE;
2867 	}
2868 	return m;
2869 }
2870 
2871 static int
2872 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2873 {
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
	    oce_check_ipv6_ext_hdr(m)) {
2876 		return TRUE;
2877 	}
2878 	return FALSE;
2879 }
2880 
2881 static void
2882 oce_get_config(POCE_SOFTC sc)
2883 {
2884 	int rc = 0;
2885 	uint32_t max_rss = 0;
2886 
2887 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2888 		max_rss = OCE_LEGACY_MODE_RSS;
2889 	else
2890 		max_rss = OCE_MAX_RSS;
2891 
2892 	if (!IS_BE(sc)) {
2893 		rc = oce_get_profile_config(sc, max_rss);
2894 		if (rc) {
2895 			sc->nwqs = OCE_MAX_WQ;
2896 			sc->nrssqs = max_rss;
2897 			sc->nrqs = sc->nrssqs + 1;
2898 		}
2899 	}
2900 	else { /* For BE3 don't rely on fw for determining the resources */
2901 		sc->nrssqs = max_rss;
2902 		sc->nrqs = sc->nrssqs + 1;
2903 		sc->nwqs = OCE_MAX_WQ;
2904 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2905 	}
2906 }
2907 
2908 static void
2909 oce_rdma_close(void)
2910 {
2911   if (oce_rdma_if != NULL) {
2912     oce_rdma_if = NULL;
2913   }
2914 }
2915 
2916 static void
2917 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2918 {
2919   memcpy(macaddr, sc->macaddr.mac_addr, 6);
2920 }
2921 
2922 int
2923 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2924 {
2925   POCE_SOFTC sc;
2926   struct oce_dev_info di;
2927   int i;
2928 
2929   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2930     return -EINVAL;
2931   }
2932 
2933   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2934       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2935     return -ENXIO;
2936   }
2937 
2938   rdma_info->close = oce_rdma_close;
2939   rdma_info->mbox_post = oce_mbox_post;
2940   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2941   rdma_info->get_mac_addr = oce_get_mac_addr;
2942 
2943   oce_rdma_if = rdma_if;
2944 
2945   sc = softc_head;
2946   while (sc != NULL) {
2947     if (oce_rdma_if->announce != NULL) {
2948       memset(&di, 0, sizeof(di));
2949       di.dev = sc->dev;
2950       di.softc = sc;
2951       di.ifp = sc->ifp;
2952       di.db_bhandle = sc->db_bhandle;
2953       di.db_btag = sc->db_btag;
2954       di.db_page_size = 4096;
2955       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2956         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2957       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2958         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2959       } else {
2960         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2961       }
      di.dev_family = OCE_GEN2_FAMILY;	/* FIXME: must detect Skyhawk */
2963       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2964         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2965         di.msix.start_vector = sc->intr_count;
2966         for (i=0; i<di.msix.num_vectors; i++) {
2967           di.msix.vector_list[i] = sc->intrs[i].vector;
2968         }
      }
2971       memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2972       di.vendor_id = pci_get_vendor(sc->dev);
2973       di.dev_id = pci_get_device(sc->dev);
2974 
2975       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2976           di.flags  |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2977       }
2978 
      rdma_if->announce(&di);
    }
    /* Advance unconditionally; a NULL announce callback must not spin forever. */
    sc = sc->next;
  }
2983 
2984   return 0;
2985 }
2986 
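/*
 * Tunables for Skyhawk: hardware LRO and the RX buffer size.  The getenv()
 * lookups are deliberately commented out, so both settings keep their
 * built-in defaults until the lookups are re-enabled.
 */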
2987 static void
2988 oce_read_env_variables( POCE_SOFTC sc )
2989 {
2990 	char *value = NULL;
2991 	int rc = 0;
2992 
	/* read if user wants to enable hwlro or swlro */
	/* value = getenv("oce_enable_hwlro"); */
	if (value && IS_SH(sc)) {
		sc->enable_hwlro = strtol(value, NULL, 10);
		if (sc->enable_hwlro) {
			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
			if (rc) {
				device_printf(sc->dev, "no hardware lro support\n");
				device_printf(sc->dev, "software lro enabled\n");
				sc->enable_hwlro = 0;
			} else {
				device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
			}
		} else {
			device_printf(sc->dev, "software lro enabled\n");
		}
	} else {
		sc->enable_hwlro = 0;
	}

	/* read mbuf size */
	/* value = getenv("oce_rq_buf_size"); */
	if (value && IS_SH(sc)) {
		oce_rq_buf_size = strtol(value, NULL, 10);
		switch (oce_rq_buf_size) {
		case 2048:
		case 4096:
		case 9216:
		case 16384:
			break;
		default:
			device_printf(sc->dev,
			    "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
			oce_rq_buf_size = 2048;
		}
	}
}
3033