xref: /freebsd/sys/dev/oce/oce_if.c (revision 4d3fc8b0)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
49 #define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO)
50 
51 /* UE Status Low CSR */
52 static char *ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 
87 /* UE Status High CSR */
88 static char *ue_status_hi_desc[] = {
89         "LPCMEMHOST",
90         "MGMT_MAC",
91         "PCS0ONLINE",
92         "MPU_IRAM",
93         "PCS1ONLINE",
94         "PCTL0",
95         "PCTL1",
96         "PMEM",
97         "RR",
98         "TXPB",
99         "RXPP",
100         "XAUI",
101         "TXP",
102         "ARM",
103         "IPC",
104         "HOST2",
105         "HOST3",
106         "HOST4",
107         "HOST5",
108         "HOST6",
109         "HOST7",
110         "HOST8",
111         "HOST9",
112         "NETC",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown"
121 };
122 
123 struct oce_common_cqe_info{
124         uint8_t vtp:1;
125         uint8_t l4_cksum_pass:1;
126         uint8_t ip_cksum_pass:1;
127         uint8_t ipv6_frame:1;
128         uint8_t qnq:1;
129         uint8_t rsvd:3;
130         uint8_t num_frags;
131         uint16_t pkt_size;
132         uint16_t vtag;
133 };
134 
135 /* Driver entry points prototypes */
136 static int  oce_probe(device_t dev);
137 static int  oce_attach(device_t dev);
138 static int  oce_detach(device_t dev);
139 static int  oce_shutdown(device_t dev);
140 static int  oce_ioctl(if_t ifp, u_long command, caddr_t data);
141 static void oce_init(void *xsc);
142 static int  oce_multiq_start(if_t ifp, struct mbuf *m);
143 static void oce_multiq_flush(if_t ifp);
144 
145 /* Driver interrupt routines prototypes */
146 static void oce_intr(void *arg, int pending);
147 static int  oce_setup_intr(POCE_SOFTC sc);
148 static int  oce_fast_isr(void *arg);
149 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
150 			  void (*isr) (void *arg, int pending));
151 
152 /* Media callbacks prototypes */
153 static void oce_media_status(if_t ifp, struct ifmediareq *req);
154 static int  oce_media_change(if_t ifp);
155 
156 /* Transmit routines prototypes */
157 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
158 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
159 static void oce_process_tx_completion(struct oce_wq *wq);
160 static int  oce_multiq_transmit(if_t ifp, struct mbuf *m,
161 				 struct oce_wq *wq);
162 
163 /* Receive routines prototypes */
164 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
165 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
167 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
168 static uint16_t oce_rq_handler_lro(void *arg);
169 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
170 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
172 
173 /* Helper function prototypes in this file */
174 static int  oce_attach_ifp(POCE_SOFTC sc);
175 static void oce_add_vlan(void *arg, if_t ifp, uint16_t vtag);
176 static void oce_del_vlan(void *arg, if_t ifp, uint16_t vtag);
177 static int  oce_vid_config(POCE_SOFTC sc);
178 static void oce_mac_addr_set(POCE_SOFTC sc);
179 static int  oce_handle_passthrough(if_t ifp, caddr_t data);
180 static void oce_local_timer(void *arg);
181 static void oce_if_deactivate(POCE_SOFTC sc);
182 static void oce_if_activate(POCE_SOFTC sc);
183 static void setup_max_queues_want(POCE_SOFTC sc);
184 static void update_queues_got(POCE_SOFTC sc);
185 static void process_link_state(POCE_SOFTC sc,
186 		 struct oce_async_cqe_link_state *acqe);
187 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
188 static void oce_get_config(POCE_SOFTC sc);
189 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
190 static void oce_read_env_variables(POCE_SOFTC sc);
191 
192 /* IP specific */
193 #if defined(INET6) || defined(INET)
194 static int  oce_init_lro(POCE_SOFTC sc);
195 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
196 #endif
197 
198 static device_method_t oce_dispatch[] = {
199 	DEVMETHOD(device_probe, oce_probe),
200 	DEVMETHOD(device_attach, oce_attach),
201 	DEVMETHOD(device_detach, oce_detach),
202 	DEVMETHOD(device_shutdown, oce_shutdown),
203 
204 	DEVMETHOD_END
205 };
206 
207 static driver_t oce_driver = {
208 	"oce",
209 	oce_dispatch,
210 	sizeof(OCE_SOFTC)
211 };
212 
213 /* global vars */
214 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
215 
216 /* Module capabilities and parameters */
217 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
218 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
219 uint32_t oce_rq_buf_size = 2048;
220 
221 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
222 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
223 
224 /* Supported devices table */
225 static uint32_t supportedDevices[] =  {
226 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
227 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
228 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
229 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
230 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
231 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
232 };
233 
234 DRIVER_MODULE(oce, pci, oce_driver, 0, 0);
235 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
236     nitems(supportedDevices));
237 MODULE_DEPEND(oce, pci, 1, 1, 1);
238 MODULE_DEPEND(oce, ether, 1, 1, 1);
239 MODULE_VERSION(oce, 1);
240 
241 POCE_SOFTC softc_head = NULL;
242 POCE_SOFTC softc_tail = NULL;
243 
244 struct oce_rdma_if *oce_rdma_if = NULL;
245 
246 /*****************************************************************************
247  *			Driver entry points functions                        *
248  *****************************************************************************/
249 
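/*
 * Match the PCI vendor/device pair against the supported-devices table
 * and record the ASIC family (BE2, BE3, XE201 or SH) in the softc flags.
 */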
250 static int
251 oce_probe(device_t dev)
252 {
253 	uint16_t vendor = 0;
254 	uint16_t device = 0;
255 	int i = 0;
256 	char str[256] = {0};
257 	POCE_SOFTC sc;
258 
259 	sc = device_get_softc(dev);
260 	bzero(sc, sizeof(OCE_SOFTC));
261 	sc->dev = dev;
262 
263 	vendor = pci_get_vendor(dev);
264 	device = pci_get_device(dev);
265 
266 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
267 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
268 			if (device == (supportedDevices[i] & 0xffff)) {
269 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
270 					component_revision);
271 				device_set_desc_copy(dev, str);
272 
273 				switch (device) {
274 				case PCI_PRODUCT_BE2:
275 					sc->flags |= OCE_FLAGS_BE2;
276 					break;
277 				case PCI_PRODUCT_BE3:
278 					sc->flags |= OCE_FLAGS_BE3;
279 					break;
280 				case PCI_PRODUCT_XE201:
281 				case PCI_PRODUCT_XE201_VF:
282 					sc->flags |= OCE_FLAGS_XE201;
283 					break;
284 				case PCI_PRODUCT_SH:
285 					sc->flags |= OCE_FLAGS_SH;
286 					break;
287 				default:
288 					return ENXIO;
289 				}
290 				return BUS_PROBE_DEFAULT;
291 			}
292 		}
293 	}
294 
295 	return ENXIO;
296 }
297 
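/*
 * Bring the adapter up: allocate PCI resources, initialise the hardware,
 * set up interrupts, queues, the ifnet and (for INET/INET6) LRO, register
 * the VLAN event handlers, start the periodic timer and link the softc
 * into the global list.  Each failure label unwinds everything set up
 * before it.
 */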
298 static int
299 oce_attach(device_t dev)
300 {
301 	POCE_SOFTC sc;
302 	int rc = 0;
303 
304 	sc = device_get_softc(dev);
305 
306 	rc = oce_hw_pci_alloc(sc);
307 	if (rc)
308 		return rc;
309 
310 	sc->tx_ring_size = OCE_TX_RING_SIZE;
311 	sc->rx_ring_size = OCE_RX_RING_SIZE;
312 	/* receive fragment size should be a multiple of 2K */
313 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
314 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
315 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
316 
317 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
318 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
319 
320 	/* initialise the hardware */
321 	rc = oce_hw_init(sc);
322 	if (rc)
323 		goto pci_res_free;
324 
325 	oce_read_env_variables(sc);
326 
327 	oce_get_config(sc);
328 
329 	setup_max_queues_want(sc);
330 
331 	rc = oce_setup_intr(sc);
332 	if (rc)
333 		goto mbox_free;
334 
335 	rc = oce_queue_init_all(sc);
336 	if (rc)
337 		goto intr_free;
338 
339 	rc = oce_attach_ifp(sc);
340 	if (rc)
341 		goto queues_free;
342 
343 #if defined(INET6) || defined(INET)
344 	rc = oce_init_lro(sc);
345 	if (rc)
346 		goto ifp_free;
347 #endif
348 
349 	rc = oce_hw_start(sc);
350 	if (rc)
351 		goto lro_free;
352 
353 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
354 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
355 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
356 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
357 
358 	rc = oce_stats_init(sc);
359 	if (rc)
360 		goto vlan_free;
361 
362 	oce_add_sysctls(sc);
363 
364 	callout_init(&sc->timer, CALLOUT_MPSAFE);
365 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
366 	if (rc)
367 		goto stats_free;
368 
369 	sc->next = NULL;
370 	if (softc_tail != NULL) {
371 	  softc_tail->next = sc;
372 	} else {
373 	  softc_head = sc;
374 	}
375 	softc_tail = sc;
376 
377 	return 0;
378 
379 stats_free:
380 	callout_drain(&sc->timer);
381 	oce_stats_free(sc);
382 vlan_free:
383 	if (sc->vlan_attach)
384 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
385 	if (sc->vlan_detach)
386 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
387 	oce_hw_intr_disable(sc);
388 lro_free:
389 #if defined(INET6) || defined(INET)
390 	oce_free_lro(sc);
391 ifp_free:
392 #endif
393 	ether_ifdetach(sc->ifp);
394 	if_free(sc->ifp);
395 queues_free:
396 	oce_queue_release_all(sc);
397 intr_free:
398 	oce_intr_free(sc);
399 mbox_free:
400 	oce_dma_free(sc, &sc->bsmbx);
401 pci_res_free:
402 	oce_hw_pci_free(sc);
403 	LOCK_DESTROY(&sc->dev_lock);
404 	LOCK_DESTROY(&sc->bmbx_lock);
405 	return rc;
406 
407 }
408 
409 static int
410 oce_detach(device_t dev)
411 {
412 	POCE_SOFTC sc = device_get_softc(dev);
413 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
414 
415         poce_sc_tmp = softc_head;
416         ppoce_sc_tmp1 = &softc_head;
417         while (poce_sc_tmp != NULL) {
418           if (poce_sc_tmp == sc) {
419             *ppoce_sc_tmp1 = sc->next;
420             if (sc->next == NULL) {
421               softc_tail = poce_sc_tmp2;
422             }
423             break;
424           }
425           poce_sc_tmp2 = poce_sc_tmp;
426           ppoce_sc_tmp1 = &poce_sc_tmp->next;
427           poce_sc_tmp = poce_sc_tmp->next;
428         }
429 
430 	LOCK(&sc->dev_lock);
431 	oce_if_deactivate(sc);
432 	UNLOCK(&sc->dev_lock);
433 
434 	callout_drain(&sc->timer);
435 
436 	if (sc->vlan_attach != NULL)
437 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
438 	if (sc->vlan_detach != NULL)
439 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
440 
441 	ether_ifdetach(sc->ifp);
442 
443 	if_free(sc->ifp);
444 
445 	oce_hw_shutdown(sc);
446 
447 	bus_generic_detach(dev);
448 
449 	return 0;
450 }
451 
452 static int
453 oce_shutdown(device_t dev)
454 {
455 	int rc;
456 
457 	rc = oce_detach(dev);
458 
459 	return rc;
460 }
461 
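/*
 * Interface ioctl handler: media queries, MTU changes, up/down and
 * promiscuous mode, multicast updates, capability toggles (checksum,
 * TSO, VLAN, LRO), SFP module reads via SIOCGI2C and the privileged
 * passthrough interface.
 */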
462 static int
463 oce_ioctl(if_t ifp, u_long command, caddr_t data)
464 {
465 	struct ifreq *ifr = (struct ifreq *)data;
466 	POCE_SOFTC sc = if_getsoftc(ifp);
467 	struct ifi2creq i2c;
468 	uint8_t	offset = 0;
469 	int rc = 0;
470 	uint32_t u;
471 
472 	switch (command) {
473 	case SIOCGIFMEDIA:
474 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
475 		break;
476 
477 	case SIOCSIFMTU:
478 		if (ifr->ifr_mtu > OCE_MAX_MTU)
479 			rc = EINVAL;
480 		else
481 			if_setmtu(ifp, ifr->ifr_mtu);
482 		break;
483 
484 	case SIOCSIFFLAGS:
485 		if (if_getflags(ifp) & IFF_UP) {
486 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
487 				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
488 				oce_init(sc);
489 			}
490 			device_printf(sc->dev, "Interface Up\n");
491 		} else {
492 			LOCK(&sc->dev_lock);
493 
494 			if_setdrvflagbits(sc->ifp, 0,
495 			    IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
496 			oce_if_deactivate(sc);
497 
498 			UNLOCK(&sc->dev_lock);
499 
500 			device_printf(sc->dev, "Interface Down\n");
501 		}
502 
503 		if ((if_getflags(ifp) & IFF_PROMISC) && !sc->promisc) {
504 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
505 				sc->promisc = TRUE;
506 		} else if (!(if_getflags(ifp) & IFF_PROMISC) && sc->promisc) {
507 			if (!oce_rxf_set_promiscuous(sc, 0))
508 				sc->promisc = FALSE;
509 		}
510 
511 		break;
512 
513 	case SIOCADDMULTI:
514 	case SIOCDELMULTI:
515 		rc = oce_hw_update_multicast(sc);
516 		if (rc)
517 			device_printf(sc->dev,
518 				"Update multicast address failed\n");
519 		break;
520 
521 	case SIOCSIFCAP:
522 		u = ifr->ifr_reqcap ^ if_getcapenable(ifp);
523 
524 		if (u & IFCAP_TXCSUM) {
525 			if_togglecapenable(ifp, IFCAP_TXCSUM);
526 			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
527 
528 			if (IFCAP_TSO & if_getcapenable(ifp) &&
529 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
530 				u &= ~IFCAP_TSO;
531 				if_setcapenablebit(ifp, 0, IFCAP_TSO);
532 				if_sethwassistbits(ifp, 0, CSUM_TSO);
533 				if_printf(ifp,
534 					 "TSO disabled due to -txcsum.\n");
535 			}
536 		}
537 
538 		if (u & IFCAP_RXCSUM)
539 			if_togglecapenable(ifp, IFCAP_RXCSUM);
540 
541 		if (u & IFCAP_TSO4) {
542 			if_togglecapenable(ifp, IFCAP_TSO4);
543 
544 			if (IFCAP_TSO & if_getcapenable(ifp)) {
545 				if (IFCAP_TXCSUM & if_getcapenable(ifp))
546 					if_sethwassistbits(ifp, CSUM_TSO, 0);
547 				else {
548 					if_setcapenablebit(ifp, 0, IFCAP_TSO);
549 					if_sethwassistbits(ifp, 0, CSUM_TSO);
550 					if_printf(ifp,
551 					    "Enable txcsum first.\n");
552 					rc = EAGAIN;
553 				}
554 			} else
555 				if_sethwassistbits(ifp, 0, CSUM_TSO);
556 		}
557 
558 		if (u & IFCAP_VLAN_HWTAGGING)
559 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
560 
561 		if (u & IFCAP_VLAN_HWFILTER) {
562 			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
563 			oce_vid_config(sc);
564 		}
565 #if defined(INET6) || defined(INET)
566 		if (u & IFCAP_LRO) {
567 			if_togglecapenable(ifp, IFCAP_LRO);
568 			if(sc->enable_hwlro) {
569 				if(if_getcapenable(ifp) & IFCAP_LRO) {
570 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
571 				}else {
572 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
573 				}
574 			}
575 		}
576 #endif
577 
578 		break;
579 
580 	case SIOCGI2C:
581 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
582 		if (rc)
583 			break;
584 
585 		if (i2c.dev_addr == PAGE_NUM_A0) {
586 			offset = i2c.offset;
587 		} else if (i2c.dev_addr == PAGE_NUM_A2) {
588 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
589 		} else {
590 			rc = EINVAL;
591 			break;
592 		}
593 
594 		if (i2c.len > sizeof(i2c.data) ||
595 		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
596 			rc = EINVAL;
597 			break;
598 		}
599 
600 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
601 		if (rc) {
602 			rc = -rc;
603 			break;
604 		}
605 
606 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
607 
608 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
609 		break;
610 
611 	case SIOCGPRIVATE_0:
612 		rc = priv_check(curthread, PRIV_DRIVER);
613 		if (rc != 0)
614 			break;
615 		rc = oce_handle_passthrough(ifp, data);
616 		break;
617 	default:
618 		rc = ether_ioctl(ifp, command, data);
619 		break;
620 	}
621 
622 	return rc;
623 }
624 
625 static void
626 oce_init(void *arg)
627 {
628 	POCE_SOFTC sc = arg;
629 
630 	LOCK(&sc->dev_lock);
631 
632 	if (if_getflags(sc->ifp) & IFF_UP) {
633 		oce_if_deactivate(sc);
634 		oce_if_activate(sc);
635 	}
636 
637 	UNLOCK(&sc->dev_lock);
638 
639 }
640 
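/*
 * if_transmit entry point: pick a work queue from the mbuf's flow id
 * and hand the packet to oce_multiq_transmit() under that queue's lock.
 */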
641 static int
642 oce_multiq_start(if_t ifp, struct mbuf *m)
643 {
644 	POCE_SOFTC sc = if_getsoftc(ifp);
645 	struct oce_wq *wq = NULL;
646 	int queue_index = 0;
647 	int status = 0;
648 
649 	if (!sc->link_status)
650 		return ENXIO;
651 
652 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
653 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
654 
655 	wq = sc->wq[queue_index];
656 
657 	LOCK(&wq->tx_lock);
658 	status = oce_multiq_transmit(ifp, m, wq);
659 	UNLOCK(&wq->tx_lock);
660 
661 	return status;
662 
663 }
664 
665 static void
666 oce_multiq_flush(if_t ifp)
667 {
668 	POCE_SOFTC sc = if_getsoftc(ifp);
669 	struct mbuf     *m;
670 	int i = 0;
671 
672 	for (i = 0; i < sc->nwqs; i++) {
673 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
674 			m_freem(m);
675 	}
676 	if_qflush(ifp);
677 }
678 
679 /*****************************************************************************
680  *                   Driver interrupt routines functions                     *
681  *****************************************************************************/
682 
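/*
 * Deferred interrupt handler, run from the per-vector taskqueue: drain
 * the event queue, invoke the handler of every completion queue attached
 * to it, then re-arm those CQs and finally the EQ itself.
 */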
683 static void
684 oce_intr(void *arg, int pending)
685 {
686 
687 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
688 	POCE_SOFTC sc = ii->sc;
689 	struct oce_eq *eq = ii->eq;
690 	struct oce_eqe *eqe;
691 	struct oce_cq *cq = NULL;
692 	int i, num_eqes = 0;
693 
694 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
695 				 BUS_DMASYNC_POSTWRITE);
696 	do {
697 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
698 		if (eqe->evnt == 0)
699 			break;
700 		eqe->evnt = 0;
701 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
702 					BUS_DMASYNC_POSTWRITE);
703 		RING_GET(eq->ring, 1);
704 		num_eqes++;
705 
706 	} while (TRUE);
707 
708 	if (!num_eqes)
709 		goto eq_arm; /* Spurious */
710 
711 	/* Clear EQ entries, but don't arm */
712 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
713 
714 	/* Process TX, RX and MCC, but don't arm the CQ */
715 	for (i = 0; i < eq->cq_valid; i++) {
716 		cq = eq->cq[i];
717 		(*cq->cq_handler)(cq->cb_arg);
718 	}
719 
720 	/* Arm all cqs connected to this EQ */
721 	for (i = 0; i < eq->cq_valid; i++) {
722 		cq = eq->cq[i];
723 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
724 	}
725 
726 eq_arm:
727 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
728 
729 	return;
730 }
731 
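/*
 * Work out how many interrupt vectors are needed from the RX/TX queue
 * counts (plus extra vectors when RDMA is supported), try MSI-X and fall
 * back to a single legacy INTx vector, then scale the queue counts to
 * what was actually granted and register one handler per vector.
 */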
732 static int
733 oce_setup_intr(POCE_SOFTC sc)
734 {
735 	int rc = 0, use_intx = 0;
736 	int vector = 0, req_vectors = 0;
737 	int tot_req_vectors, tot_vectors;
738 
739 	if (is_rss_enabled(sc))
740 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
741 	else
742 		req_vectors = 1;
743 
744 	tot_req_vectors = req_vectors;
745 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
746 	  if (req_vectors > 1) {
747 	    tot_req_vectors += OCE_RDMA_VECTORS;
748 	    sc->roce_intr_count = OCE_RDMA_VECTORS;
749 	  }
750 	}
751 
752         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
753 		sc->intr_count = req_vectors;
754                 tot_vectors = tot_req_vectors;
755 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
756 		if (rc != 0) {
757 			use_intx = 1;
758 			pci_release_msi(sc->dev);
759 		} else {
760 		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
761 		    if (tot_vectors < tot_req_vectors) {
762 		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
763 			sc->roce_intr_count = (tot_vectors / 2);
764 		      }
765 		      sc->intr_count = tot_vectors - sc->roce_intr_count;
766 		    }
767 		  } else {
768 		    sc->intr_count = tot_vectors;
769 		  }
770     		  sc->flags |= OCE_FLAGS_USING_MSIX;
771 		}
772 	} else
773 		use_intx = 1;
774 
775 	if (use_intx)
776 		sc->intr_count = 1;
777 
778 	/* Scale number of queues based on intr we got */
779 	update_queues_got(sc);
780 
781 	if (use_intx) {
782 		device_printf(sc->dev, "Using legacy interrupt\n");
783 		rc = oce_alloc_intr(sc, vector, oce_intr);
784 		if (rc)
785 			goto error;
786 	} else {
787 		for (; vector < sc->intr_count; vector++) {
788 			rc = oce_alloc_intr(sc, vector, oce_intr);
789 			if (rc)
790 				goto error;
791 		}
792 	}
793 
794 	return 0;
795 error:
796 	oce_intr_free(sc);
797 	return rc;
798 }
799 
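/*
 * Fast interrupt filter: acknowledge the event queue without re-arming
 * it and defer the real work to the taskqueue, where oce_intr() runs.
 */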
800 static int
801 oce_fast_isr(void *arg)
802 {
803 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
804 	POCE_SOFTC sc = ii->sc;
805 
806 	if (ii->eq == NULL)
807 		return FILTER_STRAY;
808 
809 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
810 
811 	taskqueue_enqueue(ii->tq, &ii->task);
812 
813  	ii->eq->intr++;
814 
815 	return FILTER_HANDLED;
816 }
817 
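/*
 * Allocate the IRQ resource for one vector (rid = vector + 1 for MSI-X,
 * 0 for INTx), create its fast taskqueue and wire up oce_fast_isr.
 */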
818 static int
819 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
820 {
821 	POCE_INTR_INFO ii;
822 	int rc = 0, rr;
823 
824 	if (vector >= OCE_MAX_EQ)
825 		return (EINVAL);
826 
827 	ii = &sc->intrs[vector];
828 
829 	/* Set the resource id for the interrupt.
830 	 * MSIx is vector + 1 for the resource id,
831 	 * INTx is 0 for the resource id.
832 	 */
833 	if (sc->flags & OCE_FLAGS_USING_MSIX)
834 		rr = vector + 1;
835 	else
836 		rr = 0;
837 	ii->intr_res = bus_alloc_resource_any(sc->dev,
838 					      SYS_RES_IRQ,
839 					      &rr, RF_ACTIVE|RF_SHAREABLE);
840 	ii->irq_rr = rr;
841 	if (ii->intr_res == NULL) {
842 		device_printf(sc->dev,
843 			  "Could not allocate interrupt\n");
844 		rc = ENXIO;
845 		return rc;
846 	}
847 
848 	TASK_INIT(&ii->task, 0, isr, ii);
849 	ii->vector = vector;
850 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
851 	ii->tq = taskqueue_create_fast(ii->task_name,
852 			M_NOWAIT,
853 			taskqueue_thread_enqueue,
854 			&ii->tq);
855 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
856 			device_get_nameunit(sc->dev));
857 
858 	ii->sc = sc;
859 	rc = bus_setup_intr(sc->dev,
860 			ii->intr_res,
861 			INTR_TYPE_NET,
862 			oce_fast_isr, NULL, ii, &ii->tag);
863 	return rc;
864 
865 }
866 
867 void
868 oce_intr_free(POCE_SOFTC sc)
869 {
870 	int i = 0;
871 
872 	for (i = 0; i < sc->intr_count; i++) {
873 
874 		if (sc->intrs[i].tag != NULL)
875 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
876 						sc->intrs[i].tag);
877 		if (sc->intrs[i].tq != NULL)
878 			taskqueue_free(sc->intrs[i].tq);
879 
880 		if (sc->intrs[i].intr_res != NULL)
881 			bus_release_resource(sc->dev, SYS_RES_IRQ,
882 						sc->intrs[i].irq_rr,
883 						sc->intrs[i].intr_res);
884 		sc->intrs[i].tag = NULL;
885 		sc->intrs[i].intr_res = NULL;
886 	}
887 
888 	if (sc->flags & OCE_FLAGS_USING_MSIX)
889 		pci_release_msi(sc->dev);
890 
891 }
892 
893 /******************************************************************************
894 *			  Media callbacks functions 			      *
895 ******************************************************************************/
896 
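/*
 * Report link state and map the firmware link-speed code to an ifmedia
 * type.  Note that 20 Gbps and 25 Gbps links are reported with the 10G
 * media type here.
 */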
897 static void
898 oce_media_status(if_t ifp, struct ifmediareq *req)
899 {
900 	POCE_SOFTC sc = (POCE_SOFTC) if_getsoftc(ifp);
901 
902 	req->ifm_status = IFM_AVALID;
903 	req->ifm_active = IFM_ETHER;
904 
905 	if (sc->link_status == 1)
906 		req->ifm_status |= IFM_ACTIVE;
907 	else
908 		return;
909 
910 	switch (sc->link_speed) {
911 	case 1: /* 10 Mbps */
912 		req->ifm_active |= IFM_10_T | IFM_FDX;
913 		sc->speed = 10;
914 		break;
915 	case 2: /* 100 Mbps */
916 		req->ifm_active |= IFM_100_TX | IFM_FDX;
917 		sc->speed = 100;
918 		break;
919 	case 3: /* 1 Gbps */
920 		req->ifm_active |= IFM_1000_T | IFM_FDX;
921 		sc->speed = 1000;
922 		break;
923 	case 4: /* 10 Gbps */
924 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
925 		sc->speed = 10000;
926 		break;
927 	case 5: /* 20 Gbps */
928 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
929 		sc->speed = 20000;
930 		break;
931 	case 6: /* 25 Gbps */
932 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
933 		sc->speed = 25000;
934 		break;
935 	case 7: /* 40 Gbps */
936 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
937 		sc->speed = 40000;
938 		break;
939 	default:
940 		sc->speed = 0;
941 		break;
942 	}
943 
944 	return;
945 }
946 
947 int
948 oce_media_change(if_t ifp)
949 {
950 	return 0;
951 }
952 
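/*
 * Decide whether a copy of this outgoing packet must also go to the BMC.
 * Multicast/broadcast frames are matched against the ARP, ICMPv6 (RA/NA),
 * DHCP, DHCPv6 and NetBIOS filters; on a match the mbuf is duplicated
 * (and VLAN-tagged if needed) for the management controller.
 */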
953 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
954 				struct mbuf *m, boolean_t *os2bmc,
955 				struct mbuf **m_new)
956 {
957 	struct ether_header *eh = NULL;
958 
959 	eh = mtod(m, struct ether_header *);
960 
961 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
962 		*os2bmc = FALSE;
963 		goto done;
964 	}
965 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
966 		goto done;
967 
968 	if (is_mc_allowed_on_bmc(sc, eh) ||
969 	    is_bc_allowed_on_bmc(sc, eh) ||
970 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
971 		*os2bmc = TRUE;
972 		goto done;
973 	}
974 
975 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
976 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
977 		uint8_t nexthdr = ip6->ip6_nxt;
978 		if (nexthdr == IPPROTO_ICMPV6) {
979 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
980 			switch (icmp6->icmp6_type) {
981 			case ND_ROUTER_ADVERT:
982 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
983 				goto done;
984 			case ND_NEIGHBOR_ADVERT:
985 				*os2bmc = is_ipv6_na_filt_enabled(sc);
986 				goto done;
987 			default:
988 				break;
989 			}
990 		}
991 	}
992 
993 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
994 		struct ip *ip = mtod(m, struct ip *);
995 		int iphlen = ip->ip_hl << 2;
996 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
997 		switch (uh->uh_dport) {
998 		case DHCP_CLIENT_PORT:
999 			*os2bmc = is_dhcp_client_filt_enabled(sc);
1000 			goto done;
1001 		case DHCP_SERVER_PORT:
1002 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
1003 			goto done;
1004 		case NET_BIOS_PORT1:
1005 		case NET_BIOS_PORT2:
1006 			*os2bmc = is_nbios_filt_enabled(sc);
1007 			goto done;
1008 		case DHCPV6_RAS_PORT:
1009 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1010 			goto done;
1011 		default:
1012 			break;
1013 		}
1014 	}
1015 done:
1016 	if (*os2bmc) {
1017 		*m_new = m_dup(m, M_NOWAIT);
1018 		if (!*m_new) {
1019 			*os2bmc = FALSE;
1020 			return;
1021 		}
1022 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1023 	}
1024 }
1025 
1026 /*****************************************************************************
1027  *			  Transmit routines functions			     *
1028  *****************************************************************************/
1029 
1030 static int
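/*
 * Map the mbuf for DMA and post it to the work queue: one NIC header WQE
 * carrying the checksum/TSO/VLAN state, followed by one fragment WQE per
 * DMA segment (BE/SH need an even WQE count, so a blank fragment may be
 * appended), then ring the doorbell.  EFBIG from the DMA load is retried
 * once after m_defrag().  If the OS-to-BMC filters match, a duplicate of
 * the packet is also transmitted to the management controller.
 */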
1031 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1032 {
1033 	int rc = 0, i, retry_cnt = 0;
1034 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1035 	struct mbuf *m, *m_temp, *m_new = NULL;
1036 	struct oce_wq *wq = sc->wq[wq_index];
1037 	struct oce_packet_desc *pd;
1038 	struct oce_nic_hdr_wqe *nichdr;
1039 	struct oce_nic_frag_wqe *nicfrag;
1040 	struct ether_header *eh = NULL;
1041 	int num_wqes;
1042 	uint32_t reg_value;
1043 	boolean_t complete = TRUE;
1044 	boolean_t os2bmc = FALSE;
1045 
1046 	m = *mpp;
1047 	if (!m)
1048 		return EINVAL;
1049 
1050 	if (!(m->m_flags & M_PKTHDR)) {
1051 		rc = ENXIO;
1052 		goto free_ret;
1053 	}
1054 
1055 	/* Don't allow non-TSO packets longer than MTU */
1056 	if (!is_tso_pkt(m)) {
1057 		eh = mtod(m, struct ether_header *);
1058 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1059 			 goto free_ret;
1060 	}
1061 
1062 	if(oce_tx_asic_stall_verify(sc, m)) {
1063 		m = oce_insert_vlan_tag(sc, m, &complete);
1064 		if(!m) {
1065 			device_printf(sc->dev, "Insertion unsuccessful\n");
1066 			return 0;
1067 		}
1068 	}
1069 
1070 	/* The Lancer and SH ASICs have a bug wherein packets that are 32 bytes
1071 	 * or less may cause a transmit stall on that port. The workaround is
1072 	 * to pad short packets (<= 32 bytes) to a 36-byte length.
1073 	 */
1074 	if(IS_SH(sc) || IS_XE201(sc) ) {
1075 		if(m->m_pkthdr.len <= 32) {
1076 			char buf[36];
1077 			bzero((void *)buf, 36);
1078 			m_append(m, (36 - m->m_pkthdr.len), buf);
1079 		}
1080 	}
1081 
1082 tx_start:
1083 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1084 		/* consolidate packet buffers for TSO/LSO segment offload */
1085 #if defined(INET6) || defined(INET)
1086 		m = oce_tso_setup(sc, mpp);
1087 #else
1088 		m = NULL;
1089 #endif
1090 		if (m == NULL) {
1091 			rc = ENXIO;
1092 			goto free_ret;
1093 		}
1094 	}
1095 
1096 	pd = &wq->pckts[wq->pkt_desc_head];
1097 
1098 retry:
1099 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1100 				     pd->map,
1101 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1102 	if (rc == 0) {
1103 		num_wqes = pd->nsegs + 1;
1104 		if (IS_BE(sc) || IS_SH(sc)) {
1105 			/* A dummy WQE is required only for BE3. */
1106 			if (num_wqes & 1)
1107 				num_wqes++;
1108 		}
1109 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1110 			bus_dmamap_unload(wq->tag, pd->map);
1111 			return EBUSY;
1112 		}
1113 		atomic_store_rel_int(&wq->pkt_desc_head,
1114 				     (wq->pkt_desc_head + 1) % \
1115 				      OCE_WQ_PACKET_ARRAY_SIZE);
1116 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1117 		pd->mbuf = m;
1118 
1119 		nichdr =
1120 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1121 		nichdr->u0.dw[0] = 0;
1122 		nichdr->u0.dw[1] = 0;
1123 		nichdr->u0.dw[2] = 0;
1124 		nichdr->u0.dw[3] = 0;
1125 
1126 		nichdr->u0.s.complete = complete;
1127 		nichdr->u0.s.mgmt = os2bmc;
1128 		nichdr->u0.s.event = 1;
1129 		nichdr->u0.s.crc = 1;
1130 		nichdr->u0.s.forward = 0;
1131 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1132 		nichdr->u0.s.udpcs =
1133 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1134 		nichdr->u0.s.tcpcs =
1135 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1136 		nichdr->u0.s.num_wqe = num_wqes;
1137 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1138 
1139 		if (m->m_flags & M_VLANTAG) {
1140 			nichdr->u0.s.vlan = 1; /*Vlan present*/
1141 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1142 		}
1143 
1144 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1145 			if (m->m_pkthdr.tso_segsz) {
1146 				nichdr->u0.s.lso = 1;
1147 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1148 			}
1149 			if (!IS_BE(sc) || !IS_SH(sc))
1150 				nichdr->u0.s.ipcs = 1;
1151 		}
1152 
1153 		RING_PUT(wq->ring, 1);
1154 		atomic_add_int(&wq->ring->num_used, 1);
1155 
1156 		for (i = 0; i < pd->nsegs; i++) {
1157 			nicfrag =
1158 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1159 						      struct oce_nic_frag_wqe);
1160 			nicfrag->u0.s.rsvd0 = 0;
1161 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1162 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1163 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1164 			pd->wqe_idx = wq->ring->pidx;
1165 			RING_PUT(wq->ring, 1);
1166 			atomic_add_int(&wq->ring->num_used, 1);
1167 		}
1168 		if (num_wqes > (pd->nsegs + 1)) {
1169 			nicfrag =
1170 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1171 						      struct oce_nic_frag_wqe);
1172 			nicfrag->u0.dw[0] = 0;
1173 			nicfrag->u0.dw[1] = 0;
1174 			nicfrag->u0.dw[2] = 0;
1175 			nicfrag->u0.dw[3] = 0;
1176 			pd->wqe_idx = wq->ring->pidx;
1177 			RING_PUT(wq->ring, 1);
1178 			atomic_add_int(&wq->ring->num_used, 1);
1179 			pd->nsegs++;
1180 		}
1181 
1182 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1183 		wq->tx_stats.tx_reqs++;
1184 		wq->tx_stats.tx_wrbs += num_wqes;
1185 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1186 		wq->tx_stats.tx_pkts++;
1187 
1188 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1189 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1190 		reg_value = (num_wqes << 16) | wq->wq_id;
1191 
1192 		/* If os2bmc is not enabled, or the packet is already tagged
1193 		   as a bmc packet, this call does nothing.
1194 		 */
1195 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1196 
1197 		if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1198 		if (m->m_flags & M_MCAST)
1199 			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
1200 		ETHER_BPF_MTAP(sc->ifp, m);
1201 
1202 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1203 
1204 	} else if (rc == EFBIG)	{
1205 		if (retry_cnt == 0) {
1206 			m_temp = m_defrag(m, M_NOWAIT);
1207 			if (m_temp == NULL)
1208 				goto free_ret;
1209 			m = m_temp;
1210 			*mpp = m_temp;
1211 			retry_cnt = retry_cnt + 1;
1212 			goto retry;
1213 		} else
1214 			goto free_ret;
1215 	} else if (rc == ENOMEM)
1216 		return rc;
1217 	else
1218 		goto free_ret;
1219 
1220 	if (os2bmc) {
1221 		m = m_new;
1222 		goto tx_start;
1223 	}
1224 
1225 	return 0;
1226 
1227 free_ret:
1228 	m_freem(*mpp);
1229 	*mpp = NULL;
1230 	return rc;
1231 }
1232 
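/*
 * Reclaim one transmitted packet: unmap and free its mbuf, return the
 * WQE slots to the ring and, if the interface was flow-controlled, clear
 * OACTIVE and restart transmission once the ring is half empty.
 */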
1233 static void
1234 oce_process_tx_completion(struct oce_wq *wq)
1235 {
1236 	struct oce_packet_desc *pd;
1237 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1238 	struct mbuf *m;
1239 
1240 	pd = &wq->pckts[wq->pkt_desc_tail];
1241 	atomic_store_rel_int(&wq->pkt_desc_tail,
1242 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1243 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1244 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1245 	bus_dmamap_unload(wq->tag, pd->map);
1246 
1247 	m = pd->mbuf;
1248 	m_freem(m);
1249 	pd->mbuf = NULL;
1250 
1251 	if (if_getdrvflags(sc->ifp) & IFF_DRV_OACTIVE) {
1252 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1253 			if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_OACTIVE));
1254 			oce_tx_restart(sc, wq);
1255 		}
1256 	}
1257 }
1258 
1259 static void
1260 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1261 {
1262 
1263 	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1264 		return;
1265 
1266 	if (!drbr_empty(sc->ifp, wq->br))
1267 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1268 
1269 }
1270 
1271 #if defined(INET6) || defined(INET)
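/*
 * Prepare a TSO mbuf: make it writable if necessary and pull the whole
 * Ethernet/IP(v6)/TCP header into the first mbuf so the WQE fields can
 * be filled from contiguous memory.  Returns NULL for non-TCP frames.
 */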
1272 static struct mbuf *
1273 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1274 {
1275 	struct mbuf *m;
1276 #ifdef INET
1277 	struct ip *ip;
1278 #endif
1279 #ifdef INET6
1280 	struct ip6_hdr *ip6;
1281 #endif
1282 	struct ether_vlan_header *eh;
1283 	struct tcphdr *th;
1284 	uint16_t etype;
1285 	int total_len = 0, ehdrlen = 0;
1286 
1287 	m = *mpp;
1288 
1289 	if (M_WRITABLE(m) == 0) {
1290 		m = m_dup(*mpp, M_NOWAIT);
1291 		if (!m)
1292 			return NULL;
1293 		m_freem(*mpp);
1294 		*mpp = m;
1295 	}
1296 
1297 	eh = mtod(m, struct ether_vlan_header *);
1298 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1299 		etype = ntohs(eh->evl_proto);
1300 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1301 	} else {
1302 		etype = ntohs(eh->evl_encap_proto);
1303 		ehdrlen = ETHER_HDR_LEN;
1304 	}
1305 
1306 	switch (etype) {
1307 #ifdef INET
1308 	case ETHERTYPE_IP:
1309 		ip = (struct ip *)(m->m_data + ehdrlen);
1310 		if (ip->ip_p != IPPROTO_TCP)
1311 			return NULL;
1312 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1313 
1314 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1315 		break;
1316 #endif
1317 #ifdef INET6
1318 	case ETHERTYPE_IPV6:
1319 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1320 		if (ip6->ip6_nxt != IPPROTO_TCP)
1321 			return NULL;
1322 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1323 
1324 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1325 		break;
1326 #endif
1327 	default:
1328 		return NULL;
1329 	}
1330 
1331 	m = m_pullup(m, total_len);
1332 	*mpp = m;
1333 	return m;
1334 }
1335 #endif /* INET6 || INET */
1336 
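/*
 * Taskqueue handler used to restart a stalled work queue by draining
 * its buf_ring once descriptors have been freed.
 */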
1337 void
1338 oce_tx_task(void *arg, int npending)
1339 {
1340 	struct oce_wq *wq = arg;
1341 	POCE_SOFTC sc = wq->parent;
1342 	if_t ifp = sc->ifp;
1343 	int rc = 0;
1344 
1345 	LOCK(&wq->tx_lock);
1346 	rc = oce_multiq_transmit(ifp, NULL, wq);
1347 	if (rc) {
1348 		device_printf(sc->dev,
1349 				"TX[%d] restart failed\n", wq->queue_index);
1350 	}
1351 	UNLOCK(&wq->tx_lock);
1352 }
1353 
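/*
 * Legacy if_start path: dequeue packets from the ifnet send queue and
 * push them onto the default work queue; on failure the mbuf is put
 * back and OACTIVE is set until completions free up space.
 */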
1354 void
1355 oce_start(if_t ifp)
1356 {
1357 	POCE_SOFTC sc = if_getsoftc(ifp);
1358 	struct mbuf *m;
1359 	int rc = 0;
1360 	int def_q = 0; /* Default tx queue is 0 */
1361 
1362 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1363 			IFF_DRV_RUNNING)
1364 		return;
1365 
1366 	if (!sc->link_status)
1367 		return;
1368 
1369 	while (true) {
1370 		m = if_dequeue(sc->ifp);
1371 		if (m == NULL)
1372 			break;
1373 
1374 		LOCK(&sc->wq[def_q]->tx_lock);
1375 		rc = oce_tx(sc, &m, def_q);
1376 		UNLOCK(&sc->wq[def_q]->tx_lock);
1377 		if (rc) {
1378 			if (m != NULL) {
1379 				sc->wq[def_q]->tx_stats.tx_stops ++;
1380 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1381 				if_sendq_prepend(ifp, m);
1382 				m = NULL;
1383 			}
1384 			break;
1385 		}
1386 	}
1387 }
1388 
1389 /* Handle the Completion Queue for transmit */
1390 uint16_t
1391 oce_wq_handler(void *arg)
1392 {
1393 	struct oce_wq *wq = (struct oce_wq *)arg;
1394 	POCE_SOFTC sc = wq->parent;
1395 	struct oce_cq *cq = wq->cq;
1396 	struct oce_nic_tx_cqe *cqe;
1397 	int num_cqes = 0;
1398 
1399 	LOCK(&wq->tx_compl_lock);
1400 	bus_dmamap_sync(cq->ring->dma.tag,
1401 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1402 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1403 	while (cqe->u0.dw[3]) {
1404 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1405 
1406 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1407 		if (wq->ring->cidx >= wq->ring->num_items)
1408 			wq->ring->cidx -= wq->ring->num_items;
1409 
1410 		oce_process_tx_completion(wq);
1411 		wq->tx_stats.tx_compl++;
1412 		cqe->u0.dw[3] = 0;
1413 		RING_GET(cq->ring, 1);
1414 		bus_dmamap_sync(cq->ring->dma.tag,
1415 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1416 		cqe =
1417 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1418 		num_cqes++;
1419 	}
1420 
1421 	if (num_cqes)
1422 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1423 
1424 	UNLOCK(&wq->tx_compl_lock);
1425 	return num_cqes;
1426 }
1427 
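/*
 * Enqueue the mbuf on the work queue's buf_ring, then drain the ring
 * with drbr_peek()/drbr_advance(), putting the packet back and raising
 * OACTIVE when the hardware ring fills up.
 */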
1428 static int
1429 oce_multiq_transmit(if_t ifp, struct mbuf *m, struct oce_wq *wq)
1430 {
1431 	POCE_SOFTC sc = if_getsoftc(ifp);
1432 	int status = 0, queue_index = 0;
1433 	struct mbuf *next = NULL;
1434 	struct buf_ring *br = NULL;
1435 
1436 	br  = wq->br;
1437 	queue_index = wq->queue_index;
1438 
1439 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1440 		IFF_DRV_RUNNING) {
1441 		if (m != NULL)
1442 			status = drbr_enqueue(ifp, br, m);
1443 		return status;
1444 	}
1445 
1446 	if (m != NULL) {
1447 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1448 			return status;
1449 	}
1450 	while ((next = drbr_peek(ifp, br)) != NULL) {
1451 		if (oce_tx(sc, &next, queue_index)) {
1452 			if (next == NULL) {
1453 				drbr_advance(ifp, br);
1454 			} else {
1455 				drbr_putback(ifp, br, next);
1456 				wq->tx_stats.tx_stops ++;
1457 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1458 			}
1459 			break;
1460 		}
1461 		drbr_advance(ifp, br);
1462 	}
1463 
1464 	return 0;
1465 }
1466 
1467 /*****************************************************************************
1468  *			    Receive  routines functions 		     *
1469  *****************************************************************************/
1470 
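/*
 * Hardware LRO delivers one coalesced super-frame; patch its IP length
 * and TTL/hop limit plus the TCP ack, window, PUSH and timestamp fields
 * from the CQE so the stack sees a consistent header.
 */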
1471 static void
1472 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1473 {
1474 	uint32_t *p;
1475         struct ether_header *eh = NULL;
1476         struct tcphdr *tcp_hdr = NULL;
1477         struct ip *ip4_hdr = NULL;
1478         struct ip6_hdr *ip6 = NULL;
1479         uint32_t payload_len = 0;
1480 
1481         eh = mtod(m, struct ether_header *);
1482         /* correct IP header */
1483         if(!cqe2->ipv6_frame) {
1484 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1485                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1486                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1487                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1488         }else {
1489         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1490                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1491                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1492                                                 - sizeof(struct ip6_hdr);
1493                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1494                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1495         }
1496 
1497         /* correct tcp header */
1498         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1499         if(cqe2->push) {
1500         	tcp_hdr->th_flags |= TH_PUSH;
1501         }
1502         tcp_hdr->th_win = htons(cqe2->tcp_window);
1503         tcp_hdr->th_sum = 0xffff;
1504         if(cqe2->ts_opt) {
1505                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1506                 *p = cqe1->tcp_timestamp_val;
1507                 *(p+1) = cqe1->tcp_timestamp_ecr;
1508         }
1509 
1510 	return;
1511 }
1512 
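/*
 * Pull num_frags receive buffers off the ring and link them into one
 * mbuf chain; the first fragment receives the packet-header length and
 * the checksum-offload flags derived from the CQE.
 */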
1513 static void
1514 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1515 {
1516 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1517         uint32_t i = 0, frag_len = 0;
1518 	uint32_t len = cqe_info->pkt_size;
1519         struct oce_packet_desc *pd;
1520         struct mbuf *tail = NULL;
1521 
1522         for (i = 0; i < cqe_info->num_frags; i++) {
1523                 if (rq->ring->cidx == rq->ring->pidx) {
1524                         device_printf(sc->dev,
1525                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1526                         return;
1527                 }
1528                 pd = &rq->pckts[rq->ring->cidx];
1529 
1530                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1531                 bus_dmamap_unload(rq->tag, pd->map);
1532 		RING_GET(rq->ring, 1);
1533                 rq->pending--;
1534 
1535                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1536                 pd->mbuf->m_len = frag_len;
1537 
1538                 if (tail != NULL) {
1539                         /* additional fragments */
1540                         pd->mbuf->m_flags &= ~M_PKTHDR;
1541                         tail->m_next = pd->mbuf;
1542 			if(rq->islro)
1543                         	tail->m_nextpkt = NULL;
1544                         tail = pd->mbuf;
1545                 } else {
1546                         /* first fragment, fill out much of the packet header */
1547                         pd->mbuf->m_pkthdr.len = len;
1548 			if(rq->islro)
1549                         	pd->mbuf->m_nextpkt = NULL;
1550                         pd->mbuf->m_pkthdr.csum_flags = 0;
1551                         if (IF_CSUM_ENABLED(sc)) {
1552                                 if (cqe_info->l4_cksum_pass) {
1553                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1554                                                 pd->mbuf->m_pkthdr.csum_flags |=
1555                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1556                                         }else { /* IPV6 frame */
1557 						if(rq->islro) {
1558                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1559                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1560 						}
1561                                         }
1562                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1563                                 }
1564                                 if (cqe_info->ip_cksum_pass) {
1565                                         pd->mbuf->m_pkthdr.csum_flags |=
1566                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1567                                 }
1568                         }
1569                         *m = tail = pd->mbuf;
1570                }
1571                 pd->mbuf = NULL;
1572                 len -= frag_len;
1573         }
1574 
1575         return;
1576 }
1577 
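/*
 * Build a received packet from a hardware-LRO completion, which is
 * either a singleton CQE or a part1/part2 pair.  The assembled chain
 * gets its headers corrected, VLAN rules applied, and is then handed
 * to the stack.
 */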
1578 static void
1579 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1580 {
1581         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1582         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1583         struct mbuf *m = NULL;
1584 	struct oce_common_cqe_info cq_info;
1585 
1586 	/* parse cqe */
1587         if(cqe2 == NULL) {
1588                 cq_info.pkt_size =  cqe->pkt_size;
1589                 cq_info.vtag = cqe->vlan_tag;
1590                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1591                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1592                 cq_info.ipv6_frame = cqe->ipv6_frame;
1593                 cq_info.vtp = cqe->vtp;
1594                 cq_info.qnq = cqe->qnq;
1595         }else {
1596                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1597                 cq_info.pkt_size =  cqe2->coalesced_size;
1598                 cq_info.vtag = cqe2->vlan_tag;
1599                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1600                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1601                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1602                 cq_info.vtp = cqe2->vtp;
1603                 cq_info.qnq = cqe1->qnq;
1604         }
1605 
1606 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1607 
1608         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1609         if(cq_info.pkt_size % rq->cfg.frag_size)
1610                 cq_info.num_frags++;
1611 
1612 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1613 
1614 	if (m) {
1615 		if(cqe2) {
1616 			//assert(cqe2->valid != 0);
1617 
1618 			//assert(cqe2->cqe_type != 2);
1619 			oce_correct_header(m, cqe1, cqe2);
1620 		}
1621 
1622 		m->m_pkthdr.rcvif = sc->ifp;
1623 		if (rq->queue_index)
1624 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1625 		else
1626 			m->m_pkthdr.flowid = rq->queue_index;
1627 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1628 
1629 		/* This determines if the vlan tag is valid */
1630 		if (cq_info.vtp) {
1631 			if (sc->function_mode & FNM_FLEX10_MODE) {
1632 				/* FLEX10. If QnQ is not set, neglect VLAN */
1633 				if (cq_info.qnq) {
1634 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1635 					m->m_flags |= M_VLANTAG;
1636 				}
1637 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1638 				/* In UMC mode the pvid is generally stripped by
1639 				   hw, but in some cases it still arrives with
1640 				   the pvid. So if pvid == vlan, ignore the vlan.
1641 				 */
1642 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1643 				m->m_flags |= M_VLANTAG;
1644 			}
1645 		}
1646 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1647 
1648 		if_input(sc->ifp, m);
1649 
1650 		/* Update rx stats per queue */
1651 		rq->rx_stats.rx_pkts++;
1652 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1653 		rq->rx_stats.rx_frags += cq_info.num_frags;
1654 		rq->rx_stats.rx_ucast_pkts++;
1655 	}
1656         return;
1657 }
1658 
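/*
 * Process one regular receive completion: discard flush or invalid
 * completions, assemble the mbuf chain, apply the VLAN rules, then try
 * software LRO before handing the packet to the stack.
 */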
1659 static void
1660 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1661 {
1662 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1663 	int len;
1664 	struct mbuf *m = NULL;
1665 	struct oce_common_cqe_info cq_info;
1666 	uint16_t vtag = 0;
1667 
1668 	/* Is this a flush completion with no data? */
1669 	if(!cqe->u0.s.num_fragments)
1670 		goto exit;
1671 
1672 	len = cqe->u0.s.pkt_size;
1673 	if (!len) {
1674 		/* partial DMA workaround for Lancer */
1675 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1676 		goto exit;
1677 	}
1678 
1679 	if (!oce_cqe_portid_valid(sc, cqe)) {
1680 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1681 		goto exit;
1682 	}
1683 
1684 	/* Get vlan_tag value */
1685 	if(IS_BE(sc) || IS_SH(sc))
1686 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1687 	else
1688 		vtag = cqe->u0.s.vlan_tag;
1689 
1690 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1691 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1692 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1693 	cq_info.num_frags = cqe->u0.s.num_fragments;
1694 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1695 
1696 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1697 
1698 	if (m) {
1699 		m->m_pkthdr.rcvif = sc->ifp;
1700 		if (rq->queue_index)
1701 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1702 		else
1703 			m->m_pkthdr.flowid = rq->queue_index;
1704 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1705 
1706 		/* This determines if the vlan tag is valid */
1707 		if (oce_cqe_vtp_valid(sc, cqe)) {
1708 			if (sc->function_mode & FNM_FLEX10_MODE) {
1709 				/* FLEX10. If QnQ is not set, neglect VLAN */
1710 				if (cqe->u0.s.qnq) {
1711 					m->m_pkthdr.ether_vtag = vtag;
1712 					m->m_flags |= M_VLANTAG;
1713 				}
1714 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1715 				/* In UMC mode the pvid is generally stripped by
1716 				   hw, but in some cases it still arrives with
1717 				   the pvid. So if pvid == vlan, ignore the vlan.
1718 				*/
1719 				m->m_pkthdr.ether_vtag = vtag;
1720 				m->m_flags |= M_VLANTAG;
1721 			}
1722 		}
1723 
1724 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1725 #if defined(INET6) || defined(INET)
1726 		/* Try to queue to LRO */
1727 		if (IF_LRO_ENABLED(sc) &&
1728 		    (cqe->u0.s.ip_cksum_pass) &&
1729 		    (cqe->u0.s.l4_cksum_pass) &&
1730 		    (!cqe->u0.s.ip_ver)       &&
1731 		    (rq->lro.lro_cnt != 0)) {
1732 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1733 				rq->lro_pkts_queued ++;
1734 				goto post_done;
1735 			}
1736 			/* If LRO posting fails then try to post to STACK */
1737 		}
1738 #endif
1739 
1740 		if_input(sc->ifp, m);
1741 #if defined(INET6) || defined(INET)
1742 post_done:
1743 #endif
1744 		/* Update rx stats per queue */
1745 		rq->rx_stats.rx_pkts++;
1746 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1747 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1748 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1749 			rq->rx_stats.rx_mcast_pkts++;
1750 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1751 			rq->rx_stats.rx_ucast_pkts++;
1752 	}
1753 exit:
1754 	return;
1755 }
1756 
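/*
 * Drop the buffers belonging to a completion we cannot use: unmap and
 * free each posted fragment and advance the ring's consumer index.
 */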
1757 void
1758 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1759 {
1760 	uint32_t i = 0;
1761 	struct oce_packet_desc *pd;
1762 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1763 
1764 	for (i = 0; i < num_frags; i++) {
1765                 if (rq->ring->cidx == rq->ring->pidx) {
1766                         device_printf(sc->dev,
1767                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1768                         return;
1769                 }
1770                 pd = &rq->pckts[rq->ring->cidx];
1771                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1772                 bus_dmamap_unload(rq->tag, pd->map);
1773                 if (pd->mbuf != NULL) {
1774                         m_freem(pd->mbuf);
1775                         pd->mbuf = NULL;
1776                 }
1777 
1778 		RING_GET(rq->ring, 1);
1779                 rq->pending--;
1780 	}
1781 }
1782 
1783 static int
1784 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1785 {
1786 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1787 	int vtp = 0;
1788 
1789 	if (sc->be3_native) {
1790 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1791 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1792 	} else
1793 		vtp = cqe->u0.s.vlan_tag_present;
1794 
1795 	return vtp;
1796 
1797 }
1798 
1799 static int
1800 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1801 {
1802 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1803 	int port_id = 0;
1804 
1805 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1806 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1807 		port_id =  cqe_v1->u0.s.port;
1808 		if (sc->port_id != port_id)
1809 			return 0;
1810 	} else
1811 		;/* For BE3 legacy and Lancer this check is a no-op */
1812 
1813 	return 1;
1814 
1815 }
1816 
1817 #if defined(INET6) || defined(INET)
1818 void
1819 oce_rx_flush_lro(struct oce_rq *rq)
1820 {
1821 	struct lro_ctrl	*lro = &rq->lro;
1822 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1823 
1824 	if (!IF_LRO_ENABLED(sc))
1825 		return;
1826 
1827 	tcp_lro_flush_all(lro);
1828 	rq->lro_pkts_queued = 0;
1829 
1830 	return;
1831 }
1832 
1833 static int
1834 oce_init_lro(POCE_SOFTC sc)
1835 {
1836 	struct lro_ctrl *lro = NULL;
1837 	int i = 0, rc = 0;
1838 
1839 	for (i = 0; i < sc->nrqs; i++) {
1840 		lro = &sc->rq[i]->lro;
1841 		rc = tcp_lro_init(lro);
1842 		if (rc != 0) {
1843 			device_printf(sc->dev, "LRO init failed\n");
1844 			return rc;
1845 		}
1846 		lro->ifp = sc->ifp;
1847 	}
1848 
1849 	return rc;
1850 }
1851 
1852 void
1853 oce_free_lro(POCE_SOFTC sc)
1854 {
1855 	struct lro_ctrl *lro = NULL;
1856 	int i = 0;
1857 
1858 	for (i = 0; i < sc->nrqs; i++) {
1859 		lro = &sc->rq[i]->lro;
1860 		if (lro)
1861 			tcp_lro_free(lro);
1862 	}
1863 }
1864 #endif
1865 
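/*
 * Refill the receive queue: allocate jumbo-cluster mbufs, map them into
 * RQEs and post them to the hardware in doorbell writes of at most the
 * per-configuration maximum number of entries.
 */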
1866 int
1867 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1868 {
1869 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1870 	int i, rc;
1871 	struct oce_packet_desc *pd;
1872 	bus_dma_segment_t segs[6];
1873 	int nsegs, added = 0;
1874 	struct oce_nic_rqe *rqe;
1875 	pd_rxulp_db_t rxdb_reg;
1876 	uint32_t val = 0;
1877 	uint32_t oce_max_rq_posts = 64;
1878 
1879 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1880 	for (i = 0; i < count; i++) {
1881 		pd = &rq->pckts[rq->ring->pidx];
1882 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1883 		if (pd->mbuf == NULL) {
1884 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1885 			break;
1886 		}
1887 		pd->mbuf->m_nextpkt = NULL;
1888 
1889 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1890 
1891 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1892 					     pd->map,
1893 					     pd->mbuf,
1894 					     segs, &nsegs, BUS_DMA_NOWAIT);
1895 		if (rc) {
1896 			m_free(pd->mbuf);
1897 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1898 			break;
1899 		}
1900 
1901 		if (nsegs != 1) {
1902 			i--;
1903 			continue;
1904 		}
1905 
1906 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1907 
1908 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1909 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1910 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1911 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1912 		RING_PUT(rq->ring, 1);
1913 		added++;
1914 		rq->pending++;
1915 	}
1916 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
1917 	if (added != 0) {
1918 		for (i = added / oce_max_rq_posts; i > 0; i--) {
1919 			rxdb_reg.bits.num_posted = oce_max_rq_posts;
1920 			rxdb_reg.bits.qid = rq->rq_id;
1921 			if (rq->islro) {
1922 				/* build val afresh each write; OR-ing across writes corrupts the count */
1923 				val = (rq->rq_id & DB_LRO_RQ_ID_MASK) | (oce_max_rq_posts << 16);
1924 				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1925 			} else {
1926 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1927 			}
1928 			added -= oce_max_rq_posts;
1929 		}
1930 		if (added > 0) {
1931 			rxdb_reg.bits.qid = rq->rq_id;
1932 			rxdb_reg.bits.num_posted = added;
1933 			if (rq->islro) {
1934 				val = (rq->rq_id & DB_LRO_RQ_ID_MASK) |
1935 				    (added << 16);
1936 				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1937 			} else {
1938 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1939 			}
1940 		}
1941 	}
1942 
1943 	return 0;
1944 }
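
/*
 * A worked example of the doorbell batching above, as a self-contained
 * sketch (illustrative only, not compiled into the driver).  With the
 * per-write limit assumed to be 64 (the non-hwlro OCE_MAX_RQ_POSTS),
 * posting 150 freshly added buffers results in three doorbell writes:
 * 64, 64 and 22.
 */
#if 0
#include <stdio.h>

/* hypothetical stand-in for the OCE_MAX_RQ_POSTS limit */
#define EXAMPLE_MAX_RQ_POSTS	64

static void
example_rq_doorbell_writes(int added)
{
	int i;

	/* full batches, mirroring the for-loop in oce_alloc_rx_bufs() */
	for (i = added / EXAMPLE_MAX_RQ_POSTS; i > 0; i--) {
		printf("doorbell: num_posted=%d\n", EXAMPLE_MAX_RQ_POSTS);
		added -= EXAMPLE_MAX_RQ_POSTS;
	}
	/* remainder, mirroring the trailing if (added > 0) block */
	if (added > 0)
		printf("doorbell: num_posted=%d\n", added);
}

int
main(void)
{
	example_rq_doorbell_writes(150);	/* prints 64, 64, 22 */
	return (0);
}
#endif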
1945 
1946 static void
1947 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1948 {
1949 	if (num_cqes) {
1950 		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
1951 		if (!sc->enable_hwlro) {
1952 			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
1953 				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
1954 		} else {
1955 			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
1956 				oce_alloc_rx_bufs(rq, 64);
1957 		}
1958 	}
1959 
1960 	return;
1961 }
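
/*
 * A minimal sketch of the replenish policy above, under the names used
 * in this file (illustrative only, not compiled).  Without hardware LRO
 * the ring is topped up to one slot below OCE_RQ_PACKET_ARRAY_SIZE;
 * with hardware LRO it is refilled in fixed chunks of 64 once at least
 * that much headroom exists.
 */
#if 0
/* hypothetical helper; array_size stands in for OCE_RQ_PACKET_ARRAY_SIZE */
static int
example_rx_refill_count(int array_size, int pending, int hwlro)
{
	if (!hwlro)
		return ((array_size - pending) > 1 ?
		    (array_size - pending) - 1 : 0);
	return ((array_size - 1 - pending) > 64 ? 64 : 0);
}
#endif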
1962 
1963 uint16_t
1964 oce_rq_handler_lro(void *arg)
1965 {
1966 	struct oce_rq *rq = (struct oce_rq *)arg;
1967 	struct oce_cq *cq = rq->cq;
1968 	POCE_SOFTC sc = rq->parent;
1969 	struct nic_hwlro_singleton_cqe *cqe;
1970 	struct nic_hwlro_cqe_part2 *cqe2;
1971 	int num_cqes = 0;
1972 
1973 	LOCK(&rq->rx_lock);
1974 	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1975 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
1976 	while (cqe->valid) {
1977 		if (cqe->cqe_type == 0) { /* singleton cqe */
1978 			/* we should not get a singleton cqe after cqe1 on the same rq */
1979 			if (rq->cqe_firstpart != NULL) {
1980 				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
1981 				goto exit_rq_handler_lro;
1982 			}
1983 			if (cqe->error != 0) {
1984 				rq->rx_stats.rxcp_err++;
1985 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1986 			}
1987 			oce_rx_lro(rq, cqe, NULL);
1988 			rq->rx_stats.rx_compl++;
1989 			cqe->valid = 0;
1990 			RING_GET(cq->ring, 1);
1991 			num_cqes++;
1992 			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1993 				break;
1994 		} else if (cqe->cqe_type == 0x1) { /* first part */
1995 			/* we should not get cqe1 after cqe1 on the same rq */
1996 			if (rq->cqe_firstpart != NULL) {
1997 				device_printf(sc->dev, "Got cqe1 after cqe1\n");
1998 				goto exit_rq_handler_lro;
1999 			}
2000 			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
2001 			RING_GET(cq->ring, 1);
2002 		} else if (cqe->cqe_type == 0x2) { /* second part */
2003 			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
2004 			if (cqe2->error != 0) {
2005 				rq->rx_stats.rxcp_err++;
2006 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2007 			}
2008 			/* we should not get cqe2 without cqe1 */
2009 			if (rq->cqe_firstpart == NULL) {
2010 				device_printf(sc->dev, "Got cqe2 without cqe1\n");
2011 				goto exit_rq_handler_lro;
2012 			}
2013 			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2014 
2015 			rq->rx_stats.rx_compl++;
2016 			rq->cqe_firstpart->valid = 0;
2017 			cqe2->valid = 0;
2018 			rq->cqe_firstpart = NULL;
2019 
2020 			RING_GET(cq->ring, 1);
2021 			num_cqes += 2;
2022 			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2023 				break;
2024 		}
2025 
2026 		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2027 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2028 	}
2029 	oce_check_rx_bufs(sc, num_cqes, rq);
2030 exit_rq_handler_lro:
2031 	UNLOCK(&rq->rx_lock);
2032 	return 0;
2033 }
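
/*
 * The hardware-LRO completion stream mixes three CQE flavors, which
 * the handler above dispatches on cqe_type.  A sketch of the protocol,
 * with a hypothetical enum restating the type codes used above
 * (illustrative only, not compiled):
 */
#if 0
enum example_hwlro_cqe_type {
	EXAMPLE_CQE_SINGLETON	= 0x0,	/* a complete event in one CQE */
	EXAMPLE_CQE_PART1	= 0x1,	/* stashed in rq->cqe_firstpart */
	EXAMPLE_CQE_PART2	= 0x2	/* completes the stashed part 1 */
};

/* how many CQEs are counted once an event of this type completes */
static int
example_cqes_counted(enum example_hwlro_cqe_type t)
{
	return (t == EXAMPLE_CQE_PART2 ? 2 : 1);	/* part 1 is counted with part 2 */
}
#endif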
2034 
2035 /* Handle the Completion Queue for receive */
2036 uint16_t
2037 oce_rq_handler(void *arg)
2038 {
2039 	struct epoch_tracker et;
2040 	struct oce_rq *rq = (struct oce_rq *)arg;
2041 	struct oce_cq *cq = rq->cq;
2042 	POCE_SOFTC sc = rq->parent;
2043 	struct oce_nic_rx_cqe *cqe;
2044 	int num_cqes = 0;
2045 
2046 	NET_EPOCH_ENTER(et);
2047 	if(rq->islro) {
2048 		oce_rq_handler_lro(arg);
2049 		NET_EPOCH_EXIT(et);
2050 		return 0;
2051 	}
2052 	LOCK(&rq->rx_lock);
2053 	bus_dmamap_sync(cq->ring->dma.tag,
2054 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2055 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2056 	while (cqe->u0.dw[2]) {
2057 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2058 
2059 		if (cqe->u0.s.error == 0) {
2060 			oce_rx(rq, cqe);
2061 		} else {
2062 			rq->rx_stats.rxcp_err++;
2063 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2064 			/* Post L3/L4 errors to stack.*/
2065 			oce_rx(rq, cqe);
2066 		}
2067 		rq->rx_stats.rx_compl++;
2068 		cqe->u0.dw[2] = 0;
2069 
2070 #if defined(INET6) || defined(INET)
2071 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2072 			oce_rx_flush_lro(rq);
2073 		}
2074 #endif
2075 
2076 		RING_GET(cq->ring, 1);
2077 		bus_dmamap_sync(cq->ring->dma.tag,
2078 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2079 		cqe =
2080 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2081 		num_cqes++;
2082 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2083 			break;
2084 	}
2085 
2086 #if defined(INET6) || defined(INET)
2087 	if (IF_LRO_ENABLED(sc))
2088 		oce_rx_flush_lro(rq);
2089 #endif
2090 
2091 	oce_check_rx_bufs(sc, num_cqes, rq);
2092 	UNLOCK(&rq->rx_lock);
2093 	NET_EPOCH_EXIT(et);
2094 	return 0;
2095 
2096 }
2097 
2098 /*****************************************************************************
2099  *		   Helper functions used in this file			     *
2100  *****************************************************************************/
2101 
2102 static int
2103 oce_attach_ifp(POCE_SOFTC sc)
2104 {
2105 
2106 	sc->ifp = if_alloc(IFT_ETHER);
2107 	if (!sc->ifp)
2108 		return ENOMEM;
2109 
2110 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2111 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2112 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2113 
2114 	if_setflags(sc->ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_KNOWSEPOCH);
2115 	if_setioctlfn(sc->ifp, oce_ioctl);
2116 	if_setstartfn(sc->ifp, oce_start);
2117 	if_setinitfn(sc->ifp, oce_init);
2118 	if_setmtu(sc->ifp, ETHERMTU);
2119 	if_setsoftc(sc->ifp, sc);
2120 	if_settransmitfn(sc->ifp, oce_multiq_start);
2121 	if_setqflushfn(sc->ifp, oce_multiq_flush);
2122 
2123 	if_initname(sc->ifp,
2124 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2125 
2126 	if_setsendqlen(sc->ifp, OCE_MAX_TX_DESC - 1);
2127 	if_setsendqready(sc->ifp);
2128 
2129 	if_sethwassist(sc->ifp, OCE_IF_HWASSIST);
2130 	if_sethwassistbits(sc->ifp, CSUM_TSO, 0);
2131 	if_sethwassistbits(sc->ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);
2132 
2133 	if_setcapabilities(sc->ifp, OCE_IF_CAPABILITIES);
2134 	if_setcapabilitiesbit(sc->ifp, IFCAP_HWCSUM, 0);
2135 	if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWFILTER, 0);
2136 
2137 #if defined(INET6) || defined(INET)
2138 	if_setcapabilitiesbit(sc->ifp, IFCAP_TSO, 0);
2139 	if_setcapabilitiesbit(sc->ifp, IFCAP_LRO, 0);
2140 	if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWTSO, 0);
2141 #endif
2142 
2143 	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
2144 	if_setbaudrate(sc->ifp, IF_Gbps(10));
2145 
2146 	if_sethwtsomax(sc->ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2147 	if_sethwtsomaxsegcount(sc->ifp, OCE_MAX_TX_ELEMENTS);
2148 	if_sethwtsomaxsegsize(sc->ifp, 4096);
2149 
2150 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2151 
2152 	return 0;
2153 }
2154 
2155 static void
2156 oce_add_vlan(void *arg, if_t ifp, uint16_t vtag)
2157 {
2158 	POCE_SOFTC sc = if_getsoftc(ifp);
2159 
2160 	if (if_getsoftc(ifp) != arg)
2161 		return;
2162 	if ((vtag == 0) || (vtag > 4095))
2163 		return;
2164 
2165 	sc->vlan_tag[vtag] = 1;
2166 	sc->vlans_added++;
2167 	if (sc->vlans_added <= (sc->max_vlans + 1))
2168 		oce_vid_config(sc);
2169 }
2170 
2171 static void
2172 oce_del_vlan(void *arg, if_t ifp, uint16_t vtag)
2173 {
2174 	POCE_SOFTC sc = if_getsoftc(ifp);
2175 
2176 	if (if_getsoftc(ifp) != arg)
2177 		return;
2178 	if ((vtag == 0) || (vtag > 4095))
2179 		return;
2180 
2181 	sc->vlan_tag[vtag] = 0;
2182 	sc->vlans_added--;
2183 	oce_vid_config(sc);
2184 }
2185 
2186 /*
2187  * A max of 64 vlans can be configured in BE. If the user configures
2188  * more, place the card in vlan promiscuous mode.
2189  */
2190 static int
2191 oce_vid_config(POCE_SOFTC sc)
2192 {
2193 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2194 	uint16_t ntags = 0, i;
2195 	int status = 0;
2196 
2197 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2198 			(if_getcapenable(sc->ifp) & IFCAP_VLAN_HWFILTER)) {
2199 		for (i = 0; i < MAX_VLANS; i++) {
2200 			if (sc->vlan_tag[i]) {
2201 				vtags[ntags].vtag = i;
2202 				ntags++;
2203 			}
2204 		}
2205 		if (ntags)
2206 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2207 						vtags, ntags, 1, 0);
2208 	} else
2209 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2210 						NULL, 0, 1, 1);
2211 	return status;
2212 }
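
/*
 * A sketch of when the promiscuous fallback above kicks in, assuming
 * MAX_VLANFILTER_SIZE is 64 as the comment states.  The 65th VLAN (or
 * disabling IFCAP_VLAN_HWFILTER) drops the card to vlan promiscuous
 * mode via oce_config_vlan(sc, if_id, NULL, 0, 1, 1).  Hypothetical
 * predicate, illustrative only:
 */
#if 0
static int
example_uses_exact_vlan_filters(int vlans_added, int hwfilter_on)
{
	return (vlans_added <= 64 && hwfilter_on);
}
#endif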
2213 
2214 static void
2215 oce_mac_addr_set(POCE_SOFTC sc)
2216 {
2217 	uint32_t old_pmac_id = sc->pmac_id;
2218 	int status = 0;
2219 
2220 	status = bcmp((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2221 			 sc->macaddr.size_of_struct);
2222 	if (!status)
2223 		return;
2224 
2225 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(if_getlladdr(sc->ifp)),
2226 					sc->if_id, &sc->pmac_id);
2227 	if (!status) {
2228 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2229 		bcopy((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2230 				 sc->macaddr.size_of_struct);
2231 	}
2232 	if (status)
2233 		device_printf(sc->dev, "Failed to update MAC address\n");
2234 
2235 }
2236 
2237 static int
2238 oce_handle_passthrough(if_t ifp, caddr_t data)
2239 {
2240 	POCE_SOFTC sc = if_getsoftc(ifp);
2241 	struct ifreq *ifr = (struct ifreq *)data;
2242 	int rc = ENXIO;
2243 	char cookie[32] = {0};
2244 	void *priv_data = ifr_data_get_ptr(ifr);
2245 	void *ioctl_ptr;
2246 	uint32_t req_size;
2247 	struct mbx_hdr req;
2248 	OCE_DMA_MEM dma_mem;
2249 	struct mbx_common_get_cntl_attr *fw_cmd;
2250 
2251 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2252 		return EFAULT;
2253 
2254 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2255 		return EINVAL;
2256 
2257 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2258 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2259 		return EFAULT;
2260 
2261 	req_size = le32toh(req.u0.req.request_length);
2262 	if (req_size > 65536)
2263 		return EINVAL;
2264 
2265 	req_size += sizeof(struct mbx_hdr);
2266 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2267 	if (rc)
2268 		return ENOMEM;
2269 
2270 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem, char), req_size)) {
2271 		rc = EFAULT;
2272 		goto dma_free;
2273 	}
2274 
2275 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2276 	if (rc) {
2277 		rc = EIO;
2278 		goto dma_free;
2279 	}
2280 
2281 	if (copyout(OCE_DMAPTR(&dma_mem, char), ioctl_ptr, req_size))
2282 		rc = EFAULT;
2283 
2284 	/*
2285 	 * The firmware fills in all the attributes for this ioctl except
2286 	 * the driver version, so fill that in here.
2287 	 */
2288 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2289 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
2290 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2291 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
2292 	}
2293 
2294 dma_free:
2295 	oce_dma_free(sc, &dma_mem);
2296 	return rc;
2297 
2298 }
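
/*
 * A sketch of the userland buffer layout this handler expects: the
 * IOCTL_COOKIE string immediately followed by a struct mbx_hdr and its
 * payload.  The helper below is hypothetical (the exact ioctl dispatch
 * path is an assumption; it is not shown in this function):
 */
#if 0
static int
example_build_passthrough_buf(char *buf, size_t buflen,
    const void *mbx, size_t mbxlen)
{
	size_t cookielen = strlen(IOCTL_COOKIE);

	if (buflen < cookielen + mbxlen)
		return (-1);
	memcpy(buf, IOCTL_COOKIE, cookielen);	/* cookie first */
	memcpy(buf + cookielen, mbx, mbxlen);	/* then mbx_hdr + payload */
	return (0);
}
#endif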
2299 
2300 static void
2301 oce_eqd_set_periodic(POCE_SOFTC sc)
2302 {
2303 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2304 	struct oce_aic_obj *aic;
2305 	struct oce_eq *eqo;
2306 	uint64_t now = 0, delta;
2307 	int eqd, i, num = 0;
2308 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2309 	struct oce_wq *wq;
2310 	struct oce_rq *rq;
2311 
2312 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2313 
2314 	for (i = 0 ; i < sc->neqs; i++) {
2315 		eqo = sc->eq[i];
2316 		aic = &sc->aic_obj[i];
2317 		/* static eq delay was set from userspace; skip the adaptive logic */
2318 		if (!aic->enable) {
2319 			if (aic->ticks)
2320 				aic->ticks = 0;
2321 			eqd = aic->et_eqd;
2322 			goto modify_eqd;
2323 		}
2324 
2325 		if (i == 0) {
2326 			rq = sc->rq[0];
2327 			rxpkts = rq->rx_stats.rx_pkts;
2328 		} else
2329 			rxpkts = 0;
2330 		if (i + 1 < sc->nrqs) {
2331 			rq = sc->rq[i + 1];
2332 			rxpkts += rq->rx_stats.rx_pkts;
2333 		}
2334 		if (i < sc->nwqs) {
2335 			wq = sc->wq[i];
2336 			tx_reqs = wq->tx_stats.tx_reqs;
2337 		} else
2338 			tx_reqs = 0;
2339 		now = ticks;
2340 
2341 		if (!aic->ticks || now < aic->ticks ||
2342 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2343 			aic->prev_rxpkts = rxpkts;
2344 			aic->prev_txreqs = tx_reqs;
2345 			aic->ticks = now;
2346 			continue;
2347 		}
2348 
2349 		delta = ticks_to_msecs(now - aic->ticks);
2350 
2351 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2352 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2353 		eqd = (pps / 15000) << 2;
2354 		if (eqd < 8)
2355 			eqd = 0;
2356 
2357 		/* Make sure that the eq delay is in the known range */
2358 		eqd = min(eqd, aic->max_eqd);
2359 		eqd = max(eqd, aic->min_eqd);
2360 
2361 		aic->prev_rxpkts = rxpkts;
2362 		aic->prev_txreqs = tx_reqs;
2363 		aic->ticks = now;
2364 
2365 modify_eqd:
2366 		if (eqd != aic->cur_eqd) {
2367 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2368 			set_eqd[num].eq_id = eqo->eq_id;
2369 			aic->cur_eqd = eqd;
2370 			num++;
2371 		}
2372 	}
2373 
2374 	/* Modify the delay of any EQs that changed, up to 8 per mailbox call */
2375 	for (i = 0; i < num; i += 8) {
2376 		if ((num - i) >= 8)
2377 			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
2378 		else
2379 			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
2380 	}
2381 
2382 }
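
/*
 * A worked example of the adaptive EQ-delay formula above.  Over a
 * 1000 ms window with 120000 rx packets and 30000 tx requests:
 * pps = 120000 + 30000 = 150000; eqd = (150000 / 15000) << 2 = 40;
 * delay_multiplier = (40 * 65) / 100 = 26.  Results below 8 are forced
 * to 0 and the rest are clamped to [min_eqd, max_eqd].  Illustrative
 * sketch, not compiled:
 */
#if 0
static int
example_eqd_from_pps(uint32_t pps)
{
	int eqd = (pps / 15000) << 2;

	return (eqd < 8 ? 0 : eqd);	/* min/max clamping omitted */
}
#endif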
2383 
2384 static void
2385 oce_detect_hw_error(POCE_SOFTC sc)
2386 {
2387 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2388 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2389 	uint32_t i;
2390 
2391 	if (sc->hw_error)
2392 		return;
2393 
2394 	if (IS_XE201(sc)) {
2395 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2396 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2397 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2398 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2399 		}
2400 	} else {
2401 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2402 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2403 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2404 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2405 
2406 		ue_low = (ue_low & ~ue_low_mask);
2407 		ue_high = (ue_high & ~ue_high_mask);
2408 	}
2409 
2410 	/* On certain platforms BE hardware can report spurious UEs.
2411 	 * A real UE will make the h/w stop working on its own, so
2412 	 * hw_error is deliberately not set here for UE detection.
2413 	 */
2414 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2415 		sc->hw_error = TRUE;
2416 		device_printf(sc->dev, "Error detected in the card\n");
2417 	}
2418 
2419 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2420 		device_printf(sc->dev,
2421 				"ERR: sliport status 0x%x\n", sliport_status);
2422 		device_printf(sc->dev,
2423 				"ERR: sliport error1 0x%x\n", sliport_err1);
2424 		device_printf(sc->dev,
2425 				"ERR: sliport error2 0x%x\n", sliport_err2);
2426 	}
2427 
2428 	if (ue_low) {
2429 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2430 			if (ue_low & 1)
2431 				device_printf(sc->dev, "UE: %s bit set\n",
2432 							ue_status_low_desc[i]);
2433 		}
2434 	}
2435 
2436 	if (ue_high) {
2437 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2438 			if (ue_high & 1)
2439 				device_printf(sc->dev, "UE: %s bit set\n",
2440 							ue_status_hi_desc[i]);
2441 		}
2442 	}
2443 
2444 }
2445 
2446 static void
2447 oce_local_timer(void *arg)
2448 {
2449 	POCE_SOFTC sc = arg;
2450 	int i = 0;
2451 
2452 	oce_detect_hw_error(sc);
2453 	oce_refresh_nic_stats(sc);
2454 	oce_refresh_queue_stats(sc);
2455 	oce_mac_addr_set(sc);
2456 
2457 	/* TX Watch Dog*/
2458 	for (i = 0; i < sc->nwqs; i++)
2459 		oce_tx_restart(sc, sc->wq[i]);
2460 
2461 	/* calculate and set the eq delay for optimal interrupt rate */
2462 	if (IS_BE(sc) || IS_SH(sc))
2463 		oce_eqd_set_periodic(sc);
2464 
2465 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2466 }
2467 
2468 static void
2469 oce_tx_compl_clean(POCE_SOFTC sc)
2470 {
2471 	struct oce_wq *wq;
2472 	int i = 0, timeo = 0, num_wqes = 0;
2473 	int pending_txqs = sc->nwqs;
2474 
2475 	/* Stop polling for completions when the HW has been silent for
2476 	 * 10ms, on hw_error, or when no outstanding completions remain.
2477 	 */
2478 	do {
2479 		pending_txqs = sc->nwqs;
2480 
2481 		for_all_wq_queues(sc, wq, i) {
2482 			num_wqes = oce_wq_handler(wq);
2483 
2484 			if (num_wqes)
2485 				timeo = 0;
2486 
2487 			if (!wq->ring->num_used)
2488 				pending_txqs--;
2489 		}
2490 
2491 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2492 			break;
2493 
2494 		DELAY(1000);
2495 	} while (TRUE);
2496 
2497 	for_all_wq_queues(sc, wq, i) {
2498 		while (wq->ring->num_used) {
2499 			LOCK(&wq->tx_compl_lock);
2500 			oce_process_tx_completion(wq);
2501 			UNLOCK(&wq->tx_compl_lock);
2502 		}
2503 	}
2504 
2505 }
2506 
2507 /* NOTE: This must only be called while holding
2508  *       the DEVICE_LOCK.
2509  */
2510 static void
2511 oce_if_deactivate(POCE_SOFTC sc)
2512 {
2513 	int i;
2514 	struct oce_rq *rq;
2515 	struct oce_wq *wq;
2516 	struct oce_eq *eq;
2517 
2518 	if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2519 
2520 	oce_tx_compl_clean(sc);
2521 
2522 	/* Stop intrs and finish any bottom halves pending */
2523 	oce_hw_intr_disable(sc);
2524 
2525 	/* Since taskqueue_drain takes the Giant Lock, we should not hold
2526 	   any other lock. So unlock the device lock and reacquire it
2527 	   after taskqueue_drain completes.
2528 	*/
2529 	UNLOCK(&sc->dev_lock);
2530 	for (i = 0; i < sc->intr_count; i++) {
2531 		if (sc->intrs[i].tq != NULL) {
2532 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2533 		}
2534 	}
2535 	LOCK(&sc->dev_lock);
2536 
2537 	/* Delete RX queue in card with flush param */
2538 	oce_stop_rx(sc);
2539 
2540 	/* Invalidate any pending cq and eq entries*/
2541 	for_all_evnt_queues(sc, eq, i)
2542 		oce_drain_eq(eq);
2543 	for_all_rq_queues(sc, rq, i)
2544 		oce_drain_rq_cq(rq);
2545 	for_all_wq_queues(sc, wq, i)
2546 		oce_drain_wq_cq(wq);
2547 
2548 	/* We still need to receive MCC async events, so re-enable
2549 	   interrupts and arm the first EQ.
2550 	*/
2551 	oce_hw_intr_enable(sc);
2552 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2553 
2554 	DELAY(10);
2555 }
2556 
2557 static void
2558 oce_if_activate(POCE_SOFTC sc)
2559 {
2560 	struct oce_eq *eq;
2561 	struct oce_rq *rq;
2562 	struct oce_wq *wq;
2563 	int i, rc = 0;
2564 
2565 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
2566 
2567 	oce_hw_intr_disable(sc);
2568 
2569 	oce_start_rx(sc);
2570 
2571 	for_all_rq_queues(sc, rq, i) {
2572 		rc = oce_start_rq(rq);
2573 		if (rc)
2574 			device_printf(sc->dev, "Unable to start RX\n");
2575 	}
2576 
2577 	for_all_wq_queues(sc, wq, i) {
2578 		rc = oce_start_wq(wq);
2579 		if (rc)
2580 			device_printf(sc->dev, "Unable to start TX\n");
2581 	}
2582 
2583 	for_all_evnt_queues(sc, eq, i)
2584 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2585 
2586 	oce_hw_intr_enable(sc);
2587 
2588 }
2589 
2590 static void
2591 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2592 {
2593 	/* Update Link status */
2594 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2595 	     ASYNC_EVENT_LINK_UP) {
2596 		sc->link_status = ASYNC_EVENT_LINK_UP;
2597 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2598 	} else {
2599 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2600 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2601 	}
2602 }
2603 
2604 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2605 					 struct oce_async_evt_grp5_os2bmc *evt)
2606 {
2607 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2608 	if (evt->u.s.mgmt_enable)
2609 		sc->flags |= OCE_FLAGS_OS2BMC;
2610 	else
2611 		return;
2612 
2613 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2614 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2615 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2616 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2617 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2618 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2619 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2620 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2621 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2622 }
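
/*
 * The resulting bmc_filt_mask bit layout, restated from the
 * assignments above: bit 0 ARP, 1 DHCP client, 2 DHCP server,
 * 3 NetBIOS, 4 broadcast, 5 IPv6 neighbor, 6 IPv6 RA, 7 IPv6 RAS,
 * 8 multicast.  E.g. arp_filter and bcast_filt alone yield 0x11.
 * Hypothetical accessor, illustrative only:
 */
#if 0
static int
example_bmc_filters_bcast(uint32_t bmc_filt_mask)
{
	return ((bmc_filt_mask & (1U << 4)) != 0);
}
#endif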
2623 
2624 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2625 {
2626 	struct oce_async_event_grp5_pvid_state *gcqe;
2627 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2628 
2629 	switch (cqe->u0.s.async_type) {
2630 	case ASYNC_EVENT_PVID_STATE:
2631 		/* GRP5 PVID */
2632 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2633 		if (gcqe->enabled)
2634 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2635 		else
2636 			sc->pvid = 0;
2637 		break;
2638 	case ASYNC_EVENT_OS2BMC:
2639 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2640 		oce_async_grp5_osbmc_process(sc, bmccqe);
2641 		break;
2642 	default:
2643 		break;
2644 	}
2645 }
2646 
2647 /* Handle the Completion Queue for the Mailbox/Async notifications */
2648 uint16_t
2649 oce_mq_handler(void *arg)
2650 {
2651 	struct oce_mq *mq = (struct oce_mq *)arg;
2652 	POCE_SOFTC sc = mq->parent;
2653 	struct oce_cq *cq = mq->cq;
2654 	int num_cqes = 0, evt_type = 0, optype = 0;
2655 	struct oce_mq_cqe *cqe;
2656 	struct oce_async_cqe_link_state *acqe;
2657 	struct oce_async_event_qnq *dbgcqe;
2658 
2659 	bus_dmamap_sync(cq->ring->dma.tag,
2660 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2661 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2662 
2663 	while (cqe->u0.dw[3]) {
2664 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2665 		if (cqe->u0.s.async_event) {
2666 			evt_type = cqe->u0.s.event_type;
2667 			optype = cqe->u0.s.async_type;
2668 			if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
2669 				/* Link status evt */
2670 				acqe = (struct oce_async_cqe_link_state *)cqe;
2671 				process_link_state(sc, acqe);
2672 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2673 				oce_process_grp5_events(sc, cqe);
2674 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2675 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2676 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2677 				if (dbgcqe->valid)
2678 					sc->qnqid = dbgcqe->vlan_tag;
2679 				sc->qnq_debug_event = TRUE;
2680 			}
2681 		}
2682 		cqe->u0.dw[3] = 0;
2683 		RING_GET(cq->ring, 1);
2684 		bus_dmamap_sync(cq->ring->dma.tag,
2685 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2686 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2687 		num_cqes++;
2688 	}
2689 
2690 	if (num_cqes)
2691 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2692 
2693 	return 0;
2694 }
2695 
2696 static void
2697 setup_max_queues_want(POCE_SOFTC sc)
2698 {
2699 	/* Check if it is a FLEX machine; if so, don't use RSS */
2700 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2701 	    (sc->function_mode & FNM_UMC_MODE)    ||
2702 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2703 	    (!is_rss_enabled(sc))		  ||
2704 	    IS_BE2(sc)) {
2705 		sc->nrqs = 1;
2706 		sc->nwqs = 1;
2707 	} else {
2708 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2709 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2710 	}
2711 
2712 	if (IS_BE2(sc) && is_rss_enabled(sc))
2713 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2714 }
2715 
2716 static void
2717 update_queues_got(POCE_SOFTC sc)
2718 {
2719 	if (is_rss_enabled(sc)) {
2720 		sc->nrqs = sc->intr_count + 1;
2721 		sc->nwqs = sc->intr_count;
2722 	} else {
2723 		sc->nrqs = 1;
2724 		sc->nwqs = 1;
2725 	}
2726 
2727 	if (IS_BE2(sc))
2728 		sc->nwqs = 1;
2729 }
2730 
2731 static int
2732 oce_check_ipv6_ext_hdr(struct mbuf *m)
2733 {
2734 	struct ether_header *eh = mtod(m, struct ether_header *);
2735 	caddr_t m_datatemp = m->m_data;
2736 
2737 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2738 		m->m_data += sizeof(struct ether_header);
2739 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2740 
2741 		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
2742 		    (ip6->ip6_nxt != IPPROTO_UDP)) {
2743 			struct ip6_ext *ip6e = NULL;
2744 			m->m_data += sizeof(struct ip6_hdr);
2745 
2746 			ip6e = mtod(m, struct ip6_ext *);
2747 			if (ip6e->ip6e_len == 0xff) {
2748 				m->m_data = m_datatemp;
2749 				return TRUE;
2750 			}
2751 		}
2752 		m->m_data = m_datatemp;
2753 	}
2754 	return FALSE;
2755 }
2756 
2757 static int
2758 is_be3_a1(POCE_SOFTC sc)
2759 {
2760 	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2761 		return TRUE;
2762 	}
2763 	return FALSE;
2764 }
2765 
2766 static struct mbuf *
2767 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2768 {
2769 	uint16_t vlan_tag = 0;
2770 
2771 	if (!M_WRITABLE(m))
2772 		return NULL;
2773 
2774 	/* Embed the vlan tag in the packet if it is not already part of it */
2775 	if (m->m_flags & M_VLANTAG) {
2776 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2777 		m->m_flags &= ~M_VLANTAG;
2778 	}
2779 
2780 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2781 	if (sc->pvid) {
2782 		if (!vlan_tag)
2783 			vlan_tag = sc->pvid;
2784 		if (complete)
2785 			*complete = FALSE;
2786 	}
2787 
2788 	if (vlan_tag)
2789 		m = ether_vlanencap(m, vlan_tag);
2790 	if (m == NULL)
2791 		return NULL;	/* ether_vlanencap() frees the chain on failure */
2792 	if (sc->qnqid) {
2793 		m = ether_vlanencap(m, sc->qnqid);
2794 
2795 		if (complete)
2796 			*complete = FALSE;
2797 	}
2798 	return m;
2799 }
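
/*
 * Resulting frame layout for the QnQ case above: the vlan/pvid tag is
 * encapsulated first and the qnqid second, so the qnqid becomes the
 * outer tag on the wire:
 *
 *   dst mac | src mac | qnqid tag (outer) | vlan/pvid tag (inner) | type
 *
 * A standalone sketch of the double encapsulation (hypothetical helper;
 * ether_vlanencap() frees the chain and returns NULL on failure, which
 * the sketch propagates):
 */
#if 0
static struct mbuf *
example_double_tag(struct mbuf *m, uint16_t inner_tag, uint16_t outer_tag)
{
	m = ether_vlanencap(m, inner_tag);	/* becomes the inner tag... */
	if (m != NULL)
		m = ether_vlanencap(m, outer_tag);	/* ...once this one is pushed */
	return (m);
}
#endif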
2800 
2801 static int
2802 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2803 {
2804 	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
2805 	    oce_check_ipv6_ext_hdr(m)) {
2806 		return TRUE;
2807 	}
2808 	return FALSE;
2809 }
2810 
2811 static void
2812 oce_get_config(POCE_SOFTC sc)
2813 {
2814 	int rc = 0;
2815 	uint32_t max_rss = 0;
2816 
2817 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2818 		max_rss = OCE_LEGACY_MODE_RSS;
2819 	else
2820 		max_rss = OCE_MAX_RSS;
2821 
2822 	if (!IS_BE(sc)) {
2823 		rc = oce_get_profile_config(sc, max_rss);
2824 		if (rc) {
2825 			sc->nwqs = OCE_MAX_WQ;
2826 			sc->nrssqs = max_rss;
2827 			sc->nrqs = sc->nrssqs + 1;
2828 		}
2829 	} else {
2830 		/* for BE3, don't rely on fw to determine the resources */
2831 		sc->nrssqs = max_rss;
2832 		sc->nrqs = sc->nrssqs + 1;
2833 		sc->nwqs = OCE_MAX_WQ;
2834 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2835 	}
2836 }
2837 
2838 static void
2839 oce_rdma_close(void)
2840 {
2841   if (oce_rdma_if != NULL) {
2842     oce_rdma_if = NULL;
2843   }
2844 }
2845 
2846 static void
2847 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2848 {
2849   memcpy(macaddr, sc->macaddr.mac_addr, 6);
2850 }
2851 
2852 int
2853 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2854 {
2855   POCE_SOFTC sc;
2856   struct oce_dev_info di;
2857   int i;
2858 
2859   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2860     return -EINVAL;
2861   }
2862 
2863   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2864       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2865     return -ENXIO;
2866   }
2867 
2868   rdma_info->close = oce_rdma_close;
2869   rdma_info->mbox_post = oce_mbox_post;
2870   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2871   rdma_info->get_mac_addr = oce_get_mac_addr;
2872 
2873   oce_rdma_if = rdma_if;
2874 
2875   sc = softc_head;
2876   while (sc != NULL) {
2877     if (oce_rdma_if->announce != NULL) {
2878       memset(&di, 0, sizeof(di));
2879       di.dev = sc->dev;
2880       di.softc = sc;
2881       di.ifp = sc->ifp;
2882       di.db_bhandle = sc->db_bhandle;
2883       di.db_btag = sc->db_btag;
2884       di.db_page_size = 4096;
2885       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2886         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2887       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2888         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2889       } else {
2890         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2891       }
2892       di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
2893       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2894         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2895         di.msix.start_vector = sc->intr_count;
2896         for (i = 0; i < di.msix.num_vectors; i++) {
2897           di.msix.vector_list[i] = sc->intrs[i].vector;
2898         }
2899       }
2900       /* nothing extra to fill in for INTx */
2901       memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2902       di.vendor_id = pci_get_vendor(sc->dev);
2903       di.dev_id = pci_get_device(sc->dev);
2904 
2905       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2906           di.flags  |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2907       }
2908 
2909       rdma_if->announce(&di);
2910       sc = sc->next;
2911     }
2912     sc = sc->next;	/* always advance; avoids spinning forever when announce is NULL */
2913 
2914   return 0;
2915 }
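
/*
 * A minimal registration sketch from a hypothetical RDMA driver.  Only
 * the fields validated or invoked above are shown; the type spellings,
 * variable names and callback are assumptions (illustrative only):
 */
#if 0
static struct oce_rdma_info example_rdma_info = {
	.size = OCE_RDMA_INFO_SIZE,
};
static struct oce_rdma_if example_rdma_if = {
	.size = OCE_RDMA_IF_SIZE,
	.announce = example_announce,	/* hypothetical callback */
};

/* in the RDMA driver's attach path: */
static void
example_rdma_attach(void)
{
	if (oce_register_rdma(&example_rdma_info, &example_rdma_if) != 0)
		printf("oce RDMA registration failed\n");
}
#endif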
2916 
2917 static void
2918 oce_read_env_variables(POCE_SOFTC sc)
2919 {
2920 	char *value = NULL;
2921 	int rc = 0;
2922 
2923 	/* read whether the user wants hardware or software LRO */
2924 	/* value = getenv("oce_enable_hwlro"); (tunable hook disabled) */
2925 	if (value && IS_SH(sc)) {
2926 		sc->enable_hwlro = strtol(value, NULL, 10);
2927 		if (sc->enable_hwlro) {
2928 			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
2929 			if (rc) {
2930 				device_printf(sc->dev, "no hardware lro support\n");
2931 				device_printf(sc->dev, "software lro enabled\n");
2932 				sc->enable_hwlro = 0;
2933 			} else {
2934 				device_printf(sc->dev, "hardware lro enabled\n");
2935 				oce_max_rsp_handled = 32;
2936 			}
2937 		} else {
2938 			device_printf(sc->dev, "software lro enabled\n");
2939 		}
2940 	} else {
2941 		sc->enable_hwlro = 0;
2942 	}
2943 
2944 	/* read the receive mbuf size */
2945 	/* value = getenv("oce_rq_buf_size"); (tunable hook disabled) */
2946 	if (value && IS_SH(sc)) {
2947 		oce_rq_buf_size = strtol(value, NULL, 10);
2948 		switch (oce_rq_buf_size) {
2949 		case 2048:
2950 		case 4096:
2951 		case 9216:
2952 		case 16384:
2953 			break;
2954 
2955 		default:
2956 			device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
2957 			oce_rq_buf_size = 2048;
2958 		}
2959 	}
2960 
2961 	return;
2962 }
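
/*
 * Both getenv() hooks above are commented out, so these knobs are
 * currently inert.  Were they re-enabled, getenv(9) reads the kernel
 * environment, so the values would come from /boot/loader.conf, e.g.
 * (illustrative only):
 *
 *   oce_enable_hwlro="1"
 *   oce_rq_buf_size="9216"
 */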
2963