/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */


/* $FreeBSD: src/sys/dev/oce/oce_if.c,v 1.14 2013/07/07 00:30:13 svnexp Exp $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"

/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr);
static void oce_init(void *xsc);
#if 0 /* XXX swildner: MULTIQUEUE */
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);
#endif

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static void oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
#if 0 /* XXX swildner: MULTIQUEUE */
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);
#endif

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
#if 0 /* XXX swildner: ETHER_VTAG */
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
#endif
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
#endif
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, NULL, NULL);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
#if 0 /* XXX swildner: RSS */
uint32_t oce_enable_rss = OCE_MODCAP_RSS;
#else
uint32_t oce_enable_rss = 0;
#endif


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

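/*
 * Device probe: match the PCI vendor/device pair against the supported
 * devices table and record the ASIC family (BE2, BE3, XE201 or SH) in
 * the softc flags for later feature checks.
 */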
static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				ksprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}

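/*
 * Device attach: allocate PCI resources, bring up the hardware and
 * interrupts, create the queues, attach the network interface, hook
 * the VLAN config events and start the periodic timer. Failures
 * unwind in reverse order through the labels below.
 */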
static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init_mp(&sc->timer);
	callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);

	return 0;

vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
	oce_free_lro(sc);
ifp_free:
#endif
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_terminate(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);
	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}

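/*
 * Interface ioctl handler: media, MTU, interface flags (including
 * promiscuous mode), multicast filters, capability toggles and the
 * private firmware passthrough request (SIOCGPRIVATE_0).
 */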
static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				sc->ifp->if_flags |= IFF_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_flags &= ~IFF_RUNNING;
			ifq_clr_oactive(&ifp->if_snd);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			sc->promisc = TRUE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			sc->promisc = FALSE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

#if 0 /* XXX swildner: VLAN_HWFILTER */
		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#endif
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = caps_priv_check(cr, SYSCAP_RESTRICTEDROOT |
					 __SYSCAP_NULLCRED);
		if (rc == 0)
			rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}


static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);

}


#if 0 /* XXX swildner: MULTIQUEUE */
static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (!sc->link_status) {
		ifq_purge(&ifp->if_snd);
		return ENXIO;
	}

	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf     *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}
#endif



/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

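/*
 * Interrupt bottom half, run from the per-vector taskqueue: drain the
 * event queue, call the handler of every completion queue attached to
 * it, then re-arm the CQs and finally the EQ itself.
 */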
static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC, but don't arm the CQs yet */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all CQs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0;
#if 0 /* XXX swildner: MSI-X */
	int req_vectors = 0;

	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
#endif
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on intr we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
#if 0 /* XXX swildner: MSI-X */
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
#endif
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


void
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue(ii->tq, &ii->task);

	ii->eq->intr++;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;
	u_int irq_flags;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

#if 0 /* XXX swildner: MSI-X */
	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
#endif
		rr = 0;
	ii->irq_type = pci_alloc_1intr(sc->dev,
	    sc->flags & OCE_FLAGS_USING_MSI, &rr, &irq_flags);
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, irq_flags);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	ksprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			0,
			oce_fast_isr, ii, &ii->tag, NULL);
	return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX ||
	    sc->flags & OCE_FLAGS_USING_MSI)
		pci_release_msi(sc->dev);

}



/******************************************************************************
*			  Media callbacks functions			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}




/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

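/*
 * Enqueue one mbuf chain on the given work queue: DMA-map the packet
 * (defragmenting once on EFBIG), build the NIC header WQE plus one
 * fragment WQE per DMA segment and ring the TX doorbell. On EBUSY or
 * ENOMEM the caller keeps ownership of the mbuf; other failures free it.
 */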
static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if (oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if (!m) {
			device_printf(sc->dev, "Insertion unsuccessful\n");
			return 0;
		}

	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];
retry:
	rc = bus_dmamap_load_mbuf_defrag(wq->tag,
				     pd->map,
				     mpp, segs, OCE_MAX_TX_ELEMENTS,
				     &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy WQE required only for BE3 */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) % \
				      OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;
#if 0 /* XXX swildner: ETHER_VTAG */
		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}
#endif
		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			/* XXX assumed intent: only non-BE/SH (Lancer)
			   chips need ipcs forced on for LSO */
			if (!IS_BE(sc) && !IS_SH(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG)	{
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}


static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;

	if (ifq_is_oactive(&sc->ifp->if_snd)) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			ifq_clr_oactive(&sc->ifp->if_snd);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
		return;

#if 0 /* __FreeBSD_version >= 800000 */
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!ifq_is_empty(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
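/*
 * Prepare a TSO frame: make the mbuf writable if needed and pull the
 * Ethernet/IP(v6)/TCP headers into contiguous storage so the header
 * WQE can be built from them. Returns NULL for non-TCP frames.
 */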
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;

}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
#if 0 /* XXX swildner: MULTIQUEUE */
	int rc = 0;

	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	lwkt_serialize_enter(ifp->if_serializer);
	oce_start_locked(ifp);
	lwkt_serialize_exit(ifp->if_serializer);
#endif
}


void
oce_start_locked(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default tx queue is 0 */

	if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd)))
		return;

	if (!sc->link_status) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	do {
		m = ifq_dequeue(&sc->ifp->if_snd);
		if (m == NULL)
			break;

		rc = oce_tx(sc, &m, def_q);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops ++;
				ifq_set_oactive(&ifp->if_snd);
				ifq_prepend(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}

void
oce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	oce_start_locked(ifp);
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


#if 0 /* XXX swildner: MULTIQUEUE */
static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd))) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops ++;
				ifq_set_oactive(&ifp->if_snd);
				status = drbr_enqueue(ifp, br, next);
			}
			break;
		}
		drbr_advance(ifp, br);
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
	}

	return status;
}
#endif




/*****************************************************************************
 *			    Receive routines functions			     *
 *****************************************************************************/

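/*
 * Assemble a received frame from its RQ fragments into an mbuf chain,
 * apply the checksum-offload results reported in the completion entry
 * and pass the packet up the stack.
 */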
static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get the vlan_tag value */
	if (IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;


	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ transmit descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			 m_freem(m);
			 goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if 0 /* __FreeBSD_version >= 800000 */
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
#if 0 /* XXX swildner: ETHER_VTAG */
		/* This determines whether the vlan tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
				/* In UMC mode the pvid is generally stripped
				   by hw, but in some cases it has been seen to
				   come through anyway. So if pvid == vlan,
				   neglect the vlan.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
#endif

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued ++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif
#endif

		sc->ifp->if_input(sc->ifp, m, NULL, -1);
#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
post_done:
#endif
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ transmit descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}

}


#if 0 /* XXX swildner: ETHER_VTAG */
static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp =  cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;

}
#endif


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id =  cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else {
		;/* For BE3 legacy and Lancer this check is a no-op */
	}

	return 1;

}

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif
#endif

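/*
 * Post up to "count" receive buffers: allocate an mbuf cluster per
 * descriptor, DMA-map it, fill in a receive queue entry and notify
 * the hardware through the RX doorbell in batches of at most
 * OCE_MAX_RQ_POSTS.
 */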
int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_segment(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, 1,
					     &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}


/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			/* Post L3/L4 errors to stack. */
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
#if 0 /* XXX swildner: LRO */
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;

}




/*****************************************************************************
 *			Helper functions in this file			     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if 0 /* XXX swildner: MULTIQUEUE */
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_nmbclusters = sc->nrqs * sc->rq[0]->cfg.q_len;

	ifq_set_maxlen(&sc->ifp->if_snd, OCE_MAX_TX_DESC - 1);
	ifq_set_ready(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
#if 0 /* XXX swildner: VLAN_HWFILTER */
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
#endif

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
#if 0 /* XXX swildner: LRO */
	sc->ifp->if_capabilities |= IFCAP_LRO;
#endif
#if 0 /* XXX swildner: VLAN_HWTSO */
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	sc->ifp->if_baudrate = IF_Gbps(10UL);

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr, NULL);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
#if 0 /* XXX swildner: VLAN_HWFILTER */
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
#endif
	int status = 0;

#if 0 /* XXX swildner: VLAN_HWFILTER */
	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
#endif
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						NULL, 0, 1, 1);
	return status;
}

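/*
 * Propagate a changed interface MAC address to the hardware: add the
 * new address first and delete the old PMAC entry only after the add
 * has succeeded.
 */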
static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");

}

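/*
 * Backend for SIOCGPRIVATE_0: verify the userland cookie, copy the
 * mailbox request into DMA-able memory, pass it through to the
 * firmware and copy the response back out.
 */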
static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
	struct mbx_common_get_cntl_attr *fw_cmd;

	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc =  EFAULT;

	/*
	 * The firmware fills in all the attributes for this ioctl except
	 * the driver version, so fill it in here.
	 */
	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}

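/*
 * Adaptive interrupt coalescing, run from oce_local_timer(): estimate
 * each EQ's interrupt rate since the last tick and raise or lower its
 * delay multiplier to keep the rate between INTR_RATE_LWM and
 * INTR_RATE_HWM.
 */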
static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;
	int tps;

	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Overflow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta/hz;

		/* Interrupt rate based on elapsed ticks */
		if (tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there at least one eq that needs to be modified? */
	if (num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);

}

static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	lwkt_serialize_enter(sc->ifp->if_serializer);
	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
	lwkt_serialize_exit(sc->ifp->if_serializer);
}


/* NOTE : This should only be called while holding
 *        DEVICE_LOCK.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&sc->ifp->if_snd);

	/* Wait a maximum of 400ms for TX completions to be done */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes the Giant lock, we should not acquire
	   any other lock. So unlock the device lock and reacquire it after
	   taskqueue_drain completes.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to get MCC async events.
	   So enable intrs and also arm the first EQ.
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_flags |= IFF_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp);
	}

	/* Update speed */
	sc->link_speed = acqe->u0.s.speed;
	sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;

}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			}
			else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
				optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe =
				(struct oce_async_event_qnq *)cqe;
				if (dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!is_rss_enabled(sc))		  ||
	    (sc->flags & OCE_FLAGS_BE2)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}


static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}

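/*
 * Return TRUE when the frame is IPv6 and carries an extension header
 * with a length byte of 0xff; used by oce_tx_asic_stall_verify() to
 * spot frames that need the workaround below.
 */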
static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_nxt != IPPROTO_TCP) && \
				(ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}

static int
is_be3_a1(POCE_SOFTC sc)
{
	if ((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}

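/*
 * BE3 A1 TX stall workaround: embed the VLAN/PVID tag directly in the
 * frame (the ether_vlanencap() paths are currently disabled here) and
 * clear *complete when the hardware must not set the completion bit in
 * the header WQE.
 */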
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if (!M_WRITABLE(m))
		return NULL;

#if 0 /* XXX swildner: ETHER_VTAG */
	/* Embed vlan tag in the packet if it is not part of it */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}
#endif

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

#if 0 /* XXX swildner: ETHER_VTAG */
	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
	}

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		*complete = FALSE;
	}
#endif
	return m;
}

static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
			oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}

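/*
 * Query the firmware for the function/profile configuration and derive
 * the number of work and receive queues, falling back to the driver
 * defaults when the query fails.
 */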
static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_func_config(sc);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else {
		rc = oce_get_profile_config(sc);
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		if (rc)
			sc->nwqs = OCE_MAX_WQ;
	}
}