/*	$OpenBSD: if_oce.c,v 1.104 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2012 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ocereg.h>

#ifndef TRUE
#define TRUE			1
#endif
#ifndef FALSE
#define FALSE			0
#endif

#define OCE_MBX_TIMEOUT		5

#define OCE_MAX_PAYLOAD		65536

#define OCE_TX_RING_SIZE	512
#define OCE_RX_RING_SIZE	1024
/* These should be powers of 2, e.g. 2, 4, 8 or 16 */
#define OCE_MAX_RSS		4 /* TODO: 8 */
#define OCE_MAX_RQ		(OCE_MAX_RSS + 1) /* one default queue */
#define OCE_MAX_WQ		8

#define OCE_MAX_EQ		32
#define OCE_MAX_CQ		(OCE_MAX_RQ + OCE_MAX_WQ + 1) /* one MCC queue */
#define OCE_MAX_CQ_EQ		8 /* Max CQs that can be attached to an EQ */

#define OCE_DEFAULT_EQD		80

#define OCE_MIN_MTU		256
#define OCE_MAX_MTU		9000

#define OCE_MAX_RQ_COMPL	64
#define OCE_MAX_RQ_POSTS	255
#define OCE_RX_BUF_SIZE		2048

#define OCE_MAX_TX_ELEMENTS	29
#define OCE_MAX_TX_DESC		1024
#define OCE_MAX_TX_SIZE		65535

#define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
#define OCE_MEM_DVA(_m)		((_m)->paddr)

/* Iterate over the active queues without reading one slot past the end */
#define OCE_WQ_FOREACH(sc, wq, i)	\
	for ((i) = 0; (i) < (sc)->sc_nwq && ((wq) = (sc)->sc_wq[(i)], 1); (i)++)
#define OCE_RQ_FOREACH(sc, rq, i)	\
	for ((i) = 0; (i) < (sc)->sc_nrq && ((rq) = (sc)->sc_rq[(i)], 1); (i)++)
#define OCE_EQ_FOREACH(sc, eq, i)	\
	for ((i) = 0; (i) < (sc)->sc_neq && ((eq) = (sc)->sc_eq[(i)], 1); (i)++)
#define OCE_CQ_FOREACH(sc, cq, i)	\
	for ((i) = 0; (i) < (sc)->sc_ncq && ((cq) = (sc)->sc_cq[(i)], 1); (i)++)
#define OCE_RING_FOREACH(_r, _v, _c)	\
	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
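
/*
 * Illustrative sketch (not part of the driver): the completion
 * handlers below drain a ring with OCE_RING_FOREACH, consuming
 * entries while the caller-supplied validity test holds, e.g.:
 *
 *	struct oce_nic_tx_cqe *cqe;
 *	int ncqe = 0;
 *
 *	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
 *		WQ_CQE_INVALIDATE(cqe);
 *		ncqe++;
 *	}
 */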

/*
 * Integer floor(log2(v)), e.g. ilog2(1024) == 10; used when queue
 * and ring sizes are encoded for the firmware.
 */
static inline int
ilog2(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return (r);
}

struct oce_pkt {
	struct mbuf *		mbuf;
	bus_dmamap_t		map;
	int			nsegs;
	SIMPLEQ_ENTRY(oce_pkt)	entry;
};
SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
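
/*
 * Each queue owns a pool of oce_pkt descriptors that cycle between
 * its pkt_free list and its in-flight pkt_list via oce_pkt_get()
 * and oce_pkt_put().
 */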

struct oce_dma_mem {
	bus_dma_tag_t		tag;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs;
	int			nsegs;
	bus_size_t		size;
	caddr_t			vaddr;
	bus_addr_t		paddr;
};

struct oce_ring {
	int			index;
	int			nitems;
	int			nused;
	int			isize;
	struct oce_dma_mem	dma;
};

struct oce_softc;

enum cq_len {
	CQ_LEN_256  = 256,
	CQ_LEN_512  = 512,
	CQ_LEN_1024 = 1024
};

enum eq_len {
	EQ_LEN_256  = 256,
	EQ_LEN_512  = 512,
	EQ_LEN_1024 = 1024,
	EQ_LEN_2048 = 2048,
	EQ_LEN_4096 = 4096
};

enum eqe_size {
	EQE_SIZE_4  = 4,
	EQE_SIZE_16 = 16
};

enum qtype {
	QTYPE_EQ,
	QTYPE_MQ,
	QTYPE_WQ,
	QTYPE_RQ,
	QTYPE_CQ,
	QTYPE_RSS
};

struct oce_eq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
	int			cq_valid;

	int			nitems;
	int			isize;
	int			delay;
};

struct oce_cq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_eq *		eq;

	void			(*cq_intr)(void *);
	void *			cb_arg;

	int			nitems;
	int			nodelay;
	int			eventable;
	int			ncoalesce;
};

struct oce_mq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	int			nitems;
};

struct oce_wq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	int			nitems;
};

struct oce_rq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct if_rxring	rxring;
	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	uint32_t		rss_cpuid;

#ifdef OCE_LRO
	struct lro_ctrl		lro;
	int			lro_pkts_queued;
#endif

	int			nitems;
	int			fragsize;
	int			mtu;
	int			rss;
};

struct oce_softc {
	struct device		sc_dev;

	uint			sc_flags;
#define  OCE_F_BE2		 0x00000001
#define  OCE_F_BE3		 0x00000002
#define  OCE_F_XE201		 0x00000008
#define  OCE_F_BE3_NATIVE	 0x00000100
#define  OCE_F_RESET_RQD	 0x00001000
#define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000

	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_cfg_iot;
	bus_space_handle_t	sc_cfg_ioh;
	bus_size_t		sc_cfg_size;

	bus_space_tag_t		sc_csr_iot;
	bus_space_handle_t	sc_csr_ioh;
	bus_size_t		sc_csr_size;

	bus_space_tag_t		sc_db_iot;
	bus_space_handle_t	sc_db_ioh;
	bus_size_t		sc_db_size;

	void *			sc_ih;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	ushort			sc_link_up;
	ushort			sc_link_speed;
	uint64_t		sc_fc;

	struct oce_dma_mem	sc_mbx;
	struct oce_dma_mem	sc_pld;

	uint			sc_port;
	uint			sc_fmode;

	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
	struct oce_mq *		sc_mq;			/* Mailbox queue */

	ushort			sc_neq;
	ushort			sc_ncq;
	ushort			sc_nrq;
	ushort			sc_nwq;
	ushort			sc_nintr;

	ushort			sc_tx_ring_size;
	ushort			sc_rx_ring_size;
	ushort			sc_rss_enable;

	uint32_t		sc_if_id;	/* interface ID */
	uint32_t		sc_pmac_id;	/* PMAC id */
	char			sc_macaddr[ETHER_ADDR_LEN];

	uint32_t		sc_pvid;

	uint64_t		sc_rx_errors;
	uint64_t		sc_tx_errors;

	struct timeout		sc_tick;
	struct timeout		sc_rxrefill;

	void *			sc_statcmd;
};

#define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
#define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)

#define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
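
/*
 * Example (mirroring the fragment WQE setup in oce_encap() below):
 * a 64-bit DMA address is split into the two 32-bit words the
 * hardware expects:
 *
 *	nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
 *	nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
 */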

#define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)

int 	oce_match(struct device *, void *, void *);
void	oce_attach(struct device *, struct device *, void *);
int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
void	oce_attachhook(struct device *);
void	oce_attach_ifp(struct oce_softc *);
int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
void	oce_iff(struct oce_softc *);
void	oce_link_status(struct oce_softc *);
void	oce_media_status(struct ifnet *, struct ifmediareq *);
int 	oce_media_change(struct ifnet *);
void	oce_tick(void *);
void	oce_init(void *);
void	oce_stop(struct oce_softc *);
void	oce_watchdog(struct ifnet *);
void	oce_start(struct ifnet *);
int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
#ifdef OCE_TSO
struct mbuf *
	oce_tso(struct oce_softc *, struct mbuf **);
#endif
int 	oce_intr(void *);
void	oce_intr_wq(void *);
void	oce_txeof(struct oce_wq *);
void	oce_intr_rq(void *);
void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
#ifdef OCE_LRO
void	oce_flush_lro(struct oce_rq *);
int 	oce_init_lro(struct oce_softc *);
void	oce_free_lro(struct oce_softc *);
#endif
int	oce_get_buf(struct oce_rq *);
int	oce_alloc_rx_bufs(struct oce_rq *);
void	oce_refill_rx(void *);
void	oce_free_posted_rxbuf(struct oce_rq *);
void	oce_intr_mq(void *);
void	oce_link_event(struct oce_softc *,
	    struct oce_async_cqe_link_state *);

int 	oce_init_queues(struct oce_softc *);
void	oce_release_queues(struct oce_softc *);
struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
void	oce_drain_wq(struct oce_wq *);
void	oce_destroy_wq(struct oce_wq *);
struct oce_rq *
	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
void	oce_drain_rq(struct oce_rq *);
void	oce_destroy_rq(struct oce_rq *);
struct oce_eq *
	oce_create_eq(struct oce_softc *);
static inline void
	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
void	oce_drain_eq(struct oce_eq *);
void	oce_destroy_eq(struct oce_eq *);
struct oce_mq *
	oce_create_mq(struct oce_softc *, struct oce_eq *);
void	oce_drain_mq(struct oce_mq *);
void	oce_destroy_mq(struct oce_mq *);
struct oce_cq *
	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
	    int isize, int eventable, int nodelay, int ncoalesce);
static inline void
	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
void	oce_destroy_cq(struct oce_cq *);

int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
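
/*
 * Sync the whole DMA map backing a descriptor ring or the bootstrap
 * mailbox; callers pass the BUS_DMASYNC_{PRE,POST}{READ,WRITE} flags.
 */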
#define	oce_dma_sync(d, f) \
	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)

struct oce_ring *
	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
int	oce_load_ring(struct oce_softc *, struct oce_ring *,
	    struct oce_pa *, int max_segs);
static inline void *
	oce_ring_get(struct oce_ring *);
static inline void *
	oce_ring_first(struct oce_ring *);
static inline void *
	oce_ring_next(struct oce_ring *);
struct oce_pkt *
	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
	    int maxsegsz);
void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
static inline struct oce_pkt *
	oce_pkt_get(struct oce_pkt_list *);
static inline void
	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);

int	oce_init_fw(struct oce_softc *);
int	oce_mbox_init(struct oce_softc *);
int	oce_mbox_dispatch(struct oce_softc *);
int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
	    void *payload, int length);
void	oce_first_mcc(struct oce_softc *);

int	oce_get_fw_config(struct oce_softc *);
int	oce_check_native_mode(struct oce_softc *);
int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
	    int nvtags, int untagged, int promisc);
int	oce_set_flow_control(struct oce_softc *, uint64_t);
int	oce_config_rss(struct oce_softc *, int enable);
int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
	    int naddr);
int	oce_set_promisc(struct oce_softc *, int enable);
int	oce_get_link_status(struct oce_softc *);

void	oce_macaddr_set(struct oce_softc *);
int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);

int	oce_new_rq(struct oce_softc *, struct oce_rq *);
int	oce_new_wq(struct oce_softc *, struct oce_wq *);
int	oce_new_mq(struct oce_softc *, struct oce_mq *);
int	oce_new_eq(struct oce_softc *, struct oce_eq *);
int	oce_new_cq(struct oce_softc *, struct oce_cq *);

int	oce_init_stats(struct oce_softc *);
int	oce_update_stats(struct oce_softc *);
int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);

struct pool *oce_pkt_pool;

struct cfdriver oce_cd = {
	NULL, "oce", DV_IFNET
};

struct cfattach oce_ca = {
	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
};

const struct pci_matchid oce_devices[] = {
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
};

int
oce_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
}

void
oce_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct oce_softc *sc = (struct oce_softc *)self;
	const char *intrstr = NULL;
	pci_intr_handle_t ih;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SERVERENGINES_BE2:
	case PCI_PRODUCT_SERVERENGINES_OCBE2:
		SET(sc->sc_flags, OCE_F_BE2);
		break;
	case PCI_PRODUCT_SERVERENGINES_BE3:
	case PCI_PRODUCT_SERVERENGINES_OCBE3:
		SET(sc->sc_flags, OCE_F_BE3);
		break;
	case PCI_PRODUCT_EMULEX_XE201:
		SET(sc->sc_flags, OCE_F_XE201);
		break;
	}

	sc->sc_dmat = pa->pa_dmat;
	if (oce_pci_alloc(sc, pa))
		return;

	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;

	/* create the bootstrap mailbox */
	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
		printf(": failed to allocate mailbox memory\n");
		return;
	}
	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
		printf(": failed to allocate payload memory\n");
		goto fail_1;
	}

	if (oce_init_fw(sc))
		goto fail_2;

	if (oce_mbox_init(sc)) {
		printf(": failed to initialize mailbox\n");
		goto fail_2;
	}

	if (oce_get_fw_config(sc)) {
		printf(": failed to get firmware configuration\n");
		goto fail_2;
	}

	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
		if (oce_check_native_mode(sc))
			goto fail_2;
	}

	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
		printf(": failed to fetch MAC address\n");
		goto fail_2;
	}
	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);

	if (oce_pkt_pool == NULL) {
		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (oce_pkt_pool == NULL) {
			printf(": unable to allocate descriptor pool\n");
			goto fail_2;
		}
		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
		    0, "ocepkts", NULL);
	}

	/* We allocate a single interrupt resource */
	sc->sc_nintr = 1;
	if (pci_intr_map_msi(pa, &ih) != 0 &&
	    pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto fail_2;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt\n");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_2;
	}
	printf(": %s", intrstr);

	if (oce_init_stats(sc))
		goto fail_3;

	if (oce_init_queues(sc))
		goto fail_3;

	oce_attach_ifp(sc);

#ifdef OCE_LRO
	if (oce_init_lro(sc))
		goto fail_4;
#endif

	timeout_set(&sc->sc_tick, oce_tick, sc);
	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);

	config_mountroot(self, oce_attachhook);

	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	return;

#ifdef OCE_LRO
fail_4:
	oce_free_lro(sc);
	ether_ifdetach(&sc->sc_ac.ac_if);
	if_detach(&sc->sc_ac.ac_if);
	oce_release_queues(sc);
#endif
fail_3:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail_2:
	oce_dma_free(sc, &sc->sc_pld);
fail_1:
	oce_dma_free(sc, &sc->sc_mbx);
}

int
oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype, reg;

	/* setup the device config region */
	if (ISSET(sc->sc_flags, OCE_F_BE2))
		reg = OCE_BAR_CFG_BE2;
	else
		reg = OCE_BAR_CFG;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
	    IS_BE(sc) ? 0 : 32768)) {
		printf(": can't find cfg mem space\n");
		return (ENXIO);
	}

	/*
	 * Read the SLI_INTF register and determine whether we
	 * can use this port and its features
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
		printf(": invalid signature\n");
		goto fail_1;
	}
	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
		printf(": unsupported SLI revision\n");
		goto fail_1;
	}
	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
		SET(sc->sc_flags, OCE_F_RESET_RQD);

	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
	if (IS_BE(sc)) {
		/* set up CSR region */
		reg = OCE_BAR_CSR;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
			printf(": can't find csr mem space\n");
			goto fail_1;
		}

		/* set up DB doorbell region */
		reg = OCE_BAR_DB;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
			printf(": can't find db mem space\n");
			goto fail_2;
		}
	} else {
		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
	}

	return (0);

fail_2:
	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
fail_1:
	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
	return (ENXIO);
}
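
/*
 * Register access helpers: every read is preceded and every write
 * followed by a bus space barrier, so config, CSR and doorbell
 * accesses are not reordered around one another.
 */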

static inline uint32_t
oce_read_cfg(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
}

static inline uint32_t
oce_read_csr(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
}

static inline uint32_t
oce_read_db(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
}

static inline void
oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_intr_enable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
}

static inline void
oce_intr_disable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
}

void
oce_attachhook(struct device *self)
{
	struct oce_softc *sc = (struct oce_softc *)self;

	oce_get_link_status(sc);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	/*
	 * We need to receive MCC async events, so enable interrupts and
	 * arm the first EQ; the remaining EQs are armed once the
	 * interface is up.
	 */
	oce_intr_enable(sc);
	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);

	/*
	 * Send the first MCC command; after that the firmware delivers
	 * MCC notifications on its own.
	 */
	oce_first_mcc(sc);
}

void
oce_attach_ifp(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
	    oce_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = oce_ioctl;
	ifp->if_start = oce_start;
	ifp->if_watchdog = oce_watchdog;
	ifp->if_hardmtu = OCE_MAX_MTU;
	ifp->if_softc = sc;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_size - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef OCE_TSO
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
#ifdef OCE_LRO
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct oce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			oce_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				oce_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				oce_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	case SIOCGIFRXR:
		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			oce_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr, ifr1;
	struct oce_rq *rq;
	int error, i;
	u_int n = 0;

	if (sc->sc_nrq > 1) {
		if ((ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
		    M_WAITOK | M_ZERO)) == NULL)
			return (ENOMEM);
	} else
		ifr = &ifr1;

	OCE_RQ_FOREACH(sc, rq, i) {
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
		ifr[n].ifr_info = rq->rxring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);

	if (sc->sc_nrq > 1)
		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
	return (error);
}

void
oce_iff(struct oce_softc *sc)
{
	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int naddr = 0, promisc = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
		ifp->if_flags |= IFF_ALLMULTI;
		promisc = 1;
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
		}
		oce_update_mcast(sc, multi, naddr);
	}

	oce_set_promisc(sc, promisc);
}

void
oce_link_status(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state = LINK_STATE_DOWN;

	ifp->if_baudrate = 0;
	if (sc->sc_link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->sc_link_speed) {
		case 1:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case 2:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case 3:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case 4:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		}
	}
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct oce_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	if (!sc->sc_link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->sc_link_speed) {
	case 1: /* 10 Mbps */
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	case 2: /* 100 Mbps */
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case 3: /* 1 Gbps */
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case 4: /* 10 Gbps */
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		break;
	}

	if (sc->sc_fc & IFM_ETH_RXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (sc->sc_fc & IFM_ETH_TXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}

int
oce_media_change(struct ifnet *ifp)
{
	return (0);
}

void
oce_tick(void *arg)
{
	struct oce_softc *sc = arg;
	int s;

	s = splnet();

	if (oce_update_stats(sc) == 0)
		timeout_add_sec(&sc->sc_tick, 1);

	splx(s);
}

void
oce_init(void *arg)
{
	struct oce_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i;

	oce_stop(sc);

	DELAY(10);

	oce_macaddr_set(sc);

	oce_iff(sc);

	/* Enable VLAN promiscuous mode */
	if (oce_config_vlan(sc, NULL, 0, 1, 1))
		goto error;

	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
		goto error;

	OCE_RQ_FOREACH(sc, rq, i) {
		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN;
		if (oce_new_rq(sc, rq)) {
			printf("%s: failed to create rq\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
		rq->ring->index = 0;

		/* oce splits jumbos into 2k chunks... */
		if_rxr_init(&rq->rxring, 8, rq->nitems);

		if (!oce_alloc_rx_bufs(rq)) {
			printf("%s: failed to allocate rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}
#ifdef OCE_RSS
	/* RSS config */
	if (sc->sc_rss_enable) {
		if (oce_config_rss(sc, 1)) {
			printf("%s: failed to configure RSS\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}
#endif

	OCE_RQ_FOREACH(sc, rq, i)
		oce_arm_cq(rq->cq, 0, TRUE);

	OCE_WQ_FOREACH(sc, wq, i)
		oce_arm_cq(wq->cq, 0, TRUE);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	OCE_EQ_FOREACH(sc, eq, i)
		oce_arm_eq(eq, 0, TRUE, FALSE);

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	oce_intr_enable(sc);

	return;
error:
	oce_stop(sc);
}

void
oce_stop(struct oce_softc *sc)
{
	struct mbx_delete_nic_rq cmd;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;
	int i;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_rxrefill);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Stop interrupts and finish any pending bottom halves */
	oce_intr_disable(sc);

	/* Invalidate any pending cq and eq entries */
	OCE_EQ_FOREACH(sc, eq, i)
		oce_drain_eq(eq);
	OCE_RQ_FOREACH(sc, rq, i) {
		/* destroy the receive queue in the firmware */
		memset(&cmd, 0, sizeof(cmd));
		cmd.params.req.rq_id = htole16(rq->id);
		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
		DELAY(1000);
		oce_drain_rq(rq);
		oce_free_posted_rxbuf(rq);
	}
	OCE_WQ_FOREACH(sc, wq, i)
		oce_drain_wq(wq);
}

void
oce_watchdog(struct ifnet *ifp)
{
	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);

	oce_init(ifp->if_softc);

	ifp->if_oerrors++;
}

void
oce_start(struct ifnet *ifp)
{
	struct oce_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int pkts = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (oce_encap(sc, &m, 0)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}

	/* Set a timeout in case the chip goes out to lunch */
	if (pkts)
		ifp->if_timer = 5;
}

int
oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
{
	struct mbuf *m = *mpp;
	struct oce_wq *wq = sc->sc_wq[wqidx];
	struct oce_pkt *pkt = NULL;
	struct oce_nic_hdr_wqe *nhe;
	struct oce_nic_frag_wqe *nfe;
	int i, nwqe, err;

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
		m = oce_tso(sc, mpp);
		if (m == NULL)
			goto error;
	}
#endif

	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
		goto error;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		if (m_defrag(m, M_DONTWAIT) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
			BUS_DMA_NOWAIT))
			goto error;
		*mpp = m;
	} else if (err != 0)
		goto error;

	pkt->nsegs = pkt->map->dm_nsegs;

	nwqe = pkt->nsegs + 1;
	if (IS_BE(sc)) {
		/* BE2 and BE3 require even number of WQEs */
		if (nwqe & 1)
			nwqe++;
	}

	/* Fail if there aren't enough free WQEs */
	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		goto error;
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	pkt->mbuf = m;

	/* TX work queue entry for the header */
	nhe = oce_ring_get(wq->ring);
	memset(nhe, 0, sizeof(*nhe));

	nhe->u0.s.complete = 1;
	nhe->u0.s.event = 1;
	nhe->u0.s.crc = 1;
	nhe->u0.s.forward = 0;
	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.num_wqe = nwqe;
	nhe->u0.s.total_length = m->m_pkthdr.len;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG) {
		nhe->u0.s.vlan = 1; /* Vlan present */
		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (m->m_pkthdr.tso_segsz) {
			nhe->u0.s.lso = 1;
			nhe->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
		}
		if (!IS_BE(sc))
			nhe->u0.s.ipcs = 1;
	}
#endif

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	wq->ring->nused++;

	/* TX work queue entries for data chunks */
	for (i = 0; i < pkt->nsegs; i++) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
		wq->ring->nused++;
	}
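	/*
	 * BE2/BE3 want an even number of WQEs: post a zeroed dummy
	 * fragment WQE and count it in pkt->nsegs so that oce_txeof()
	 * releases the correct number of ring slots.
	 */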
	if (nwqe > (pkt->nsegs + 1)) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		wq->ring->nused++;
		pkt->nsegs++;
	}

	oce_pkt_put(&wq->pkt_list, pkt);

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));

	return (0);

error:
	if (pkt)
		oce_pkt_put(&wq->pkt_free, pkt);
	m_freem(*mpp);
	*mpp = NULL;
	return (1);
}

#ifdef OCE_TSO
struct mbuf *
oce_tso(struct oce_softc *sc, struct mbuf **mpp)
{
	struct mbuf *m;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_DONTWAIT);
		if (!m)
			return (NULL);
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) +
		    (th->th_off << 2);
		break;
#endif
	default:
		return (NULL);
	}

	m = m_pullup(m, total_len);
	if (!m)
		return (NULL);
	*mpp = m;
	return (m);
}
#endif /* OCE_TSO */

int
oce_intr(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_eq *eq = sc->sc_eq[0];
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, neqe = 0;

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
		eqe->evnt = 0;
		neqe++;
	}

	/* Spurious? */
	if (!neqe) {
		oce_arm_eq(eq, 0, TRUE, FALSE);
		return (0);
	}

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(eq, neqe, FALSE, TRUE);

	/* Process TX, RX and MCC completion queues */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_intr)(cq->cb_arg);
		oce_arm_cq(cq, 0, TRUE);
	}

	oce_arm_eq(eq, 0, TRUE, FALSE);
	return (1);
}

/* Handle the Completion Queue for transmit */
void
oce_intr_wq(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	struct oce_softc *sc = wq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
		oce_txeof(wq);
		WQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}
	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

	if (ifq_is_oactive(&ifp->if_snd)) {
		if (wq->ring->nused < (wq->ring->nitems / 2)) {
			ifq_clr_oactive(&ifp->if_snd);
			oce_start(ifp);
		}
	}
	if (wq->ring->nused == 0)
		ifp->if_timer = 0;

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_txeof(struct oce_wq *wq)
{
	struct oce_softc *sc = wq->sc;
	struct oce_pkt *pkt;
	struct mbuf *m;

	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
		printf("%s: missing descriptor in txeof\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	wq->ring->nused -= pkt->nsegs + 1;
	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkt->map);

	m = pkt->mbuf;
	m_freem(m);
	pkt->mbuf = NULL;
	oce_pkt_put(&wq->pkt_free, pkt);
}

/* Handle the Completion Queue for receive */
void
oce_intr_rq(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	struct oce_softc *sc = rq->sc;
	struct oce_nic_rx_cqe *cqe;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int maxrx, ncqe = 0;

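	/* Cap the number of RX completions handled in one pass */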
	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
		if (cqe->u0.s.error == 0) {
			if (cqe->u0.s.pkt_size == 0)
				/* partial DMA workaround for Lancer */
				oce_rxeoc(rq, cqe);
			else
				oce_rxeof(rq, cqe);
		} else {
			ifp->if_ierrors++;
			if (IS_XE201(sc))
				/* Lancer A0 no buffer workaround */
				oce_rxeoc(rq, cqe);
			else
				/* Post L3/L4 errors to the stack. */
				oce_rxeof(rq, cqe);
		}
#ifdef OCE_LRO
		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
			oce_flush_lro(rq);
#endif
		RQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

#ifdef OCE_LRO
	if (IF_LRO_ENABLED(ifp))
		oce_flush_lro(rq);
#endif

	if (ncqe) {
		oce_arm_cq(cq, ncqe, FALSE);
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 1);
	}
}

void
oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt = NULL;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m = NULL, *tail = NULL;
	int i, len, frag_len;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;

	/* Get vlan_tag value */
	if (IS_BE(sc))
		vtag = ntohs(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeof\n",
			    sc->sc_dev.dv_xname);
			goto exit;
		}

		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);

		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
		pkt->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pkt->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pkt->mbuf;
			tail = pkt->mbuf;
		} else {
			/* first fragment, fill out most of the header */
			pkt->mbuf->m_pkthdr.len = len;
			pkt->mbuf->m_pkthdr.csum_flags = 0;
			if (cqe->u0.s.ip_cksum_pass) {
				if (!cqe->u0.s.ip_ver) { /* IPV4 */
					pkt->mbuf->m_pkthdr.csum_flags =
					    M_IPV4_CSUM_IN_OK;
				}
			}
			if (cqe->u0.s.l4_cksum_pass) {
				pkt->mbuf->m_pkthdr.csum_flags |=
				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
			}
			m = tail = pkt->mbuf;
		}
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		len -= frag_len;
	}

	if (m) {
		if (!oce_port_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

#if NVLAN > 0
		/* This determines if vlan tag is valid */
		if (oce_vtp_valid(sc, cqe)) {
			if (sc->sc_fmode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK)) {
				/*
				 * In UMC mode the pvid is normally stripped,
				 * but frames have been seen to arrive with it
				 * anyway.  If pvid == vlan, ignore the tag.
				 */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
#endif

#ifdef OCE_LRO
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto exit;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		ml_enqueue(&ml, m);
	}
exit:
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&rq->rxring);
}

void
oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	int i, num_frags = cqe->u0.s.num_fragments;

	if (IS_XE201(sc) && cqe->u0.s.error) {
		/*
		 * Lancer A0 workaround:
		 * num_frags will be 1 more than actual in case of error
		 */
		if (num_frags)
			num_frags--;
	}
	for (i = 0; i < num_frags; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeoc\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);
		m_freem(pkt->mbuf);
		oce_pkt_put(&rq->pkt_free, pkt);
	}
}

int
oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		return (cqe_v1->u0.s.vlan_tag_present);
	}
	return (cqe->u0.s.vlan_tag_present);
}

int
oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		if (sc->sc_port != cqe_v1->u0.s.port)
			return (0);
	}
	return (1);
}

#ifdef OCE_LRO
void
oce_flush_lro(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;

	if (!IF_LRO_ENABLED(ifp))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;
}

int
oce_init_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			printf("%s: LRO init failed\n",
			    sc->sc_dev.dv_xname);
			return (rc);
		}
		lro->ifp = &sc->sc_ac.ac_if;
	}

	return (rc);
}

void
oce_free_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif /* OCE_LRO */

int
oce_get_buf(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	struct oce_nic_rqe *rqe;

	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
		return (0);

	pkt->mbuf = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (pkt->mbuf == NULL) {
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
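	/*
	 * On strict-alignment architectures shift the payload by
	 * ETHER_ALIGN (2 bytes) so the IP header ends up 4-byte aligned.
	 */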
#ifdef __STRICT_ALIGNMENT
	m_adj(pkt->mbuf, ETHER_ALIGN);
#endif

	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
	    BUS_DMA_NOWAIT)) {
		m_freem(pkt->mbuf);
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	rqe = oce_ring_get(rq->ring);
	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	oce_pkt_put(&rq->pkt_list, pkt);

	return (1);
}

int
oce_alloc_rx_bufs(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	int i, nbufs = 0;
	u_int slots;

	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
		if (oce_get_buf(rq) == 0)
			break;

		nbufs++;
	}
	if_rxr_put(&rq->rxring, slots);

	if (!nbufs)
		return (0);
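	/*
	 * The RXULP doorbell carries the number of newly posted
	 * fragments in its top byte, so post at most OCE_MAX_RQ_POSTS
	 * per write.
	 */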
1818 	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1819 		oce_write_db(sc, PD_RXULP_DB, rq->id |
1820 		    (OCE_MAX_RQ_POSTS << 24));
1821 		nbufs -= OCE_MAX_RQ_POSTS;
1822 	}
1823 	if (nbufs > 0)
1824 		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1825 	return (1);
1826 }
1827 
1828 void
1829 oce_refill_rx(void *arg)
1830 {
1831 	struct oce_softc *sc = arg;
1832 	struct oce_rq *rq;
1833 	int i, s;
1834 
1835 	s = splnet();
1836 	OCE_RQ_FOREACH(sc, rq, i) {
1837 		if (!oce_alloc_rx_bufs(rq))
1838 			timeout_add(&sc->sc_rxrefill, 5);
1839 	}
1840 	splx(s);
1841 }
1842 
1843 /* Handle the Completion Queue for the Mailbox/Async notifications */
1844 void
1845 oce_intr_mq(void *arg)
1846 {
1847 	struct oce_mq *mq = (struct oce_mq *)arg;
1848 	struct oce_softc *sc = mq->sc;
1849 	struct oce_cq *cq = mq->cq;
1850 	struct oce_mq_cqe *cqe;
1851 	struct oce_async_cqe_link_state *acqe;
1852 	struct oce_async_event_grp5_pvid_state *gcqe;
1853 	int evtype, optype, ncqe = 0;
1854 
1855 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1856 
1857 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1858 		if (cqe->u0.s.async_event) {
1859 			evtype = cqe->u0.s.event_type;
1860 			optype = cqe->u0.s.async_type;
1861 			if (evtype  == ASYNC_EVENT_CODE_LINK_STATE) {
1862 				/* Link status evt */
1863 				acqe = (struct oce_async_cqe_link_state *)cqe;
1864 				oce_link_event(sc, acqe);
1865 			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1866 				   (optype == ASYNC_EVENT_PVID_STATE)) {
1867 				/* GRP5 PVID */
1868 				gcqe =
1869 				(struct oce_async_event_grp5_pvid_state *)cqe;
1870 				if (gcqe->enabled)
1871 					sc->sc_pvid =
1872 					    gcqe->tag & VLAN_VID_MASK;
1873 				else
1874 					sc->sc_pvid = 0;
1875 			}
1876 		}
1877 		MQ_CQE_INVALIDATE(cqe);
1878 		ncqe++;
1879 	}
1880 
1881 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1882 
1883 	if (ncqe)
1884 		oce_arm_cq(cq, ncqe, FALSE);
1885 }
1886 
1887 void
1888 oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1889 {
1890 	/* Update Link status */
1891 	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1892 	    ASYNC_EVENT_LINK_UP);
1893 	/* Update speed */
1894 	sc->sc_link_speed = acqe->u0.s.speed;
1895 	oce_link_status(sc);
1896 }
1897 
1898 int
1899 oce_init_queues(struct oce_softc *sc)
1900 {
1901 	struct oce_wq *wq;
1902 	struct oce_rq *rq;
1903 	int i;
1904 
1905 	sc->sc_nrq = 1;
1906 	sc->sc_nwq = 1;
1907 
1908 	/* Create network interface on card */
1909 	if (oce_create_iface(sc, sc->sc_macaddr))
1910 		goto error;
1911 
1912 	/* create all of the event queues */
1913 	for (i = 0; i < sc->sc_nintr; i++) {
1914 		sc->sc_eq[i] = oce_create_eq(sc);
1915 		if (!sc->sc_eq[i])
1916 			goto error;
1917 	}
1918 
1919 	/* alloc tx queues */
1920 	OCE_WQ_FOREACH(sc, wq, i) {
1921 		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1922 		if (!sc->sc_wq[i])
1923 			goto error;
1924 	}
1925 
1926 	/* alloc rx queues */
1927 	OCE_RQ_FOREACH(sc, rq, i) {
1928 		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1929 		    i > 0 ? sc->sc_rss_enable : 0);
1930 		if (!sc->sc_rq[i])
1931 			goto error;
1932 	}
1933 
1934 	/* alloc mailbox queue */
1935 	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1936 	if (!sc->sc_mq)
1937 		goto error;
1938 
1939 	return (0);
1940 error:
1941 	oce_release_queues(sc);
1942 	return (1);
1943 }
1944 
1945 void
1946 oce_release_queues(struct oce_softc *sc)
1947 {
1948 	struct oce_wq *wq;
1949 	struct oce_rq *rq;
1950 	struct oce_eq *eq;
1951 	int i;
1952 
1953 	OCE_RQ_FOREACH(sc, rq, i) {
1954 		if (rq)
1955 			oce_destroy_rq(sc->sc_rq[i]);
1956 	}
1957 
1958 	OCE_WQ_FOREACH(sc, wq, i) {
1959 		if (wq)
1960 			oce_destroy_wq(sc->sc_wq[i]);
1961 	}
1962 
1963 	if (sc->sc_mq)
1964 		oce_destroy_mq(sc->sc_mq);
1965 
1966 	OCE_EQ_FOREACH(sc, eq, i) {
1967 		if (eq)
1968 			oce_destroy_eq(sc->sc_eq[i]);
1969 	}
1970 }
1971 
1972 /**
1973  * @brief 		Function to create a WQ for NIC Tx
1974  * @param sc 		software handle to the device
1975  * @returns		the pointer to the WQ created or NULL on failure
1976  */
1977 struct oce_wq *
1978 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1979 {
1980 	struct oce_wq *wq;
1981 	struct oce_cq *cq;
1982 	struct oce_pkt *pkt;
1983 	int i;
1984 
1985 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1986 		return (NULL);
1987 
1988 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1989 	if (!wq)
1990 		return (NULL);
1991 
1992 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1993 	if (!wq->ring) {
1994 		free(wq, M_DEVBUF, 0);
1995 		return (NULL);
1996 	}
1997 
1998 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
1999 	    1, 0, 3);
2000 	if (!cq) {
2001 		oce_destroy_ring(sc, wq->ring);
2002 		free(wq, M_DEVBUF, 0);
2003 		return (NULL);
2004 	}
2005 
2006 	wq->id = -1;
2007 	wq->sc = sc;
2008 
2009 	wq->cq = cq;
2010 	wq->nitems = sc->sc_tx_ring_size;
2011 
2012 	SIMPLEQ_INIT(&wq->pkt_free);
2013 	SIMPLEQ_INIT(&wq->pkt_list);
2014 
2015 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2016 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2017 		    PAGE_SIZE);
2018 		if (pkt == NULL) {
2019 			oce_destroy_wq(wq);
2020 			return (NULL);
2021 		}
2022 		oce_pkt_put(&wq->pkt_free, pkt);
2023 	}
2024 
2025 	if (oce_new_wq(sc, wq)) {
2026 		oce_destroy_wq(wq);
2027 		return (NULL);
2028 	}
2029 
2030 	eq->cq[eq->cq_valid] = cq;
2031 	eq->cq_valid++;
2032 	cq->cb_arg = wq;
2033 	cq->cq_intr = oce_intr_wq;
2034 
2035 	return (wq);
2036 }
2037 
2038 void
2039 oce_drain_wq(struct oce_wq *wq)
2040 {
2041 	struct oce_cq *cq = wq->cq;
2042 	struct oce_nic_tx_cqe *cqe;
2043 	int ncqe = 0;
2044 
2045 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2046 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2047 		WQ_CQE_INVALIDATE(cqe);
2048 		ncqe++;
2049 	}
2050 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2051 	oce_arm_cq(cq, ncqe, FALSE);
2052 }
2053 
2054 void
2055 oce_destroy_wq(struct oce_wq *wq)
2056 {
2057 	struct mbx_delete_nic_wq cmd;
2058 	struct oce_softc *sc = wq->sc;
2059 	struct oce_pkt *pkt;
2060 
2061 	if (wq->id >= 0) {
2062 		memset(&cmd, 0, sizeof(cmd));
2063 		cmd.params.req.wq_id = htole16(wq->id);
2064 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2065 		    &cmd, sizeof(cmd));
2066 	}
2067 	if (wq->cq != NULL)
2068 		oce_destroy_cq(wq->cq);
2069 	if (wq->ring != NULL)
2070 		oce_destroy_ring(sc, wq->ring);
2071 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2072 		oce_pkt_free(sc, pkt);
2073 	free(wq, M_DEVBUF, 0);
2074 }
2075 
2076 /**
2077  * @brief 		function to allocate receive queue resources
2078  * @param sc		software handle to the device
2079  * @param eq		pointer to associated event queue
2080  * @param rss		is-rss-queue flag
2081  * @returns		the pointer to the RQ created or NULL on failure
2082  */
2083 struct oce_rq *
2084 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2085 {
2086 	struct oce_rq *rq;
2087 	struct oce_cq *cq;
2088 	struct oce_pkt *pkt;
2089 	int i;
2090 
2091 	/* Hardware doesn't support any other value */
2092 	if (sc->sc_rx_ring_size != 1024)
2093 		return (NULL);
2094 
2095 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2096 	if (!rq)
2097 		return (NULL);
2098 
2099 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2100 	    sizeof(struct oce_nic_rqe), 2);
2101 	if (!rq->ring) {
2102 		free(rq, M_DEVBUF, 0);
2103 		return (NULL);
2104 	}
2105 
2106 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2107 	    1, 0, 3);
2108 	if (!cq) {
2109 		oce_destroy_ring(sc, rq->ring);
2110 		free(rq, M_DEVBUF, 0);
2111 		return (NULL);
2112 	}
2113 
2114 	rq->id = -1;
2115 	rq->sc = sc;
	rq->cq = cq;	/* wired up early so error paths can release it */
2116 
2117 	rq->nitems = sc->sc_rx_ring_size;
2118 	rq->fragsize = OCE_RX_BUF_SIZE;
2119 	rq->rss = rss;
2120 
2121 	SIMPLEQ_INIT(&rq->pkt_free);
2122 	SIMPLEQ_INIT(&rq->pkt_list);
2123 
2124 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2125 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2126 		if (pkt == NULL) {
2127 			oce_destroy_rq(rq);
2128 			return (NULL);
2129 		}
2130 		oce_pkt_put(&rq->pkt_free, pkt);
2131 	}
2132 
2134 	eq->cq[eq->cq_valid] = cq;
2135 	eq->cq_valid++;
2136 	cq->cb_arg = rq;
2137 	cq->cq_intr = oce_intr_rq;
2138 
2139 	/* RX queue is created in oce_init */
2140 
2141 	return (rq);
2142 }
2143 
2144 void
2145 oce_drain_rq(struct oce_rq *rq)
2146 {
2147 	struct oce_nic_rx_cqe *cqe;
2148 	struct oce_cq *cq = rq->cq;
2149 	int ncqe = 0;
2150 
2151 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2152 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2153 		RQ_CQE_INVALIDATE(cqe);
2154 		ncqe++;
2155 	}
2156 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2157 	oce_arm_cq(cq, ncqe, FALSE);
2158 }
2159 
2160 void
2161 oce_destroy_rq(struct oce_rq *rq)
2162 {
2163 	struct mbx_delete_nic_rq cmd;
2164 	struct oce_softc *sc = rq->sc;
2165 	struct oce_pkt *pkt;
2166 
2167 	if (rq->id >= 0) {
2168 		memset(&cmd, 0, sizeof(cmd));
2169 		cmd.params.req.rq_id = htole16(rq->id);
2170 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2171 		    &cmd, sizeof(cmd));
2172 	}
2173 	if (rq->cq != NULL)
2174 		oce_destroy_cq(rq->cq);
2175 	if (rq->ring != NULL)
2176 		oce_destroy_ring(sc, rq->ring);
2177 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2178 		oce_pkt_free(sc, pkt);
2179 	free(rq, M_DEVBUF, 0);
2180 }
2181 
2182 struct oce_eq *
2183 oce_create_eq(struct oce_softc *sc)
2184 {
2185 	struct oce_eq *eq;
2186 
2187 	/* allocate an eq */
2188 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2189 	if (eq == NULL)
2190 		return (NULL);
2191 
2192 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2193 	if (!eq->ring) {
2194 		free(eq, M_DEVBUF, 0);
2195 		return (NULL);
2196 	}
2197 
2198 	eq->id = -1;
2199 	eq->sc = sc;
2200 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2201 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2202 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2203 
2204 	if (oce_new_eq(sc, eq)) {
2205 		oce_destroy_ring(sc, eq->ring);
2206 		free(eq, M_DEVBUF, 0);
2207 		return (NULL);
2208 	}
2209 
2210 	return (eq);
2211 }
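
/*
 * Sizing note: EQ_LEN_1024 entries of EQE_SIZE_4 bytes give a 4 KB
 * event ring, a single page on 4 KB-page systems, well within the
 * 8-segment limit passed to oce_create_ring() above.
 */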
2212 
2213 /**
2214  * @brief		Function to arm an EQ so that it can generate events
2215  * @param eq		pointer to event queue structure
2216  * @param neqe		number of EQEs processed (acknowledged)
2217  * @param rearm		rearm bit enable/disable
2218  * @param clearint	set to clear the interrupt condition that caused
2219  *			the EQEs to be generated
2220  */
2221 static inline void
2222 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2223 {
2224 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2225 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2226 }
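
/*
 * PD_EQ_DB doorbell layout, as encoded above: EQ id and the
 * PD_EQ_DB_EVENT flag in the low bits, clear-interrupt at bit 9, the
 * count of acknowledged EQEs starting at bit 16 and the rearm request
 * at bit 29.
 */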
2227 
2228 void
2229 oce_drain_eq(struct oce_eq *eq)
2230 {
2231 	struct oce_eqe *eqe;
2232 	int neqe = 0;
2233 
2234 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2235 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2236 		eqe->evnt = 0;
2237 		neqe++;
2238 	}
2239 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2240 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2241 }
2242 
2243 void
2244 oce_destroy_eq(struct oce_eq *eq)
2245 {
2246 	struct mbx_destroy_common_eq cmd;
2247 	struct oce_softc *sc = eq->sc;
2248 
2249 	if (eq->id >= 0) {
2250 		memset(&cmd, 0, sizeof(cmd));
2251 		cmd.params.req.id = htole16(eq->id);
2252 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2253 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2254 	}
2255 	if (eq->ring != NULL)
2256 		oce_destroy_ring(sc, eq->ring);
2257 	free(eq, M_DEVBUF, 0);
2258 }
2259 
2260 struct oce_mq *
2261 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2262 {
2263 	struct oce_mq *mq = NULL;
2264 	struct oce_cq *cq;
2265 
2266 	/* allocate the mq */
2267 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2268 	if (!mq)
2269 		return (NULL);
2270 
2271 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2272 	if (!mq->ring) {
2273 		free(mq, M_DEVBUF, 0);
2274 		return (NULL);
2275 	}
2276 
2277 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2278 	    1, 0, 0);
2279 	if (!cq) {
2280 		oce_destroy_ring(sc, mq->ring);
2281 		free(mq, M_DEVBUF, 0);
2282 		return (NULL);
2283 	}
2284 
2285 	mq->id = -1;
2286 	mq->sc = sc;
2287 	mq->cq = cq;
2288 
2289 	mq->nitems = 128;
2290 
2291 	if (oce_new_mq(sc, mq)) {
2292 		oce_destroy_cq(mq->cq);
2293 		oce_destroy_ring(sc, mq->ring);
2294 		free(mq, M_DEVBUF, 0);
2295 		return (NULL);
2296 	}
2297 
2298 	eq->cq[eq->cq_valid] = cq;
2299 	eq->cq_valid++;
2300 	mq->cq->eq = eq;
2301 	mq->cq->cb_arg = mq;
2302 	mq->cq->cq_intr = oce_intr_mq;
2303 
2304 	return (mq);
2305 }
2306 
2307 void
2308 oce_drain_mq(struct oce_mq *mq)
2309 {
2310 	struct oce_cq *cq = mq->cq;
2311 	struct oce_mq_cqe *cqe;
2312 	int ncqe = 0;
2313 
2314 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2315 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2316 		MQ_CQE_INVALIDATE(cqe);
2317 		ncqe++;
2318 	}
2319 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2320 	oce_arm_cq(cq, ncqe, FALSE);
2321 }
2322 
2323 void
2324 oce_destroy_mq(struct oce_mq *mq)
2325 {
2326 	struct mbx_destroy_common_mq cmd;
2327 	struct oce_softc *sc = mq->sc;
2328 
2329 	if (mq->id >= 0) {
2330 		memset(&cmd, 0, sizeof(cmd));
2331 		cmd.params.req.id = htole16(mq->id);
2332 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2333 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2334 	}
2335 	if (mq->ring != NULL)
2336 		oce_destroy_ring(sc, mq->ring);
2337 	if (mq->cq != NULL)
2338 		oce_destroy_cq(mq->cq);
2339 	free(mq, M_DEVBUF, 0);
2340 }
2341 
2342 /**
2343  * @brief		Function to create a completion queue
2344  * @param sc		software handle to the device
2345  * @param eq		eq to associate the cq with
2346  * @param nitems	length of the completion queue
2347  * @param isize		size of a completion queue item
2348  * @param eventable	whether the cq raises events on its eq
2349  * @param nodelay	no-delay flag
2350  * @param ncoalesce	coalescing watermark (number of CQEs to coalesce)
2351  * @returns 		pointer to the cq created, NULL on failure
2352  */
2353 struct oce_cq *
2354 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2355     int eventable, int nodelay, int ncoalesce)
2356 {
2357 	struct oce_cq *cq = NULL;
2358 
2359 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2360 	if (!cq)
2361 		return (NULL);
2362 
2363 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2364 	if (!cq->ring) {
2365 		free(cq, M_DEVBUF, 0);
2366 		return (NULL);
2367 	}
2368 
2369 	cq->sc = sc;
2370 	cq->eq = eq;
2371 	cq->nitems = nitems;
2372 	cq->nodelay = nodelay;
2373 	cq->ncoalesce = ncoalesce;
2374 	cq->eventable = eventable;
2375 
2376 	if (oce_new_cq(sc, cq)) {
2377 		oce_destroy_ring(sc, cq->ring);
2378 		free(cq, M_DEVBUF, 0);
2379 		return (NULL);
2380 	}
2381 
2382 	sc->sc_cq[sc->sc_ncq++] = cq;
2383 
2384 	return (cq);
2385 }
2386 
2387 void
2388 oce_destroy_cq(struct oce_cq *cq)
2389 {
2390 	struct mbx_destroy_common_cq cmd;
2391 	struct oce_softc *sc = cq->sc;
2392 
2393 	if (cq->id >= 0) {
2394 		memset(&cmd, 0, sizeof(cmd));
2395 		cmd.params.req.id = htole16(cq->id);
2396 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2397 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2398 	}
2399 	if (cq->ring != NULL)
2400 		oce_destroy_ring(sc, cq->ring);
2401 	free(cq, M_DEVBUF, 0);
2402 }
2403 
2404 /**
2405  * @brief		Function to arm a CQ with CQEs
2406  * @param cq		pointer to the completion queue structure
2407  * @param ncqe		number of CQEs processed (acknowledged)
2408  * @param rearm		rearm bit enable/disable
2409  */
2410 static inline void
2411 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2412 {
2413 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2414 }
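
/*
 * PD_CQ_DB mirrors the EQ doorbell: CQ id in the low bits, the count
 * of acknowledged CQEs starting at bit 16, rearm at bit 29.  For
 * example, acknowledging 8 CQEs on CQ 5 and rearming it writes
 * 5 | (8 << 16) | (1 << 29).
 */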
2415 
2416 void
2417 oce_free_posted_rxbuf(struct oce_rq *rq)
2418 {
2419 	struct oce_softc *sc = rq->sc;
2420 	struct oce_pkt *pkt;
2421 
2422 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2423 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2424 		    BUS_DMASYNC_POSTREAD);
2425 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2426 		if (pkt->mbuf != NULL) {
2427 			m_freem(pkt->mbuf);
2428 			pkt->mbuf = NULL;
2429 		}
2430 		oce_pkt_put(&rq->pkt_free, pkt);
2431 		if_rxr_put(&rq->rxring, 1);
2432 	}
2433 }
2434 
2435 int
2436 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2437 {
2438 	int rc;
2439 
2440 	memset(dma, 0, sizeof(struct oce_dma_mem));
2441 
2442 	dma->tag = sc->sc_dmat;
2443 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2444 	    &dma->map);
2445 	if (rc != 0) {
2446 		printf("%s: failed to allocate DMA handle\n",
2447 		    sc->sc_dev.dv_xname);
2448 		goto fail_0;
2449 	}
2450 
2451 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2452 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2453 	if (rc != 0) {
2454 		printf("%s: failed to allocate DMA memory\n",
2455 		    sc->sc_dev.dv_xname);
2456 		goto fail_1;
2457 	}
2458 
2459 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2460 	    &dma->vaddr, BUS_DMA_NOWAIT);
2461 	if (rc != 0) {
2462 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2463 		goto fail_2;
2464 	}
2465 
2466 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2467 	    BUS_DMA_NOWAIT);
2468 	if (rc != 0) {
2469 		printf("%s: failed to load DMA memory\n", sc->sc_dev.dv_xname);
2470 		goto fail_3;
2471 	}
2472 
2473 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2474 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2475 
2476 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2477 	dma->size = size;
2478 
2479 	return (0);
2480 
2481 fail_3:
2482 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2483 fail_2:
2484 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2485 fail_1:
2486 	bus_dmamap_destroy(dma->tag, dma->map);
2487 fail_0:
2488 	return (rc);
2489 }
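
/*
 * A minimal usage sketch for the allocator above (OCE_MEM_KVA() and
 * OCE_MEM_DVA() are the accessors used elsewhere in this file):
 *
 *	struct oce_dma_mem mem;
 *
 *	if (oce_dma_alloc(sc, PAGE_SIZE, &mem))
 *		return (ENOMEM);
 *	... fill OCE_MEM_KVA(&mem), hand OCE_MEM_DVA(&mem) to the chip ...
 *	oce_dma_free(sc, &mem);
 *
 * Note that the fail_* labels above unwind in strict reverse order of
 * acquisition: load, map, alloc, then the dmamap itself.
 */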
2490 
2491 void
2492 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2493 {
2494 	if (dma->tag == NULL)
2495 		return;
2496 
2497 	if (dma->map != NULL) {
2498 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2499 		bus_dmamap_unload(dma->tag, dma->map);
2500 
2501 		if (dma->vaddr != 0) {
2502 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2503 			dma->vaddr = 0;
2504 		}
2505 
2506 		bus_dmamap_destroy(dma->tag, dma->map);
2507 		dma->map = NULL;
2508 		dma->tag = NULL;
2509 	}
2510 }
2511 
2512 struct oce_ring *
2513 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2514 {
2515 	struct oce_dma_mem *dma;
2516 	struct oce_ring *ring;
2517 	bus_size_t size = nitems * isize;
2518 	int rc;
2519 
2520 	if (size > maxsegs * PAGE_SIZE)
2521 		return (NULL);
2522 
2523 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2524 	if (ring == NULL)
2525 		return (NULL);
2526 
2527 	ring->isize = isize;
2528 	ring->nitems = nitems;
2529 
2530 	dma = &ring->dma;
2531 	dma->tag = sc->sc_dmat;
2532 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2533 	    BUS_DMA_NOWAIT, &dma->map);
2534 	if (rc != 0) {
2535 		printf("%s: failed to allocate DMA handle\n",
2536 		    sc->sc_dev.dv_xname);
2537 		goto fail_0;
2538 	}
2539 
2540 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2541 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2542 	if (rc != 0) {
2543 		printf("%s: failed to allocate DMA memory\n",
2544 		    sc->sc_dev.dv_xname);
2545 		goto fail_1;
2546 	}
2547 
2548 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2549 	    &dma->vaddr, BUS_DMA_NOWAIT);
2550 	if (rc != 0) {
2551 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2552 		goto fail_2;
2553 	}
2554 
2555 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2556 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2557 
2558 	dma->paddr = 0;
2559 	dma->size = size;
2560 
2561 	return (ring);
2562 
2563 fail_2:
2564 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2565 fail_1:
2566 	bus_dmamap_destroy(dma->tag, dma->map);
2567 fail_0:
2568 	free(ring, M_DEVBUF, 0);
2569 	return (NULL);
2570 }
2571 
2572 void
2573 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2574 {
2575 	oce_dma_free(sc, &ring->dma);
2576 	free(ring, M_DEVBUF, 0);
2577 }
2578 
2579 int
2580 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2581     struct oce_pa *pa, int maxsegs)
2582 {
2583 	struct oce_dma_mem *dma = &ring->dma;
2584 	int i;
2585 
2586 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2587 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2588 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2589 		return (0);
2590 	}
2591 
2592 	if (dma->map->dm_nsegs > maxsegs) {
2593 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2594 		return (0);
2595 	}
2596 
2597 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2598 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2599 
2600 	for (i = 0; i < dma->map->dm_nsegs; i++)
2601 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2602 
2603 	return (dma->map->dm_nsegs);
2604 }
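
/*
 * oce_load_ring() exports the ring's DMA segments as device "pages":
 * e.g. a hypothetical 1024-entry ring of 16-byte items spans 16 KB,
 * which with the PAGE_SIZE maximum segment size loads as four
 * segments, so four pa[] slots are filled in and 4 is returned.  A
 * return of 0 means the load failed or the ring needs more segments
 * than the command has room for.
 */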
2605 
2606 static inline void *
2607 oce_ring_get(struct oce_ring *ring)
2608 {
2609 	int index = ring->index;
2610 
2611 	if (++ring->index == ring->nitems)
2612 		ring->index = 0;
2613 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2614 }
2615 
2616 static inline void *
2617 oce_ring_first(struct oce_ring *ring)
2618 {
2619 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2620 }
2621 
2622 static inline void *
2623 oce_ring_next(struct oce_ring *ring)
2624 {
2625 	if (++ring->index == ring->nitems)
2626 		ring->index = 0;
2627 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2628 }
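
/*
 * Cursor semantics of the three accessors above: oce_ring_get()
 * returns the entry at the current index and then advances,
 * oce_ring_first() peeks at the current entry without moving, and
 * oce_ring_next() advances first and returns the new entry.  None of
 * them checks for overruns; callers must not outpace the hardware.
 */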
2629 
2630 struct oce_pkt *
2631 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2632 {
2633 	struct oce_pkt *pkt;
2634 
2635 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2636 		return (NULL);
2637 
2638 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2639 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2640 		pool_put(oce_pkt_pool, pkt);
2641 		return (NULL);
2642 	}
2643 
2644 	return (pkt);
2645 }
2646 
2647 void
2648 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2649 {
2650 	if (pkt->map) {
2651 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2652 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2653 	}
2654 	pool_put(oce_pkt_pool, pkt);
2655 }
2656 
2657 static inline struct oce_pkt *
2658 oce_pkt_get(struct oce_pkt_list *lst)
2659 {
2660 	struct oce_pkt *pkt;
2661 
2662 	pkt = SIMPLEQ_FIRST(lst);
2663 	if (pkt == NULL)
2664 		return (NULL);
2665 
2666 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2667 
2668 	return (pkt);
2669 }
2670 
2671 static inline void
2672 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2673 {
2674 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2675 }
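
/*
 * Packet descriptors cycle between the two SIMPLEQs initialized in the
 * queue constructors: a descriptor is taken from pkt_free, its dmamap
 * is loaded with an mbuf, and it is parked on pkt_list while the
 * hardware owns the buffer; completion (or a teardown via
 * oce_free_posted_rxbuf() above) moves it back onto pkt_free.
 */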
2676 
2677 /**
2678  * @brief Wait for FW to become ready and reset it
2679  * @param sc		software handle to the device
2680  */
2681 int
2682 oce_init_fw(struct oce_softc *sc)
2683 {
2684 	struct ioctl_common_function_reset cmd;
2685 	uint32_t reg;
2686 	int err = 0, tmo = 60000;
2687 
2688 	/* read semaphore CSR */
2689 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2690 
2691 	/* if the fw is still waiting on the host, kick off POST, then wait */
2692 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2693 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2694 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2695 	}
2696 
2697 	/* wait for FW to become ready */
2698 	for (;;) {
2699 		if (--tmo == 0)
2700 			break;
2701 
2702 		DELAY(1000);
2703 
2704 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2705 		if (reg & MPU_EP_SEM_ERROR) {
2706 			printf(": POST failed: %#x\n", reg);
2707 			return (ENXIO);
2708 		}
2709 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2710 			/* reset FW */
2711 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2712 				memset(&cmd, 0, sizeof(cmd));
2713 				err = oce_cmd(sc, SUBSYS_COMMON,
2714 				    OPCODE_COMMON_FUNCTION_RESET,
2715 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2716 			}
2717 			return (err);
2718 		}
2719 	}
2720 
2721 	printf(": POST timed out: %#x\n", reg);
2722 
2723 	return (ENXIO);
2724 }
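
/*
 * Timing note: the POST loop above polls the semaphore CSR once per
 * millisecond (DELAY(1000)) for up to 60000 iterations, i.e. roughly a
 * one-minute budget for the firmware to reach POST_STAGE_ARMFW_READY.
 */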
2725 
2726 static inline int
2727 oce_mbox_wait(struct oce_softc *sc)
2728 {
2729 	int i;
2730 
2731 	for (i = 0; i < 20000; i++) {
2732 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2733 			return (0);
2734 		DELAY(100);
2735 	}
2736 	return (ETIMEDOUT);
2737 }
2738 
2739 /**
2740  * @brief Mailbox dispatch
2741  * @param sc		software handle to the device
2742  */
2743 int
2744 oce_mbox_dispatch(struct oce_softc *sc)
2745 {
2746 	uint32_t pa, reg;
2747 	int err;
2748 
2749 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2750 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2751 
2752 	if ((err = oce_mbox_wait(sc)) != 0)
2753 		goto out;
2754 
2755 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2756 
2757 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2758 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2759 
2760 	if ((err = oce_mbox_wait(sc)) != 0)
2761 		goto out;
2762 
2763 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2764 
2765 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2766 
2767 	if ((err = oce_mbox_wait(sc)) != 0)
2768 		goto out;
2769 
2770 out:
2771 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2772 	return (err);
2773 }
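
/*
 * The bootstrap mailbox doorbell receives the mailbox's bus address in
 * two writes, as encoded above: first the upper bits (address >> 34)
 * tagged with PD_MPU_MBOX_DB_HI, then the lower bits
 * ((address >> 4) & 0x3fffffff).  The bottom four address bits are
 * never transmitted, i.e. the mailbox must be at least 16-byte
 * aligned.
 */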
2774 
2775 /**
2776  * @brief Function to initialize the hw with host endian information
2777  * @param sc		software handle to the device
2778  * @returns		0 on success, ETIMEDOUT on failure
2779  */
2780 int
2781 oce_mbox_init(struct oce_softc *sc)
2782 {
2783 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2784 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2785 
2786 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2787 		return (0);
2788 
2789 	/* Endian Signature */
2790 	*ptr++ = 0xff;
2791 	*ptr++ = 0x12;
2792 	*ptr++ = 0x34;
2793 	*ptr++ = 0xff;
2794 	*ptr++ = 0xff;
2795 	*ptr++ = 0x56;
2796 	*ptr++ = 0x78;
2797 	*ptr = 0xff;
2798 
2799 	return (oce_mbox_dispatch(sc));
2800 }
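
/*
 * The signature written above brackets the bytes 0x12 0x34 / 0x56 0x78
 * with 0xff markers; from how this well-known pattern lands in the
 * mailbox words the firmware can presumably determine the host's byte
 * order.
 */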
2801 
2802 int
2803 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2804     void *payload, int length)
2805 {
2806 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2807 	struct oce_mbx *mbx = &bmbx->mbx;
2808 	struct mbx_hdr *hdr;
2809 	caddr_t epayload = NULL;
2810 	int err;
2811 
2812 	if (length > OCE_MBX_PAYLOAD)
2813 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2814 	if (length > OCE_MAX_PAYLOAD)
2815 		return (EINVAL);
2816 
2817 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2818 
2819 	memset(mbx, 0, sizeof(struct oce_mbx));
2820 
2821 	mbx->payload_length = length;
2822 
2823 	if (epayload) {
2824 		mbx->flags = OCE_MBX_F_SGE;
2825 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2826 		memcpy(epayload, payload, length);
2827 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2828 		mbx->pld.sgl[0].length = length;
2829 		hdr = (struct mbx_hdr *)epayload;
2830 	} else {
2831 		mbx->flags = OCE_MBX_F_EMBED;
2832 		memcpy(mbx->pld.data, payload, length);
2833 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2834 	}
2835 
2836 	hdr->subsys = subsys;
2837 	hdr->opcode = opcode;
2838 	hdr->version = version;
2839 	hdr->length = length - sizeof(*hdr);
2840 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2841 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2842 	else
2843 		hdr->timeout = OCE_MBX_TIMEOUT;
2844 
2845 	if (epayload)
2846 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2847 
2848 	err = oce_mbox_dispatch(sc);
2849 	if (err == 0) {
2850 		if (epayload) {
2851 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2852 			memcpy(payload, epayload, length);
2853 		} else
2854 			memcpy(payload, &mbx->pld.data, length);
2855 	} else
2856 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2857 		    "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2858 		    opcode, version, epayload ? "ext " : "",
2859 		    length);
2860 	return (err);
2861 }
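
/*
 * oce_cmd() picks its transport by payload size: requests of up to
 * OCE_MBX_PAYLOAD bytes travel embedded in the mailbox itself, larger
 * ones (up to OCE_MAX_PAYLOAD) are staged in the sc_pld DMA buffer and
 * referenced via a single scatter-gather entry.  A minimal caller
 * sketch, with "opcode" standing in for a real command and a bare
 * header as the (degenerate) payload:
 *
 *	struct mbx_hdr cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	err = oce_cmd(sc, SUBSYS_COMMON, opcode, OCE_MBX_VER_V0,
 *	    &cmd, sizeof(cmd));
 *
 * On success the response is copied back over the caller's buffer, so
 * the command structure doubles as the reply.
 */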
2862 
2863 /**
2864  * @brief	During attach the firmware sends unsolicited async
2865  *		notifications only after the first MCC command has been
2866  *		issued.  We use the MCC queue only for receiving such
2867  *		async events and the mailbox for sending commands, so
2868  *		send at least one dummy command on the MCC to get the
 *		notifications flowing.
2869  */
2870 void
2871 oce_first_mcc(struct oce_softc *sc)
2872 {
2873 	struct oce_mbx *mbx;
2874 	struct oce_mq *mq = sc->sc_mq;
2875 	struct mbx_hdr *hdr;
2876 	struct mbx_get_common_fw_version *cmd;
2877 
2878 	mbx = oce_ring_get(mq->ring);
2879 	memset(mbx, 0, sizeof(struct oce_mbx));
2880 
2881 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2882 
2883 	hdr = &cmd->hdr;
2884 	hdr->subsys = SUBSYS_COMMON;
2885 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2886 	hdr->version = OCE_MBX_VER_V0;
2887 	hdr->timeout = OCE_MBX_TIMEOUT;
2888 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2889 
2890 	mbx->flags = OCE_MBX_F_EMBED;
2891 	mbx->payload_length = sizeof(*cmd);
2892 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2893 	    BUS_DMASYNC_PREWRITE);
2894 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2895 }
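
/*
 * The PD_MQ_DB write above posts the command: MQ id in the low bits
 * and the number of entries handed to the hardware (here one) starting
 * at bit 16.
 */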
2896 
2897 int
2898 oce_get_fw_config(struct oce_softc *sc)
2899 {
2900 	struct mbx_common_query_fw_config cmd;
2901 	int err;
2902 
2903 	memset(&cmd, 0, sizeof(cmd));
2904 
2905 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2906 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2907 	if (err)
2908 		return (err);
2909 
2910 	sc->sc_port = cmd.params.rsp.port_id;
2911 	sc->sc_fmode = cmd.params.rsp.function_mode;
2912 
2913 	return (0);
2914 }
2915 
2916 int
2917 oce_check_native_mode(struct oce_softc *sc)
2918 {
2919 	struct mbx_common_set_function_cap cmd;
2920 	int err;
2921 
2922 	memset(&cmd, 0, sizeof(cmd));
2923 
2924 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2925 	    CAP_BE3_NATIVE_ERX_API;
2926 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2927 
2928 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2929 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2930 	if (err)
2931 		return (err);
2932 
2933 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2934 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2935 
2936 	return (0);
2937 }
2938 
2939 /**
2940  * @brief Function for creating a network interface.
2941  * @param sc		software handle to the device
2942  * @returns		0 on success, error otherwise
2943  */
2944 int
2945 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2946 {
2947 	struct mbx_create_common_iface cmd;
2948 	uint32_t caps, caps_en;
2949 	int err = 0;
2950 
2951 	/* interface capabilities to give device when creating interface */
2952 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2953 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2954 	    MBX_RX_IFACE_RSS;
2955 
2956 	/* capabilities to enable by default (others set dynamically) */
2957 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2958 
2959 	if (!IS_XE201(sc)) {
2960 		/* LANCER A0 workaround */
2961 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2962 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2963 	}
2964 
2965 	/* enable capabilities controlled via driver startup parameters */
2966 	if (sc->sc_rss_enable)
2967 		caps_en |= MBX_RX_IFACE_RSS;
2968 
2969 	memset(&cmd, 0, sizeof(cmd));
2970 
2971 	cmd.params.req.version = 0;
2972 	cmd.params.req.cap_flags = htole32(caps);
2973 	cmd.params.req.enable_flags = htole32(caps_en);
2974 	if (macaddr != NULL) {
2975 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2976 		cmd.params.req.mac_invalid = 0;
2977 	} else
2978 		cmd.params.req.mac_invalid = 1;
2979 
2980 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2981 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2982 	if (err)
2983 		return (err);
2984 
2985 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2986 
2987 	if (macaddr != NULL)
2988 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2989 
2990 	return (0);
2991 }
2992 
2993 /**
2994  * @brief Function to send the mbx command to configure vlan
2995  * @param sc 		software handle to the device
2996  * @param vtags		array of vlan tags
2997  * @param nvtags	number of elements in array
2998  * @param untagged	boolean TRUE/FALSE
2999  * @param promisc	flag to enable/disable VLAN promiscuous mode
3000  * @returns		0 on success, EIO on failure
3001  */
3002 int
3003 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3004     int untagged, int promisc)
3005 {
3006 	struct mbx_common_config_vlan cmd;
3007 
3008 	memset(&cmd, 0, sizeof(cmd));
3009 
3010 	cmd.params.req.if_id = sc->sc_if_id;
3011 	cmd.params.req.promisc = promisc;
3012 	cmd.params.req.untagged = untagged;
3013 	cmd.params.req.num_vlans = nvtags;
3014 
3015 	if (!promisc)
3016 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3017 			nvtags * sizeof(struct normal_vlan));
3018 
3019 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3020 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3021 }
3022 
3023 /**
3024  * @brief Function to set flow control capability in the hardware
3025  * @param sc 		software handle to the device
3026  * @param flags		flow control flags to set
3027  * @returns		0 on success, EIO on failure
3028  */
3029 int
3030 oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3031 {
3032 	struct mbx_common_get_set_flow_control cmd;
3033 	int err;
3034 
3035 	memset(&cmd, 0, sizeof(cmd));
3036 
3037 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3038 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3039 
3040 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3041 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3042 	if (err)
3043 		return (err);
3044 
3045 	memset(&cmd, 0, sizeof(cmd));
3046 
3047 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3048 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3049 	if (err)
3050 		return (err);
3051 
3052 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3053 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3054 
3055 	return (0);
3056 }
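
/*
 * Note the set-then-get pattern above: after requesting the new pause
 * settings the driver reads the configuration back and records in
 * sc_fc what the hardware actually accepted rather than what was
 * asked for.
 */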
3057 
3058 #ifdef OCE_RSS
3059 /**
3060  * @brief Function to configure RSS on the interface
3061  * @param sc 		software handle to the device
3062  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3063  * @returns		0 on success, error otherwise
3064  */
3065 int
3066 oce_config_rss(struct oce_softc *sc, int enable)
3067 {
3068 	struct mbx_config_nic_rss cmd;
3069 	uint8_t *tbl = &cmd.params.req.cputable[0];
3070 	int i, j;
3071 
3072 	memset(&cmd, 0, sizeof(cmd));
3073 
3074 	if (enable)
3075 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3076 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3077 	cmd.params.req.flush = OCE_FLUSH;
3078 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3079 
3080 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3081 
3082 	/*
3083 	 * Initialize the RSS CPU indirection table.
3084 	 *
3085 	 * The table is used to choose the queue to place incoming packets.
3086 	 * Incoming packets are hashed.  The lowest bits in the hash result
3087 	 * are used as the index into the CPU indirection table.
3088 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3089 	 * create.  Based on the CPU ID, the receive completion is routed to
3090 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3091 	 * on the default (0) CQ).
3092 	 */
3093 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3094 		if (sc->sc_rq[j]->rss)
3095 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3096 	}
3097 	if (i > 0)
3098 		cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3099 	else
3100 		return (ENXIO);
3101 
3102 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3103 	    &cmd, sizeof(cmd)));
3104 }
3105 #endif	/* OCE_RSS */
3106 
3107 /**
3108  * @brief Function to update the hardware multicast filter
3109  * @param sc		software handle to the device
3110  * @param multi		table of multicast addresses
3111  * @param naddr		number of multicast addresses in the table
3112  */
3113 int
3114 oce_update_mcast(struct oce_softc *sc,
3115     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3116 {
3117 	struct mbx_set_common_iface_multicast cmd;
3118 
3119 	memset(&cmd, 0, sizeof(cmd));
3120 
3121 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3122 	cmd.params.req.num_mac = htole16(naddr);
3123 	cmd.params.req.if_id = sc->sc_if_id;
3124 
3125 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3126 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3127 }
3128 
3129 /**
3130  * @brief RXF function to enable/disable device promiscuous mode
3131  * @param sc		software handle to the device
3132  * @param enable	enable/disable flag
3133  * @returns		0 on success, EIO on failure
3134  * @note
3135  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated on Lancer.
3136  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3137  */
3138 int
3139 oce_set_promisc(struct oce_softc *sc, int enable)
3140 {
3141 	struct mbx_set_common_iface_rx_filter cmd;
3142 	struct iface_rx_filter_ctx *req;
3143 
3144 	memset(&cmd, 0, sizeof(cmd));
3145 
3146 	req = &cmd.params.req;
3147 	req->if_id = sc->sc_if_id;
3148 
3149 	if (enable)
3150 		req->iface_flags = req->iface_flags_mask =
3151 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3152 
3153 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3154 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3155 }
3156 
3157 /**
3158  * @brief Function to query the link status from the hardware
3159  * @param sc 		software handle to the device
3160  * @note		the link state and speed are stored in the softc
3161  * @returns		0 on success, EIO on failure
3162  */
3163 int
3164 oce_get_link_status(struct oce_softc *sc)
3165 {
3166 	struct mbx_query_common_link_config cmd;
3167 	int err;
3168 
3169 	memset(&cmd, 0, sizeof(cmd));
3170 
3171 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3172 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3173 	if (err)
3174 		return (err);
3175 
3176 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3177 	    NTWK_LOGICAL_LINK_UP);
3178 
3179 	if (cmd.params.rsp.mac_speed < 5)
3180 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3181 	else
3182 		sc->sc_link_speed = 0;
3183 
3184 	return (0);
3185 }
3186 
3187 void
3188 oce_macaddr_set(struct oce_softc *sc)
3189 {
3190 	uint32_t old_pmac_id = sc->sc_pmac_id;
3191 	int status = 0;
3192 
3193 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3194 		return;
3195 
3196 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3197 	if (!status)
3198 		status = oce_macaddr_del(sc, old_pmac_id);
3199 	else
3200 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3201 }
3202 
3203 int
3204 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3205 {
3206 	struct mbx_query_common_iface_mac cmd;
3207 	int err;
3208 
3209 	memset(&cmd, 0, sizeof(cmd));
3210 
3211 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3212 	cmd.params.req.permanent = 1;
3213 
3214 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3215 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3216 	if (err == 0)
3217 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3218 		    ETHER_ADDR_LEN);
3219 	return (err);
3220 }
3221 
3222 int
3223 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3224 {
3225 	struct mbx_add_common_iface_mac cmd;
3226 	int err;
3227 
3228 	memset(&cmd, 0, sizeof(cmd));
3229 
3230 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3231 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3232 
3233 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3234 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3235 	if (err == 0)
3236 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3237 	return (err);
3238 }
3239 
3240 int
3241 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3242 {
3243 	struct mbx_del_common_iface_mac cmd;
3244 
3245 	memset(&cmd, 0, sizeof(cmd));
3246 
3247 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3248 	cmd.params.req.pmac_id = htole32(pmac);
3249 
3250 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3251 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3252 }
3253 
3254 int
3255 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3256 {
3257 	struct mbx_create_nic_rq cmd;
3258 	int err, npages;
3259 
3260 	memset(&cmd, 0, sizeof(cmd));
3261 
3262 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3263 	    nitems(cmd.params.req.pages));
3264 	if (!npages) {
3265 		printf("%s: failed to load the rq ring\n", __func__);
3266 		return (1);
3267 	}
3268 
3269 	if (IS_XE201(sc)) {
3270 		cmd.params.req.frag_size = rq->fragsize / 2048;
3271 		cmd.params.req.page_size = 1;
3272 	} else
3273 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3274 	cmd.params.req.num_pages = npages;
3275 	cmd.params.req.cq_id = rq->cq->id;
3276 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3277 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3278 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3279 
3280 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3281 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3282 	    sizeof(cmd));
3283 	if (err)
3284 		return (err);
3285 
3286 	rq->id = letoh16(cmd.params.rsp.rq_id);
3287 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3288 
3289 	return (0);
3290 }
3291 
3292 int
3293 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3294 {
3295 	struct mbx_create_nic_wq cmd;
3296 	int err, npages;
3297 
3298 	memset(&cmd, 0, sizeof(cmd));
3299 
3300 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3301 	    nitems(cmd.params.req.pages));
3302 	if (!npages) {
3303 		printf("%s: failed to load the wq ring\n", __func__);
3304 		return (1);
3305 	}
3306 
3307 	if (IS_XE201(sc))
3308 		cmd.params.req.if_id = sc->sc_if_id;
3309 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3310 	cmd.params.req.num_pages = npages;
3311 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3312 	cmd.params.req.cq_id = htole16(wq->cq->id);
3313 	cmd.params.req.ulp_num = 1;
3314 
3315 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3316 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3317 	    sizeof(cmd));
3318 	if (err)
3319 		return (err);
3320 
3321 	wq->id = letoh16(cmd.params.rsp.wq_id);
3322 
3323 	return (0);
3324 }
3325 
3326 int
3327 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3328 {
3329 	struct mbx_create_common_mq_ex cmd;
3330 	union oce_mq_ext_ctx *ctx;
3331 	int err, npages;
3332 
3333 	memset(&cmd, 0, sizeof(cmd));
3334 
3335 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3336 	    nitems(cmd.params.req.pages));
3337 	if (!npages) {
3338 		printf("%s: failed to load the mq ring\n", __func__);
3339 		return (-1);
3340 	}
3341 
3342 	ctx = &cmd.params.req.context;
3343 	ctx->v0.num_pages = npages;
3344 	ctx->v0.cq_id = mq->cq->id;
3345 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3346 	ctx->v0.valid = 1;
3347 	/* Subscribe to all async events (incl. link state and group 5) */
3348 	ctx->v0.async_evt_bitmap = 0xffffffff;
3349 
3350 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3351 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3352 	if (err)
3353 		return (err);
3354 
3355 	mq->id = letoh16(cmd.params.rsp.mq_id);
3356 
3357 	return (0);
3358 }
3359 
3360 int
3361 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3362 {
3363 	struct mbx_create_common_eq cmd;
3364 	int err, npages;
3365 
3366 	memset(&cmd, 0, sizeof(cmd));
3367 
3368 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3369 	    nitems(cmd.params.req.pages));
3370 	if (!npages) {
3371 		printf("%s: failed to load the eq ring\n", __func__);
3372 		return (-1);
3373 	}
3374 
3375 	cmd.params.req.ctx.num_pages = htole16(npages);
3376 	cmd.params.req.ctx.valid = 1;
3377 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3378 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3379 	cmd.params.req.ctx.armed = 0;
3380 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3381 
3382 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3383 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3384 	if (err)
3385 		return (err);
3386 
3387 	eq->id = letoh16(cmd.params.rsp.eq_id);
3388 
3389 	return (0);
3390 }
3391 
3392 int
3393 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3394 {
3395 	struct mbx_create_common_cq cmd;
3396 	union oce_cq_ctx *ctx;
3397 	int err, npages;
3398 
3399 	memset(&cmd, 0, sizeof(cmd));
3400 
3401 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3402 	    nitems(cmd.params.req.pages));
3403 	if (!npages) {
3404 		printf("%s: failed to load the cq ring\n", __func__);
3405 		return (-1);
3406 	}
3407 
3408 	ctx = &cmd.params.req.cq_ctx;
3409 
3410 	if (IS_XE201(sc)) {
3411 		ctx->v2.num_pages = htole16(npages);
3412 		ctx->v2.page_size = 1; /* for 4K */
3413 		ctx->v2.eventable = cq->eventable;
3414 		ctx->v2.valid = 1;
3415 		ctx->v2.count = ilog2(cq->nitems / 256);
3416 		ctx->v2.nodelay = cq->nodelay;
3417 		ctx->v2.coalesce_wm = cq->ncoalesce;
3418 		ctx->v2.armed = 0;
3419 		ctx->v2.eq_id = cq->eq->id;
3420 		if (ctx->v2.count == 3) {
3421 			if (cq->nitems > (4*1024)-1)
3422 				ctx->v2.cqe_count = (4*1024)-1;
3423 			else
3424 				ctx->v2.cqe_count = cq->nitems;
3425 		}
3426 	} else {
3427 		ctx->v0.num_pages = htole16(npages);
3428 		ctx->v0.eventable = cq->eventable;
3429 		ctx->v0.valid = 1;
3430 		ctx->v0.count = ilog2(cq->nitems / 256);
3431 		ctx->v0.nodelay = cq->nodelay;
3432 		ctx->v0.coalesce_wm = cq->ncoalesce;
3433 		ctx->v0.armed = 0;
3434 		ctx->v0.eq_id = cq->eq->id;
3435 	}
3436 
3437 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3438 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3439 	    sizeof(cmd));
3440 	if (err)
3441 		return (err);
3442 
3443 	cq->id = letoh16(cmd.params.rsp.cq_id);
3444 
3445 	return (0);
3446 }
3447 
3448 int
3449 oce_init_stats(struct oce_softc *sc)
3450 {
3451 	union cmd {
3452 		struct mbx_get_nic_stats_v0	_be2;
3453 		struct mbx_get_nic_stats	_be3;
3454 		struct mbx_get_pport_stats	_xe201;
3455 	};
3456 
3457 	sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3458 	if (sc->sc_statcmd == NULL) {
3459 		printf("%s: failed to allocate statistics command block\n",
3460 		    sc->sc_dev.dv_xname);
3461 		return (-1);
3462 	}
3463 	return (0);
3464 }
3465 
3466 int
3467 oce_update_stats(struct oce_softc *sc)
3468 {
3469 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3470 	uint64_t rxe, txe;
3471 	int err;
3472 
3473 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3474 		err = oce_stats_be2(sc, &rxe, &txe);
3475 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3476 		err = oce_stats_be3(sc, &rxe, &txe);
3477 	else
3478 		err = oce_stats_xe(sc, &rxe, &txe);
3479 	if (err)
3480 		return (err);
3481 
3482 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3483 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3484 	sc->sc_rx_errors = rxe;
3485 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3486 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3487 	sc->sc_tx_errors = txe;
3488 
3489 	return (0);
3490 }
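
/*
 * The hardware counters are cumulative, so oce_update_stats() feeds
 * if_ierrors/if_oerrors the difference from the previously sampled
 * values; taking the absolute difference avoids a huge unsigned
 * increment should the firmware reset its counters behind our back.
 */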
3491 
3492 int
3493 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3494 {
3495 	struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3496 	struct oce_pmem_stats *ms;
3497 	struct oce_rxf_stats_v0 *rs;
3498 	struct oce_port_rxf_stats_v0 *ps;
3499 	int err;
3500 
3501 	memset(cmd, 0, sizeof(*cmd));
3502 
3503 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3504 	    cmd, sizeof(*cmd));
3505 	if (err)
3506 		return (err);
3507 
3508 	ms = &cmd->params.rsp.stats.pmem;
3509 	rs = &cmd->params.rsp.stats.rxf;
3510 	ps = &rs->port[sc->sc_port];
3511 
3512 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3513 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3514 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3515 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3516 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3517 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3518 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3519 	    ps->rx_alignment_symbol_errors;
3520 	if (sc->sc_if_id)
3521 		*rxe += rs->port1_jabber_events;
3522 	else
3523 		*rxe += rs->port0_jabber_events;
3524 	*rxe += ms->eth_red_drops;
3525 
3526 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3527 
3528 	return (0);
3529 }
3530 
3531 int
3532 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3533 {
3534 	struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3535 	struct oce_pmem_stats *ms;
3536 	struct oce_rxf_stats_v1 *rs;
3537 	struct oce_port_rxf_stats_v1 *ps;
3538 	int err;
3539 
3540 	memset(cmd, 0, sizeof(*cmd));
3541 
3542 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3543 	    cmd, sizeof(*cmd));
3544 	if (err)
3545 		return (err);
3546 
3547 	ms = &cmd->params.rsp.stats.pmem;
3548 	rs = &cmd->params.rsp.stats.rxf;
3549 	ps = &rs->port[sc->sc_port];
3550 
3551 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3552 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3553 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3554 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3555 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3556 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3557 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3558 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3559 	*rxe += ms->eth_red_drops;
3560 
3561 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3562 
3563 	return (0);
3564 }
3565 
3566 int
3567 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3568 {
3569 	struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3570 	struct oce_pport_stats *pps;
3571 	int err;
3572 
3573 	memset(cmd, 0, sizeof(*cmd));
3574 
3575 	cmd->params.req.reset_stats = 0;
3576 	cmd->params.req.port_number = sc->sc_if_id;
3577 
3578 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3579 	    OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3580 	if (err)
3581 		return (err);
3582 
3583 	pps = &cmd->params.rsp.pps;
3584 
3585 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3586 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3587 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3588 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3589 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3590 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3591 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3592 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3593 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3594 
3595 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3596 
3597 	return (0);
3598 }
3599