xref: /openbsd/sys/dev/pci/if_oce.c (revision d25d28bf)
1 /*	$OpenBSD: if_oce.c,v 1.98 2016/09/15 02:00:17 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2012 Mike Belopuhov
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*-
20  * Copyright (C) 2012 Emulex
21  * All rights reserved.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions are met:
25  *
26  * 1. Redistributions of source code must retain the above copyright notice,
27  *    this list of conditions and the following disclaimer.
28  *
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * 3. Neither the name of the Emulex Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived from
35  *    this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
38  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
41  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
47  * POSSIBILITY OF SUCH DAMAGE.
48  *
49  * Contact Information:
50  * freebsd-drivers@emulex.com
51  *
52  * Emulex
53  * 3333 Susan Street
54  * Costa Mesa, CA 92626
55  */
56 
57 #include "bpfilter.h"
58 #include "vlan.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sockio.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/device.h>
67 #include <sys/socket.h>
68 #include <sys/queue.h>
69 #include <sys/timeout.h>
70 #include <sys/pool.h>
71 
72 #include <net/if.h>
73 #include <net/if_media.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 
78 #ifdef INET6
79 #include <netinet/ip6.h>
80 #endif
81 
82 #if NBPFILTER > 0
83 #include <net/bpf.h>
84 #endif
85 
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pcidevs.h>
89 
90 #include <dev/pci/if_ocereg.h>
91 
92 #ifndef TRUE
93 #define TRUE			1
94 #endif
95 #ifndef FALSE
96 #define FALSE			0
97 #endif
98 
99 #define OCE_MBX_TIMEOUT		5
100 
101 #define OCE_MAX_PAYLOAD		65536
102 
103 #define OCE_TX_RING_SIZE	512
104 #define OCE_RX_RING_SIZE	1024
105 
106 /* This should be powers of 2. Like 2,4,8 & 16 */
107 #define OCE_MAX_RSS		4 /* TODO: 8 */
108 #define OCE_MAX_RQ		OCE_MAX_RSS + 1 /* one default queue */
109 #define OCE_MAX_WQ		8
110 
111 #define OCE_MAX_EQ		32
112 #define OCE_MAX_CQ		OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */
113 #define OCE_MAX_CQ_EQ		8 /* Max CQ that can attached to an EQ */
114 
115 #define OCE_DEFAULT_EQD		80
116 
117 #define OCE_MIN_MTU		256
118 #define OCE_MAX_MTU		9000
119 
120 #define OCE_MAX_RQ_COMPL	64
121 #define OCE_MAX_RQ_POSTS	255
122 #define OCE_RX_BUF_SIZE		2048
123 
124 #define OCE_MAX_TX_ELEMENTS	29
125 #define OCE_MAX_TX_DESC		1024
126 #define OCE_MAX_TX_SIZE		65535
127 
128 #define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
129 #define OCE_MEM_DVA(_m)		((_m)->paddr)
130 
131 #define OCE_WQ_FOREACH(sc, wq, i) 	\
132 	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
133 #define OCE_RQ_FOREACH(sc, rq, i) 	\
134 	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
135 #define OCE_EQ_FOREACH(sc, eq, i) 	\
136 	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
137 #define OCE_CQ_FOREACH(sc, cq, i) 	\
138 	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
139 #define OCE_RING_FOREACH(_r, _v, _c)	\
140 	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
141 
142 static inline int
143 ilog2(unsigned int v)
144 {
145 	int r = 0;
146 
147 	while (v >>= 1)
148 		r++;
149 	return (r);
150 }
151 
152 struct oce_pkt {
153 	struct mbuf *		mbuf;
154 	bus_dmamap_t		map;
155 	int			nsegs;
156 	SIMPLEQ_ENTRY(oce_pkt)	entry;
157 };
158 SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
159 
160 struct oce_dma_mem {
161 	bus_dma_tag_t		tag;
162 	bus_dmamap_t		map;
163 	bus_dma_segment_t	segs;
164 	int			nsegs;
165 	bus_size_t		size;
166 	caddr_t			vaddr;
167 	bus_addr_t		paddr;
168 };
169 
170 struct oce_ring {
171 	int			index;
172 	int			nitems;
173 	int			nused;
174 	int			isize;
175 	struct oce_dma_mem	dma;
176 };
177 
178 struct oce_softc;
179 
180 enum cq_len {
181 	CQ_LEN_256  = 256,
182 	CQ_LEN_512  = 512,
183 	CQ_LEN_1024 = 1024
184 };
185 
186 enum eq_len {
187 	EQ_LEN_256  = 256,
188 	EQ_LEN_512  = 512,
189 	EQ_LEN_1024 = 1024,
190 	EQ_LEN_2048 = 2048,
191 	EQ_LEN_4096 = 4096
192 };
193 
194 enum eqe_size {
195 	EQE_SIZE_4  = 4,
196 	EQE_SIZE_16 = 16
197 };
198 
199 enum qtype {
200 	QTYPE_EQ,
201 	QTYPE_MQ,
202 	QTYPE_WQ,
203 	QTYPE_RQ,
204 	QTYPE_CQ,
205 	QTYPE_RSS
206 };
207 
208 struct oce_eq {
209 	struct oce_softc *	sc;
210 	struct oce_ring *	ring;
211 	enum qtype		type;
212 	int			id;
213 
214 	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
215 	int			cq_valid;
216 
217 	int			nitems;
218 	int			isize;
219 	int			delay;
220 };
221 
222 struct oce_cq {
223 	struct oce_softc *	sc;
224 	struct oce_ring *	ring;
225 	enum qtype		type;
226 	int			id;
227 
228 	struct oce_eq *		eq;
229 
230 	void			(*cq_intr)(void *);
231 	void *			cb_arg;
232 
233 	int			nitems;
234 	int			nodelay;
235 	int			eventable;
236 	int			ncoalesce;
237 };
238 
239 struct oce_mq {
240 	struct oce_softc *	sc;
241 	struct oce_ring *	ring;
242 	enum qtype		type;
243 	int			id;
244 
245 	struct oce_cq *		cq;
246 
247 	int			nitems;
248 };
249 
250 struct oce_wq {
251 	struct oce_softc *	sc;
252 	struct oce_ring *	ring;
253 	enum qtype		type;
254 	int			id;
255 
256 	struct oce_cq *		cq;
257 
258 	struct oce_pkt_list	pkt_list;
259 	struct oce_pkt_list	pkt_free;
260 
261 	int			nitems;
262 };
263 
264 struct oce_rq {
265 	struct oce_softc *	sc;
266 	struct oce_ring *	ring;
267 	enum qtype		type;
268 	int			id;
269 
270 	struct oce_cq *		cq;
271 
272 	struct if_rxring	rxring;
273 	struct oce_pkt_list	pkt_list;
274 	struct oce_pkt_list	pkt_free;
275 
276 	uint32_t		rss_cpuid;
277 
278 #ifdef OCE_LRO
279 	struct lro_ctrl		lro;
280 	int			lro_pkts_queued;
281 #endif
282 
283 	int			nitems;
284 	int			fragsize;
285 	int			mtu;
286 	int			rss;
287 };
288 
289 struct oce_softc {
290 	struct device		sc_dev;
291 
292 	uint			sc_flags;
293 #define  OCE_F_BE2		 0x00000001
294 #define  OCE_F_BE3		 0x00000002
295 #define  OCE_F_XE201		 0x00000008
296 #define  OCE_F_BE3_NATIVE	 0x00000100
297 #define  OCE_F_RESET_RQD	 0x00001000
298 #define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000
299 
300 	bus_dma_tag_t		sc_dmat;
301 
302 	bus_space_tag_t		sc_cfg_iot;
303 	bus_space_handle_t	sc_cfg_ioh;
304 	bus_size_t		sc_cfg_size;
305 
306 	bus_space_tag_t		sc_csr_iot;
307 	bus_space_handle_t	sc_csr_ioh;
308 	bus_size_t		sc_csr_size;
309 
310 	bus_space_tag_t		sc_db_iot;
311 	bus_space_handle_t	sc_db_ioh;
312 	bus_size_t		sc_db_size;
313 
314 	void *			sc_ih;
315 
316 	struct arpcom		sc_ac;
317 	struct ifmedia		sc_media;
318 	ushort			sc_link_up;
319 	ushort			sc_link_speed;
320 	uint64_t		sc_fc;
321 
322 	struct oce_dma_mem	sc_mbx;
323 	struct oce_dma_mem	sc_pld;
324 
325 	uint			sc_port;
326 	uint			sc_fmode;
327 
328 	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
329 	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
330 	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
331 	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
332 	struct oce_mq *		sc_mq;			/* Mailbox queue */
333 
334 	ushort			sc_neq;
335 	ushort			sc_ncq;
336 	ushort			sc_nrq;
337 	ushort			sc_nwq;
338 	ushort			sc_nintr;
339 
340 	ushort			sc_tx_ring_size;
341 	ushort			sc_rx_ring_size;
342 	ushort			sc_rss_enable;
343 
344 	uint32_t		sc_if_id;	/* interface ID */
345 	uint32_t		sc_pmac_id;	/* PMAC id */
346 	char			sc_macaddr[ETHER_ADDR_LEN];
347 
348 	uint32_t		sc_pvid;
349 
350 	uint64_t		sc_rx_errors;
351 	uint64_t		sc_tx_errors;
352 
353 	struct timeout		sc_tick;
354 	struct timeout		sc_rxrefill;
355 
356 	void *			sc_statcmd;
357 };
358 
359 #define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
360 #define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)
361 
362 #define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
363 #define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
364 
365 #define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)
366 
367 int 	oce_match(struct device *, void *, void *);
368 void	oce_attach(struct device *, struct device *, void *);
369 int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
370 void	oce_attachhook(struct device *);
371 void	oce_attach_ifp(struct oce_softc *);
372 int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
373 int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
374 void	oce_iff(struct oce_softc *);
375 void	oce_link_status(struct oce_softc *);
376 void	oce_media_status(struct ifnet *, struct ifmediareq *);
377 int 	oce_media_change(struct ifnet *);
378 void	oce_tick(void *);
379 void	oce_init(void *);
380 void	oce_stop(struct oce_softc *);
381 void	oce_watchdog(struct ifnet *);
382 void	oce_start(struct ifnet *);
383 int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
384 #ifdef OCE_TSO
385 struct mbuf *
386 	oce_tso(struct oce_softc *, struct mbuf **);
387 #endif
388 int 	oce_intr(void *);
389 void	oce_intr_wq(void *);
390 void	oce_txeof(struct oce_wq *);
391 void	oce_intr_rq(void *);
392 void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
393 void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
394 int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
395 int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
396 #ifdef OCE_LRO
397 void	oce_flush_lro(struct oce_rq *);
398 int 	oce_init_lro(struct oce_softc *);
399 void	oce_free_lro(struct oce_softc *);
400 #endif
401 int	oce_get_buf(struct oce_rq *);
402 int	oce_alloc_rx_bufs(struct oce_rq *);
403 void	oce_refill_rx(void *);
404 void	oce_free_posted_rxbuf(struct oce_rq *);
405 void	oce_intr_mq(void *);
406 void	oce_link_event(struct oce_softc *,
407 	    struct oce_async_cqe_link_state *);
408 
409 int 	oce_init_queues(struct oce_softc *);
410 void	oce_release_queues(struct oce_softc *);
411 struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
412 void	oce_drain_wq(struct oce_wq *);
413 void	oce_destroy_wq(struct oce_wq *);
414 struct oce_rq *
415 	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
416 void	oce_drain_rq(struct oce_rq *);
417 void	oce_destroy_rq(struct oce_rq *);
418 struct oce_eq *
419 	oce_create_eq(struct oce_softc *);
420 static inline void
421 	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
422 void	oce_drain_eq(struct oce_eq *);
423 void	oce_destroy_eq(struct oce_eq *);
424 struct oce_mq *
425 	oce_create_mq(struct oce_softc *, struct oce_eq *);
426 void	oce_drain_mq(struct oce_mq *);
427 void	oce_destroy_mq(struct oce_mq *);
428 struct oce_cq *
429 	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
430 	    int isize, int eventable, int nodelay, int ncoalesce);
431 static inline void
432 	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
433 void	oce_destroy_cq(struct oce_cq *);
434 
435 int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
436 void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
437 #define	oce_dma_sync(d, f) \
438 	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
439 
440 struct oce_ring *
441 	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
442 void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
443 int	oce_load_ring(struct oce_softc *, struct oce_ring *,
444 	    struct oce_pa *, int max_segs);
445 static inline void *
446 	oce_ring_get(struct oce_ring *);
447 static inline void *
448 	oce_ring_first(struct oce_ring *);
449 static inline void *
450 	oce_ring_next(struct oce_ring *);
451 struct oce_pkt *
452 	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
453 	    int maxsegsz);
454 void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
455 static inline struct oce_pkt *
456 	oce_pkt_get(struct oce_pkt_list *);
457 static inline void
458 	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);
459 
460 int	oce_init_fw(struct oce_softc *);
461 int	oce_mbox_init(struct oce_softc *);
462 int	oce_mbox_dispatch(struct oce_softc *);
463 int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
464 	    void *payload, int length);
465 void	oce_first_mcc(struct oce_softc *);
466 
467 int	oce_get_fw_config(struct oce_softc *);
468 int	oce_check_native_mode(struct oce_softc *);
469 int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
470 int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
471 	    int nvtags, int untagged, int promisc);
472 int	oce_set_flow_control(struct oce_softc *, uint64_t);
473 int	oce_config_rss(struct oce_softc *, int enable);
474 int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
475 	    int naddr);
476 int	oce_set_promisc(struct oce_softc *, int enable);
477 int	oce_get_link_status(struct oce_softc *);
478 
479 void	oce_macaddr_set(struct oce_softc *);
480 int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
481 int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
482 int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);
483 
484 int	oce_new_rq(struct oce_softc *, struct oce_rq *);
485 int	oce_new_wq(struct oce_softc *, struct oce_wq *);
486 int	oce_new_mq(struct oce_softc *, struct oce_mq *);
487 int	oce_new_eq(struct oce_softc *, struct oce_eq *);
488 int	oce_new_cq(struct oce_softc *, struct oce_cq *);
489 
490 int	oce_init_stats(struct oce_softc *);
491 int	oce_update_stats(struct oce_softc *);
492 int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
493 int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
494 int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);
495 
496 struct pool *oce_pkt_pool;
497 
498 struct cfdriver oce_cd = {
499 	NULL, "oce", DV_IFNET
500 };
501 
502 struct cfattach oce_ca = {
503 	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
504 };
505 
506 const struct pci_matchid oce_devices[] = {
507 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
508 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
509 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
510 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
511 	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
512 };
513 
514 int
515 oce_match(struct device *parent, void *match, void *aux)
516 {
517 	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
518 }
519 
520 void
521 oce_attach(struct device *parent, struct device *self, void *aux)
522 {
523 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
524 	struct oce_softc *sc = (struct oce_softc *)self;
525 	const char *intrstr = NULL;
526 	pci_intr_handle_t ih;
527 
528 	switch (PCI_PRODUCT(pa->pa_id)) {
529 	case PCI_PRODUCT_SERVERENGINES_BE2:
530 	case PCI_PRODUCT_SERVERENGINES_OCBE2:
531 		SET(sc->sc_flags, OCE_F_BE2);
532 		break;
533 	case PCI_PRODUCT_SERVERENGINES_BE3:
534 	case PCI_PRODUCT_SERVERENGINES_OCBE3:
535 		SET(sc->sc_flags, OCE_F_BE3);
536 		break;
537 	case PCI_PRODUCT_EMULEX_XE201:
538 		SET(sc->sc_flags, OCE_F_XE201);
539 		break;
540 	}
541 
542 	sc->sc_dmat = pa->pa_dmat;
543 	if (oce_pci_alloc(sc, pa))
544 		return;
545 
546 	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
547 	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;
548 
549 	/* create the bootstrap mailbox */
550 	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
551 		printf(": failed to allocate mailbox memory\n");
552 		return;
553 	}
554 	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
555 		printf(": failed to allocate payload memory\n");
556 		goto fail_1;
557 	}
558 
559 	if (oce_init_fw(sc))
560 		goto fail_2;
561 
562 	if (oce_mbox_init(sc)) {
563 		printf(": failed to initialize mailbox\n");
564 		goto fail_2;
565 	}
566 
567 	if (oce_get_fw_config(sc)) {
568 		printf(": failed to get firmware configuration\n");
569 		goto fail_2;
570 	}
571 
572 	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
573 		if (oce_check_native_mode(sc))
574 			goto fail_2;
575 	}
576 
577 	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
578 		printf(": failed to fetch MAC address\n");
579 		goto fail_2;
580 	}
581 	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);
582 
583 	if (oce_pkt_pool == NULL) {
584 		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
585 		if (oce_pkt_pool == NULL) {
586 			printf(": unable to allocate descriptor pool\n");
587 			goto fail_2;
588 		}
589 		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
590 		    0, "ocepkts", NULL);
591 	}
592 
593 	/* We allocate a single interrupt resource */
594 	sc->sc_nintr = 1;
595 	if (pci_intr_map_msi(pa, &ih) != 0 &&
596 	    pci_intr_map(pa, &ih) != 0) {
597 		printf(": couldn't map interrupt\n");
598 		goto fail_2;
599 	}
600 
601 	intrstr = pci_intr_string(pa->pa_pc, ih);
602 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
603 	    sc->sc_dev.dv_xname);
604 	if (sc->sc_ih == NULL) {
605 		printf(": couldn't establish interrupt\n");
606 		if (intrstr != NULL)
607 			printf(" at %s", intrstr);
608 		printf("\n");
609 		goto fail_2;
610 	}
611 	printf(": %s", intrstr);
612 
613 	if (oce_init_stats(sc))
614 		goto fail_3;
615 
616 	if (oce_init_queues(sc))
617 		goto fail_3;
618 
619 	oce_attach_ifp(sc);
620 
621 #ifdef OCE_LRO
622 	if (oce_init_lro(sc))
623 		goto fail_4;
624 #endif
625 
626 	timeout_set(&sc->sc_tick, oce_tick, sc);
627 	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);
628 
629 	config_mountroot(self, oce_attachhook);
630 
631 	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
632 
633 	return;
634 
635 #ifdef OCE_LRO
636 fail_4:
637 	oce_free_lro(sc);
638 	ether_ifdetach(&sc->sc_ac.ac_if);
639 	if_detach(&sc->sc_ac.ac_if);
640 	oce_release_queues(sc);
641 #endif
642 fail_3:
643 	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
644 fail_2:
645 	oce_dma_free(sc, &sc->sc_pld);
646 fail_1:
647 	oce_dma_free(sc, &sc->sc_mbx);
648 }
649 
650 int
651 oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
652 {
653 	pcireg_t memtype, reg;
654 
655 	/* setup the device config region */
656 	if (ISSET(sc->sc_flags, OCE_F_BE2))
657 		reg = OCE_BAR_CFG_BE2;
658 	else
659 		reg = OCE_BAR_CFG;
660 
661 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
662 	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
663 	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
664 	    IS_BE(sc) ? 0 : 32768)) {
665 		printf(": can't find cfg mem space\n");
666 		return (ENXIO);
667 	}
668 
669 	/*
670 	 * Read the SLI_INTF register and determine whether we
671 	 * can use this port and its features
672 	 */
673 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
674 	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
675 		printf(": invalid signature\n");
676 		goto fail_1;
677 	}
678 	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
679 		printf(": unsupported SLI revision\n");
680 		goto fail_1;
681 	}
682 	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
683 		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
684 	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
685 		SET(sc->sc_flags, OCE_F_RESET_RQD);
686 
687 	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
688 	if (IS_BE(sc)) {
689 		/* set up CSR region */
690 		reg = OCE_BAR_CSR;
691 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
692 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
693 		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
694 			printf(": can't find csr mem space\n");
695 			goto fail_1;
696 		}
697 
698 		/* set up DB doorbell region */
699 		reg = OCE_BAR_DB;
700 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
701 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
702 		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
703 			printf(": can't find csr mem space\n");
704 			goto fail_2;
705 		}
706 	} else {
707 		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
708 		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
709 	}
710 
711 	return (0);
712 
713 fail_2:
714 	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
715 fail_1:
716 	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
717 	return (ENXIO);
718 }
719 
720 static inline uint32_t
721 oce_read_cfg(struct oce_softc *sc, bus_size_t off)
722 {
723 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
724 	    BUS_SPACE_BARRIER_READ);
725 	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
726 }
727 
728 static inline uint32_t
729 oce_read_csr(struct oce_softc *sc, bus_size_t off)
730 {
731 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
732 	    BUS_SPACE_BARRIER_READ);
733 	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
734 }
735 
736 static inline uint32_t
737 oce_read_db(struct oce_softc *sc, bus_size_t off)
738 {
739 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
740 	    BUS_SPACE_BARRIER_READ);
741 	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
742 }
743 
744 static inline void
745 oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
746 {
747 	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
748 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
749 	    BUS_SPACE_BARRIER_WRITE);
750 }
751 
752 static inline void
753 oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
754 {
755 	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
756 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
757 	    BUS_SPACE_BARRIER_WRITE);
758 }
759 
760 static inline void
761 oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
762 {
763 	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
764 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
765 	    BUS_SPACE_BARRIER_WRITE);
766 }
767 
768 static inline void
769 oce_intr_enable(struct oce_softc *sc)
770 {
771 	uint32_t reg;
772 
773 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
774 	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
775 }
776 
777 static inline void
778 oce_intr_disable(struct oce_softc *sc)
779 {
780 	uint32_t reg;
781 
782 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
783 	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
784 }
785 
786 void
787 oce_attachhook(struct device *self)
788 {
789 	struct oce_softc *sc = (struct oce_softc *)self;
790 
791 	oce_get_link_status(sc);
792 
793 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
794 
795 	/*
796 	 * We need to get MCC async events. So enable intrs and arm
797 	 * first EQ, Other EQs will be armed after interface is UP
798 	 */
799 	oce_intr_enable(sc);
800 	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);
801 
802 	/*
803 	 * Send first mcc cmd and after that we get gracious
804 	 * MCC notifications from FW
805 	 */
806 	oce_first_mcc(sc);
807 }
808 
809 void
810 oce_attach_ifp(struct oce_softc *sc)
811 {
812 	struct ifnet *ifp = &sc->sc_ac.ac_if;
813 
814 	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
815 	    oce_media_status);
816 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
817 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
818 
819 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
820 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
821 	ifp->if_ioctl = oce_ioctl;
822 	ifp->if_start = oce_start;
823 	ifp->if_watchdog = oce_watchdog;
824 	ifp->if_hardmtu = OCE_MAX_MTU;
825 	ifp->if_softc = sc;
826 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_size - 1);
827 
828 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
829 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
830 
831 #if NVLAN > 0
832 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
833 #endif
834 
835 #ifdef OCE_TSO
836 	ifp->if_capabilities |= IFCAP_TSO;
837 	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
838 #endif
839 #ifdef OCE_LRO
840 	ifp->if_capabilities |= IFCAP_LRO;
841 #endif
842 
843 	if_attach(ifp);
844 	ether_ifattach(ifp);
845 }
846 
847 int
848 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
849 {
850 	struct oce_softc *sc = ifp->if_softc;
851 	struct ifreq *ifr = (struct ifreq *)data;
852 	int s, error = 0;
853 
854 	s = splnet();
855 
856 	switch (command) {
857 	case SIOCSIFADDR:
858 		ifp->if_flags |= IFF_UP;
859 		if (!(ifp->if_flags & IFF_RUNNING))
860 			oce_init(sc);
861 		break;
862 	case SIOCSIFFLAGS:
863 		if (ifp->if_flags & IFF_UP) {
864 			if (ifp->if_flags & IFF_RUNNING)
865 				error = ENETRESET;
866 			else
867 				oce_init(sc);
868 		} else {
869 			if (ifp->if_flags & IFF_RUNNING)
870 				oce_stop(sc);
871 		}
872 		break;
873 	case SIOCGIFMEDIA:
874 	case SIOCSIFMEDIA:
875 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
876 		break;
877 	case SIOCGIFRXR:
878 		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
879 		break;
880 	default:
881 		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
882 		break;
883 	}
884 
885 	if (error == ENETRESET) {
886 		if (ifp->if_flags & IFF_RUNNING)
887 			oce_iff(sc);
888 		error = 0;
889 	}
890 
891 	splx(s);
892 
893 	return (error);
894 }
895 
896 int
897 oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
898 {
899 	struct if_rxring_info *ifr, ifr1;
900 	struct oce_rq *rq;
901 	int error, i;
902 	u_int n = 0;
903 
904 	if (sc->sc_nrq > 1) {
905 		if ((ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
906 		    M_WAITOK | M_ZERO)) == NULL)
907 			return (ENOMEM);
908 	} else
909 		ifr = &ifr1;
910 
911 	OCE_RQ_FOREACH(sc, rq, i) {
912 		ifr[n].ifr_size = MCLBYTES;
913 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
914 		ifr[n].ifr_info = rq->rxring;
915 		n++;
916 	}
917 
918 	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);
919 
920 	if (sc->sc_nrq > 1)
921 		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
922 	return (error);
923 }
924 
925 
926 void
927 oce_iff(struct oce_softc *sc)
928 {
929 	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
930 	struct arpcom *ac = &sc->sc_ac;
931 	struct ifnet *ifp = &ac->ac_if;
932 	struct ether_multi *enm;
933 	struct ether_multistep step;
934 	int naddr = 0, promisc = 0;
935 
936 	ifp->if_flags &= ~IFF_ALLMULTI;
937 
938 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
939 	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
940 		ifp->if_flags |= IFF_ALLMULTI;
941 		promisc = 1;
942 	} else {
943 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
944 		while (enm != NULL) {
945 			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
946 			ETHER_NEXT_MULTI(step, enm);
947 		}
948 		oce_update_mcast(sc, multi, naddr);
949 	}
950 
951 	oce_set_promisc(sc, promisc);
952 }
953 
954 void
955 oce_link_status(struct oce_softc *sc)
956 {
957 	struct ifnet *ifp = &sc->sc_ac.ac_if;
958 	int link_state = LINK_STATE_DOWN;
959 
960 	ifp->if_baudrate = 0;
961 	if (sc->sc_link_up) {
962 		link_state = LINK_STATE_FULL_DUPLEX;
963 
964 		switch (sc->sc_link_speed) {
965 		case 1:
966 			ifp->if_baudrate = IF_Mbps(10);
967 			break;
968 		case 2:
969 			ifp->if_baudrate = IF_Mbps(100);
970 			break;
971 		case 3:
972 			ifp->if_baudrate = IF_Gbps(1);
973 			break;
974 		case 4:
975 			ifp->if_baudrate = IF_Gbps(10);
976 			break;
977 		}
978 	}
979 	if (ifp->if_link_state != link_state) {
980 		ifp->if_link_state = link_state;
981 		if_link_state_change(ifp);
982 	}
983 }
984 
985 void
986 oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
987 {
988 	struct oce_softc *sc = ifp->if_softc;
989 
990 	ifmr->ifm_status = IFM_AVALID;
991 	ifmr->ifm_active = IFM_ETHER;
992 
993 	if (oce_get_link_status(sc) == 0)
994 		oce_link_status(sc);
995 
996 	if (!sc->sc_link_up) {
997 		ifmr->ifm_active |= IFM_NONE;
998 		return;
999 	}
1000 
1001 	ifmr->ifm_status |= IFM_ACTIVE;
1002 
1003 	switch (sc->sc_link_speed) {
1004 	case 1: /* 10 Mbps */
1005 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1006 		break;
1007 	case 2: /* 100 Mbps */
1008 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1009 		break;
1010 	case 3: /* 1 Gbps */
1011 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1012 		break;
1013 	case 4: /* 10 Gbps */
1014 		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1015 		break;
1016 	}
1017 
1018 	if (sc->sc_fc & IFM_ETH_RXPAUSE)
1019 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1020 	if (sc->sc_fc & IFM_ETH_TXPAUSE)
1021 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1022 }
1023 
1024 int
1025 oce_media_change(struct ifnet *ifp)
1026 {
1027 	return (0);
1028 }
1029 
1030 void
1031 oce_tick(void *arg)
1032 {
1033 	struct oce_softc *sc = arg;
1034 	int s;
1035 
1036 	s = splnet();
1037 
1038 	if (oce_update_stats(sc) == 0)
1039 		timeout_add_sec(&sc->sc_tick, 1);
1040 
1041 	splx(s);
1042 }
1043 
1044 void
1045 oce_init(void *arg)
1046 {
1047 	struct oce_softc *sc = arg;
1048 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1049 	struct oce_eq *eq;
1050 	struct oce_rq *rq;
1051 	struct oce_wq *wq;
1052 	int i;
1053 
1054 	oce_stop(sc);
1055 
1056 	DELAY(10);
1057 
1058 	oce_macaddr_set(sc);
1059 
1060 	oce_iff(sc);
1061 
1062 	/* Enable VLAN promiscuous mode */
1063 	if (oce_config_vlan(sc, NULL, 0, 1, 1))
1064 		goto error;
1065 
1066 	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
1067 		goto error;
1068 
1069 	OCE_RQ_FOREACH(sc, rq, i) {
1070 		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1071 		    ETHER_VLAN_ENCAP_LEN;
1072 		if (oce_new_rq(sc, rq)) {
1073 			printf("%s: failed to create rq\n",
1074 			    sc->sc_dev.dv_xname);
1075 			goto error;
1076 		}
1077 		rq->ring->index	 = 0;
1078 
1079 		/* oce splits jumbos into 2k chunks... */
1080 		if_rxr_init(&rq->rxring, 8, rq->nitems);
1081 
1082 		if (!oce_alloc_rx_bufs(rq)) {
1083 			printf("%s: failed to allocate rx buffers\n",
1084 			    sc->sc_dev.dv_xname);
1085 			goto error;
1086 		}
1087 	}
1088 
1089 #ifdef OCE_RSS
1090 	/* RSS config */
1091 	if (sc->sc_rss_enable) {
1092 		if (oce_config_rss(sc, (uint8_t)sc->sc_if_id, 1)) {
1093 			printf("%s: failed to configure RSS\n",
1094 			    sc->sc_dev.dv_xname);
1095 			goto error;
1096 		}
1097 	}
1098 #endif
1099 
1100 	OCE_RQ_FOREACH(sc, rq, i)
1101 		oce_arm_cq(rq->cq, 0, TRUE);
1102 
1103 	OCE_WQ_FOREACH(sc, wq, i)
1104 		oce_arm_cq(wq->cq, 0, TRUE);
1105 
1106 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
1107 
1108 	OCE_EQ_FOREACH(sc, eq, i)
1109 		oce_arm_eq(eq, 0, TRUE, FALSE);
1110 
1111 	if (oce_get_link_status(sc) == 0)
1112 		oce_link_status(sc);
1113 
1114 	ifp->if_flags |= IFF_RUNNING;
1115 	ifq_clr_oactive(&ifp->if_snd);
1116 
1117 	timeout_add_sec(&sc->sc_tick, 1);
1118 
1119 	oce_intr_enable(sc);
1120 
1121 	return;
1122 error:
1123 	oce_stop(sc);
1124 }
1125 
1126 void
1127 oce_stop(struct oce_softc *sc)
1128 {
1129 	struct mbx_delete_nic_rq cmd;
1130 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1131 	struct oce_rq *rq;
1132 	struct oce_wq *wq;
1133 	struct oce_eq *eq;
1134 	int i;
1135 
1136 	timeout_del(&sc->sc_tick);
1137 	timeout_del(&sc->sc_rxrefill);
1138 
1139 	ifp->if_flags &= ~IFF_RUNNING;
1140 	ifq_clr_oactive(&ifp->if_snd);
1141 
1142 	/* Stop intrs and finish any bottom halves pending */
1143 	oce_intr_disable(sc);
1144 
1145 	/* Invalidate any pending cq and eq entries */
1146 	OCE_EQ_FOREACH(sc, eq, i)
1147 		oce_drain_eq(eq);
1148 	OCE_RQ_FOREACH(sc, rq, i) {
1149 		/* destroy the work queue in the firmware */
1150 		memset(&cmd, 0, sizeof(cmd));
1151 		cmd.params.req.rq_id = htole16(rq->id);
1152 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1153 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1154 		DELAY(1000);
1155 		oce_drain_rq(rq);
1156 		oce_free_posted_rxbuf(rq);
1157 	}
1158 	OCE_WQ_FOREACH(sc, wq, i)
1159 		oce_drain_wq(wq);
1160 }
1161 
1162 void
1163 oce_watchdog(struct ifnet *ifp)
1164 {
1165 	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);
1166 
1167 	oce_init(ifp->if_softc);
1168 
1169 	ifp->if_oerrors++;
1170 }
1171 
1172 void
1173 oce_start(struct ifnet *ifp)
1174 {
1175 	struct oce_softc *sc = ifp->if_softc;
1176 	struct mbuf *m;
1177 	int pkts = 0;
1178 
1179 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1180 		return;
1181 
1182 	for (;;) {
1183 		IFQ_DEQUEUE(&ifp->if_snd, m);
1184 		if (m == NULL)
1185 			break;
1186 
1187 		if (oce_encap(sc, &m, 0)) {
1188 			ifq_set_oactive(&ifp->if_snd);
1189 			break;
1190 		}
1191 
1192 #if NBPFILTER > 0
1193 		if (ifp->if_bpf)
1194 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1195 #endif
1196 		pkts++;
1197 	}
1198 
1199 	/* Set a timeout in case the chip goes out to lunch */
1200 	if (pkts)
1201 		ifp->if_timer = 5;
1202 }
1203 
1204 int
1205 oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
1206 {
1207 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1208 	struct mbuf *m = *mpp;
1209 	struct oce_wq *wq = sc->sc_wq[wqidx];
1210 	struct oce_pkt *pkt = NULL;
1211 	struct oce_nic_hdr_wqe *nhe;
1212 	struct oce_nic_frag_wqe *nfe;
1213 	int i, nwqe, err;
1214 
1215 #ifdef OCE_TSO
1216 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1217 		/* consolidate packet buffers for TSO/LSO segment offload */
1218 		m = oce_tso(sc, mpp);
1219 		if (m == NULL)
1220 			goto error;
1221 	}
1222 #endif
1223 
1224 	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
1225 		goto error;
1226 
1227 	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
1228 	if (err == EFBIG) {
1229 		if (m_defrag(m, M_DONTWAIT) ||
1230 		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
1231 			BUS_DMA_NOWAIT))
1232 			goto error;
1233 		*mpp = m;
1234 	} else if (err != 0)
1235 		goto error;
1236 
1237 	pkt->nsegs = pkt->map->dm_nsegs;
1238 
1239 	nwqe = pkt->nsegs + 1;
1240 	if (IS_BE(sc)) {
1241 		/* BE2 and BE3 require even number of WQEs */
1242 		if (nwqe & 1)
1243 			nwqe++;
1244 	}
1245 
1246 	/* Fail if there's not enough free WQEs */
1247 	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
1248 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1249 		goto error;
1250 	}
1251 
1252 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1253 	    BUS_DMASYNC_PREWRITE);
1254 	pkt->mbuf = m;
1255 
1256 	/* TX work queue entry for the header */
1257 	nhe = oce_ring_get(wq->ring);
1258 	memset(nhe, 0, sizeof(*nhe));
1259 
1260 	nhe->u0.s.complete = 1;
1261 	nhe->u0.s.event = 1;
1262 	nhe->u0.s.crc = 1;
1263 	nhe->u0.s.forward = 0;
1264 	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
1265 	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
1266 	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
1267 	nhe->u0.s.num_wqe = nwqe;
1268 	nhe->u0.s.total_length = m->m_pkthdr.len;
1269 
1270 #if NVLAN > 0
1271 	if (m->m_flags & M_VLANTAG) {
1272 		nhe->u0.s.vlan = 1; /* Vlan present */
1273 		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1274 	}
1275 #endif
1276 
1277 #ifdef OCE_TSO
1278 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1279 		if (m->m_pkthdr.tso_segsz) {
1280 			nhe->u0.s.lso = 1;
1281 			nhe->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1282 		}
1283 		if (!IS_BE(sc))
1284 			nhe->u0.s.ipcs = 1;
1285 	}
1286 #endif
1287 
1288 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
1289 	    BUS_DMASYNC_PREWRITE);
1290 
1291 	wq->ring->nused++;
1292 
1293 	/* TX work queue entries for data chunks */
1294 	for (i = 0; i < pkt->nsegs; i++) {
1295 		nfe = oce_ring_get(wq->ring);
1296 		memset(nfe, 0, sizeof(*nfe));
1297 		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
1298 		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
1299 		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
1300 		wq->ring->nused++;
1301 	}
1302 	if (nwqe > (pkt->nsegs + 1)) {
1303 		nfe = oce_ring_get(wq->ring);
1304 		memset(nfe, 0, sizeof(*nfe));
1305 		wq->ring->nused++;
1306 		pkt->nsegs++;
1307 	}
1308 
1309 	oce_pkt_put(&wq->pkt_list, pkt);
1310 
1311 	ifp->if_opackets++;
1312 
1313 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
1314 	    BUS_DMASYNC_POSTWRITE);
1315 
1316 	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));
1317 
1318 	return (0);
1319 
1320 error:
1321 	if (pkt)
1322 		oce_pkt_put(&wq->pkt_free, pkt);
1323 	m_freem(*mpp);
1324 	*mpp = NULL;
1325 	return (1);
1326 }
1327 
1328 #ifdef OCE_TSO
1329 struct mbuf *
1330 oce_tso(struct oce_softc *sc, struct mbuf **mpp)
1331 {
1332 	struct mbuf *m;
1333 	struct ip *ip;
1334 #ifdef INET6
1335 	struct ip6_hdr *ip6;
1336 #endif
1337 	struct ether_vlan_header *eh;
1338 	struct tcphdr *th;
1339 	uint16_t etype;
1340 	int total_len = 0, ehdrlen = 0;
1341 
1342 	m = *mpp;
1343 
1344 	if (M_WRITABLE(m) == 0) {
1345 		m = m_dup(*mpp, M_DONTWAIT);
1346 		if (!m)
1347 			return (NULL);
1348 		m_freem(*mpp);
1349 		*mpp = m;
1350 	}
1351 
1352 	eh = mtod(m, struct ether_vlan_header *);
1353 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1354 		etype = ntohs(eh->evl_proto);
1355 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1356 	} else {
1357 		etype = ntohs(eh->evl_encap_proto);
1358 		ehdrlen = ETHER_HDR_LEN;
1359 	}
1360 
1361 	switch (etype) {
1362 	case ETHERTYPE_IP:
1363 		ip = (struct ip *)(m->m_data + ehdrlen);
1364 		if (ip->ip_p != IPPROTO_TCP)
1365 			return (NULL);
1366 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1367 
1368 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1369 		break;
1370 #ifdef INET6
1371 	case ETHERTYPE_IPV6:
1372 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1373 		if (ip6->ip6_nxt != IPPROTO_TCP)
1374 			return NULL;
1375 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1376 
1377 		total_len = ehdrlen + sizeof(struct ip6_hdr) +
1378 		    (th->th_off << 2);
1379 		break;
1380 #endif
1381 	default:
1382 		return (NULL);
1383 	}
1384 
1385 	m = m_pullup(m, total_len);
1386 	if (!m)
1387 		return (NULL);
1388 	*mpp = m;
1389 	return (m);
1390 
1391 }
1392 #endif /* OCE_TSO */
1393 
1394 int
1395 oce_intr(void *arg)
1396 {
1397 	struct oce_softc *sc = arg;
1398 	struct oce_eq *eq = sc->sc_eq[0];
1399 	struct oce_eqe *eqe;
1400 	struct oce_cq *cq = NULL;
1401 	int i, neqe = 0;
1402 
1403 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
1404 
1405 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
1406 		eqe->evnt = 0;
1407 		neqe++;
1408 	}
1409 
1410 	/* Spurious? */
1411 	if (!neqe) {
1412 		oce_arm_eq(eq, 0, TRUE, FALSE);
1413 		return (0);
1414 	}
1415 
1416 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
1417 
1418  	/* Clear EQ entries, but dont arm */
1419 	oce_arm_eq(eq, neqe, FALSE, TRUE);
1420 
1421 	/* Process TX, RX and MCC completion queues */
1422 	for (i = 0; i < eq->cq_valid; i++) {
1423 		cq = eq->cq[i];
1424 		(*cq->cq_intr)(cq->cb_arg);
1425 		oce_arm_cq(cq, 0, TRUE);
1426 	}
1427 
1428 	oce_arm_eq(eq, 0, TRUE, FALSE);
1429 	return (1);
1430 }
1431 
1432 /* Handle the Completion Queue for transmit */
1433 void
1434 oce_intr_wq(void *arg)
1435 {
1436 	struct oce_wq *wq = (struct oce_wq *)arg;
1437 	struct oce_cq *cq = wq->cq;
1438 	struct oce_nic_tx_cqe *cqe;
1439 	struct oce_softc *sc = wq->sc;
1440 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1441 	int ncqe = 0;
1442 
1443 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1444 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
1445 		oce_txeof(wq);
1446 		WQ_CQE_INVALIDATE(cqe);
1447 		ncqe++;
1448 	}
1449 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1450 
1451 	if (ifq_is_oactive(&ifp->if_snd)) {
1452 		if (wq->ring->nused < (wq->ring->nitems / 2)) {
1453 			ifq_clr_oactive(&ifp->if_snd);
1454 			oce_start(ifp);
1455 		}
1456 	}
1457 	if (wq->ring->nused == 0)
1458 		ifp->if_timer = 0;
1459 
1460 	if (ncqe)
1461 		oce_arm_cq(cq, ncqe, FALSE);
1462 }
1463 
1464 void
1465 oce_txeof(struct oce_wq *wq)
1466 {
1467 	struct oce_softc *sc = wq->sc;
1468 	struct oce_pkt *pkt;
1469 	struct mbuf *m;
1470 
1471 	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
1472 		printf("%s: missing descriptor in txeof\n",
1473 		    sc->sc_dev.dv_xname);
1474 		return;
1475 	}
1476 
1477 	wq->ring->nused -= pkt->nsegs + 1;
1478 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1479 	    BUS_DMASYNC_POSTWRITE);
1480 	bus_dmamap_unload(sc->sc_dmat, pkt->map);
1481 
1482 	m = pkt->mbuf;
1483 	m_freem(m);
1484 	pkt->mbuf = NULL;
1485 	oce_pkt_put(&wq->pkt_free, pkt);
1486 }
1487 
1488 /* Handle the Completion Queue for receive */
1489 void
1490 oce_intr_rq(void *arg)
1491 {
1492 	struct oce_rq *rq = (struct oce_rq *)arg;
1493 	struct oce_cq *cq = rq->cq;
1494 	struct oce_softc *sc = rq->sc;
1495 	struct oce_nic_rx_cqe *cqe;
1496 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1497 	int maxrx, ncqe = 0;
1498 
1499 	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;
1500 
1501 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1502 
1503 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
1504 		if (cqe->u0.s.error == 0) {
1505 			if (cqe->u0.s.pkt_size == 0)
1506 				/* partial DMA workaround for Lancer */
1507 				oce_rxeoc(rq, cqe);
1508 			else
1509 				oce_rxeof(rq, cqe);
1510 		} else {
1511 			ifp->if_ierrors++;
1512 			if (IS_XE201(sc))
1513 				/* Lancer A0 no buffer workaround */
1514 				oce_rxeoc(rq, cqe);
1515 			else
1516 				/* Post L3/L4 errors to stack.*/
1517 				oce_rxeof(rq, cqe);
1518 		}
1519 #ifdef OCE_LRO
1520 		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
1521 			oce_flush_lro(rq);
1522 #endif
1523 		RQ_CQE_INVALIDATE(cqe);
1524 		ncqe++;
1525 	}
1526 
1527 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1528 
1529 #ifdef OCE_LRO
1530 	if (IF_LRO_ENABLED(ifp))
1531 		oce_flush_lro(rq);
1532 #endif
1533 
1534 	if (ncqe) {
1535 		oce_arm_cq(cq, ncqe, FALSE);
1536 		if (!oce_alloc_rx_bufs(rq))
1537 			timeout_add(&sc->sc_rxrefill, 1);
1538 	}
1539 }
1540 
1541 void
1542 oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1543 {
1544 	struct oce_softc *sc = rq->sc;
1545 	struct oce_pkt *pkt = NULL;
1546 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1547 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1548 	struct mbuf *m = NULL, *tail = NULL;
1549 	int i, len, frag_len;
1550 	uint16_t vtag;
1551 
1552 	len = cqe->u0.s.pkt_size;
1553 
1554 	 /* Get vlan_tag value */
1555 	if (IS_BE(sc))
1556 		vtag = ntohs(cqe->u0.s.vlan_tag);
1557 	else
1558 		vtag = cqe->u0.s.vlan_tag;
1559 
1560 	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1561 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1562 			printf("%s: missing descriptor in rxeof\n",
1563 			    sc->sc_dev.dv_xname);
1564 			goto exit;
1565 		}
1566 
1567 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1568 		    BUS_DMASYNC_POSTREAD);
1569 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1570 		if_rxr_put(&rq->rxring, 1);
1571 
1572 		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
1573 		pkt->mbuf->m_len = frag_len;
1574 
1575 		if (tail != NULL) {
1576 			/* additional fragments */
1577 			pkt->mbuf->m_flags &= ~M_PKTHDR;
1578 			tail->m_next = pkt->mbuf;
1579 			tail = pkt->mbuf;
1580 		} else {
1581 			/* first fragment, fill out most of the header */
1582 			pkt->mbuf->m_pkthdr.len = len;
1583 			pkt->mbuf->m_pkthdr.csum_flags = 0;
1584 			if (cqe->u0.s.ip_cksum_pass) {
1585 				if (!cqe->u0.s.ip_ver) { /* IPV4 */
1586 					pkt->mbuf->m_pkthdr.csum_flags =
1587 					    M_IPV4_CSUM_IN_OK;
1588 				}
1589 			}
1590 			if (cqe->u0.s.l4_cksum_pass) {
1591 				pkt->mbuf->m_pkthdr.csum_flags |=
1592 				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1593 			}
1594 			m = tail = pkt->mbuf;
1595 		}
1596 		pkt->mbuf = NULL;
1597 		oce_pkt_put(&rq->pkt_free, pkt);
1598 		len -= frag_len;
1599 	}
1600 
1601 	if (m) {
1602 		if (!oce_port_valid(sc, cqe)) {
1603 			 m_freem(m);
1604 			 goto exit;
1605 		}
1606 
1607 #if NVLAN > 0
1608 		/* This determines if vlan tag is valid */
1609 		if (oce_vtp_valid(sc, cqe)) {
1610 			if (sc->sc_fmode & FNM_FLEX10_MODE) {
1611 				/* FLEX10. If QnQ is not set, neglect VLAN */
1612 				if (cqe->u0.s.qnq) {
1613 					m->m_pkthdr.ether_vtag = vtag;
1614 					m->m_flags |= M_VLANTAG;
1615 				}
1616 			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK))  {
1617 				/*
1618 				 * In UMC mode generally pvid will be striped.
1619 				 * But in some cases we have seen it comes
1620 				 * with pvid. So if pvid == vlan, neglect vlan.
1621 				 */
1622 				m->m_pkthdr.ether_vtag = vtag;
1623 				m->m_flags |= M_VLANTAG;
1624 			}
1625 		}
1626 #endif
1627 
1628 #ifdef OCE_LRO
1629 		/* Try to queue to LRO */
1630 		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
1631 		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
1632 		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
1633 
1634 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1635 				rq->lro_pkts_queued ++;
1636 				goto exit;
1637 			}
1638 			/* If LRO posting fails then try to post to STACK */
1639 		}
1640 #endif
1641 
1642 		ml_enqueue(&ml, m);
1643 	}
1644 exit:
1645 	if_input(ifp, &ml);
1646 }
1647 
1648 void
1649 oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1650 {
1651 	struct oce_softc *sc = rq->sc;
1652 	struct oce_pkt *pkt;
1653 	int i, num_frags = cqe->u0.s.num_fragments;
1654 
1655 	if (IS_XE201(sc) && cqe->u0.s.error) {
1656 		/*
1657 		 * Lancer A0 workaround:
1658 		 * num_frags will be 1 more than actual in case of error
1659 		 */
1660 		if (num_frags)
1661 			num_frags--;
1662 	}
1663 	for (i = 0; i < num_frags; i++) {
1664 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1665 			printf("%s: missing descriptor in rxeoc\n",
1666 			    sc->sc_dev.dv_xname);
1667 			return;
1668 		}
1669 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1670 		    BUS_DMASYNC_POSTREAD);
1671 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1672 		if_rxr_put(&rq->rxring, 1);
1673 		m_freem(pkt->mbuf);
1674 		oce_pkt_put(&rq->pkt_free, pkt);
1675 	}
1676 }
1677 
1678 int
1679 oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1680 {
1681 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1682 
1683 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1684 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1685 		return (cqe_v1->u0.s.vlan_tag_present);
1686 	}
1687 	return (cqe->u0.s.vlan_tag_present);
1688 }
1689 
1690 int
1691 oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1692 {
1693 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1694 
1695 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1696 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1697 		if (sc->sc_port != cqe_v1->u0.s.port)
1698 			return (0);
1699 	}
1700 	return (1);
1701 }
1702 
1703 #ifdef OCE_LRO
1704 void
1705 oce_flush_lro(struct oce_rq *rq)
1706 {
1707 	struct oce_softc *sc = rq->sc;
1708 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1709 	struct lro_ctrl	*lro = &rq->lro;
1710 	struct lro_entry *queued;
1711 
1712 	if (!IF_LRO_ENABLED(ifp))
1713 		return;
1714 
1715 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1716 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1717 		tcp_lro_flush(lro, queued);
1718 	}
1719 	rq->lro_pkts_queued = 0;
1720 }
1721 
1722 int
1723 oce_init_lro(struct oce_softc *sc)
1724 {
1725 	struct lro_ctrl *lro = NULL;
1726 	int i = 0, rc = 0;
1727 
1728 	for (i = 0; i < sc->sc_nrq; i++) {
1729 		lro = &sc->sc_rq[i]->lro;
1730 		rc = tcp_lro_init(lro);
1731 		if (rc != 0) {
1732 			printf("%s: LRO init failed\n",
1733 			    sc->sc_dev.dv_xname);
1734 			return rc;
1735 		}
1736 		lro->ifp = &sc->sc_ac.ac_if;
1737 	}
1738 
1739 	return (rc);
1740 }
1741 
1742 void
1743 oce_free_lro(struct oce_softc *sc)
1744 {
1745 	struct lro_ctrl *lro = NULL;
1746 	int i = 0;
1747 
1748 	for (i = 0; i < sc->sc_nrq; i++) {
1749 		lro = &sc->sc_rq[i]->lro;
1750 		if (lro)
1751 			tcp_lro_free(lro);
1752 	}
1753 }
1754 #endif /* OCE_LRO */
1755 
1756 int
1757 oce_get_buf(struct oce_rq *rq)
1758 {
1759 	struct oce_softc *sc = rq->sc;
1760 	struct oce_pkt *pkt;
1761 	struct oce_nic_rqe *rqe;
1762 
1763 	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
1764 		return (0);
1765 
1766 	pkt->mbuf = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1767 	if (pkt->mbuf == NULL) {
1768 		oce_pkt_put(&rq->pkt_free, pkt);
1769 		return (0);
1770 	}
1771 
1772 	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
1773 #ifdef __STRICT_ALIGNMENT
1774 	m_adj(pkt->mbuf, ETHER_ALIGN);
1775 #endif
1776 
1777 	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
1778 	    BUS_DMA_NOWAIT)) {
1779 		m_freem(pkt->mbuf);
1780 		pkt->mbuf = NULL;
1781 		oce_pkt_put(&rq->pkt_free, pkt);
1782 		return (0);
1783 	}
1784 
1785 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1786 	    BUS_DMASYNC_PREREAD);
1787 
1788 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
1789 	    BUS_DMASYNC_PREWRITE);
1790 
1791 	rqe = oce_ring_get(rq->ring);
1792 	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
1793 	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);
1794 
1795 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
1796 	    BUS_DMASYNC_POSTWRITE);
1797 
1798 	oce_pkt_put(&rq->pkt_list, pkt);
1799 
1800 	return (1);
1801 }
1802 
1803 int
1804 oce_alloc_rx_bufs(struct oce_rq *rq)
1805 {
1806 	struct oce_softc *sc = rq->sc;
1807 	int i, nbufs = 0;
1808 	u_int slots;
1809 
1810 	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
1811 		if (oce_get_buf(rq) == 0)
1812 			break;
1813 
1814 		nbufs++;
1815 	}
1816 	if_rxr_put(&rq->rxring, slots);
1817 
1818 	if (!nbufs)
1819 		return (0);
1820 	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1821 		oce_write_db(sc, PD_RXULP_DB, rq->id |
1822 		    (OCE_MAX_RQ_POSTS << 24));
1823 		nbufs -= OCE_MAX_RQ_POSTS;
1824 	}
1825 	if (nbufs > 0)
1826 		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1827 	return (1);
1828 }
1829 
1830 void
1831 oce_refill_rx(void *arg)
1832 {
1833 	struct oce_softc *sc = arg;
1834 	struct oce_rq *rq;
1835 	int i, s;
1836 
1837 	s = splnet();
1838 	OCE_RQ_FOREACH(sc, rq, i) {
1839 		if (!oce_alloc_rx_bufs(rq))
1840 			timeout_add(&sc->sc_rxrefill, 5);
1841 	}
1842 	splx(s);
1843 }
1844 
1845 /* Handle the Completion Queue for the Mailbox/Async notifications */
1846 void
1847 oce_intr_mq(void *arg)
1848 {
1849 	struct oce_mq *mq = (struct oce_mq *)arg;
1850 	struct oce_softc *sc = mq->sc;
1851 	struct oce_cq *cq = mq->cq;
1852 	struct oce_mq_cqe *cqe;
1853 	struct oce_async_cqe_link_state *acqe;
1854 	struct oce_async_event_grp5_pvid_state *gcqe;
1855 	int evtype, optype, ncqe = 0;
1856 
1857 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1858 
1859 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1860 		if (cqe->u0.s.async_event) {
1861 			evtype = cqe->u0.s.event_type;
1862 			optype = cqe->u0.s.async_type;
1863 			if (evtype  == ASYNC_EVENT_CODE_LINK_STATE) {
1864 				/* Link status evt */
1865 				acqe = (struct oce_async_cqe_link_state *)cqe;
1866 				oce_link_event(sc, acqe);
1867 			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1868 				   (optype == ASYNC_EVENT_PVID_STATE)) {
1869 				/* GRP5 PVID */
1870 				gcqe =
1871 				(struct oce_async_event_grp5_pvid_state *)cqe;
1872 				if (gcqe->enabled)
1873 					sc->sc_pvid =
1874 					    gcqe->tag & VLAN_VID_MASK;
1875 				else
1876 					sc->sc_pvid = 0;
1877 			}
1878 		}
1879 		MQ_CQE_INVALIDATE(cqe);
1880 		ncqe++;
1881 	}
1882 
1883 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1884 
1885 	if (ncqe)
1886 		oce_arm_cq(cq, ncqe, FALSE);
1887 }
1888 
1889 void
1890 oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1891 {
1892 	/* Update Link status */
1893 	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1894 	    ASYNC_EVENT_LINK_UP);
1895 	/* Update speed */
1896 	sc->sc_link_speed = acqe->u0.s.speed;
1897 	oce_link_status(sc);
1898 }
1899 
1900 int
1901 oce_init_queues(struct oce_softc *sc)
1902 {
1903 	struct oce_wq *wq;
1904 	struct oce_rq *rq;
1905 	int i;
1906 
1907 	sc->sc_nrq = 1;
1908 	sc->sc_nwq = 1;
1909 
1910 	/* Create network interface on card */
1911 	if (oce_create_iface(sc, sc->sc_macaddr))
1912 		goto error;
1913 
1914 	/* create all of the event queues */
1915 	for (i = 0; i < sc->sc_nintr; i++) {
1916 		sc->sc_eq[i] = oce_create_eq(sc);
1917 		if (!sc->sc_eq[i])
1918 			goto error;
1919 	}
1920 
1921 	/* alloc tx queues */
1922 	OCE_WQ_FOREACH(sc, wq, i) {
1923 		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1924 		if (!sc->sc_wq[i])
1925 			goto error;
1926 	}
1927 
1928 	/* alloc rx queues */
1929 	OCE_RQ_FOREACH(sc, rq, i) {
1930 		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1931 		    i > 0 ? sc->sc_rss_enable : 0);
1932 		if (!sc->sc_rq[i])
1933 			goto error;
1934 	}
1935 
1936 	/* alloc mailbox queue */
1937 	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1938 	if (!sc->sc_mq)
1939 		goto error;
1940 
1941 	return (0);
1942 error:
1943 	oce_release_queues(sc);
1944 	return (1);
1945 }
1946 
1947 void
1948 oce_release_queues(struct oce_softc *sc)
1949 {
1950 	struct oce_wq *wq;
1951 	struct oce_rq *rq;
1952 	struct oce_eq *eq;
1953 	int i;
1954 
1955 	OCE_RQ_FOREACH(sc, rq, i) {
1956 		if (rq)
1957 			oce_destroy_rq(sc->sc_rq[i]);
1958 	}
1959 
1960 	OCE_WQ_FOREACH(sc, wq, i) {
1961 		if (wq)
1962 			oce_destroy_wq(sc->sc_wq[i]);
1963 	}
1964 
1965 	if (sc->sc_mq)
1966 		oce_destroy_mq(sc->sc_mq);
1967 
1968 	OCE_EQ_FOREACH(sc, eq, i) {
1969 		if (eq)
1970 			oce_destroy_eq(sc->sc_eq[i]);
1971 	}
1972 }
1973 
1974 /**
1975  * @brief 		Function to create a WQ for NIC Tx
1976  * @param sc 		software handle to the device
1977  * @returns		the pointer to the WQ created or NULL on failure
1978  */
1979 struct oce_wq *
1980 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1981 {
1982 	struct oce_wq *wq;
1983 	struct oce_cq *cq;
1984 	struct oce_pkt *pkt;
1985 	int i;
1986 
1987 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1988 		return (NULL);
1989 
1990 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1991 	if (!wq)
1992 		return (NULL);
1993 
1994 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1995 	if (!wq->ring) {
1996 		free(wq, M_DEVBUF, 0);
1997 		return (NULL);
1998 	}
1999 
2000 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
2001 	    1, 0, 3);
2002 	if (!cq) {
2003 		oce_destroy_ring(sc, wq->ring);
2004 		free(wq, M_DEVBUF, 0);
2005 		return (NULL);
2006 	}
2007 
2008 	wq->id = -1;
2009 	wq->sc = sc;
2010 
2011 	wq->cq = cq;
2012 	wq->nitems = sc->sc_tx_ring_size;
2013 
2014 	SIMPLEQ_INIT(&wq->pkt_free);
2015 	SIMPLEQ_INIT(&wq->pkt_list);
2016 
2017 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2018 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2019 		    PAGE_SIZE);
2020 		if (pkt == NULL) {
2021 			oce_destroy_wq(wq);
2022 			return (NULL);
2023 		}
2024 		oce_pkt_put(&wq->pkt_free, pkt);
2025 	}
2026 
2027 	if (oce_new_wq(sc, wq)) {
2028 		oce_destroy_wq(wq);
2029 		return (NULL);
2030 	}
2031 
2032 	eq->cq[eq->cq_valid] = cq;
2033 	eq->cq_valid++;
2034 	cq->cb_arg = wq;
2035 	cq->cq_intr = oce_intr_wq;
2036 
2037 	return (wq);
2038 }
2039 
2040 void
2041 oce_drain_wq(struct oce_wq *wq)
2042 {
2043 	struct oce_cq *cq = wq->cq;
2044 	struct oce_nic_tx_cqe *cqe;
2045 	int ncqe = 0;
2046 
2047 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2048 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2049 		WQ_CQE_INVALIDATE(cqe);
2050 		ncqe++;
2051 	}
2052 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2053 	oce_arm_cq(cq, ncqe, FALSE);
2054 }
2055 
2056 void
2057 oce_destroy_wq(struct oce_wq *wq)
2058 {
2059 	struct mbx_delete_nic_wq cmd;
2060 	struct oce_softc *sc = wq->sc;
2061 	struct oce_pkt *pkt;
2062 
2063 	if (wq->id >= 0) {
2064 		memset(&cmd, 0, sizeof(cmd));
2065 		cmd.params.req.wq_id = htole16(wq->id);
2066 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2067 		    &cmd, sizeof(cmd));
2068 	}
2069 	if (wq->cq != NULL)
2070 		oce_destroy_cq(wq->cq);
2071 	if (wq->ring != NULL)
2072 		oce_destroy_ring(sc, wq->ring);
2073 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2074 		oce_pkt_free(sc, pkt);
2075 	free(wq, M_DEVBUF, 0);
2076 }
2077 
2078 /**
2079  * @brief 		function to allocate receive queue resources
2080  * @param sc		software handle to the device
2081  * @param eq		pointer to associated event queue
2082  * @param rss		is-rss-queue flag
2083  * @returns		the pointer to the RQ created or NULL on failure
2084  */
2085 struct oce_rq *
2086 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2087 {
2088 	struct oce_rq *rq;
2089 	struct oce_cq *cq;
2090 	struct oce_pkt *pkt;
2091 	int i;
2092 
2093 	/* Hardware doesn't support any other value */
2094 	if (sc->sc_rx_ring_size != 1024)
2095 		return (NULL);
2096 
2097 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2098 	if (!rq)
2099 		return (NULL);
2100 
2101 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2102 	    sizeof(struct oce_nic_rqe), 2);
2103 	if (!rq->ring) {
2104 		free(rq, M_DEVBUF, 0);
2105 		return (NULL);
2106 	}
2107 
2108 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2109 	    1, 0, 3);
2110 	if (!cq) {
2111 		oce_destroy_ring(sc, rq->ring);
2112 		free(rq, M_DEVBUF, 0);
2113 		return (NULL);
2114 	}
2115 
2116 	rq->id = -1;
2117 	rq->sc = sc;
2118 
2119 	rq->nitems = sc->sc_rx_ring_size;
2120 	rq->fragsize = OCE_RX_BUF_SIZE;
2121 	rq->rss = rss;
2122 
2123 	SIMPLEQ_INIT(&rq->pkt_free);
2124 	SIMPLEQ_INIT(&rq->pkt_list);
2125 
2126 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2127 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2128 		if (pkt == NULL) {
2129 			oce_destroy_rq(rq);
2130 			return (NULL);
2131 		}
2132 		oce_pkt_put(&rq->pkt_free, pkt);
2133 	}
2134 
2135 	rq->cq = cq;
2136 	eq->cq[eq->cq_valid] = cq;
2137 	eq->cq_valid++;
2138 	cq->cb_arg = rq;
2139 	cq->cq_intr = oce_intr_rq;
2140 
2141 	/* RX queue is created in oce_init */
2142 
2143 	return (rq);
2144 }
2145 
2146 void
2147 oce_drain_rq(struct oce_rq *rq)
2148 {
2149 	struct oce_nic_rx_cqe *cqe;
2150 	struct oce_cq *cq = rq->cq;
2151 	int ncqe = 0;
2152 
2153 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2154 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2155 		RQ_CQE_INVALIDATE(cqe);
2156 		ncqe++;
2157 	}
2158 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2159 	oce_arm_cq(cq, ncqe, FALSE);
2160 }
2161 
2162 void
2163 oce_destroy_rq(struct oce_rq *rq)
2164 {
2165 	struct mbx_delete_nic_rq cmd;
2166 	struct oce_softc *sc = rq->sc;
2167 	struct oce_pkt *pkt;
2168 
2169 	if (rq->id >= 0) {
2170 		memset(&cmd, 0, sizeof(cmd));
2171 		cmd.params.req.rq_id = htole16(rq->id);
2172 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2173 		    &cmd, sizeof(cmd));
2174 	}
2175 	if (rq->cq != NULL)
2176 		oce_destroy_cq(rq->cq);
2177 	if (rq->ring != NULL)
2178 		oce_destroy_ring(sc, rq->ring);
2179 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2180 		oce_pkt_free(sc, pkt);
2181 	free(rq, M_DEVBUF, 0);
2182 }
2183 
2184 struct oce_eq *
2185 oce_create_eq(struct oce_softc *sc)
2186 {
2187 	struct oce_eq *eq;
2188 
2189 	/* allocate an eq */
2190 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2191 	if (eq == NULL)
2192 		return (NULL);
2193 
2194 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2195 	if (!eq->ring) {
2196 		free(eq, M_DEVBUF, 0);
2197 		return (NULL);
2198 	}
2199 
2200 	eq->id = -1;
2201 	eq->sc = sc;
2202 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2203 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2204 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2205 
2206 	if (oce_new_eq(sc, eq)) {
2207 		oce_destroy_ring(sc, eq->ring);
2208 		free(eq, M_DEVBUF, 0);
2209 		return (NULL);
2210 	}
2211 
2212 	return (eq);
2213 }
2214 
2215 /**
2216  * @brief		Function to arm an EQ so that it can generate events
2217  * @param eq		pointer to event queue structure
2218  * @param neqe		number of EQEs to arm
2219  * @param rearm		rearm bit enable/disable
2220  * @param clearint	bit to clear the interrupt condition because of which
2221  *			EQEs are generated
2222  */
2223 static inline void
2224 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2225 {
2226 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2227 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2228 }
2229 
2230 void
2231 oce_drain_eq(struct oce_eq *eq)
2232 {
2233 	struct oce_eqe *eqe;
2234 	int neqe = 0;
2235 
2236 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2237 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2238 		eqe->evnt = 0;
2239 		neqe++;
2240 	}
2241 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2242 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2243 }
2244 
2245 void
2246 oce_destroy_eq(struct oce_eq *eq)
2247 {
2248 	struct mbx_destroy_common_eq cmd;
2249 	struct oce_softc *sc = eq->sc;
2250 
2251 	if (eq->id >= 0) {
2252 		memset(&cmd, 0, sizeof(cmd));
2253 		cmd.params.req.id = htole16(eq->id);
2254 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2255 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2256 	}
2257 	if (eq->ring != NULL)
2258 		oce_destroy_ring(sc, eq->ring);
2259 	free(eq, M_DEVBUF, 0);
2260 }
2261 
2262 struct oce_mq *
2263 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2264 {
2265 	struct oce_mq *mq = NULL;
2266 	struct oce_cq *cq;
2267 
2268 	/* allocate the mq */
2269 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2270 	if (!mq)
2271 		return (NULL);
2272 
2273 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2274 	if (!mq->ring) {
2275 		free(mq, M_DEVBUF, 0);
2276 		return (NULL);
2277 	}
2278 
2279 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2280 	    1, 0, 0);
2281 	if (!cq) {
2282 		oce_destroy_ring(sc, mq->ring);
2283 		free(mq, M_DEVBUF, 0);
2284 		return (NULL);
2285 	}
2286 
2287 	mq->id = -1;
2288 	mq->sc = sc;
2289 	mq->cq = cq;
2290 
2291 	mq->nitems = 128;
2292 
2293 	if (oce_new_mq(sc, mq)) {
2294 		oce_destroy_cq(mq->cq);
2295 		oce_destroy_ring(sc, mq->ring);
2296 		free(mq, M_DEVBUF, 0);
2297 		return (NULL);
2298 	}
2299 
2300 	eq->cq[eq->cq_valid] = cq;
2301 	eq->cq_valid++;
2302 	mq->cq->eq = eq;
2303 	mq->cq->cb_arg = mq;
2304 	mq->cq->cq_intr = oce_intr_mq;
2305 
2306 	return (mq);
2307 }
2308 
2309 void
2310 oce_drain_mq(struct oce_mq *mq)
2311 {
2312 	struct oce_cq *cq = mq->cq;
2313 	struct oce_mq_cqe *cqe;
2314 	int ncqe = 0;
2315 
2316 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2317 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2318 		MQ_CQE_INVALIDATE(cqe);
2319 		ncqe++;
2320 	}
2321 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2322 	oce_arm_cq(cq, ncqe, FALSE);
2323 }
2324 
2325 void
2326 oce_destroy_mq(struct oce_mq *mq)
2327 {
2328 	struct mbx_destroy_common_mq cmd;
2329 	struct oce_softc *sc = mq->sc;
2330 
2331 	if (mq->id >= 0) {
2332 		memset(&cmd, 0, sizeof(cmd));
2333 		cmd.params.req.id = htole16(mq->id);
2334 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2335 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2336 	}
2337 	if (mq->ring != NULL)
2338 		oce_destroy_ring(sc, mq->ring);
2339 	if (mq->cq != NULL)
2340 		oce_destroy_cq(mq->cq);
2341 	free(mq, M_DEVBUF, 0);
2342 }
2343 
2344 /**
2345  * @brief		Function to create a completion queue
2346  * @param sc		software handle to the device
2347  * @param eq		optional eq to be associated with to the cq
2348  * @param nitems	length of completion queue
2349  * @param isize		size of completion queue items
2350  * @param eventable	event table
2351  * @param nodelay	no delay flag
2352  * @param ncoalesce	no coalescence flag
2353  * @returns 		pointer to the cq created, NULL on failure
2354  */
2355 struct oce_cq *
2356 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2357     int eventable, int nodelay, int ncoalesce)
2358 {
2359 	struct oce_cq *cq = NULL;
2360 
2361 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2362 	if (!cq)
2363 		return (NULL);
2364 
2365 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2366 	if (!cq->ring) {
2367 		free(cq, M_DEVBUF, 0);
2368 		return (NULL);
2369 	}
2370 
2371 	cq->sc = sc;
2372 	cq->eq = eq;
2373 	cq->nitems = nitems;
2374 	cq->nodelay = nodelay;
2375 	cq->ncoalesce = ncoalesce;
2376 	cq->eventable = eventable;
2377 
2378 	if (oce_new_cq(sc, cq)) {
2379 		oce_destroy_ring(sc, cq->ring);
2380 		free(cq, M_DEVBUF, 0);
2381 		return (NULL);
2382 	}
2383 
2384 	sc->sc_cq[sc->sc_ncq++] = cq;
2385 
2386 	return (cq);
2387 }
2388 
2389 void
2390 oce_destroy_cq(struct oce_cq *cq)
2391 {
2392 	struct mbx_destroy_common_cq cmd;
2393 	struct oce_softc *sc = cq->sc;
2394 
2395 	if (cq->id >= 0) {
2396 		memset(&cmd, 0, sizeof(cmd));
2397 		cmd.params.req.id = htole16(cq->id);
2398 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2399 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2400 	}
2401 	if (cq->ring != NULL)
2402 		oce_destroy_ring(sc, cq->ring);
2403 	free(cq, M_DEVBUF, 0);
2404 }
2405 
2406 /**
2407  * @brief		Function to arm a CQ with CQEs
2408  * @param cq		pointer to the completion queue structure
2409  * @param ncqe		number of CQEs to arm
2410  * @param rearm		rearm bit enable/disable
2411  */
2412 static inline void
2413 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2414 {
2415 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2416 }
2417 
2418 void
2419 oce_free_posted_rxbuf(struct oce_rq *rq)
2420 {
2421 	struct oce_softc *sc = rq->sc;
2422 	struct oce_pkt *pkt;
2423 
2424 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2425 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2426 		    BUS_DMASYNC_POSTREAD);
2427 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2428 		if (pkt->mbuf != NULL) {
2429 			m_freem(pkt->mbuf);
2430 			pkt->mbuf = NULL;
2431 		}
2432 		oce_pkt_put(&rq->pkt_free, pkt);
2433 		if_rxr_put(&rq->rxring, 1);
2434 	}
2435 }
2436 
2437 int
2438 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2439 {
2440 	int rc;
2441 
2442 	memset(dma, 0, sizeof(struct oce_dma_mem));
2443 
2444 	dma->tag = sc->sc_dmat;
2445 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2446 	    &dma->map);
2447 	if (rc != 0) {
2448 		printf("%s: failed to allocate DMA handle",
2449 		    sc->sc_dev.dv_xname);
2450 		goto fail_0;
2451 	}
2452 
2453 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2454 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2455 	if (rc != 0) {
2456 		printf("%s: failed to allocate DMA memory",
2457 		    sc->sc_dev.dv_xname);
2458 		goto fail_1;
2459 	}
2460 
2461 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2462 	    &dma->vaddr, BUS_DMA_NOWAIT);
2463 	if (rc != 0) {
2464 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2465 		goto fail_2;
2466 	}
2467 
2468 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2469 	    BUS_DMA_NOWAIT);
2470 	if (rc != 0) {
2471 		printf("%s: failed to load DMA memory", sc->sc_dev.dv_xname);
2472 		goto fail_3;
2473 	}
2474 
2475 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2476 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2477 
2478 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2479 	dma->size = size;
2480 
2481 	return (0);
2482 
2483 fail_3:
2484 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2485 fail_2:
2486 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2487 fail_1:
2488 	bus_dmamap_destroy(dma->tag, dma->map);
2489 fail_0:
2490 	return (rc);
2491 }
2492 
2493 void
2494 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2495 {
2496 	if (dma->tag == NULL)
2497 		return;
2498 
2499 	if (dma->map != NULL) {
2500 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2501 		bus_dmamap_unload(dma->tag, dma->map);
2502 
2503 		if (dma->vaddr != 0) {
2504 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2505 			dma->vaddr = 0;
2506 		}
2507 
2508 		bus_dmamap_destroy(dma->tag, dma->map);
2509 		dma->map = NULL;
2510 		dma->tag = NULL;
2511 	}
2512 }
2513 
2514 struct oce_ring *
2515 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2516 {
2517 	struct oce_dma_mem *dma;
2518 	struct oce_ring *ring;
2519 	bus_size_t size = nitems * isize;
2520 	int rc;
2521 
2522 	if (size > maxsegs * PAGE_SIZE)
2523 		return (NULL);
2524 
2525 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2526 	if (ring == NULL)
2527 		return (NULL);
2528 
2529 	ring->isize = isize;
2530 	ring->nitems = nitems;
2531 
2532 	dma = &ring->dma;
2533 	dma->tag = sc->sc_dmat;
2534 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2535 	    BUS_DMA_NOWAIT, &dma->map);
2536 	if (rc != 0) {
2537 		printf("%s: failed to allocate DMA handle",
2538 		    sc->sc_dev.dv_xname);
2539 		goto fail_0;
2540 	}
2541 
2542 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2543 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2544 	if (rc != 0) {
2545 		printf("%s: failed to allocate DMA memory",
2546 		    sc->sc_dev.dv_xname);
2547 		goto fail_1;
2548 	}
2549 
2550 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2551 	    &dma->vaddr, BUS_DMA_NOWAIT);
2552 	if (rc != 0) {
2553 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2554 		goto fail_2;
2555 	}
2556 
2557 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2558 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2559 
2560 	dma->paddr = 0;
2561 	dma->size = size;
2562 
2563 	return (ring);
2564 
2565 fail_2:
2566 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2567 fail_1:
2568 	bus_dmamap_destroy(dma->tag, dma->map);
2569 fail_0:
2570 	free(ring, M_DEVBUF, 0);
2571 	return (NULL);
2572 }
2573 
2574 void
2575 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2576 {
2577 	oce_dma_free(sc, &ring->dma);
2578 	free(ring, M_DEVBUF, 0);
2579 }
2580 
2581 int
2582 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2583     struct oce_pa *pa, int maxsegs)
2584 {
2585 	struct oce_dma_mem *dma = &ring->dma;
2586 	int i;
2587 
2588 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2589 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2590 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2591 		return (0);
2592 	}
2593 
2594 	if (dma->map->dm_nsegs > maxsegs) {
2595 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2596 		return (0);
2597 	}
2598 
2599 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2600 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2601 
2602 	for (i = 0; i < dma->map->dm_nsegs; i++)
2603 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2604 
2605 	return (dma->map->dm_nsegs);
2606 }
2607 
2608 static inline void *
2609 oce_ring_get(struct oce_ring *ring)
2610 {
2611 	int index = ring->index;
2612 
2613 	if (++ring->index == ring->nitems)
2614 		ring->index = 0;
2615 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2616 }
2617 
2618 static inline void *
2619 oce_ring_first(struct oce_ring *ring)
2620 {
2621 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2622 }
2623 
2624 static inline void *
2625 oce_ring_next(struct oce_ring *ring)
2626 {
2627 	if (++ring->index == ring->nitems)
2628 		ring->index = 0;
2629 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2630 }
2631 
2632 struct oce_pkt *
2633 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2634 {
2635 	struct oce_pkt *pkt;
2636 
2637 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2638 		return (NULL);
2639 
2640 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2641 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2642 		pool_put(oce_pkt_pool, pkt);
2643 		return (NULL);
2644 	}
2645 
2646 	return (pkt);
2647 }
2648 
2649 void
2650 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2651 {
2652 	if (pkt->map) {
2653 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2654 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2655 	}
2656 	pool_put(oce_pkt_pool, pkt);
2657 }
2658 
2659 static inline struct oce_pkt *
2660 oce_pkt_get(struct oce_pkt_list *lst)
2661 {
2662 	struct oce_pkt *pkt;
2663 
2664 	pkt = SIMPLEQ_FIRST(lst);
2665 	if (pkt == NULL)
2666 		return (NULL);
2667 
2668 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2669 
2670 	return (pkt);
2671 }
2672 
2673 static inline void
2674 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2675 {
2676 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2677 }
2678 
2679 /**
2680  * @brief Wait for FW to become ready and reset it
2681  * @param sc		software handle to the device
2682  */
2683 int
2684 oce_init_fw(struct oce_softc *sc)
2685 {
2686 	struct ioctl_common_function_reset cmd;
2687 	uint32_t reg;
2688 	int err = 0, tmo = 60000;
2689 
2690 	/* read semaphore CSR */
2691 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2692 
2693 	/* if host is ready then wait for fw ready else send POST */
2694 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2695 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2696 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2697 	}
2698 
2699 	/* wait for FW to become ready */
2700 	for (;;) {
2701 		if (--tmo == 0)
2702 			break;
2703 
2704 		DELAY(1000);
2705 
2706 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2707 		if (reg & MPU_EP_SEM_ERROR) {
2708 			printf(": POST failed: %#x\n", reg);
2709 			return (ENXIO);
2710 		}
2711 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2712 			/* reset FW */
2713 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2714 				memset(&cmd, 0, sizeof(cmd));
2715 				err = oce_cmd(sc, SUBSYS_COMMON,
2716 				    OPCODE_COMMON_FUNCTION_RESET,
2717 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2718 			}
2719 			return (err);
2720 		}
2721 	}
2722 
2723 	printf(": POST timed out: %#x\n", reg);
2724 
2725 	return (ENXIO);
2726 }
2727 
2728 static inline int
2729 oce_mbox_wait(struct oce_softc *sc)
2730 {
2731 	int i;
2732 
2733 	for (i = 0; i < 20000; i++) {
2734 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2735 			return (0);
2736 		DELAY(100);
2737 	}
2738 	return (ETIMEDOUT);
2739 }
2740 
2741 /**
2742  * @brief Mailbox dispatch
2743  * @param sc		software handle to the device
2744  */
2745 int
2746 oce_mbox_dispatch(struct oce_softc *sc)
2747 {
2748 	uint32_t pa, reg;
2749 	int err;
2750 
2751 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2752 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2753 
2754 	if ((err = oce_mbox_wait(sc)) != 0)
2755 		goto out;
2756 
2757 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2758 
2759 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2760 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2761 
2762 	if ((err = oce_mbox_wait(sc)) != 0)
2763 		goto out;
2764 
2765 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2766 
2767 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2768 
2769 	if ((err = oce_mbox_wait(sc)) != 0)
2770 		goto out;
2771 
2772 out:
2773 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2774 	return (err);
2775 }
2776 
2777 /**
2778  * @brief Function to initialize the hw with host endian information
2779  * @param sc		software handle to the device
2780  * @returns		0 on success, ETIMEDOUT on failure
2781  */
2782 int
2783 oce_mbox_init(struct oce_softc *sc)
2784 {
2785 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2786 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2787 
2788 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2789 		return (0);
2790 
2791 	/* Endian Signature */
2792 	*ptr++ = 0xff;
2793 	*ptr++ = 0x12;
2794 	*ptr++ = 0x34;
2795 	*ptr++ = 0xff;
2796 	*ptr++ = 0xff;
2797 	*ptr++ = 0x56;
2798 	*ptr++ = 0x78;
2799 	*ptr = 0xff;
2800 
2801 	return (oce_mbox_dispatch(sc));
2802 }
2803 
2804 int
2805 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2806     void *payload, int length)
2807 {
2808 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2809 	struct oce_mbx *mbx = &bmbx->mbx;
2810 	struct mbx_hdr *hdr;
2811 	caddr_t epayload = NULL;
2812 	int err;
2813 
2814 	if (length > OCE_MBX_PAYLOAD)
2815 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2816 	if (length > OCE_MAX_PAYLOAD)
2817 		return (EINVAL);
2818 
2819 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2820 
2821 	memset(mbx, 0, sizeof(struct oce_mbx));
2822 
2823 	mbx->payload_length = length;
2824 
2825 	if (epayload) {
2826 		mbx->flags = OCE_MBX_F_SGE;
2827 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2828 		memcpy(epayload, payload, length);
2829 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2830 		mbx->pld.sgl[0].length = length;
2831 		hdr = (struct mbx_hdr *)epayload;
2832 	} else {
2833 		mbx->flags = OCE_MBX_F_EMBED;
2834 		memcpy(mbx->pld.data, payload, length);
2835 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2836 	}
2837 
2838 	hdr->subsys = subsys;
2839 	hdr->opcode = opcode;
2840 	hdr->version = version;
2841 	hdr->length = length - sizeof(*hdr);
2842 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2843 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2844 	else
2845 		hdr->timeout = OCE_MBX_TIMEOUT;
2846 
2847 	if (epayload)
2848 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2849 
2850 	err = oce_mbox_dispatch(sc);
2851 	if (err == 0) {
2852 		if (epayload) {
2853 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2854 			memcpy(payload, epayload, length);
2855 		} else
2856 			memcpy(payload, &mbx->pld.data, length);
2857 	} else
2858 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2859 		    "%spayload lenght %d\n", sc->sc_dev.dv_xname, subsys,
2860 		    opcode, version, epayload ? "ext " : "",
2861 		    length);
2862 	return (err);
2863 }
2864 
2865 /**
2866  * @brief	Firmware will send gracious notifications during
2867  *		attach only after sending first mcc commnad. We
2868  *		use MCC queue only for getting async and mailbox
2869  *		for sending cmds. So to get gracious notifications
2870  *		atleast send one dummy command on mcc.
2871  */
2872 void
2873 oce_first_mcc(struct oce_softc *sc)
2874 {
2875 	struct oce_mbx *mbx;
2876 	struct oce_mq *mq = sc->sc_mq;
2877 	struct mbx_hdr *hdr;
2878 	struct mbx_get_common_fw_version *cmd;
2879 
2880 	mbx = oce_ring_get(mq->ring);
2881 	memset(mbx, 0, sizeof(struct oce_mbx));
2882 
2883 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2884 
2885 	hdr = &cmd->hdr;
2886 	hdr->subsys = SUBSYS_COMMON;
2887 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2888 	hdr->version = OCE_MBX_VER_V0;
2889 	hdr->timeout = OCE_MBX_TIMEOUT;
2890 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2891 
2892 	mbx->flags = OCE_MBX_F_EMBED;
2893 	mbx->payload_length = sizeof(*cmd);
2894 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2895 	    BUS_DMASYNC_PREWRITE);
2896 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2897 }
2898 
2899 int
2900 oce_get_fw_config(struct oce_softc *sc)
2901 {
2902 	struct mbx_common_query_fw_config cmd;
2903 	int err;
2904 
2905 	memset(&cmd, 0, sizeof(cmd));
2906 
2907 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2908 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2909 	if (err)
2910 		return (err);
2911 
2912 	sc->sc_port = cmd.params.rsp.port_id;
2913 	sc->sc_fmode = cmd.params.rsp.function_mode;
2914 
2915 	return (0);
2916 }
2917 
2918 int
2919 oce_check_native_mode(struct oce_softc *sc)
2920 {
2921 	struct mbx_common_set_function_cap cmd;
2922 	int err;
2923 
2924 	memset(&cmd, 0, sizeof(cmd));
2925 
2926 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2927 	    CAP_BE3_NATIVE_ERX_API;
2928 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2929 
2930 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2931 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2932 	if (err)
2933 		return (err);
2934 
2935 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2936 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2937 
2938 	return (0);
2939 }
2940 
2941 /**
2942  * @brief Function for creating a network interface.
2943  * @param sc		software handle to the device
2944  * @returns		0 on success, error otherwise
2945  */
2946 int
2947 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2948 {
2949 	struct mbx_create_common_iface cmd;
2950 	uint32_t caps, caps_en;
2951 	int err = 0;
2952 
2953 	/* interface capabilities to give device when creating interface */
2954 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2955 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2956 	    MBX_RX_IFACE_RSS;
2957 
2958 	/* capabilities to enable by default (others set dynamically) */
2959 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2960 
2961 	if (!IS_XE201(sc)) {
2962 		/* LANCER A0 workaround */
2963 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2964 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2965 	}
2966 
2967 	/* enable capabilities controlled via driver startup parameters */
2968 	if (sc->sc_rss_enable)
2969 		caps_en |= MBX_RX_IFACE_RSS;
2970 
2971 	memset(&cmd, 0, sizeof(cmd));
2972 
2973 	cmd.params.req.version = 0;
2974 	cmd.params.req.cap_flags = htole32(caps);
2975 	cmd.params.req.enable_flags = htole32(caps_en);
2976 	if (macaddr != NULL) {
2977 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2978 		cmd.params.req.mac_invalid = 0;
2979 	} else
2980 		cmd.params.req.mac_invalid = 1;
2981 
2982 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2983 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2984 	if (err)
2985 		return (err);
2986 
2987 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2988 
2989 	if (macaddr != NULL)
2990 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2991 
2992 	return (0);
2993 }
2994 
2995 /**
2996  * @brief Function to send the mbx command to configure vlan
2997  * @param sc 		software handle to the device
2998  * @param vtags		array of vlan tags
2999  * @param nvtags	number of elements in array
3000  * @param untagged	boolean TRUE/FLASE
3001  * @param promisc	flag to enable/disable VLAN promiscuous mode
3002  * @returns		0 on success, EIO on failure
3003  */
3004 int
3005 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3006     int untagged, int promisc)
3007 {
3008 	struct mbx_common_config_vlan cmd;
3009 
3010 	memset(&cmd, 0, sizeof(cmd));
3011 
3012 	cmd.params.req.if_id = sc->sc_if_id;
3013 	cmd.params.req.promisc = promisc;
3014 	cmd.params.req.untagged = untagged;
3015 	cmd.params.req.num_vlans = nvtags;
3016 
3017 	if (!promisc)
3018 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3019 			nvtags * sizeof(struct normal_vlan));
3020 
3021 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3022 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3023 }
3024 
3025 /**
3026  * @brief Function to set flow control capability in the hardware
3027  * @param sc 		software handle to the device
3028  * @param flags		flow control flags to set
3029  * @returns		0 on success, EIO on failure
3030  */
3031 int
3032 oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3033 {
3034 	struct mbx_common_get_set_flow_control cmd;
3035 	int err;
3036 
3037 	memset(&cmd, 0, sizeof(cmd));
3038 
3039 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3040 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3041 
3042 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3043 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3044 	if (err)
3045 		return (err);
3046 
3047 	memset(&cmd, 0, sizeof(cmd));
3048 
3049 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3050 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3051 	if (err)
3052 		return (err);
3053 
3054 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3055 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3056 
3057 	return (0);
3058 }
3059 
3060 #ifdef OCE_RSS
3061 /**
3062  * @brief Function to set flow control capability in the hardware
3063  * @param sc 		software handle to the device
3064  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3065  * @returns		0 on success, EIO on failure
3066  */
3067 int
3068 oce_config_rss(struct oce_softc *sc, int enable)
3069 {
3070 	struct mbx_config_nic_rss cmd;
3071 	uint8_t *tbl = &cmd.params.req.cputable;
3072 	int i, j;
3073 
3074 	memset(&cmd, 0, sizeof(cmd));
3075 
3076 	if (enable)
3077 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3078 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3079 	cmd.params.req.flush = OCE_FLUSH;
3080 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3081 
3082 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3083 
3084 	/*
3085 	 * Initialize the RSS CPU indirection table.
3086 	 *
3087 	 * The table is used to choose the queue to place incoming packets.
3088 	 * Incoming packets are hashed.  The lowest bits in the hash result
3089 	 * are used as the index into the CPU indirection table.
3090 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3091 	 * create.  Based on the CPU ID, the receive completion is routed to
3092 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3093 	 * on the default (0) CQ).
3094 	 */
3095 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3096 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3097 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3098 	}
3099 	if (i > 0)
3100 		cmd->params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3101 	else
3102 		return (ENXIO);
3103 
3104 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3105 	    &cmd, sizeof(cmd)));
3106 }
3107 #endif	/* OCE_RSS */
3108 
3109 /**
3110  * @brief Function for hardware update multicast filter
3111  * @param sc		software handle to the device
3112  * @param multi		table of multicast addresses
3113  * @param naddr		number of multicast addresses in the table
3114  */
3115 int
3116 oce_update_mcast(struct oce_softc *sc,
3117     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3118 {
3119 	struct mbx_set_common_iface_multicast cmd;
3120 
3121 	memset(&cmd, 0, sizeof(cmd));
3122 
3123 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3124 	cmd.params.req.num_mac = htole16(naddr);
3125 	cmd.params.req.if_id = sc->sc_if_id;
3126 
3127 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3128 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3129 }
3130 
3131 /**
3132  * @brief RXF function to enable/disable device promiscuous mode
3133  * @param sc		software handle to the device
3134  * @param enable	enable/disable flag
3135  * @returns		0 on success, EIO on failure
3136  * @note
3137  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command deprecated for Lancer.
3138  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3139  */
3140 int
3141 oce_set_promisc(struct oce_softc *sc, int enable)
3142 {
3143 	struct mbx_set_common_iface_rx_filter cmd;
3144 	struct iface_rx_filter_ctx *req;
3145 
3146 	memset(&cmd, 0, sizeof(cmd));
3147 
3148 	req = &cmd.params.req;
3149 	req->if_id = sc->sc_if_id;
3150 
3151 	if (enable)
3152 		req->iface_flags = req->iface_flags_mask =
3153 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3154 
3155 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3156 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3157 }
3158 
3159 /**
3160  * @brief Function to query the link status from the hardware
3161  * @param sc 		software handle to the device
3162  * @param[out] link	pointer to the structure returning link attributes
3163  * @returns		0 on success, EIO on failure
3164  */
3165 int
3166 oce_get_link_status(struct oce_softc *sc)
3167 {
3168 	struct mbx_query_common_link_config cmd;
3169 	int err;
3170 
3171 	memset(&cmd, 0, sizeof(cmd));
3172 
3173 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3174 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3175 	if (err)
3176 		return (err);
3177 
3178 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3179 	    NTWK_LOGICAL_LINK_UP);
3180 
3181 	if (cmd.params.rsp.mac_speed < 5)
3182 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3183 	else
3184 		sc->sc_link_speed = 0;
3185 
3186 	return (0);
3187 }
3188 
3189 void
3190 oce_macaddr_set(struct oce_softc *sc)
3191 {
3192 	uint32_t old_pmac_id = sc->sc_pmac_id;
3193 	int status = 0;
3194 
3195 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3196 		return;
3197 
3198 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3199 	if (!status)
3200 		status = oce_macaddr_del(sc, old_pmac_id);
3201 	else
3202 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3203 }
3204 
3205 int
3206 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3207 {
3208 	struct mbx_query_common_iface_mac cmd;
3209 	int err;
3210 
3211 	memset(&cmd, 0, sizeof(cmd));
3212 
3213 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3214 	cmd.params.req.permanent = 1;
3215 
3216 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3217 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3218 	if (err == 0)
3219 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3220 		    ETHER_ADDR_LEN);
3221 	return (err);
3222 }
3223 
3224 int
3225 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3226 {
3227 	struct mbx_add_common_iface_mac cmd;
3228 	int err;
3229 
3230 	memset(&cmd, 0, sizeof(cmd));
3231 
3232 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3233 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3234 
3235 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3236 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3237 	if (err == 0)
3238 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3239 	return (err);
3240 }
3241 
3242 int
3243 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3244 {
3245 	struct mbx_del_common_iface_mac cmd;
3246 
3247 	memset(&cmd, 0, sizeof(cmd));
3248 
3249 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3250 	cmd.params.req.pmac_id = htole32(pmac);
3251 
3252 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3253 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3254 }
3255 
3256 int
3257 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3258 {
3259 	struct mbx_create_nic_rq cmd;
3260 	int err, npages;
3261 
3262 	memset(&cmd, 0, sizeof(cmd));
3263 
3264 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3265 	    nitems(cmd.params.req.pages));
3266 	if (!npages) {
3267 		printf("%s: failed to load the rq ring\n", __func__);
3268 		return (1);
3269 	}
3270 
3271 	if (IS_XE201(sc)) {
3272 		cmd.params.req.frag_size = rq->fragsize / 2048;
3273 		cmd.params.req.page_size = 1;
3274 	} else
3275 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3276 	cmd.params.req.num_pages = npages;
3277 	cmd.params.req.cq_id = rq->cq->id;
3278 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3279 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3280 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3281 
3282 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3283 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3284 	    sizeof(cmd));
3285 	if (err)
3286 		return (err);
3287 
3288 	rq->id = letoh16(cmd.params.rsp.rq_id);
3289 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3290 
3291 	return (0);
3292 }
3293 
3294 int
3295 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3296 {
3297 	struct mbx_create_nic_wq cmd;
3298 	int err, npages;
3299 
3300 	memset(&cmd, 0, sizeof(cmd));
3301 
3302 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3303 	    nitems(cmd.params.req.pages));
3304 	if (!npages) {
3305 		printf("%s: failed to load the wq ring\n", __func__);
3306 		return (1);
3307 	}
3308 
3309 	if (IS_XE201(sc))
3310 		cmd.params.req.if_id = sc->sc_if_id;
3311 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3312 	cmd.params.req.num_pages = npages;
3313 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3314 	cmd.params.req.cq_id = htole16(wq->cq->id);
3315 	cmd.params.req.ulp_num = 1;
3316 
3317 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3318 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3319 	    sizeof(cmd));
3320 	if (err)
3321 		return (err);
3322 
3323 	wq->id = letoh16(cmd.params.rsp.wq_id);
3324 
3325 	return (0);
3326 }
3327 
3328 int
3329 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3330 {
3331 	struct mbx_create_common_mq_ex cmd;
3332 	union oce_mq_ext_ctx *ctx;
3333 	int err, npages;
3334 
3335 	memset(&cmd, 0, sizeof(cmd));
3336 
3337 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3338 	    nitems(cmd.params.req.pages));
3339 	if (!npages) {
3340 		printf("%s: failed to load the mq ring\n", __func__);
3341 		return (-1);
3342 	}
3343 
3344 	ctx = &cmd.params.req.context;
3345 	ctx->v0.num_pages = npages;
3346 	ctx->v0.cq_id = mq->cq->id;
3347 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3348 	ctx->v0.valid = 1;
3349 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
3350 	ctx->v0.async_evt_bitmap = 0xffffffff;
3351 
3352 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3353 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3354 	if (err)
3355 		return (err);
3356 
3357 	mq->id = letoh16(cmd.params.rsp.mq_id);
3358 
3359 	return (0);
3360 }
3361 
3362 int
3363 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3364 {
3365 	struct mbx_create_common_eq cmd;
3366 	int err, npages;
3367 
3368 	memset(&cmd, 0, sizeof(cmd));
3369 
3370 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3371 	    nitems(cmd.params.req.pages));
3372 	if (!npages) {
3373 		printf("%s: failed to load the eq ring\n", __func__);
3374 		return (-1);
3375 	}
3376 
3377 	cmd.params.req.ctx.num_pages = htole16(npages);
3378 	cmd.params.req.ctx.valid = 1;
3379 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3380 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3381 	cmd.params.req.ctx.armed = 0;
3382 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3383 
3384 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3385 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3386 	if (err)
3387 		return (err);
3388 
3389 	eq->id = letoh16(cmd.params.rsp.eq_id);
3390 
3391 	return (0);
3392 }
3393 
3394 int
3395 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3396 {
3397 	struct mbx_create_common_cq cmd;
3398 	union oce_cq_ctx *ctx;
3399 	int err, npages;
3400 
3401 	memset(&cmd, 0, sizeof(cmd));
3402 
3403 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3404 	    nitems(cmd.params.req.pages));
3405 	if (!npages) {
3406 		printf("%s: failed to load the cq ring\n", __func__);
3407 		return (-1);
3408 	}
3409 
3410 	ctx = &cmd.params.req.cq_ctx;
3411 
3412 	if (IS_XE201(sc)) {
3413 		ctx->v2.num_pages = htole16(npages);
3414 		ctx->v2.page_size = 1; /* for 4K */
3415 		ctx->v2.eventable = cq->eventable;
3416 		ctx->v2.valid = 1;
3417 		ctx->v2.count = ilog2(cq->nitems / 256);
3418 		ctx->v2.nodelay = cq->nodelay;
3419 		ctx->v2.coalesce_wm = cq->ncoalesce;
3420 		ctx->v2.armed = 0;
3421 		ctx->v2.eq_id = cq->eq->id;
3422 		if (ctx->v2.count == 3) {
3423 			if (cq->nitems > (4*1024)-1)
3424 				ctx->v2.cqe_count = (4*1024)-1;
3425 			else
3426 				ctx->v2.cqe_count = cq->nitems;
3427 		}
3428 	} else {
3429 		ctx->v0.num_pages = htole16(npages);
3430 		ctx->v0.eventable = cq->eventable;
3431 		ctx->v0.valid = 1;
3432 		ctx->v0.count = ilog2(cq->nitems / 256);
3433 		ctx->v0.nodelay = cq->nodelay;
3434 		ctx->v0.coalesce_wm = cq->ncoalesce;
3435 		ctx->v0.armed = 0;
3436 		ctx->v0.eq_id = cq->eq->id;
3437 	}
3438 
3439 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3440 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3441 	    sizeof(cmd));
3442 	if (err)
3443 		return (err);
3444 
3445 	cq->id = letoh16(cmd.params.rsp.cq_id);
3446 
3447 	return (0);
3448 }
3449 
3450 int
3451 oce_init_stats(struct oce_softc *sc)
3452 {
3453 	union cmd {
3454 		struct mbx_get_nic_stats_v0	_be2;
3455 		struct mbx_get_nic_stats	_be3;
3456 		struct mbx_get_pport_stats	_xe201;
3457 	};
3458 
3459 	sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3460 	if (sc->sc_statcmd == NULL) {
3461 		printf("%s: failed to allocate statistics command block\n",
3462 		    sc->sc_dev.dv_xname);
3463 		return (-1);
3464 	}
3465 	return (0);
3466 }
3467 
3468 int
3469 oce_update_stats(struct oce_softc *sc)
3470 {
3471 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3472 	uint64_t rxe, txe;
3473 	int err;
3474 
3475 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3476 		err = oce_stats_be2(sc, &rxe, &txe);
3477 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3478 		err = oce_stats_be3(sc, &rxe, &txe);
3479 	else
3480 		err = oce_stats_xe(sc, &rxe, &txe);
3481 	if (err)
3482 		return (err);
3483 
3484 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3485 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3486 	sc->sc_rx_errors = rxe;
3487 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3488 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3489 	sc->sc_tx_errors = txe;
3490 
3491 	return (0);
3492 }
3493 
3494 int
3495 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3496 {
3497 	struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3498 	struct oce_pmem_stats *ms;
3499 	struct oce_rxf_stats_v0 *rs;
3500 	struct oce_port_rxf_stats_v0 *ps;
3501 	int err;
3502 
3503 	memset(cmd, 0, sizeof(*cmd));
3504 
3505 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3506 	    cmd, sizeof(*cmd));
3507 	if (err)
3508 		return (err);
3509 
3510 	ms = &cmd->params.rsp.stats.pmem;
3511 	rs = &cmd->params.rsp.stats.rxf;
3512 	ps = &rs->port[sc->sc_port];
3513 
3514 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3515 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3516 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3517 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3518 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3519 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3520 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3521 	    ps->rx_alignment_symbol_errors;
3522 	if (sc->sc_if_id)
3523 		*rxe += rs->port1_jabber_events;
3524 	else
3525 		*rxe += rs->port0_jabber_events;
3526 	*rxe += ms->eth_red_drops;
3527 
3528 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3529 
3530 	return (0);
3531 }
3532 
3533 int
3534 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3535 {
3536 	struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3537 	struct oce_pmem_stats *ms;
3538 	struct oce_rxf_stats_v1 *rs;
3539 	struct oce_port_rxf_stats_v1 *ps;
3540 	int err;
3541 
3542 	memset(cmd, 0, sizeof(*cmd));
3543 
3544 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3545 	    cmd, sizeof(*cmd));
3546 	if (err)
3547 		return (err);
3548 
3549 	ms = &cmd->params.rsp.stats.pmem;
3550 	rs = &cmd->params.rsp.stats.rxf;
3551 	ps = &rs->port[sc->sc_port];
3552 
3553 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3554 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3555 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3556 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3557 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3558 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3559 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3560 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3561 	*rxe += ms->eth_red_drops;
3562 
3563 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3564 
3565 	return (0);
3566 }
3567 
3568 int
3569 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3570 {
3571 	struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3572 	struct oce_pport_stats *pps;
3573 	int err;
3574 
3575 	memset(cmd, 0, sizeof(*cmd));
3576 
3577 	cmd->params.req.reset_stats = 0;
3578 	cmd->params.req.port_number = sc->sc_if_id;
3579 
3580 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3581 	    OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3582 	if (err)
3583 		return (err);
3584 
3585 	pps = &cmd->params.rsp.pps;
3586 
3587 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3588 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3589 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3590 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3591 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3592 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3593 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3594 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3595 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3596 
3597 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3598 
3599 	return (0);
3600 }
3601