xref: /openbsd/sys/dev/pci/if_oce.c (revision 998de4a5)
1 /*	$OpenBSD: if_oce.c,v 1.96 2016/08/24 10:38:34 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2012 Mike Belopuhov
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*-
20  * Copyright (C) 2012 Emulex
21  * All rights reserved.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions are met:
25  *
26  * 1. Redistributions of source code must retain the above copyright notice,
27  *    this list of conditions and the following disclaimer.
28  *
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * 3. Neither the name of the Emulex Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived from
35  *    this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
38  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
41  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
47  * POSSIBILITY OF SUCH DAMAGE.
48  *
49  * Contact Information:
50  * freebsd-drivers@emulex.com
51  *
52  * Emulex
53  * 3333 Susan Street
54  * Costa Mesa, CA 92626
55  */
56 
57 #include "bpfilter.h"
58 #include "vlan.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sockio.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/device.h>
67 #include <sys/socket.h>
68 #include <sys/queue.h>
69 #include <sys/timeout.h>
70 #include <sys/pool.h>
71 
72 #include <net/if.h>
73 #include <net/if_media.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 
78 #ifdef INET6
79 #include <netinet/ip6.h>
80 #endif
81 
82 #if NBPFILTER > 0
83 #include <net/bpf.h>
84 #endif
85 
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pcidevs.h>
89 
90 #include <dev/pci/if_ocereg.h>
91 
92 #ifndef TRUE
93 #define TRUE			1
94 #endif
95 #ifndef FALSE
96 #define FALSE			0
97 #endif
98 
99 #define OCE_MBX_TIMEOUT		5
100 
101 #define OCE_MAX_PAYLOAD		65536
102 
103 #define OCE_TX_RING_SIZE	512
104 #define OCE_RX_RING_SIZE	1024
105 
106 /* These should be powers of 2, e.g. 2, 4, 8 or 16 */
107 #define OCE_MAX_RSS		4 /* TODO: 8 */
108 #define OCE_MAX_RQ		(OCE_MAX_RSS + 1) /* one default queue */
109 #define OCE_MAX_WQ		8
110 
111 #define OCE_MAX_EQ		32
112 #define OCE_MAX_CQ		(OCE_MAX_RQ + OCE_MAX_WQ + 1) /* one MCC queue */
113 #define OCE_MAX_CQ_EQ		8 /* Max CQs that can be attached to an EQ */
114 
115 #define OCE_DEFAULT_EQD		80
116 
117 #define OCE_MIN_MTU		256
118 #define OCE_MAX_MTU		9000
119 
120 #define OCE_MAX_RQ_COMPL	64
121 #define OCE_MAX_RQ_POSTS	255
122 #define OCE_RX_BUF_SIZE		2048
123 
124 #define OCE_MAX_TX_ELEMENTS	29
125 #define OCE_MAX_TX_DESC		1024
126 #define OCE_MAX_TX_SIZE		65535
127 
128 #define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
129 #define OCE_MEM_DVA(_m)		((_m)->paddr)
130 
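/*
 * Iteration helpers: the *_FOREACH macros walk the per-softc queue
 * arrays, and OCE_RING_FOREACH yields ring entries in (_v) for as long
 * as the caller-supplied condition (_c) holds for the current entry.
 */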
131 #define OCE_WQ_FOREACH(sc, wq, i) 	\
132 	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
133 #define OCE_RQ_FOREACH(sc, rq, i) 	\
134 	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
135 #define OCE_EQ_FOREACH(sc, eq, i) 	\
136 	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
137 #define OCE_CQ_FOREACH(sc, cq, i) 	\
138 	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
139 #define OCE_RING_FOREACH(_r, _v, _c)	\
140 	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
141 
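/* Integer base-2 logarithm, i.e. floor(log2(v)) for v >= 1. */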
142 static inline int
143 ilog2(unsigned int v)
144 {
145 	int r = 0;
146 
147 	while (v >>= 1)
148 		r++;
149 	return (r);
150 }
151 
152 struct oce_pkt {
153 	struct mbuf *		mbuf;
154 	bus_dmamap_t		map;
155 	int			nsegs;
156 	SIMPLEQ_ENTRY(oce_pkt)	entry;
157 };
158 SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
159 
160 struct oce_dma_mem {
161 	bus_dma_tag_t		tag;
162 	bus_dmamap_t		map;
163 	bus_dma_segment_t	segs;
164 	int			nsegs;
165 	bus_size_t		size;
166 	caddr_t			vaddr;
167 	bus_addr_t		paddr;
168 };
169 
170 struct oce_ring {
171 	int			index;
172 	int			nitems;
173 	int			nused;
174 	int			isize;
175 	struct oce_dma_mem	dma;
176 };
177 
178 struct oce_softc;
179 
180 enum cq_len {
181 	CQ_LEN_256  = 256,
182 	CQ_LEN_512  = 512,
183 	CQ_LEN_1024 = 1024
184 };
185 
186 enum eq_len {
187 	EQ_LEN_256  = 256,
188 	EQ_LEN_512  = 512,
189 	EQ_LEN_1024 = 1024,
190 	EQ_LEN_2048 = 2048,
191 	EQ_LEN_4096 = 4096
192 };
193 
194 enum eqe_size {
195 	EQE_SIZE_4  = 4,
196 	EQE_SIZE_16 = 16
197 };
198 
199 enum qtype {
200 	QTYPE_EQ,
201 	QTYPE_MQ,
202 	QTYPE_WQ,
203 	QTYPE_RQ,
204 	QTYPE_CQ,
205 	QTYPE_RSS
206 };
207 
208 struct oce_eq {
209 	struct oce_softc *	sc;
210 	struct oce_ring *	ring;
211 	enum qtype		type;
212 	int			id;
213 
214 	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
215 	int			cq_valid;
216 
217 	int			nitems;
218 	int			isize;
219 	int			delay;
220 };
221 
222 struct oce_cq {
223 	struct oce_softc *	sc;
224 	struct oce_ring *	ring;
225 	enum qtype		type;
226 	int			id;
227 
228 	struct oce_eq *		eq;
229 
230 	void			(*cq_intr)(void *);
231 	void *			cb_arg;
232 
233 	int			nitems;
234 	int			nodelay;
235 	int			eventable;
236 	int			ncoalesce;
237 };
238 
239 struct oce_mq {
240 	struct oce_softc *	sc;
241 	struct oce_ring *	ring;
242 	enum qtype		type;
243 	int			id;
244 
245 	struct oce_cq *		cq;
246 
247 	int			nitems;
248 };
249 
250 struct oce_wq {
251 	struct oce_softc *	sc;
252 	struct oce_ring *	ring;
253 	enum qtype		type;
254 	int			id;
255 
256 	struct oce_cq *		cq;
257 
258 	struct oce_pkt_list	pkt_list;
259 	struct oce_pkt_list	pkt_free;
260 
261 	int			nitems;
262 };
263 
264 struct oce_rq {
265 	struct oce_softc *	sc;
266 	struct oce_ring *	ring;
267 	enum qtype		type;
268 	int			id;
269 
270 	struct oce_cq *		cq;
271 
272 	struct if_rxring	rxring;
273 	struct oce_pkt_list	pkt_list;
274 	struct oce_pkt_list	pkt_free;
275 
276 	uint32_t		rss_cpuid;
277 
278 #ifdef OCE_LRO
279 	struct lro_ctrl		lro;
280 	int			lro_pkts_queued;
281 #endif
282 
283 	int			nitems;
284 	int			fragsize;
285 	int			mtu;
286 	int			rss;
287 };
288 
289 struct oce_softc {
290 	struct device		sc_dev;
291 
292 	uint			sc_flags;
293 #define  OCE_F_BE2		 0x00000001
294 #define  OCE_F_BE3		 0x00000002
295 #define  OCE_F_XE201		 0x00000008
296 #define  OCE_F_BE3_NATIVE	 0x00000100
297 #define  OCE_F_RESET_RQD	 0x00001000
298 #define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000
299 
300 	bus_dma_tag_t		sc_dmat;
301 
302 	bus_space_tag_t		sc_cfg_iot;
303 	bus_space_handle_t	sc_cfg_ioh;
304 	bus_size_t		sc_cfg_size;
305 
306 	bus_space_tag_t		sc_csr_iot;
307 	bus_space_handle_t	sc_csr_ioh;
308 	bus_size_t		sc_csr_size;
309 
310 	bus_space_tag_t		sc_db_iot;
311 	bus_space_handle_t	sc_db_ioh;
312 	bus_size_t		sc_db_size;
313 
314 	void *			sc_ih;
315 
316 	struct arpcom		sc_ac;
317 	struct ifmedia		sc_media;
318 	ushort			sc_link_up;
319 	ushort			sc_link_speed;
320 	uint64_t		sc_fc;
321 
322 	struct oce_dma_mem	sc_mbx;
323 	struct oce_dma_mem	sc_pld;
324 
325 	uint			sc_port;
326 	uint			sc_fmode;
327 
328 	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
329 	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
330 	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
331 	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
332 	struct oce_mq *		sc_mq;			/* Mailbox queue */
333 
334 	ushort			sc_neq;
335 	ushort			sc_ncq;
336 	ushort			sc_nrq;
337 	ushort			sc_nwq;
338 	ushort			sc_nintr;
339 
340 	ushort			sc_tx_ring_size;
341 	ushort			sc_rx_ring_size;
342 	ushort			sc_rss_enable;
343 
344 	uint32_t		sc_if_id;	/* interface ID */
345 	uint32_t		sc_pmac_id;	/* PMAC id */
346 	char			sc_macaddr[ETHER_ADDR_LEN];
347 
348 	uint32_t		sc_pvid;
349 
350 	uint64_t		sc_rx_errors;
351 	uint64_t		sc_tx_errors;
352 
353 	struct timeout		sc_tick;
354 	struct timeout		sc_rxrefill;
355 
356 	void *			sc_statcmd;
357 };
358 
359 #define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
360 #define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)
361 
362 #define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
363 #define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
364 
365 #define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)
366 
367 int 	oce_match(struct device *, void *, void *);
368 void	oce_attach(struct device *, struct device *, void *);
369 int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
370 void	oce_attachhook(struct device *);
371 void	oce_attach_ifp(struct oce_softc *);
372 int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
373 int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
374 void	oce_iff(struct oce_softc *);
375 void	oce_link_status(struct oce_softc *);
376 void	oce_media_status(struct ifnet *, struct ifmediareq *);
377 int 	oce_media_change(struct ifnet *);
378 void	oce_tick(void *);
379 void	oce_init(void *);
380 void	oce_stop(struct oce_softc *);
381 void	oce_watchdog(struct ifnet *);
382 void	oce_start(struct ifnet *);
383 int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
384 #ifdef OCE_TSO
385 struct mbuf *
386 	oce_tso(struct oce_softc *, struct mbuf **);
387 #endif
388 int 	oce_intr(void *);
389 void	oce_intr_wq(void *);
390 void	oce_txeof(struct oce_wq *);
391 void	oce_intr_rq(void *);
392 void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
393 void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
394 int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
395 int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
396 #ifdef OCE_LRO
397 void	oce_flush_lro(struct oce_rq *);
398 int 	oce_init_lro(struct oce_softc *);
399 void	oce_free_lro(struct oce_softc *);
400 #endif
401 int	oce_get_buf(struct oce_rq *);
402 int	oce_alloc_rx_bufs(struct oce_rq *);
403 void	oce_refill_rx(void *);
404 void	oce_free_posted_rxbuf(struct oce_rq *);
405 void	oce_intr_mq(void *);
406 void	oce_link_event(struct oce_softc *,
407 	    struct oce_async_cqe_link_state *);
408 
409 int 	oce_init_queues(struct oce_softc *);
410 void	oce_release_queues(struct oce_softc *);
411 struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
412 void	oce_drain_wq(struct oce_wq *);
413 void	oce_destroy_wq(struct oce_wq *);
414 struct oce_rq *
415 	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
416 void	oce_drain_rq(struct oce_rq *);
417 void	oce_destroy_rq(struct oce_rq *);
418 struct oce_eq *
419 	oce_create_eq(struct oce_softc *);
420 static inline void
421 	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
422 void	oce_drain_eq(struct oce_eq *);
423 void	oce_destroy_eq(struct oce_eq *);
424 struct oce_mq *
425 	oce_create_mq(struct oce_softc *, struct oce_eq *);
426 void	oce_drain_mq(struct oce_mq *);
427 void	oce_destroy_mq(struct oce_mq *);
428 struct oce_cq *
429 	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
430 	    int isize, int eventable, int nodelay, int ncoalesce);
431 static inline void
432 	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
433 void	oce_destroy_cq(struct oce_cq *);
434 
435 int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
436 void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
437 #define	oce_dma_sync(d, f) \
438 	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
439 
440 struct oce_ring *
441 	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
442 void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
443 int	oce_load_ring(struct oce_softc *, struct oce_ring *,
444 	    struct oce_pa *, int max_segs);
445 static inline void *
446 	oce_ring_get(struct oce_ring *);
447 static inline void *
448 	oce_ring_first(struct oce_ring *);
449 static inline void *
450 	oce_ring_next(struct oce_ring *);
451 struct oce_pkt *
452 	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
453 	    int maxsegsz);
454 void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
455 static inline struct oce_pkt *
456 	oce_pkt_get(struct oce_pkt_list *);
457 static inline void
458 	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);
459 
460 int	oce_init_fw(struct oce_softc *);
461 int	oce_mbox_init(struct oce_softc *);
462 int	oce_mbox_dispatch(struct oce_softc *);
463 int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
464 	    void *payload, int length);
465 void	oce_first_mcc(struct oce_softc *);
466 
467 int	oce_get_fw_config(struct oce_softc *);
468 int	oce_check_native_mode(struct oce_softc *);
469 int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
470 int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
471 	    int nvtags, int untagged, int promisc);
472 int	oce_set_flow_control(struct oce_softc *, uint64_t);
473 int	oce_config_rss(struct oce_softc *, int enable);
474 int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
475 	    int naddr);
476 int	oce_set_promisc(struct oce_softc *, int enable);
477 int	oce_get_link_status(struct oce_softc *);
478 
479 void	oce_macaddr_set(struct oce_softc *);
480 int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
481 int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
482 int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);
483 
484 int	oce_new_rq(struct oce_softc *, struct oce_rq *);
485 int	oce_new_wq(struct oce_softc *, struct oce_wq *);
486 int	oce_new_mq(struct oce_softc *, struct oce_mq *);
487 int	oce_new_eq(struct oce_softc *, struct oce_eq *);
488 int	oce_new_cq(struct oce_softc *, struct oce_cq *);
489 
490 int	oce_init_stats(struct oce_softc *);
491 int	oce_update_stats(struct oce_softc *);
492 int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
493 int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
494 int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);
495 
496 struct pool *oce_pkt_pool;
497 
498 struct cfdriver oce_cd = {
499 	NULL, "oce", DV_IFNET
500 };
501 
502 struct cfattach oce_ca = {
503 	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
504 };
505 
506 const struct pci_matchid oce_devices[] = {
507 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
508 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
509 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
510 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
511 	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
512 };
513 
514 int
515 oce_match(struct device *parent, void *match, void *aux)
516 {
517 	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
518 }
519 
520 void
521 oce_attach(struct device *parent, struct device *self, void *aux)
522 {
523 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
524 	struct oce_softc *sc = (struct oce_softc *)self;
525 	const char *intrstr = NULL;
526 	pci_intr_handle_t ih;
527 
528 	switch (PCI_PRODUCT(pa->pa_id)) {
529 	case PCI_PRODUCT_SERVERENGINES_BE2:
530 	case PCI_PRODUCT_SERVERENGINES_OCBE2:
531 		SET(sc->sc_flags, OCE_F_BE2);
532 		break;
533 	case PCI_PRODUCT_SERVERENGINES_BE3:
534 	case PCI_PRODUCT_SERVERENGINES_OCBE3:
535 		SET(sc->sc_flags, OCE_F_BE3);
536 		break;
537 	case PCI_PRODUCT_EMULEX_XE201:
538 		SET(sc->sc_flags, OCE_F_XE201);
539 		break;
540 	}
541 
542 	sc->sc_dmat = pa->pa_dmat;
543 	if (oce_pci_alloc(sc, pa))
544 		return;
545 
546 	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
547 	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;
548 
549 	/* create the bootstrap mailbox */
550 	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
551 		printf(": failed to allocate mailbox memory\n");
552 		return;
553 	}
554 	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
555 		printf(": failed to allocate payload memory\n");
556 		goto fail_1;
557 	}
558 
559 	if (oce_init_fw(sc))
560 		goto fail_2;
561 
562 	if (oce_mbox_init(sc)) {
563 		printf(": failed to initialize mailbox\n");
564 		goto fail_2;
565 	}
566 
567 	if (oce_get_fw_config(sc)) {
568 		printf(": failed to get firmware configuration\n");
569 		goto fail_2;
570 	}
571 
572 	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
573 		if (oce_check_native_mode(sc))
574 			goto fail_2;
575 	}
576 
577 	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
578 		printf(": failed to fetch MAC address\n");
579 		goto fail_2;
580 	}
581 	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);
582 
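	/* A single oce_pkt descriptor pool is shared by all oce(4) instances. */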
583 	if (oce_pkt_pool == NULL) {
584 		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
585 		if (oce_pkt_pool == NULL) {
586 			printf(": unable to allocate descriptor pool\n");
587 			goto fail_2;
588 		}
589 		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, 0, 0,
590 		    "ocepkts", NULL);
591 		pool_setipl(oce_pkt_pool, IPL_NET);
592 	}
593 
594 	/* We allocate a single interrupt resource */
595 	sc->sc_nintr = 1;
596 	if (pci_intr_map_msi(pa, &ih) != 0 &&
597 	    pci_intr_map(pa, &ih) != 0) {
598 		printf(": couldn't map interrupt\n");
599 		goto fail_2;
600 	}
601 
602 	intrstr = pci_intr_string(pa->pa_pc, ih);
603 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
604 	    sc->sc_dev.dv_xname);
605 	if (sc->sc_ih == NULL) {
606 		printf(": couldn't establish interrupt\n");
607 		if (intrstr != NULL)
608 			printf(" at %s", intrstr);
609 		printf("\n");
610 		goto fail_2;
611 	}
612 	printf(": %s", intrstr);
613 
614 	if (oce_init_stats(sc))
615 		goto fail_3;
616 
617 	if (oce_init_queues(sc))
618 		goto fail_3;
619 
620 	oce_attach_ifp(sc);
621 
622 #ifdef OCE_LRO
623 	if (oce_init_lro(sc))
624 		goto fail_4;
625 #endif
626 
627 	timeout_set(&sc->sc_tick, oce_tick, sc);
628 	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);
629 
630 	config_mountroot(self, oce_attachhook);
631 
632 	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
633 
634 	return;
635 
636 #ifdef OCE_LRO
637 fail_4:
638 	oce_free_lro(sc);
639 	ether_ifdetach(&sc->sc_ac.ac_if);
640 	if_detach(&sc->sc_ac.ac_if);
641 	oce_release_queues(sc);
642 #endif
643 fail_3:
644 	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
645 fail_2:
646 	oce_dma_free(sc, &sc->sc_pld);
647 fail_1:
648 	oce_dma_free(sc, &sc->sc_mbx);
649 }
650 
651 int
652 oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
653 {
654 	pcireg_t memtype, reg;
655 
656 	/* setup the device config region */
657 	if (ISSET(sc->sc_flags, OCE_F_BE2))
658 		reg = OCE_BAR_CFG_BE2;
659 	else
660 		reg = OCE_BAR_CFG;
661 
662 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
663 	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
664 	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
665 	    IS_BE(sc) ? 0 : 32768)) {
666 		printf(": can't find cfg mem space\n");
667 		return (ENXIO);
668 	}
669 
670 	/*
671 	 * Read the SLI_INTF register and determine whether we
672 	 * can use this port and its features
673 	 */
674 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
675 	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
676 		printf(": invalid signature\n");
677 		goto fail_1;
678 	}
679 	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
680 		printf(": unsupported SLI revision\n");
681 		goto fail_1;
682 	}
683 	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
684 		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
685 	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
686 		SET(sc->sc_flags, OCE_F_RESET_RQD);
687 
688 	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
689 	if (IS_BE(sc)) {
690 		/* set up CSR region */
691 		reg = OCE_BAR_CSR;
692 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
693 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
694 		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
695 			printf(": can't find csr mem space\n");
696 			goto fail_1;
697 		}
698 
699 		/* set up DB doorbell region */
700 		reg = OCE_BAR_DB;
701 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
702 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
703 		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
704 			printf(": can't find db mem space\n");
705 			goto fail_2;
706 		}
707 	} else {
708 		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
709 		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
710 	}
711 
712 	return (0);
713 
714 fail_2:
715 	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
716 fail_1:
717 	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
718 	return (ENXIO);
719 }
720 
721 static inline uint32_t
722 oce_read_cfg(struct oce_softc *sc, bus_size_t off)
723 {
724 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
725 	    BUS_SPACE_BARRIER_READ);
726 	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
727 }
728 
729 static inline uint32_t
730 oce_read_csr(struct oce_softc *sc, bus_size_t off)
731 {
732 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
733 	    BUS_SPACE_BARRIER_READ);
734 	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
735 }
736 
737 static inline uint32_t
738 oce_read_db(struct oce_softc *sc, bus_size_t off)
739 {
740 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
741 	    BUS_SPACE_BARRIER_READ);
742 	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
743 }
744 
745 static inline void
746 oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
747 {
748 	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
749 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
750 	    BUS_SPACE_BARRIER_WRITE);
751 }
752 
753 static inline void
754 oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
755 {
756 	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
757 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
758 	    BUS_SPACE_BARRIER_WRITE);
759 }
760 
761 static inline void
762 oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
763 {
764 	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
765 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
766 	    BUS_SPACE_BARRIER_WRITE);
767 }
768 
769 static inline void
770 oce_intr_enable(struct oce_softc *sc)
771 {
772 	uint32_t reg;
773 
774 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
775 	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
776 }
777 
778 static inline void
779 oce_intr_disable(struct oce_softc *sc)
780 {
781 	uint32_t reg;
782 
783 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
784 	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
785 }
786 
787 void
788 oce_attachhook(struct device *self)
789 {
790 	struct oce_softc *sc = (struct oce_softc *)self;
791 
792 	oce_get_link_status(sc);
793 
794 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
795 
796 	/*
797 	 * We need to receive MCC async events, so enable interrupts and arm
798 	 * the first EQ; the other EQs are armed once the interface is up.
799 	 */
800 	oce_intr_enable(sc);
801 	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);
802 
803 	/*
804 	 * Send the first MCC command; after that the firmware will start
805 	 * delivering MCC notifications on its own.
806 	 */
807 	oce_first_mcc(sc);
808 }
809 
810 void
811 oce_attach_ifp(struct oce_softc *sc)
812 {
813 	struct ifnet *ifp = &sc->sc_ac.ac_if;
814 
815 	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
816 	    oce_media_status);
817 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
818 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
819 
820 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
821 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
822 	ifp->if_ioctl = oce_ioctl;
823 	ifp->if_start = oce_start;
824 	ifp->if_watchdog = oce_watchdog;
825 	ifp->if_hardmtu = OCE_MAX_MTU;
826 	ifp->if_softc = sc;
827 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_size - 1);
828 
829 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
830 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
831 
832 #if NVLAN > 0
833 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
834 #endif
835 
836 #ifdef OCE_TSO
837 	ifp->if_capabilities |= IFCAP_TSO;
838 	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
839 #endif
840 #ifdef OCE_LRO
841 	ifp->if_capabilities |= IFCAP_LRO;
842 #endif
843 
844 	if_attach(ifp);
845 	ether_ifattach(ifp);
846 }
847 
848 int
849 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
850 {
851 	struct oce_softc *sc = ifp->if_softc;
852 	struct ifreq *ifr = (struct ifreq *)data;
853 	int s, error = 0;
854 
855 	s = splnet();
856 
857 	switch (command) {
858 	case SIOCSIFADDR:
859 		ifp->if_flags |= IFF_UP;
860 		if (!(ifp->if_flags & IFF_RUNNING))
861 			oce_init(sc);
862 		break;
863 	case SIOCSIFFLAGS:
864 		if (ifp->if_flags & IFF_UP) {
865 			if (ifp->if_flags & IFF_RUNNING)
866 				error = ENETRESET;
867 			else
868 				oce_init(sc);
869 		} else {
870 			if (ifp->if_flags & IFF_RUNNING)
871 				oce_stop(sc);
872 		}
873 		break;
874 	case SIOCGIFMEDIA:
875 	case SIOCSIFMEDIA:
876 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
877 		break;
878 	case SIOCGIFRXR:
879 		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
880 		break;
881 	default:
882 		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
883 		break;
884 	}
885 
886 	if (error == ENETRESET) {
887 		if (ifp->if_flags & IFF_RUNNING)
888 			oce_iff(sc);
889 		error = 0;
890 	}
891 
892 	splx(s);
893 
894 	return (error);
895 }
896 
897 int
898 oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
899 {
900 	struct if_rxring_info *ifr, ifr1;
901 	struct oce_rq *rq;
902 	int error, i;
903 	u_int n = 0;
904 
905 	if (sc->sc_nrq > 1) {
906 		if ((ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
907 		    M_WAITOK | M_ZERO)) == NULL)
908 			return (ENOMEM);
909 	} else
910 		ifr = &ifr1;
911 
912 	OCE_RQ_FOREACH(sc, rq, i) {
913 		ifr[n].ifr_size = MCLBYTES;
914 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
915 		ifr[n].ifr_info = rq->rxring;
916 		n++;
917 	}
918 
919 	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);
920 
921 	if (sc->sc_nrq > 1)
922 		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
923 	return (error);
924 }
925 
926 
927 void
928 oce_iff(struct oce_softc *sc)
929 {
930 	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
931 	struct arpcom *ac = &sc->sc_ac;
932 	struct ifnet *ifp = &ac->ac_if;
933 	struct ether_multi *enm;
934 	struct ether_multistep step;
935 	int naddr = 0, promisc = 0;
936 
937 	ifp->if_flags &= ~IFF_ALLMULTI;
938 
939 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
940 	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
941 		ifp->if_flags |= IFF_ALLMULTI;
942 		promisc = 1;
943 	} else {
944 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
945 		while (enm != NULL) {
946 			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
947 			ETHER_NEXT_MULTI(step, enm);
948 		}
949 		oce_update_mcast(sc, multi, naddr);
950 	}
951 
952 	oce_set_promisc(sc, promisc);
953 }
954 
955 void
956 oce_link_status(struct oce_softc *sc)
957 {
958 	struct ifnet *ifp = &sc->sc_ac.ac_if;
959 	int link_state = LINK_STATE_DOWN;
960 
961 	ifp->if_baudrate = 0;
962 	if (sc->sc_link_up) {
963 		link_state = LINK_STATE_FULL_DUPLEX;
964 
965 		switch (sc->sc_link_speed) {
966 		case 1:
967 			ifp->if_baudrate = IF_Mbps(10);
968 			break;
969 		case 2:
970 			ifp->if_baudrate = IF_Mbps(100);
971 			break;
972 		case 3:
973 			ifp->if_baudrate = IF_Gbps(1);
974 			break;
975 		case 4:
976 			ifp->if_baudrate = IF_Gbps(10);
977 			break;
978 		}
979 	}
980 	if (ifp->if_link_state != link_state) {
981 		ifp->if_link_state = link_state;
982 		if_link_state_change(ifp);
983 	}
984 }
985 
986 void
987 oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
988 {
989 	struct oce_softc *sc = ifp->if_softc;
990 
991 	ifmr->ifm_status = IFM_AVALID;
992 	ifmr->ifm_active = IFM_ETHER;
993 
994 	if (oce_get_link_status(sc) == 0)
995 		oce_link_status(sc);
996 
997 	if (!sc->sc_link_up) {
998 		ifmr->ifm_active |= IFM_NONE;
999 		return;
1000 	}
1001 
1002 	ifmr->ifm_status |= IFM_ACTIVE;
1003 
1004 	switch (sc->sc_link_speed) {
1005 	case 1: /* 10 Mbps */
1006 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1007 		break;
1008 	case 2: /* 100 Mbps */
1009 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1010 		break;
1011 	case 3: /* 1 Gbps */
1012 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1013 		break;
1014 	case 4: /* 10 Gbps */
1015 		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1016 		break;
1017 	}
1018 
1019 	if (sc->sc_fc & IFM_ETH_RXPAUSE)
1020 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1021 	if (sc->sc_fc & IFM_ETH_TXPAUSE)
1022 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1023 }
1024 
1025 int
1026 oce_media_change(struct ifnet *ifp)
1027 {
1028 	return (0);
1029 }
1030 
1031 void
1032 oce_tick(void *arg)
1033 {
1034 	struct oce_softc *sc = arg;
1035 	int s;
1036 
1037 	s = splnet();
1038 
1039 	if (oce_update_stats(sc) == 0)
1040 		timeout_add_sec(&sc->sc_tick, 1);
1041 
1042 	splx(s);
1043 }
1044 
1045 void
1046 oce_init(void *arg)
1047 {
1048 	struct oce_softc *sc = arg;
1049 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1050 	struct oce_eq *eq;
1051 	struct oce_rq *rq;
1052 	struct oce_wq *wq;
1053 	int i;
1054 
1055 	oce_stop(sc);
1056 
1057 	DELAY(10);
1058 
1059 	oce_macaddr_set(sc);
1060 
1061 	oce_iff(sc);
1062 
1063 	/* Enable VLAN promiscuous mode */
1064 	if (oce_config_vlan(sc, NULL, 0, 1, 1))
1065 		goto error;
1066 
1067 	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
1068 		goto error;
1069 
1070 	OCE_RQ_FOREACH(sc, rq, i) {
1071 		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1072 		    ETHER_VLAN_ENCAP_LEN;
1073 		if (oce_new_rq(sc, rq)) {
1074 			printf("%s: failed to create rq\n",
1075 			    sc->sc_dev.dv_xname);
1076 			goto error;
1077 		}
1078 		rq->ring->index = 0;
1079 
1080 		/* oce splits jumbos into 2k chunks... */
1081 		if_rxr_init(&rq->rxring, 8, rq->nitems);
1082 
1083 		if (!oce_alloc_rx_bufs(rq)) {
1084 			printf("%s: failed to allocate rx buffers\n",
1085 			    sc->sc_dev.dv_xname);
1086 			goto error;
1087 		}
1088 	}
1089 
1090 #ifdef OCE_RSS
1091 	/* RSS config */
1092 	if (sc->sc_rss_enable) {
1093 		if (oce_config_rss(sc, 1)) {
1094 			printf("%s: failed to configure RSS\n",
1095 			    sc->sc_dev.dv_xname);
1096 			goto error;
1097 		}
1098 	}
1099 #endif
1100 
1101 	OCE_RQ_FOREACH(sc, rq, i)
1102 		oce_arm_cq(rq->cq, 0, TRUE);
1103 
1104 	OCE_WQ_FOREACH(sc, wq, i)
1105 		oce_arm_cq(wq->cq, 0, TRUE);
1106 
1107 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
1108 
1109 	OCE_EQ_FOREACH(sc, eq, i)
1110 		oce_arm_eq(eq, 0, TRUE, FALSE);
1111 
1112 	if (oce_get_link_status(sc) == 0)
1113 		oce_link_status(sc);
1114 
1115 	ifp->if_flags |= IFF_RUNNING;
1116 	ifq_clr_oactive(&ifp->if_snd);
1117 
1118 	timeout_add_sec(&sc->sc_tick, 1);
1119 
1120 	oce_intr_enable(sc);
1121 
1122 	return;
1123 error:
1124 	oce_stop(sc);
1125 }
1126 
1127 void
1128 oce_stop(struct oce_softc *sc)
1129 {
1130 	struct mbx_delete_nic_rq cmd;
1131 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1132 	struct oce_rq *rq;
1133 	struct oce_wq *wq;
1134 	struct oce_eq *eq;
1135 	int i;
1136 
1137 	timeout_del(&sc->sc_tick);
1138 	timeout_del(&sc->sc_rxrefill);
1139 
1140 	ifp->if_flags &= ~IFF_RUNNING;
1141 	ifq_clr_oactive(&ifp->if_snd);
1142 
1143 	/* Stop interrupts and finish any pending bottom halves */
1144 	oce_intr_disable(sc);
1145 
1146 	/* Invalidate any pending cq and eq entries */
1147 	OCE_EQ_FOREACH(sc, eq, i)
1148 		oce_drain_eq(eq);
1149 	OCE_RQ_FOREACH(sc, rq, i) {
1150 		/* destroy the receive queue in the firmware */
1151 		memset(&cmd, 0, sizeof(cmd));
1152 		cmd.params.req.rq_id = htole16(rq->id);
1153 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1154 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1155 		DELAY(1000);
1156 		oce_drain_rq(rq);
1157 		oce_free_posted_rxbuf(rq);
1158 	}
1159 	OCE_WQ_FOREACH(sc, wq, i)
1160 		oce_drain_wq(wq);
1161 }
1162 
1163 void
1164 oce_watchdog(struct ifnet *ifp)
1165 {
1166 	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);
1167 
1168 	oce_init(ifp->if_softc);
1169 
1170 	ifp->if_oerrors++;
1171 }
1172 
1173 void
1174 oce_start(struct ifnet *ifp)
1175 {
1176 	struct oce_softc *sc = ifp->if_softc;
1177 	struct mbuf *m;
1178 	int pkts = 0;
1179 
1180 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1181 		return;
1182 
1183 	for (;;) {
1184 		IFQ_DEQUEUE(&ifp->if_snd, m);
1185 		if (m == NULL)
1186 			break;
1187 
1188 		if (oce_encap(sc, &m, 0)) {
1189 			ifq_set_oactive(&ifp->if_snd);
1190 			break;
1191 		}
1192 
1193 #if NBPFILTER > 0
1194 		if (ifp->if_bpf)
1195 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1196 #endif
1197 		pkts++;
1198 	}
1199 
1200 	/* Set a timeout in case the chip goes out to lunch */
1201 	if (pkts)
1202 		ifp->if_timer = 5;
1203 }
1204 
1205 int
1206 oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
1207 {
1208 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1209 	struct mbuf *m = *mpp;
1210 	struct oce_wq *wq = sc->sc_wq[wqidx];
1211 	struct oce_pkt *pkt = NULL;
1212 	struct oce_nic_hdr_wqe *nhe;
1213 	struct oce_nic_frag_wqe *nfe;
1214 	int i, nwqe, err;
1215 
1216 #ifdef OCE_TSO
1217 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1218 		/* consolidate packet buffers for TSO/LSO segment offload */
1219 		m = oce_tso(sc, mpp);
1220 		if (m == NULL)
1221 			goto error;
1222 	}
1223 #endif
1224 
1225 	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
1226 		goto error;
1227 
1228 	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
1229 	if (err == EFBIG) {
1230 		if (m_defrag(m, M_DONTWAIT) ||
1231 		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
1232 			BUS_DMA_NOWAIT))
1233 			goto error;
1234 		*mpp = m;
1235 	} else if (err != 0)
1236 		goto error;
1237 
1238 	pkt->nsegs = pkt->map->dm_nsegs;
1239 
1240 	nwqe = pkt->nsegs + 1;
1241 	if (IS_BE(sc)) {
1242 		/* BE2 and BE3 require even number of WQEs */
1243 		if (nwqe & 1)
1244 			nwqe++;
1245 	}
1246 
1247 	/* Fail if there aren't enough free WQEs */
1248 	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
1249 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1250 		goto error;
1251 	}
1252 
1253 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1254 	    BUS_DMASYNC_PREWRITE);
1255 	pkt->mbuf = m;
1256 
1257 	/* TX work queue entry for the header */
1258 	nhe = oce_ring_get(wq->ring);
1259 	memset(nhe, 0, sizeof(*nhe));
1260 
1261 	nhe->u0.s.complete = 1;
1262 	nhe->u0.s.event = 1;
1263 	nhe->u0.s.crc = 1;
1264 	nhe->u0.s.forward = 0;
1265 	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
1266 	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
1267 	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
1268 	nhe->u0.s.num_wqe = nwqe;
1269 	nhe->u0.s.total_length = m->m_pkthdr.len;
1270 
1271 #if NVLAN > 0
1272 	if (m->m_flags & M_VLANTAG) {
1273 		nhe->u0.s.vlan = 1; /* Vlan present */
1274 		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1275 	}
1276 #endif
1277 
1278 #ifdef OCE_TSO
1279 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1280 		if (m->m_pkthdr.tso_segsz) {
1281 			nhe->u0.s.lso = 1;
1282 			nhe->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1283 		}
1284 		if (!IS_BE(sc))
1285 			nhe->u0.s.ipcs = 1;
1286 	}
1287 #endif
1288 
1289 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
1290 	    BUS_DMASYNC_PREWRITE);
1291 
1292 	wq->ring->nused++;
1293 
1294 	/* TX work queue entries for data chunks */
1295 	for (i = 0; i < pkt->nsegs; i++) {
1296 		nfe = oce_ring_get(wq->ring);
1297 		memset(nfe, 0, sizeof(*nfe));
1298 		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
1299 		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
1300 		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
1301 		wq->ring->nused++;
1302 	}
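	/*
	 * If the WQE count was rounded up for BE2/BE3, post one extra
	 * zeroed fragment WQE so this packet occupies an even number of
	 * ring entries.
	 */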
1303 	if (nwqe > (pkt->nsegs + 1)) {
1304 		nfe = oce_ring_get(wq->ring);
1305 		memset(nfe, 0, sizeof(*nfe));
1306 		wq->ring->nused++;
1307 		pkt->nsegs++;
1308 	}
1309 
1310 	oce_pkt_put(&wq->pkt_list, pkt);
1311 
1312 	ifp->if_opackets++;
1313 
1314 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
1315 	    BUS_DMASYNC_POSTWRITE);
1316 
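	/*
	 * Ring the TX doorbell: the WQ id goes in the low bits and the
	 * number of WQEs just posted in bits 16 and up.
	 */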
1317 	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));
1318 
1319 	return (0);
1320 
1321 error:
1322 	if (pkt)
1323 		oce_pkt_put(&wq->pkt_free, pkt);
1324 	m_freem(*mpp);
1325 	*mpp = NULL;
1326 	return (1);
1327 }
1328 
1329 #ifdef OCE_TSO
1330 struct mbuf *
1331 oce_tso(struct oce_softc *sc, struct mbuf **mpp)
1332 {
1333 	struct mbuf *m;
1334 	struct ip *ip;
1335 #ifdef INET6
1336 	struct ip6_hdr *ip6;
1337 #endif
1338 	struct ether_vlan_header *eh;
1339 	struct tcphdr *th;
1340 	uint16_t etype;
1341 	int total_len = 0, ehdrlen = 0;
1342 
1343 	m = *mpp;
1344 
1345 	if (M_WRITABLE(m) == 0) {
1346 		m = m_dup(*mpp, M_DONTWAIT);
1347 		if (!m)
1348 			return (NULL);
1349 		m_freem(*mpp);
1350 		*mpp = m;
1351 	}
1352 
1353 	eh = mtod(m, struct ether_vlan_header *);
1354 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1355 		etype = ntohs(eh->evl_proto);
1356 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1357 	} else {
1358 		etype = ntohs(eh->evl_encap_proto);
1359 		ehdrlen = ETHER_HDR_LEN;
1360 	}
1361 
1362 	switch (etype) {
1363 	case ETHERTYPE_IP:
1364 		ip = (struct ip *)(m->m_data + ehdrlen);
1365 		if (ip->ip_p != IPPROTO_TCP)
1366 			return (NULL);
1367 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1368 
1369 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1370 		break;
1371 #ifdef INET6
1372 	case ETHERTYPE_IPV6:
1373 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1374 		if (ip6->ip6_nxt != IPPROTO_TCP)
1375 			return (NULL);
1376 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1377 
1378 		total_len = ehdrlen + sizeof(struct ip6_hdr) +
1379 		    (th->th_off << 2);
1380 		break;
1381 #endif
1382 	default:
1383 		return (NULL);
1384 	}
1385 
1386 	m = m_pullup(m, total_len);
1387 	if (!m)
1388 		return (NULL);
1389 	*mpp = m;
1390 	return (m);
1391 
1392 }
1393 #endif /* OCE_TSO */
1394 
1395 int
1396 oce_intr(void *arg)
1397 {
1398 	struct oce_softc *sc = arg;
1399 	struct oce_eq *eq = sc->sc_eq[0];
1400 	struct oce_eqe *eqe;
1401 	struct oce_cq *cq = NULL;
1402 	int i, neqe = 0;
1403 
1404 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
1405 
1406 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
1407 		eqe->evnt = 0;
1408 		neqe++;
1409 	}
1410 
1411 	/* Spurious? */
1412 	if (!neqe) {
1413 		oce_arm_eq(eq, 0, TRUE, FALSE);
1414 		return (0);
1415 	}
1416 
1417 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
1418 
1419 	/* Clear EQ entries, but don't arm */
1420 	oce_arm_eq(eq, neqe, FALSE, TRUE);
1421 
1422 	/* Process TX, RX and MCC completion queues */
1423 	for (i = 0; i < eq->cq_valid; i++) {
1424 		cq = eq->cq[i];
1425 		(*cq->cq_intr)(cq->cb_arg);
1426 		oce_arm_cq(cq, 0, TRUE);
1427 	}
1428 
1429 	oce_arm_eq(eq, 0, TRUE, FALSE);
1430 	return (1);
1431 }
1432 
1433 /* Handle the Completion Queue for transmit */
1434 void
1435 oce_intr_wq(void *arg)
1436 {
1437 	struct oce_wq *wq = (struct oce_wq *)arg;
1438 	struct oce_cq *cq = wq->cq;
1439 	struct oce_nic_tx_cqe *cqe;
1440 	struct oce_softc *sc = wq->sc;
1441 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1442 	int ncqe = 0;
1443 
1444 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1445 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
1446 		oce_txeof(wq);
1447 		WQ_CQE_INVALIDATE(cqe);
1448 		ncqe++;
1449 	}
1450 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1451 
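	/* Resume transmission once at least half of the ring has drained. */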
1452 	if (ifq_is_oactive(&ifp->if_snd)) {
1453 		if (wq->ring->nused < (wq->ring->nitems / 2)) {
1454 			ifq_clr_oactive(&ifp->if_snd);
1455 			oce_start(ifp);
1456 		}
1457 	}
1458 	if (wq->ring->nused == 0)
1459 		ifp->if_timer = 0;
1460 
1461 	if (ncqe)
1462 		oce_arm_cq(cq, ncqe, FALSE);
1463 }
1464 
1465 void
1466 oce_txeof(struct oce_wq *wq)
1467 {
1468 	struct oce_softc *sc = wq->sc;
1469 	struct oce_pkt *pkt;
1470 	struct mbuf *m;
1471 
1472 	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
1473 		printf("%s: missing descriptor in txeof\n",
1474 		    sc->sc_dev.dv_xname);
1475 		return;
1476 	}
1477 
1478 	wq->ring->nused -= pkt->nsegs + 1;
1479 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1480 	    BUS_DMASYNC_POSTWRITE);
1481 	bus_dmamap_unload(sc->sc_dmat, pkt->map);
1482 
1483 	m = pkt->mbuf;
1484 	m_freem(m);
1485 	pkt->mbuf = NULL;
1486 	oce_pkt_put(&wq->pkt_free, pkt);
1487 }
1488 
1489 /* Handle the Completion Queue for receive */
1490 void
1491 oce_intr_rq(void *arg)
1492 {
1493 	struct oce_rq *rq = (struct oce_rq *)arg;
1494 	struct oce_cq *cq = rq->cq;
1495 	struct oce_softc *sc = rq->sc;
1496 	struct oce_nic_rx_cqe *cqe;
1497 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1498 	int maxrx, ncqe = 0;
1499 
1500 	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;
1501 
1502 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1503 
1504 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
1505 		if (cqe->u0.s.error == 0) {
1506 			if (cqe->u0.s.pkt_size == 0)
1507 				/* partial DMA workaround for Lancer */
1508 				oce_rxeoc(rq, cqe);
1509 			else
1510 				oce_rxeof(rq, cqe);
1511 		} else {
1512 			ifp->if_ierrors++;
1513 			if (IS_XE201(sc))
1514 				/* Lancer A0 no buffer workaround */
1515 				oce_rxeoc(rq, cqe);
1516 			else
1517 				/* Post L3/L4 errors to the stack. */
1518 				oce_rxeof(rq, cqe);
1519 		}
1520 #ifdef OCE_LRO
1521 		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
1522 			oce_flush_lro(rq);
1523 #endif
1524 		RQ_CQE_INVALIDATE(cqe);
1525 		ncqe++;
1526 	}
1527 
1528 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1529 
1530 #ifdef OCE_LRO
1531 	if (IF_LRO_ENABLED(ifp))
1532 		oce_flush_lro(rq);
1533 #endif
1534 
1535 	if (ncqe) {
1536 		oce_arm_cq(cq, ncqe, FALSE);
1537 		if (!oce_alloc_rx_bufs(rq))
1538 			timeout_add(&sc->sc_rxrefill, 1);
1539 	}
1540 }
1541 
1542 void
1543 oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1544 {
1545 	struct oce_softc *sc = rq->sc;
1546 	struct oce_pkt *pkt = NULL;
1547 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1548 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1549 	struct mbuf *m = NULL, *tail = NULL;
1550 	int i, len, frag_len;
1551 	uint16_t vtag;
1552 
1553 	len = cqe->u0.s.pkt_size;
1554 
1555 	/* Get vlan_tag value */
1556 	if (IS_BE(sc))
1557 		vtag = ntohs(cqe->u0.s.vlan_tag);
1558 	else
1559 		vtag = cqe->u0.s.vlan_tag;
1560 
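	/*
	 * A frame may span several receive fragments; chain them into a
	 * single mbuf chain, filling in the packet header on the first one.
	 */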
1561 	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1562 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1563 			printf("%s: missing descriptor in rxeof\n",
1564 			    sc->sc_dev.dv_xname);
1565 			goto exit;
1566 		}
1567 
1568 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1569 		    BUS_DMASYNC_POSTREAD);
1570 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1571 		if_rxr_put(&rq->rxring, 1);
1572 
1573 		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
1574 		pkt->mbuf->m_len = frag_len;
1575 
1576 		if (tail != NULL) {
1577 			/* additional fragments */
1578 			pkt->mbuf->m_flags &= ~M_PKTHDR;
1579 			tail->m_next = pkt->mbuf;
1580 			tail = pkt->mbuf;
1581 		} else {
1582 			/* first fragment, fill out most of the header */
1583 			pkt->mbuf->m_pkthdr.len = len;
1584 			pkt->mbuf->m_pkthdr.csum_flags = 0;
1585 			if (cqe->u0.s.ip_cksum_pass) {
1586 				if (!cqe->u0.s.ip_ver) { /* IPV4 */
1587 					pkt->mbuf->m_pkthdr.csum_flags =
1588 					    M_IPV4_CSUM_IN_OK;
1589 				}
1590 			}
1591 			if (cqe->u0.s.l4_cksum_pass) {
1592 				pkt->mbuf->m_pkthdr.csum_flags |=
1593 				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1594 			}
1595 			m = tail = pkt->mbuf;
1596 		}
1597 		pkt->mbuf = NULL;
1598 		oce_pkt_put(&rq->pkt_free, pkt);
1599 		len -= frag_len;
1600 	}
1601 
1602 	if (m) {
1603 		if (!oce_port_valid(sc, cqe)) {
1604 			m_freem(m);
1605 			goto exit;
1606 		}
1607 
1608 #if NVLAN > 0
1609 		/* This determines whether the vlan tag is valid */
1610 		if (oce_vtp_valid(sc, cqe)) {
1611 			if (sc->sc_fmode & FNM_FLEX10_MODE) {
1612 				/* FLEX10. If QnQ is not set, neglect VLAN */
1613 				if (cqe->u0.s.qnq) {
1614 					m->m_pkthdr.ether_vtag = vtag;
1615 					m->m_flags |= M_VLANTAG;
1616 				}
1617 			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK))  {
1618 				/*
1619 				 * In UMC mode the pvid is normally stripped,
1620 				 * but in some cases frames still arrive with
1621 				 * it.  So if pvid == vlan, ignore the tag.
1622 				 */
1623 				m->m_pkthdr.ether_vtag = vtag;
1624 				m->m_flags |= M_VLANTAG;
1625 			}
1626 		}
1627 #endif
1628 
1629 #ifdef OCE_LRO
1630 		/* Try to queue to LRO */
1631 		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
1632 		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
1633 		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
1634 
1635 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1636 				rq->lro_pkts_queued++;
1637 				goto exit;
1638 			}
1639 			/* If LRO posting fails then try to post to STACK */
1640 		}
1641 #endif
1642 
1643 		ml_enqueue(&ml, m);
1644 	}
1645 exit:
1646 	if_input(ifp, &ml);
1647 }
1648 
1649 void
1650 oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1651 {
1652 	struct oce_softc *sc = rq->sc;
1653 	struct oce_pkt *pkt;
1654 	int i, num_frags = cqe->u0.s.num_fragments;
1655 
1656 	if (IS_XE201(sc) && cqe->u0.s.error) {
1657 		/*
1658 		 * Lancer A0 workaround:
1659 		 * num_frags will be 1 more than actual in case of error
1660 		 */
1661 		if (num_frags)
1662 			num_frags--;
1663 	}
1664 	for (i = 0; i < num_frags; i++) {
1665 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1666 			printf("%s: missing descriptor in rxeoc\n",
1667 			    sc->sc_dev.dv_xname);
1668 			return;
1669 		}
1670 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1671 		    BUS_DMASYNC_POSTREAD);
1672 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1673 		if_rxr_put(&rq->rxring, 1);
1674 		m_freem(pkt->mbuf);
1675 		oce_pkt_put(&rq->pkt_free, pkt);
1676 	}
1677 }
1678 
1679 int
1680 oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1681 {
1682 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1683 
1684 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1685 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1686 		return (cqe_v1->u0.s.vlan_tag_present);
1687 	}
1688 	return (cqe->u0.s.vlan_tag_present);
1689 }
1690 
1691 int
1692 oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1693 {
1694 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1695 
1696 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1697 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1698 		if (sc->sc_port != cqe_v1->u0.s.port)
1699 			return (0);
1700 	}
1701 	return (1);
1702 }
1703 
1704 #ifdef OCE_LRO
1705 void
1706 oce_flush_lro(struct oce_rq *rq)
1707 {
1708 	struct oce_softc *sc = rq->sc;
1709 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1710 	struct lro_ctrl	*lro = &rq->lro;
1711 	struct lro_entry *queued;
1712 
1713 	if (!IF_LRO_ENABLED(ifp))
1714 		return;
1715 
1716 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1717 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1718 		tcp_lro_flush(lro, queued);
1719 	}
1720 	rq->lro_pkts_queued = 0;
1721 }
1722 
1723 int
1724 oce_init_lro(struct oce_softc *sc)
1725 {
1726 	struct lro_ctrl *lro = NULL;
1727 	int i = 0, rc = 0;
1728 
1729 	for (i = 0; i < sc->sc_nrq; i++) {
1730 		lro = &sc->sc_rq[i]->lro;
1731 		rc = tcp_lro_init(lro);
1732 		if (rc != 0) {
1733 			printf("%s: LRO init failed\n",
1734 			    sc->sc_dev.dv_xname);
1735 			return (rc);
1736 		}
1737 		lro->ifp = &sc->sc_ac.ac_if;
1738 	}
1739 
1740 	return (rc);
1741 }
1742 
1743 void
1744 oce_free_lro(struct oce_softc *sc)
1745 {
1746 	struct lro_ctrl *lro = NULL;
1747 	int i = 0;
1748 
1749 	for (i = 0; i < sc->sc_nrq; i++) {
1750 		lro = &sc->sc_rq[i]->lro;
1751 		if (lro)
1752 			tcp_lro_free(lro);
1753 	}
1754 }
1755 #endif /* OCE_LRO */
1756 
1757 int
1758 oce_get_buf(struct oce_rq *rq)
1759 {
1760 	struct oce_softc *sc = rq->sc;
1761 	struct oce_pkt *pkt;
1762 	struct oce_nic_rqe *rqe;
1763 
1764 	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
1765 		return (0);
1766 
1767 	pkt->mbuf = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1768 	if (pkt->mbuf == NULL) {
1769 		oce_pkt_put(&rq->pkt_free, pkt);
1770 		return (0);
1771 	}
1772 
1773 	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
1774 #ifdef __STRICT_ALIGNMENT
1775 	m_adj(pkt->mbuf, ETHER_ALIGN);
1776 #endif
1777 
1778 	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
1779 	    BUS_DMA_NOWAIT)) {
1780 		m_freem(pkt->mbuf);
1781 		pkt->mbuf = NULL;
1782 		oce_pkt_put(&rq->pkt_free, pkt);
1783 		return (0);
1784 	}
1785 
1786 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1787 	    BUS_DMASYNC_PREREAD);
1788 
1789 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
1790 	    BUS_DMASYNC_PREWRITE);
1791 
1792 	rqe = oce_ring_get(rq->ring);
1793 	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
1794 	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);
1795 
1796 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
1797 	    BUS_DMASYNC_POSTWRITE);
1798 
1799 	oce_pkt_put(&rq->pkt_list, pkt);
1800 
1801 	return (1);
1802 }
1803 
1804 int
1805 oce_alloc_rx_bufs(struct oce_rq *rq)
1806 {
1807 	struct oce_softc *sc = rq->sc;
1808 	int i, nbufs = 0;
1809 	u_int slots;
1810 
1811 	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
1812 		if (oce_get_buf(rq) == 0)
1813 			break;
1814 
1815 		nbufs++;
1816 	}
1817 	if_rxr_put(&rq->rxring, slots);
1818 
1819 	if (!nbufs)
1820 		return (0);
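	/*
	 * Notify the hardware: the RX doorbell takes the RQ id in the low
	 * bits and a post count of at most OCE_MAX_RQ_POSTS in bits 24
	 * and up.
	 */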
1821 	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1822 		oce_write_db(sc, PD_RXULP_DB, rq->id |
1823 		    (OCE_MAX_RQ_POSTS << 24));
1824 		nbufs -= OCE_MAX_RQ_POSTS;
1825 	}
1826 	if (nbufs > 0)
1827 		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1828 	return (1);
1829 }
1830 
1831 void
1832 oce_refill_rx(void *arg)
1833 {
1834 	struct oce_softc *sc = arg;
1835 	struct oce_rq *rq;
1836 	int i, s;
1837 
1838 	s = splnet();
1839 	OCE_RQ_FOREACH(sc, rq, i) {
1840 		if (!oce_alloc_rx_bufs(rq))
1841 			timeout_add(&sc->sc_rxrefill, 5);
1842 	}
1843 	splx(s);
1844 }
1845 
1846 /* Handle the Completion Queue for the Mailbox/Async notifications */
1847 void
1848 oce_intr_mq(void *arg)
1849 {
1850 	struct oce_mq *mq = (struct oce_mq *)arg;
1851 	struct oce_softc *sc = mq->sc;
1852 	struct oce_cq *cq = mq->cq;
1853 	struct oce_mq_cqe *cqe;
1854 	struct oce_async_cqe_link_state *acqe;
1855 	struct oce_async_event_grp5_pvid_state *gcqe;
1856 	int evtype, optype, ncqe = 0;
1857 
1858 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1859 
1860 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1861 		if (cqe->u0.s.async_event) {
1862 			evtype = cqe->u0.s.event_type;
1863 			optype = cqe->u0.s.async_type;
1864 			if (evtype  == ASYNC_EVENT_CODE_LINK_STATE) {
1865 				/* Link status evt */
1866 				acqe = (struct oce_async_cqe_link_state *)cqe;
1867 				oce_link_event(sc, acqe);
1868 			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1869 				   (optype == ASYNC_EVENT_PVID_STATE)) {
1870 				/* GRP5 PVID */
1871 				gcqe =
1872 				(struct oce_async_event_grp5_pvid_state *)cqe;
1873 				if (gcqe->enabled)
1874 					sc->sc_pvid =
1875 					    gcqe->tag & VLAN_VID_MASK;
1876 				else
1877 					sc->sc_pvid = 0;
1878 			}
1879 		}
1880 		MQ_CQE_INVALIDATE(cqe);
1881 		ncqe++;
1882 	}
1883 
1884 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1885 
1886 	if (ncqe)
1887 		oce_arm_cq(cq, ncqe, FALSE);
1888 }
1889 
1890 void
1891 oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1892 {
1893 	/* Update Link status */
1894 	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1895 	    ASYNC_EVENT_LINK_UP);
1896 	/* Update speed */
1897 	sc->sc_link_speed = acqe->u0.s.speed;
1898 	oce_link_status(sc);
1899 }
1900 
1901 int
1902 oce_init_queues(struct oce_softc *sc)
1903 {
1904 	struct oce_wq *wq;
1905 	struct oce_rq *rq;
1906 	int i;
1907 
1908 	sc->sc_nrq = 1;
1909 	sc->sc_nwq = 1;
1910 
1911 	/* Create network interface on card */
1912 	if (oce_create_iface(sc, sc->sc_macaddr))
1913 		goto error;
1914 
1915 	/* create all of the event queues */
1916 	for (i = 0; i < sc->sc_nintr; i++) {
1917 		sc->sc_eq[i] = oce_create_eq(sc);
1918 		if (!sc->sc_eq[i])
1919 			goto error;
1920 	}
1921 
1922 	/* alloc tx queues */
1923 	OCE_WQ_FOREACH(sc, wq, i) {
1924 		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1925 		if (!sc->sc_wq[i])
1926 			goto error;
1927 	}
1928 
1929 	/* alloc rx queues */
1930 	OCE_RQ_FOREACH(sc, rq, i) {
1931 		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1932 		    i > 0 ? sc->sc_rss_enable : 0);
1933 		if (!sc->sc_rq[i])
1934 			goto error;
1935 	}
1936 
1937 	/* alloc mailbox queue */
1938 	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1939 	if (!sc->sc_mq)
1940 		goto error;
1941 
1942 	return (0);
1943 error:
1944 	oce_release_queues(sc);
1945 	return (1);
1946 }
1947 
1948 void
1949 oce_release_queues(struct oce_softc *sc)
1950 {
1951 	struct oce_wq *wq;
1952 	struct oce_rq *rq;
1953 	struct oce_eq *eq;
1954 	int i;
1955 
1956 	OCE_RQ_FOREACH(sc, rq, i) {
1957 		if (rq)
1958 			oce_destroy_rq(sc->sc_rq[i]);
1959 	}
1960 
1961 	OCE_WQ_FOREACH(sc, wq, i) {
1962 		if (wq)
1963 			oce_destroy_wq(sc->sc_wq[i]);
1964 	}
1965 
1966 	if (sc->sc_mq)
1967 		oce_destroy_mq(sc->sc_mq);
1968 
1969 	OCE_EQ_FOREACH(sc, eq, i) {
1970 		if (eq)
1971 			oce_destroy_eq(sc->sc_eq[i]);
1972 	}
1973 }
1974 
1975 /**
1976  * @brief 		Function to create a WQ for NIC Tx
1977  * @param sc 		software handle to the device
1978  * @returns		the pointer to the WQ created or NULL on failure
1979  */
1980 struct oce_wq *
1981 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1982 {
1983 	struct oce_wq *wq;
1984 	struct oce_cq *cq;
1985 	struct oce_pkt *pkt;
1986 	int i;
1987 
1988 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1989 		return (NULL);
1990 
1991 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1992 	if (!wq)
1993 		return (NULL);
1994 
1995 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1996 	if (!wq->ring) {
1997 		free(wq, M_DEVBUF, 0);
1998 		return (NULL);
1999 	}
2000 
2001 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
2002 	    1, 0, 3);
2003 	if (!cq) {
2004 		oce_destroy_ring(sc, wq->ring);
2005 		free(wq, M_DEVBUF, 0);
2006 		return (NULL);
2007 	}
2008 
2009 	wq->id = -1;
2010 	wq->sc = sc;
2011 
2012 	wq->cq = cq;
2013 	wq->nitems = sc->sc_tx_ring_size;
2014 
2015 	SIMPLEQ_INIT(&wq->pkt_free);
2016 	SIMPLEQ_INIT(&wq->pkt_list);
2017 
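	/*
	 * Pre-allocate TX packet descriptors and their DMA maps; half the
	 * ring size suffices, presumably because each packet consumes at
	 * least two WQEs (a header plus one fragment).
	 */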
2018 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2019 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2020 		    PAGE_SIZE);
2021 		if (pkt == NULL) {
2022 			oce_destroy_wq(wq);
2023 			return (NULL);
2024 		}
2025 		oce_pkt_put(&wq->pkt_free, pkt);
2026 	}
2027 
2028 	if (oce_new_wq(sc, wq)) {
2029 		oce_destroy_wq(wq);
2030 		return (NULL);
2031 	}
2032 
2033 	eq->cq[eq->cq_valid] = cq;
2034 	eq->cq_valid++;
2035 	cq->cb_arg = wq;
2036 	cq->cq_intr = oce_intr_wq;
2037 
2038 	return (wq);
2039 }
2040 
2041 void
2042 oce_drain_wq(struct oce_wq *wq)
2043 {
2044 	struct oce_cq *cq = wq->cq;
2045 	struct oce_nic_tx_cqe *cqe;
2046 	int ncqe = 0;
2047 
2048 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2049 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2050 		WQ_CQE_INVALIDATE(cqe);
2051 		ncqe++;
2052 	}
2053 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2054 	oce_arm_cq(cq, ncqe, FALSE);
2055 }
2056 
2057 void
2058 oce_destroy_wq(struct oce_wq *wq)
2059 {
2060 	struct mbx_delete_nic_wq cmd;
2061 	struct oce_softc *sc = wq->sc;
2062 	struct oce_pkt *pkt;
2063 
2064 	if (wq->id >= 0) {
2065 		memset(&cmd, 0, sizeof(cmd));
2066 		cmd.params.req.wq_id = htole16(wq->id);
2067 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2068 		    &cmd, sizeof(cmd));
2069 	}
2070 	if (wq->cq != NULL)
2071 		oce_destroy_cq(wq->cq);
2072 	if (wq->ring != NULL)
2073 		oce_destroy_ring(sc, wq->ring);
2074 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2075 		oce_pkt_free(sc, pkt);
2076 	free(wq, M_DEVBUF, 0);
2077 }
2078 
2079 /**
2080  * @brief 		function to allocate receive queue resources
2081  * @param sc		software handle to the device
2082  * @param eq		pointer to associated event queue
2083  * @param rss		is-rss-queue flag
2084  * @returns		the pointer to the RQ created or NULL on failure
2085  */
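/*
 * Note: one packet wrapper per ring entry is pre-allocated here, but the
 * firmware RQ itself is only created later from oce_init() (presumably
 * through oce_new_rq()), which is why rq->id stays at -1 for now.
 */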
2086 struct oce_rq *
2087 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2088 {
2089 	struct oce_rq *rq;
2090 	struct oce_cq *cq;
2091 	struct oce_pkt *pkt;
2092 	int i;
2093 
2094 	/* Hardware doesn't support any other value */
2095 	if (sc->sc_rx_ring_size != 1024)
2096 		return (NULL);
2097 
2098 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2099 	if (!rq)
2100 		return (NULL);
2101 
2102 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2103 	    sizeof(struct oce_nic_rqe), 2);
2104 	if (!rq->ring) {
2105 		free(rq, M_DEVBUF, 0);
2106 		return (NULL);
2107 	}
2108 
2109 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2110 	    1, 0, 3);
2111 	if (!cq) {
2112 		oce_destroy_ring(sc, rq->ring);
2113 		free(rq, M_DEVBUF, 0);
2114 		return (NULL);
2115 	}
2116 
2117 	rq->id = -1;
2118 	rq->sc = sc;
2119 
2120 	rq->nitems = sc->sc_rx_ring_size;
2121 	rq->fragsize = OCE_RX_BUF_SIZE;
2122 	rq->rss = rss;
2123 
2124 	SIMPLEQ_INIT(&rq->pkt_free);
2125 	SIMPLEQ_INIT(&rq->pkt_list);
2126 
2127 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2128 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2129 		if (pkt == NULL) {
2130 			oce_destroy_rq(rq);
2131 			return (NULL);
2132 		}
2133 		oce_pkt_put(&rq->pkt_free, pkt);
2134 	}
2135 
2136 	rq->cq = cq;
2137 	eq->cq[eq->cq_valid] = cq;
2138 	eq->cq_valid++;
2139 	cq->cb_arg = rq;
2140 	cq->cq_intr = oce_intr_rq;
2141 
2142 	/* RX queue is created in oce_init */
2143 
2144 	return (rq);
2145 }
2146 
2147 void
2148 oce_drain_rq(struct oce_rq *rq)
2149 {
2150 	struct oce_nic_rx_cqe *cqe;
2151 	struct oce_cq *cq = rq->cq;
2152 	int ncqe = 0;
2153 
2154 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2155 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2156 		RQ_CQE_INVALIDATE(cqe);
2157 		ncqe++;
2158 	}
2159 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2160 	oce_arm_cq(cq, ncqe, FALSE);
2161 }
2162 
2163 void
2164 oce_destroy_rq(struct oce_rq *rq)
2165 {
2166 	struct mbx_delete_nic_rq cmd;
2167 	struct oce_softc *sc = rq->sc;
2168 	struct oce_pkt *pkt;
2169 
2170 	if (rq->id >= 0) {
2171 		memset(&cmd, 0, sizeof(cmd));
2172 		cmd.params.req.rq_id = htole16(rq->id);
2173 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2174 		    &cmd, sizeof(cmd));
2175 	}
2176 	if (rq->cq != NULL)
2177 		oce_destroy_cq(rq->cq);
2178 	if (rq->ring != NULL)
2179 		oce_destroy_ring(sc, rq->ring);
2180 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2181 		oce_pkt_free(sc, pkt);
2182 	free(rq, M_DEVBUF, 0);
2183 }
2184 
2185 struct oce_eq *
2186 oce_create_eq(struct oce_softc *sc)
2187 {
2188 	struct oce_eq *eq;
2189 
2190 	/* allocate an eq */
2191 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2192 	if (eq == NULL)
2193 		return (NULL);
2194 
2195 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2196 	if (!eq->ring) {
2197 		free(eq, M_DEVBUF, 0);
2198 		return (NULL);
2199 	}
2200 
2201 	eq->id = -1;
2202 	eq->sc = sc;
2203 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2204 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2205 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2206 
2207 	if (oce_new_eq(sc, eq)) {
2208 		oce_destroy_ring(sc, eq->ring);
2209 		free(eq, M_DEVBUF, 0);
2210 		return (NULL);
2211 	}
2212 
2213 	return (eq);
2214 }
2215 
2216 /**
2217  * @brief		Function to arm an EQ so that it can generate events
2218  * @param eq		pointer to event queue structure
2219  * @param neqe		number of EQEs to arm
2220  * @param rearm		rearm bit enable/disable
2221  * @param clearint	bit to clear the interrupt condition because of which
2222  *			EQEs are generated
2223  */
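/*
 * A sketch of the doorbell word written below, as assembled by the code:
 * the EQ id sits in the low bits, PD_EQ_DB_EVENT marks this as an event
 * queue doorbell, bit 9 clears the interrupt, bits 16 and up return the
 * number of consumed EQEs, and bit 29 asks the hardware to re-arm the EQ.
 */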
2224 static inline void
2225 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2226 {
2227 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2228 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2229 }
2230 
2231 void
2232 oce_drain_eq(struct oce_eq *eq)
2233 {
2234 	struct oce_eqe *eqe;
2235 	int neqe = 0;
2236 
2237 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2238 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2239 		eqe->evnt = 0;
2240 		neqe++;
2241 	}
2242 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2243 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2244 }
2245 
2246 void
2247 oce_destroy_eq(struct oce_eq *eq)
2248 {
2249 	struct mbx_destroy_common_eq cmd;
2250 	struct oce_softc *sc = eq->sc;
2251 
2252 	if (eq->id >= 0) {
2253 		memset(&cmd, 0, sizeof(cmd));
2254 		cmd.params.req.id = htole16(eq->id);
2255 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2256 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2257 	}
2258 	if (eq->ring != NULL)
2259 		oce_destroy_ring(sc, eq->ring);
2260 	free(eq, M_DEVBUF, 0);
2261 }
2262 
2263 struct oce_mq *
2264 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2265 {
2266 	struct oce_mq *mq = NULL;
2267 	struct oce_cq *cq;
2268 
2269 	/* allocate the mq */
2270 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2271 	if (!mq)
2272 		return (NULL);
2273 
2274 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2275 	if (!mq->ring) {
2276 		free(mq, M_DEVBUF, 0);
2277 		return (NULL);
2278 	}
2279 
2280 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2281 	    1, 0, 0);
2282 	if (!cq) {
2283 		oce_destroy_ring(sc, mq->ring);
2284 		free(mq, M_DEVBUF, 0);
2285 		return (NULL);
2286 	}
2287 
2288 	mq->id = -1;
2289 	mq->sc = sc;
2290 	mq->cq = cq;
2291 
2292 	mq->nitems = 128;
2293 
2294 	if (oce_new_mq(sc, mq)) {
2295 		oce_destroy_cq(mq->cq);
2296 		oce_destroy_ring(sc, mq->ring);
2297 		free(mq, M_DEVBUF, 0);
2298 		return (NULL);
2299 	}
2300 
2301 	eq->cq[eq->cq_valid] = cq;
2302 	eq->cq_valid++;
2303 	mq->cq->eq = eq;
2304 	mq->cq->cb_arg = mq;
2305 	mq->cq->cq_intr = oce_intr_mq;
2306 
2307 	return (mq);
2308 }
2309 
2310 void
2311 oce_drain_mq(struct oce_mq *mq)
2312 {
2313 	struct oce_cq *cq = mq->cq;
2314 	struct oce_mq_cqe *cqe;
2315 	int ncqe = 0;
2316 
2317 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2318 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2319 		MQ_CQE_INVALIDATE(cqe);
2320 		ncqe++;
2321 	}
2322 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2323 	oce_arm_cq(cq, ncqe, FALSE);
2324 }
2325 
2326 void
2327 oce_destroy_mq(struct oce_mq *mq)
2328 {
2329 	struct mbx_destroy_common_mq cmd;
2330 	struct oce_softc *sc = mq->sc;
2331 
2332 	if (mq->id >= 0) {
2333 		memset(&cmd, 0, sizeof(cmd));
2334 		cmd.params.req.id = htole16(mq->id);
2335 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2336 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2337 	}
2338 	if (mq->ring != NULL)
2339 		oce_destroy_ring(sc, mq->ring);
2340 	if (mq->cq != NULL)
2341 		oce_destroy_cq(mq->cq);
2342 	free(mq, M_DEVBUF, 0);
2343 }
2344 
2345 /**
2346  * @brief		Function to create a completion queue
2347  * @param sc		software handle to the device
2348  * @param eq		eq to be associated with the cq
2349  * @param nitems	length of completion queue
2350  * @param isize		size of completion queue items
2351  * @param eventable	flag to make the cq generate events on its eq
2352  * @param nodelay	no delay flag
2353  * @param ncoalesce	completion coalescence watermark
2354  * @returns 		pointer to the cq created, NULL on failure
2355  */
2356 struct oce_cq *
2357 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2358     int eventable, int nodelay, int ncoalesce)
2359 {
2360 	struct oce_cq *cq = NULL;
2361 
2362 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2363 	if (!cq)
2364 		return (NULL);
2365 
2366 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2367 	if (!cq->ring) {
2368 		free(cq, M_DEVBUF, 0);
2369 		return (NULL);
2370 	}
2371 
2372 	cq->sc = sc;
2373 	cq->eq = eq;
2374 	cq->nitems = nitems;
2375 	cq->nodelay = nodelay;
2376 	cq->ncoalesce = ncoalesce;
2377 	cq->eventable = eventable;
2378 
2379 	if (oce_new_cq(sc, cq)) {
2380 		oce_destroy_ring(sc, cq->ring);
2381 		free(cq, M_DEVBUF, 0);
2382 		return (NULL);
2383 	}
2384 
2385 	sc->sc_cq[sc->sc_ncq++] = cq;
2386 
2387 	return (cq);
2388 }
2389 
2390 void
2391 oce_destroy_cq(struct oce_cq *cq)
2392 {
2393 	struct mbx_destroy_common_cq cmd;
2394 	struct oce_softc *sc = cq->sc;
2395 
2396 	if (cq->id >= 0) {
2397 		memset(&cmd, 0, sizeof(cmd));
2398 		cmd.params.req.id = htole16(cq->id);
2399 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2400 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2401 	}
2402 	if (cq->ring != NULL)
2403 		oce_destroy_ring(sc, cq->ring);
2404 	free(cq, M_DEVBUF, 0);
2405 }
2406 
2407 /**
2408  * @brief		Function to arm a CQ with CQEs
2409  * @param cq		pointer to the completion queue structure
2410  * @param ncqe		number of CQEs to arm
2411  * @param rearm		rearm bit enable/disable
2412  */
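/*
 * Same doorbell pattern as for the EQ above: CQ id in the low bits, the
 * number of consumed CQEs at bits 16 and up, and the re-arm request at
 * bit 29.
 */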
2413 static inline void
2414 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2415 {
2416 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2417 }
2418 
2419 void
2420 oce_free_posted_rxbuf(struct oce_rq *rq)
2421 {
2422 	struct oce_softc *sc = rq->sc;
2423 	struct oce_pkt *pkt;
2424 
2425 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2426 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2427 		    BUS_DMASYNC_POSTREAD);
2428 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2429 		if (pkt->mbuf != NULL) {
2430 			m_freem(pkt->mbuf);
2431 			pkt->mbuf = NULL;
2432 		}
2433 		oce_pkt_put(&rq->pkt_free, pkt);
2434 		if_rxr_put(&rq->rxring, 1);
2435 	}
2436 }
2437 
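/*
 * Allocate a single-segment DMA buffer: create the map, allocate and map
 * the memory, then load it, unwinding each step on failure.  The physical
 * address of the one segment is recorded in dma->paddr for use in
 * firmware commands.
 */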
2438 int
2439 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2440 {
2441 	int rc;
2442 
2443 	memset(dma, 0, sizeof(struct oce_dma_mem));
2444 
2445 	dma->tag = sc->sc_dmat;
2446 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2447 	    &dma->map);
2448 	if (rc != 0) {
2449 		printf("%s: failed to allocate DMA handle",
2450 		    sc->sc_dev.dv_xname);
2451 		goto fail_0;
2452 	}
2453 
2454 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2455 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2456 	if (rc != 0) {
2457 		printf("%s: failed to allocate DMA memory",
2458 		    sc->sc_dev.dv_xname);
2459 		goto fail_1;
2460 	}
2461 
2462 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2463 	    &dma->vaddr, BUS_DMA_NOWAIT);
2464 	if (rc != 0) {
2465 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2466 		goto fail_2;
2467 	}
2468 
2469 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2470 	    BUS_DMA_NOWAIT);
2471 	if (rc != 0) {
2472 		printf("%s: failed to load DMA memory", sc->sc_dev.dv_xname);
2473 		goto fail_3;
2474 	}
2475 
2476 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2477 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2478 
2479 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2480 	dma->size = size;
2481 
2482 	return (0);
2483 
2484 fail_3:
2485 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2486 fail_2:
2487 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2488 fail_1:
2489 	bus_dmamap_destroy(dma->tag, dma->map);
2490 fail_0:
2491 	return (rc);
2492 }
2493 
2494 void
2495 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2496 {
2497 	if (dma->tag == NULL)
2498 		return;
2499 
2500 	if (dma->map != NULL) {
2501 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2502 		bus_dmamap_unload(dma->tag, dma->map);
2503 
2504 		if (dma->vaddr != 0) {
2505 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2506 			dma->vaddr = 0;
2507 		}
2508 
2509 		bus_dmamap_destroy(dma->tag, dma->map);
2510 		dma->map = NULL;
2511 		dma->tag = NULL;
2512 	}
2513 }
2514 
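/*
 * Descriptor rings are backed by DMA memory spanning at most maxsegs
 * pages, so the total ring size is capped at maxsegs * PAGE_SIZE.  The
 * ring memory is only mapped here; the DMA map is loaded later by
 * oce_load_ring() when the corresponding firmware create command is
 * built.
 */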
2515 struct oce_ring *
2516 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2517 {
2518 	struct oce_dma_mem *dma;
2519 	struct oce_ring *ring;
2520 	bus_size_t size = nitems * isize;
2521 	int rc;
2522 
2523 	if (size > maxsegs * PAGE_SIZE)
2524 		return (NULL);
2525 
2526 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2527 	if (ring == NULL)
2528 		return (NULL);
2529 
2530 	ring->isize = isize;
2531 	ring->nitems = nitems;
2532 
2533 	dma = &ring->dma;
2534 	dma->tag = sc->sc_dmat;
2535 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2536 	    BUS_DMA_NOWAIT, &dma->map);
2537 	if (rc != 0) {
2538 		printf("%s: failed to allocate DMA handle",
2539 		    sc->sc_dev.dv_xname);
2540 		goto fail_0;
2541 	}
2542 
2543 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2544 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2545 	if (rc != 0) {
2546 		printf("%s: failed to allocate DMA memory",
2547 		    sc->sc_dev.dv_xname);
2548 		goto fail_1;
2549 	}
2550 
2551 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2552 	    &dma->vaddr, BUS_DMA_NOWAIT);
2553 	if (rc != 0) {
2554 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2555 		goto fail_2;
2556 	}
2557 
2558 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2559 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2560 
2561 	dma->paddr = 0;
2562 	dma->size = size;
2563 
2564 	return (ring);
2565 
2566 fail_2:
2567 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2568 fail_1:
2569 	bus_dmamap_destroy(dma->tag, dma->map);
2570 fail_0:
2571 	free(ring, M_DEVBUF, 0);
2572 	return (NULL);
2573 }
2574 
2575 void
2576 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2577 {
2578 	oce_dma_free(sc, &ring->dma);
2579 	free(ring, M_DEVBUF, 0);
2580 }
2581 
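/*
 * Load the ring's DMA map and fill in the physical page addresses that
 * the firmware create commands expect.  Returns the number of segments
 * loaded, or 0 on failure (including the case where the map needs more
 * than maxsegs segments).
 */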
2582 int
2583 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2584     struct oce_pa *pa, int maxsegs)
2585 {
2586 	struct oce_dma_mem *dma = &ring->dma;
2587 	int i;
2588 
2589 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2590 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2591 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2592 		return (0);
2593 	}
2594 
2595 	if (dma->map->dm_nsegs > maxsegs) {
2596 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2597 		return (0);
2598 	}
2599 
2600 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2601 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2602 
2603 	for (i = 0; i < dma->map->dm_nsegs; i++)
2604 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2605 
2606 	return (dma->map->dm_nsegs);
2607 }
2608 
2609 static inline void *
2610 oce_ring_get(struct oce_ring *ring)
2611 {
2612 	int index = ring->index;
2613 
2614 	if (++ring->index == ring->nitems)
2615 		ring->index = 0;
2616 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2617 }
2618 
2619 static inline void *
2620 oce_ring_first(struct oce_ring *ring)
2621 {
2622 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2623 }
2624 
2625 static inline void *
2626 oce_ring_next(struct oce_ring *ring)
2627 {
2628 	if (++ring->index == ring->nitems)
2629 		ring->index = 0;
2630 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2631 }
2632 
2633 struct oce_pkt *
2634 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2635 {
2636 	struct oce_pkt *pkt;
2637 
2638 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2639 		return (NULL);
2640 
2641 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2642 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2643 		pool_put(oce_pkt_pool, pkt);
2644 		return (NULL);
2645 	}
2646 
2647 	return (pkt);
2648 }
2649 
2650 void
2651 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2652 {
2653 	if (pkt->map) {
2654 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2655 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2656 	}
2657 	pool_put(oce_pkt_pool, pkt);
2658 }
2659 
2660 static inline struct oce_pkt *
2661 oce_pkt_get(struct oce_pkt_list *lst)
2662 {
2663 	struct oce_pkt *pkt;
2664 
2665 	pkt = SIMPLEQ_FIRST(lst);
2666 	if (pkt == NULL)
2667 		return (NULL);
2668 
2669 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2670 
2671 	return (pkt);
2672 }
2673 
2674 static inline void
2675 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2676 {
2677 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2678 }
2679 
2680 /**
2681  * @brief Wait for FW to become ready and reset it
2682  * @param sc		software handle to the device
2683  */
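/*
 * If POST has not progressed past the "awaiting host ready" stage, the
 * code below nudges it along by requesting a chip reset, then polls the
 * semaphore CSR in 1ms steps (for up to roughly a minute) until the ARM
 * firmware reports ready, at which point an optional function reset is
 * issued.
 */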
2684 int
2685 oce_init_fw(struct oce_softc *sc)
2686 {
2687 	struct ioctl_common_function_reset cmd;
2688 	uint32_t reg;
2689 	int err = 0, tmo = 60000;
2690 
2691 	/* read semaphore CSR */
2692 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2693 
2694 	/* if the FW is still awaiting the host, kick off POST with a chip reset */
2695 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2696 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2697 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2698 	}
2699 
2700 	/* wait for FW to become ready */
2701 	for (;;) {
2702 		if (--tmo == 0)
2703 			break;
2704 
2705 		DELAY(1000);
2706 
2707 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2708 		if (reg & MPU_EP_SEM_ERROR) {
2709 			printf(": POST failed: %#x\n", reg);
2710 			return (ENXIO);
2711 		}
2712 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2713 			/* reset FW */
2714 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2715 				memset(&cmd, 0, sizeof(cmd));
2716 				err = oce_cmd(sc, SUBSYS_COMMON,
2717 				    OPCODE_COMMON_FUNCTION_RESET,
2718 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2719 			}
2720 			return (err);
2721 		}
2722 	}
2723 
2724 	printf(": POST timed out: %#x\n", reg);
2725 
2726 	return (ENXIO);
2727 }
2728 
2729 static inline int
2730 oce_mbox_wait(struct oce_softc *sc)
2731 {
2732 	int i;
2733 
2734 	for (i = 0; i < 20000; i++) {
2735 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2736 			return (0);
2737 		DELAY(100);
2738 	}
2739 	return (ETIMEDOUT);
2740 }
2741 
2742 /**
2743  * @brief Mailbox dispatch
2744  * @param sc		software handle to the device
2745  */
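/*
 * As the code below suggests, the bootstrap mailbox is handed to the
 * hardware in two doorbell writes: first the high bits of its physical
 * address (tagged with PD_MPU_MBOX_DB_HI), then the low bits.  The ready
 * bit is polled before each write, and once more after the low-address
 * write, which is presumably when the firmware has finished processing
 * the command.
 */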
2746 int
2747 oce_mbox_dispatch(struct oce_softc *sc)
2748 {
2749 	uint32_t pa, reg;
2750 	int err;
2751 
2752 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2753 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2754 
2755 	if ((err = oce_mbox_wait(sc)) != 0)
2756 		goto out;
2757 
2758 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2759 
2760 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2761 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2762 
2763 	if ((err = oce_mbox_wait(sc)) != 0)
2764 		goto out;
2765 
2766 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2767 
2768 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2769 
2770 	if ((err = oce_mbox_wait(sc)) != 0)
2771 		goto out;
2772 
2773 out:
2774 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2775 	return (err);
2776 }
2777 
2778 /**
2779  * @brief Function to initialize the hw with host endian information
2780  * @param sc		software handle to the device
2781  * @returns		0 on success, ETIMEDOUT on failure
2782  */
2783 int
2784 oce_mbox_init(struct oce_softc *sc)
2785 {
2786 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2787 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2788 
2789 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2790 		return (0);
2791 
2792 	/* Endian Signature */
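	/*
	 * The fixed byte pattern below presumably lets the firmware deduce
	 * the host byte order from the way the words land in memory.
	 */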
2793 	*ptr++ = 0xff;
2794 	*ptr++ = 0x12;
2795 	*ptr++ = 0x34;
2796 	*ptr++ = 0xff;
2797 	*ptr++ = 0xff;
2798 	*ptr++ = 0x56;
2799 	*ptr++ = 0x78;
2800 	*ptr = 0xff;
2801 
2802 	return (oce_mbox_dispatch(sc));
2803 }
2804 
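/*
 * Issue a mailbox command and copy the response back into the caller's
 * payload buffer.  Payloads up to OCE_MBX_PAYLOAD bytes are embedded
 * directly in the mailbox; larger ones (up to OCE_MAX_PAYLOAD) are passed
 * by reference through a single scatter-gather entry backed by sc_pld.
 */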
2805 int
2806 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2807     void *payload, int length)
2808 {
2809 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2810 	struct oce_mbx *mbx = &bmbx->mbx;
2811 	struct mbx_hdr *hdr;
2812 	caddr_t epayload = NULL;
2813 	int err;
2814 
2815 	if (length > OCE_MBX_PAYLOAD)
2816 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2817 	if (length > OCE_MAX_PAYLOAD)
2818 		return (EINVAL);
2819 
2820 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2821 
2822 	memset(mbx, 0, sizeof(struct oce_mbx));
2823 
2824 	mbx->payload_length = length;
2825 
2826 	if (epayload) {
2827 		mbx->flags = OCE_MBX_F_SGE;
2828 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2829 		memcpy(epayload, payload, length);
2830 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2831 		mbx->pld.sgl[0].length = length;
2832 		hdr = (struct mbx_hdr *)epayload;
2833 	} else {
2834 		mbx->flags = OCE_MBX_F_EMBED;
2835 		memcpy(mbx->pld.data, payload, length);
2836 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2837 	}
2838 
2839 	hdr->subsys = subsys;
2840 	hdr->opcode = opcode;
2841 	hdr->version = version;
2842 	hdr->length = length - sizeof(*hdr);
2843 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2844 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2845 	else
2846 		hdr->timeout = OCE_MBX_TIMEOUT;
2847 
2848 	if (epayload)
2849 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2850 
2851 	err = oce_mbox_dispatch(sc);
2852 	if (err == 0) {
2853 		if (epayload) {
2854 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2855 			memcpy(payload, epayload, length);
2856 		} else
2857 			memcpy(payload, &mbx->pld.data, length);
2858 	} else
2859 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2860 		    "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2861 		    opcode, version, epayload ? "ext " : "",
2862 		    length);
2863 	return (err);
2864 }
2865 
2866 /**
2867  * @brief	Firmware only sends asynchronous notifications during
2868  *		attach after the first MCC command has been issued.  The
2869  *		MCC queue is used only for receiving those async events;
2870  *		commands are sent through the bootstrap mailbox.  So post
2871  *		at least one dummy command on the MCC to get them flowing.
2872  */
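/*
 * The dummy GET_FW_VERSION request below is written straight into the MQ
 * ring and posted by ringing the MQ doorbell with a count of one; no
 * completion is awaited here, it is presumably consumed later through the
 * MQ's completion queue.
 */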
2873 void
2874 oce_first_mcc(struct oce_softc *sc)
2875 {
2876 	struct oce_mbx *mbx;
2877 	struct oce_mq *mq = sc->sc_mq;
2878 	struct mbx_hdr *hdr;
2879 	struct mbx_get_common_fw_version *cmd;
2880 
2881 	mbx = oce_ring_get(mq->ring);
2882 	memset(mbx, 0, sizeof(struct oce_mbx));
2883 
2884 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2885 
2886 	hdr = &cmd->hdr;
2887 	hdr->subsys = SUBSYS_COMMON;
2888 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2889 	hdr->version = OCE_MBX_VER_V0;
2890 	hdr->timeout = OCE_MBX_TIMEOUT;
2891 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2892 
2893 	mbx->flags = OCE_MBX_F_EMBED;
2894 	mbx->payload_length = sizeof(*cmd);
2895 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2896 	    BUS_DMASYNC_PREWRITE);
2897 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2898 }
2899 
2900 int
2901 oce_get_fw_config(struct oce_softc *sc)
2902 {
2903 	struct mbx_common_query_fw_config cmd;
2904 	int err;
2905 
2906 	memset(&cmd, 0, sizeof(cmd));
2907 
2908 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2909 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2910 	if (err)
2911 		return (err);
2912 
2913 	sc->sc_port = cmd.params.rsp.port_id;
2914 	sc->sc_fmode = cmd.params.rsp.function_mode;
2915 
2916 	return (0);
2917 }
2918 
2919 int
2920 oce_check_native_mode(struct oce_softc *sc)
2921 {
2922 	struct mbx_common_set_function_cap cmd;
2923 	int err;
2924 
2925 	memset(&cmd, 0, sizeof(cmd));
2926 
2927 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2928 	    CAP_BE3_NATIVE_ERX_API;
2929 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2930 
2931 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2932 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2933 	if (err)
2934 		return (err);
2935 
2936 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2937 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2938 
2939 	return (0);
2940 }
2941 
2942 /**
2943  * @brief Function for creating a network interface.
2944  * @param sc		software handle to the device
2945  * @returns		0 on success, error otherwise
2946  */
2947 int
2948 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2949 {
2950 	struct mbx_create_common_iface cmd;
2951 	uint32_t caps, caps_en;
2952 	int err = 0;
2953 
2954 	/* interface capabilities to give device when creating interface */
2955 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2956 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2957 	    MBX_RX_IFACE_RSS;
2958 
2959 	/* capabilities to enable by default (others set dynamically) */
2960 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2961 
2962 	if (!IS_XE201(sc)) {
2963 		/* LANCER A0 workaround */
2964 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2965 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2966 	}
2967 
2968 	/* enable capabilities controlled via driver startup parameters */
2969 	if (sc->sc_rss_enable)
2970 		caps_en |= MBX_RX_IFACE_RSS;
2971 
2972 	memset(&cmd, 0, sizeof(cmd));
2973 
2974 	cmd.params.req.version = 0;
2975 	cmd.params.req.cap_flags = htole32(caps);
2976 	cmd.params.req.enable_flags = htole32(caps_en);
2977 	if (macaddr != NULL) {
2978 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2979 		cmd.params.req.mac_invalid = 0;
2980 	} else
2981 		cmd.params.req.mac_invalid = 1;
2982 
2983 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2984 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2985 	if (err)
2986 		return (err);
2987 
2988 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2989 
2990 	if (macaddr != NULL)
2991 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2992 
2993 	return (0);
2994 }
2995 
2996 /**
2997  * @brief Function to send the mbx command to configure vlan
2998  * @param sc 		software handle to the device
2999  * @param vtags		array of vlan tags
3000  * @param nvtags	number of elements in array
3001  * @param untagged	boolean TRUE/FALSE
3002  * @param promisc	flag to enable/disable VLAN promiscuous mode
3003  * @returns		0 on success, EIO on failure
3004  */
3005 int
3006 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3007     int untagged, int promisc)
3008 {
3009 	struct mbx_common_config_vlan cmd;
3010 
3011 	memset(&cmd, 0, sizeof(cmd));
3012 
3013 	cmd.params.req.if_id = sc->sc_if_id;
3014 	cmd.params.req.promisc = promisc;
3015 	cmd.params.req.untagged = untagged;
3016 	cmd.params.req.num_vlans = nvtags;
3017 
3018 	if (!promisc)
3019 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3020 			nvtags * sizeof(struct normal_vlan));
3021 
3022 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3023 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3024 }
3025 
3026 /**
3027  * @brief Function to set flow control capability in the hardware
3028  * @param sc 		software handle to the device
3029  * @param flags		flow control flags to set
3030  * @returns		0 on success, EIO on failure
3031  */
3032 int
3033 oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3034 {
3035 	struct mbx_common_get_set_flow_control cmd;
3036 	int err;
3037 
3038 	memset(&cmd, 0, sizeof(cmd));
3039 
3040 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3041 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3042 
3043 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3044 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3045 	if (err)
3046 		return (err);
3047 
3048 	memset(&cmd, 0, sizeof(cmd));
3049 
3050 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3051 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3052 	if (err)
3053 		return (err);
3054 
3055 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3056 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3057 
3058 	return (0);
3059 }
3060 
3061 #ifdef OCE_RSS
3062 /**
3063  * @brief Function to configure RSS on the created interface
3064  * @param sc 		software handle to the device
3065  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3066  * @returns		0 on success, EIO on failure
3067  */
3068 int
3069 oce_config_rss(struct oce_softc *sc, int enable)
3070 {
3071 	struct mbx_config_nic_rss cmd;
3072 	uint8_t *tbl = &cmd.params.req.cputable;
3073 	int i, j;
3074 
3075 	memset(&cmd, 0, sizeof(cmd));
3076 
3077 	if (enable)
3078 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3079 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3080 	cmd.params.req.flush = OCE_FLUSH;
3081 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3082 
3083 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3084 
3085 	/*
3086 	 * Initialize the RSS CPU indirection table.
3087 	 *
3088 	 * The table is used to choose the queue to place incoming packets.
3089 	 * Incoming packets are hashed.  The lowest bits in the hash result
3090 	 * are used as the index into the CPU indirection table.
3091 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3092 	 * create.  Based on the CPU ID, the receive completion is routed to
3093 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3094 	 * on the default (0) CQ).
3095 	 */
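	/*
	 * Example with hypothetical values: two RSS queues whose rss_cpuid
	 * values are 1 and 2 yield a table starting with { 1, 2 }, and
	 * cpu_tbl_sz_log2 below becomes ilog2(2) == 1.
	 */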
3096 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3097 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3098 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3099 	}
3100 	if (i > 0)
3101 		cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3102 	else
3103 		return (ENXIO);
3104 
3105 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3106 	    &cmd, sizeof(cmd)));
3107 }
3108 #endif	/* OCE_RSS */
3109 
3110 /**
3111  * @brief Function for hardware update multicast filter
3112  * @param sc		software handle to the device
3113  * @param multi		table of multicast addresses
3114  * @param naddr		number of multicast addresses in the table
3115  */
3116 int
3117 oce_update_mcast(struct oce_softc *sc,
3118     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3119 {
3120 	struct mbx_set_common_iface_multicast cmd;
3121 
3122 	memset(&cmd, 0, sizeof(cmd));
3123 
3124 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3125 	cmd.params.req.num_mac = htole16(naddr);
3126 	cmd.params.req.if_id = sc->sc_if_id;
3127 
3128 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3129 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3130 }
3131 
3132 /**
3133  * @brief RXF function to enable/disable device promiscuous mode
3134  * @param sc		software handle to the device
3135  * @param enable	enable/disable flag
3136  * @returns		0 on success, EIO on failure
3137  * @note
3138  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated on Lancer.
3139  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3140  */
3141 int
3142 oce_set_promisc(struct oce_softc *sc, int enable)
3143 {
3144 	struct mbx_set_common_iface_rx_filter cmd;
3145 	struct iface_rx_filter_ctx *req;
3146 
3147 	memset(&cmd, 0, sizeof(cmd));
3148 
3149 	req = &cmd.params.req;
3150 	req->if_id = sc->sc_if_id;
3151 
3152 	if (enable)
3153 		req->iface_flags = req->iface_flags_mask =
3154 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3155 
3156 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3157 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3158 }
3159 
3160 /**
3161  * @brief Function to query the link status from the hardware
3162  * @param sc 		software handle to the device
3163  * @note		link state and speed are saved in the softc on success
3164  * @returns		0 on success, EIO on failure
3165  */
3166 int
3167 oce_get_link_status(struct oce_softc *sc)
3168 {
3169 	struct mbx_query_common_link_config cmd;
3170 	int err;
3171 
3172 	memset(&cmd, 0, sizeof(cmd));
3173 
3174 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3175 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3176 	if (err)
3177 		return (err);
3178 
3179 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3180 	    NTWK_LOGICAL_LINK_UP);
3181 
3182 	if (cmd.params.rsp.mac_speed < 5)
3183 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3184 	else
3185 		sc->sc_link_speed = 0;
3186 
3187 	return (0);
3188 }
3189 
3190 void
3191 oce_macaddr_set(struct oce_softc *sc)
3192 {
3193 	uint32_t old_pmac_id = sc->sc_pmac_id;
3194 	int status = 0;
3195 
3196 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3197 		return;
3198 
3199 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3200 	if (!status)
3201 		status = oce_macaddr_del(sc, old_pmac_id);
3202 	else
3203 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3204 }
3205 
3206 int
3207 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3208 {
3209 	struct mbx_query_common_iface_mac cmd;
3210 	int err;
3211 
3212 	memset(&cmd, 0, sizeof(cmd));
3213 
3214 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3215 	cmd.params.req.permanent = 1;
3216 
3217 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3218 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3219 	if (err == 0)
3220 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3221 		    ETHER_ADDR_LEN);
3222 	return (err);
3223 }
3224 
3225 int
3226 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3227 {
3228 	struct mbx_add_common_iface_mac cmd;
3229 	int err;
3230 
3231 	memset(&cmd, 0, sizeof(cmd));
3232 
3233 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3234 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3235 
3236 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3237 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3238 	if (err == 0)
3239 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3240 	return (err);
3241 }
3242 
3243 int
3244 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3245 {
3246 	struct mbx_del_common_iface_mac cmd;
3247 
3248 	memset(&cmd, 0, sizeof(cmd));
3249 
3250 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3251 	cmd.params.req.pmac_id = htole32(pmac);
3252 
3253 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3254 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3255 }
3256 
3257 int
3258 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3259 {
3260 	struct mbx_create_nic_rq cmd;
3261 	int err, npages;
3262 
3263 	memset(&cmd, 0, sizeof(cmd));
3264 
3265 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3266 	    nitems(cmd.params.req.pages));
3267 	if (!npages) {
3268 		printf("%s: failed to load the rq ring\n", __func__);
3269 		return (1);
3270 	}
3271 
3272 	if (IS_XE201(sc)) {
3273 		cmd.params.req.frag_size = rq->fragsize / 2048;
3274 		cmd.params.req.page_size = 1;
3275 	} else
3276 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3277 	cmd.params.req.num_pages = npages;
3278 	cmd.params.req.cq_id = rq->cq->id;
3279 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3280 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3281 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3282 
3283 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3284 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3285 	    sizeof(cmd));
3286 	if (err)
3287 		return (err);
3288 
3289 	rq->id = letoh16(cmd.params.rsp.rq_id);
3290 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3291 
3292 	return (0);
3293 }
3294 
3295 int
3296 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3297 {
3298 	struct mbx_create_nic_wq cmd;
3299 	int err, npages;
3300 
3301 	memset(&cmd, 0, sizeof(cmd));
3302 
3303 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3304 	    nitems(cmd.params.req.pages));
3305 	if (!npages) {
3306 		printf("%s: failed to load the wq ring\n", __func__);
3307 		return (1);
3308 	}
3309 
3310 	if (IS_XE201(sc))
3311 		cmd.params.req.if_id = sc->sc_if_id;
3312 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3313 	cmd.params.req.num_pages = npages;
3314 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3315 	cmd.params.req.cq_id = htole16(wq->cq->id);
3316 	cmd.params.req.ulp_num = 1;
3317 
3318 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3319 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3320 	    sizeof(cmd));
3321 	if (err)
3322 		return (err);
3323 
3324 	wq->id = letoh16(cmd.params.rsp.wq_id);
3325 
3326 	return (0);
3327 }
3328 
3329 int
3330 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3331 {
3332 	struct mbx_create_common_mq_ex cmd;
3333 	union oce_mq_ext_ctx *ctx;
3334 	int err, npages;
3335 
3336 	memset(&cmd, 0, sizeof(cmd));
3337 
3338 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3339 	    nitems(cmd.params.req.pages));
3340 	if (!npages) {
3341 		printf("%s: failed to load the mq ring\n", __func__);
3342 		return (-1);
3343 	}
3344 
3345 	ctx = &cmd.params.req.context;
3346 	ctx->v0.num_pages = npages;
3347 	ctx->v0.cq_id = mq->cq->id;
3348 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3349 	ctx->v0.valid = 1;
3350 	/* Subscribe to all async events, including link state and group 5 */
3351 	ctx->v0.async_evt_bitmap = 0xffffffff;
3352 
3353 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3354 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3355 	if (err)
3356 		return (err);
3357 
3358 	mq->id = letoh16(cmd.params.rsp.mq_id);
3359 
3360 	return (0);
3361 }
3362 
3363 int
3364 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3365 {
3366 	struct mbx_create_common_eq cmd;
3367 	int err, npages;
3368 
3369 	memset(&cmd, 0, sizeof(cmd));
3370 
3371 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3372 	    nitems(cmd.params.req.pages));
3373 	if (!npages) {
3374 		printf("%s: failed to load the eq ring\n", __func__);
3375 		return (-1);
3376 	}
3377 
3378 	cmd.params.req.ctx.num_pages = htole16(npages);
3379 	cmd.params.req.ctx.valid = 1;
3380 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3381 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3382 	cmd.params.req.ctx.armed = 0;
3383 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3384 
3385 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3386 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3387 	if (err)
3388 		return (err);
3389 
3390 	eq->id = letoh16(cmd.params.rsp.eq_id);
3391 
3392 	return (0);
3393 }
3394 
3395 int
3396 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3397 {
3398 	struct mbx_create_common_cq cmd;
3399 	union oce_cq_ctx *ctx;
3400 	int err, npages;
3401 
3402 	memset(&cmd, 0, sizeof(cmd));
3403 
3404 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3405 	    nitems(cmd.params.req.pages));
3406 	if (!npages) {
3407 		printf("%s: failed to load the cq ring\n", __func__);
3408 		return (-1);
3409 	}
3410 
3411 	ctx = &cmd.params.req.cq_ctx;
3412 
3413 	if (IS_XE201(sc)) {
3414 		ctx->v2.num_pages = htole16(npages);
3415 		ctx->v2.page_size = 1; /* for 4K */
3416 		ctx->v2.eventable = cq->eventable;
3417 		ctx->v2.valid = 1;
3418 		ctx->v2.count = ilog2(cq->nitems / 256);
3419 		ctx->v2.nodelay = cq->nodelay;
3420 		ctx->v2.coalesce_wm = cq->ncoalesce;
3421 		ctx->v2.armed = 0;
3422 		ctx->v2.eq_id = cq->eq->id;
3423 		if (ctx->v2.count == 3) {
3424 			if (cq->nitems > (4*1024)-1)
3425 				ctx->v2.cqe_count = (4*1024)-1;
3426 			else
3427 				ctx->v2.cqe_count = cq->nitems;
3428 		}
3429 	} else {
3430 		ctx->v0.num_pages = htole16(npages);
3431 		ctx->v0.eventable = cq->eventable;
3432 		ctx->v0.valid = 1;
3433 		ctx->v0.count = ilog2(cq->nitems / 256);
3434 		ctx->v0.nodelay = cq->nodelay;
3435 		ctx->v0.coalesce_wm = cq->ncoalesce;
3436 		ctx->v0.armed = 0;
3437 		ctx->v0.eq_id = cq->eq->id;
3438 	}
3439 
3440 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3441 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3442 	    sizeof(cmd));
3443 	if (err)
3444 		return (err);
3445 
3446 	cq->id = letoh16(cmd.params.rsp.cq_id);
3447 
3448 	return (0);
3449 }
3450 
3451 int
3452 oce_init_stats(struct oce_softc *sc)
3453 {
3454 	union {
3455 		struct mbx_get_nic_stats_v0	_be2;
3456 		struct mbx_get_nic_stats	_be3;
3457 		struct mbx_get_pport_stats	_xe201;
3458 	} cmd;
3459 
3460 	sc->sc_statcmd = malloc(sizeof(cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3461 	if (sc->sc_statcmd == NULL) {
3462 		printf("%s: failed to allocate statistics command block\n",
3463 		    sc->sc_dev.dv_xname);
3464 		return (-1);
3465 	}
3466 	return (0);
3467 }
3468 
3469 int
3470 oce_update_stats(struct oce_softc *sc)
3471 {
3472 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3473 	uint64_t rxe, txe;
3474 	int err;
3475 
3476 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3477 		err = oce_stats_be2(sc, &rxe, &txe);
3478 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3479 		err = oce_stats_be3(sc, &rxe, &txe);
3480 	else
3481 		err = oce_stats_xe(sc, &rxe, &txe);
3482 	if (err)
3483 		return (err);
3484 
3485 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3486 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3487 	sc->sc_rx_errors = rxe;
3488 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3489 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3490 	sc->sc_tx_errors = txe;
3491 
3492 	return (0);
3493 }
3494 
3495 int
3496 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3497 {
3498 	struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3499 	struct oce_pmem_stats *ms;
3500 	struct oce_rxf_stats_v0 *rs;
3501 	struct oce_port_rxf_stats_v0 *ps;
3502 	int err;
3503 
3504 	memset(cmd, 0, sizeof(*cmd));
3505 
3506 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3507 	    cmd, sizeof(*cmd));
3508 	if (err)
3509 		return (err);
3510 
3511 	ms = &cmd->params.rsp.stats.pmem;
3512 	rs = &cmd->params.rsp.stats.rxf;
3513 	ps = &rs->port[sc->sc_port];
3514 
3515 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3516 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3517 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3518 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3519 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3520 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3521 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3522 	    ps->rx_alignment_symbol_errors;
3523 	if (sc->sc_if_id)
3524 		*rxe += rs->port1_jabber_events;
3525 	else
3526 		*rxe += rs->port0_jabber_events;
3527 	*rxe += ms->eth_red_drops;
3528 
3529 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3530 
3531 	return (0);
3532 }
3533 
3534 int
3535 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3536 {
3537 	struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3538 	struct oce_pmem_stats *ms;
3539 	struct oce_rxf_stats_v1 *rs;
3540 	struct oce_port_rxf_stats_v1 *ps;
3541 	int err;
3542 
3543 	memset(cmd, 0, sizeof(*cmd));
3544 
3545 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3546 	    cmd, sizeof(*cmd));
3547 	if (err)
3548 		return (err);
3549 
3550 	ms = &cmd->params.rsp.stats.pmem;
3551 	rs = &cmd->params.rsp.stats.rxf;
3552 	ps = &rs->port[sc->sc_port];
3553 
3554 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3555 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3556 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3557 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3558 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3559 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3560 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3561 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3562 	*rxe += ms->eth_red_drops;
3563 
3564 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3565 
3566 	return (0);
3567 }
3568 
3569 int
3570 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3571 {
3572 	struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3573 	struct oce_pport_stats *pps;
3574 	int err;
3575 
3576 	memset(cmd, 0, sizeof(*cmd));
3577 
3578 	cmd->params.req.reset_stats = 0;
3579 	cmd->params.req.port_number = sc->sc_if_id;
3580 
3581 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3582 	    OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3583 	if (err)
3584 		return (err);
3585 
3586 	pps = &cmd->params.rsp.pps;
3587 
3588 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3589 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3590 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3591 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3592 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3593 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3594 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3595 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3596 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3597 
3598 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3599 
3600 	return (0);
3601 }
3602