/*	$OpenBSD: if_oce.c,v 1.109 2024/05/24 06:02:56 jsg Exp $	*/

/*
 * Copyright (c) 2012 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ocereg.h>

#ifndef TRUE
#define TRUE			1
#endif
#ifndef FALSE
#define FALSE			0
#endif

#define OCE_MBX_TIMEOUT		5

#define OCE_MAX_PAYLOAD		65536

#define OCE_TX_RING_SIZE	512
#define OCE_RX_RING_SIZE	1024

/* These should be powers of 2, like 2, 4, 8 and 16 */
#define OCE_MAX_RSS		4 /* TODO: 8 */
#define OCE_MAX_RQ		(OCE_MAX_RSS + 1) /* one default queue */
#define OCE_MAX_WQ		8

#define OCE_MAX_EQ		32
#define OCE_MAX_CQ		(OCE_MAX_RQ + OCE_MAX_WQ + 1) /* one MCC queue */
#define OCE_MAX_CQ_EQ		8 /* Max CQs that can be attached to an EQ */

#define OCE_DEFAULT_EQD		80

#define OCE_MIN_MTU		256
#define OCE_MAX_MTU		9000

#define OCE_MAX_RQ_COMPL	64
#define OCE_MAX_RQ_POSTS	255
#define OCE_RX_BUF_SIZE		2048

#define OCE_MAX_TX_ELEMENTS	29
#define OCE_MAX_TX_DESC		1024
#define OCE_MAX_TX_SIZE		65535

#define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
#define OCE_MEM_DVA(_m)		((_m)->paddr)

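/*
 * Iteration helpers.  The WQ/RQ/EQ/CQ variants walk the per-softc
 * queue arrays; OCE_RING_FOREACH walks ring entries for as long as
 * the caller-supplied condition _c (typically a "valid" bit test on
 * the current entry _v) holds.
 */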
#define OCE_WQ_FOREACH(sc, wq, i) 	\
	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
#define OCE_RQ_FOREACH(sc, rq, i) 	\
	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
#define OCE_EQ_FOREACH(sc, eq, i) 	\
	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
#define OCE_CQ_FOREACH(sc, cq, i) 	\
	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
#define OCE_RING_FOREACH(_r, _v, _c)	\
	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))

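/*
 * Integer base-2 logarithm: returns floor(log2(v)).  Queue-create
 * firmware commands typically encode ring sizes as a log2 count,
 * which is presumably what this helper is for.
 */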
static inline int
ilog2(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return (r);
}

struct oce_pkt {
	struct mbuf *		mbuf;
	bus_dmamap_t		map;
	int			nsegs;
	SIMPLEQ_ENTRY(oce_pkt)	entry;
};
SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);

struct oce_dma_mem {
	bus_dma_tag_t		tag;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs;
	int			nsegs;
	bus_size_t		size;
	caddr_t			vaddr;
	bus_addr_t		paddr;
};

struct oce_ring {
	int			index;
	int			nitems;
	int			nused;
	int			isize;
	struct oce_dma_mem	dma;
};

struct oce_softc;

enum cq_len {
	CQ_LEN_256  = 256,
	CQ_LEN_512  = 512,
	CQ_LEN_1024 = 1024
};

enum eq_len {
	EQ_LEN_256  = 256,
	EQ_LEN_512  = 512,
	EQ_LEN_1024 = 1024,
	EQ_LEN_2048 = 2048,
	EQ_LEN_4096 = 4096
};

enum eqe_size {
	EQE_SIZE_4  = 4,
	EQE_SIZE_16 = 16
};

enum qtype {
	QTYPE_EQ,
	QTYPE_MQ,
	QTYPE_WQ,
	QTYPE_RQ,
	QTYPE_CQ,
	QTYPE_RSS
};

struct oce_eq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
	int			cq_valid;

	int			nitems;
	int			isize;
	int			delay;
};

struct oce_cq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_eq *		eq;

	void			(*cq_intr)(void *);
	void *			cb_arg;

	int			nitems;
	int			nodelay;
	int			eventable;
	int			ncoalesce;
};

struct oce_mq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	int			nitems;
};

struct oce_wq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	int			nitems;
};

struct oce_rq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct if_rxring	rxring;
	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	uint32_t		rss_cpuid;

#ifdef OCE_LRO
	struct lro_ctrl		lro;
	int			lro_pkts_queued;
#endif

	int			nitems;
	int			fragsize;
	int			mtu;
	int			rss;
};

struct oce_softc {
	struct device		sc_dev;

	uint			sc_flags;
#define  OCE_F_BE2		 0x00000001
#define  OCE_F_BE3		 0x00000002
#define  OCE_F_XE201		 0x00000008
#define  OCE_F_BE3_NATIVE	 0x00000100
#define  OCE_F_RESET_RQD	 0x00001000
#define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000

	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_cfg_iot;
	bus_space_handle_t	sc_cfg_ioh;
	bus_size_t		sc_cfg_size;

	bus_space_tag_t		sc_csr_iot;
	bus_space_handle_t	sc_csr_ioh;
	bus_size_t		sc_csr_size;

	bus_space_tag_t		sc_db_iot;
	bus_space_handle_t	sc_db_ioh;
	bus_size_t		sc_db_size;

	void *			sc_ih;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	ushort			sc_link_up;
	ushort			sc_link_speed;
	uint64_t		sc_fc;

	struct oce_dma_mem	sc_mbx;
	struct oce_dma_mem	sc_pld;

	uint			sc_port;
	uint			sc_fmode;

	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
	struct oce_mq *		sc_mq;			/* Mailbox queue */

	ushort			sc_neq;
	ushort			sc_ncq;
	ushort			sc_nrq;
	ushort			sc_nwq;
	ushort			sc_nintr;

	ushort			sc_tx_ring_size;
	ushort			sc_rx_ring_size;
	ushort			sc_rss_enable;

	uint32_t		sc_if_id;	/* interface ID */
	uint32_t		sc_pmac_id;	/* PMAC id */
	char			sc_macaddr[ETHER_ADDR_LEN];

	uint32_t		sc_pvid;

	uint64_t		sc_rx_errors;
	uint64_t		sc_tx_errors;

	struct timeout		sc_tick;
	struct timeout		sc_rxrefill;

	void *			sc_statcmd;
};

#define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
#define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)

#define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
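/*
 * Split a 64-bit DMA address into the two 32-bit words the hardware
 * expects in WQE/RQE fragment descriptors (frag_pa_hi/frag_pa_lo).
 */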

#define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)

int 	oce_match(struct device *, void *, void *);
void	oce_attach(struct device *, struct device *, void *);
int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
void	oce_attachhook(struct device *);
void	oce_attach_ifp(struct oce_softc *);
int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
void	oce_iff(struct oce_softc *);
void	oce_link_status(struct oce_softc *);
void	oce_media_status(struct ifnet *, struct ifmediareq *);
int 	oce_media_change(struct ifnet *);
void	oce_tick(void *);
void	oce_init(void *);
void	oce_stop(struct oce_softc *);
void	oce_watchdog(struct ifnet *);
void	oce_start(struct ifnet *);
int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
#ifdef OCE_TSO
struct mbuf *
	oce_tso(struct oce_softc *, struct mbuf **);
#endif
int 	oce_intr(void *);
void	oce_intr_wq(void *);
void	oce_txeof(struct oce_wq *);
void	oce_intr_rq(void *);
void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
#ifdef OCE_LRO
void	oce_flush_lro(struct oce_rq *);
int 	oce_init_lro(struct oce_softc *);
void	oce_free_lro(struct oce_softc *);
#endif
int	oce_get_buf(struct oce_rq *);
int	oce_alloc_rx_bufs(struct oce_rq *);
void	oce_refill_rx(void *);
void	oce_free_posted_rxbuf(struct oce_rq *);
void	oce_intr_mq(void *);
void	oce_link_event(struct oce_softc *,
	    struct oce_async_cqe_link_state *);

int 	oce_init_queues(struct oce_softc *);
void	oce_release_queues(struct oce_softc *);
struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
void	oce_drain_wq(struct oce_wq *);
void	oce_destroy_wq(struct oce_wq *);
struct oce_rq *
	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
void	oce_drain_rq(struct oce_rq *);
void	oce_destroy_rq(struct oce_rq *);
struct oce_eq *
	oce_create_eq(struct oce_softc *);
static inline void
	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
void	oce_drain_eq(struct oce_eq *);
void	oce_destroy_eq(struct oce_eq *);
struct oce_mq *
	oce_create_mq(struct oce_softc *, struct oce_eq *);
void	oce_drain_mq(struct oce_mq *);
void	oce_destroy_mq(struct oce_mq *);
struct oce_cq *
	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
	    int isize, int eventable, int nodelay, int ncoalesce);
static inline void
	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
void	oce_destroy_cq(struct oce_cq *);

int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
#define	oce_dma_sync(d, f) \
	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)

struct oce_ring *
	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
int	oce_load_ring(struct oce_softc *, struct oce_ring *,
	    struct oce_pa *, int max_segs);
static inline void *
	oce_ring_get(struct oce_ring *);
static inline void *
	oce_ring_first(struct oce_ring *);
static inline void *
	oce_ring_next(struct oce_ring *);
struct oce_pkt *
	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
	    int maxsegsz);
void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
static inline struct oce_pkt *
	oce_pkt_get(struct oce_pkt_list *);
static inline void
	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);

int	oce_init_fw(struct oce_softc *);
int	oce_mbox_init(struct oce_softc *);
int	oce_mbox_dispatch(struct oce_softc *);
int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
	    void *payload, int length);
void	oce_first_mcc(struct oce_softc *);

int	oce_get_fw_config(struct oce_softc *);
int	oce_check_native_mode(struct oce_softc *);
int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
	    int nvtags, int untagged, int promisc);
int	oce_set_flow_control(struct oce_softc *, uint64_t);
int	oce_config_rss(struct oce_softc *, int enable);
int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
	    int naddr);
int	oce_set_promisc(struct oce_softc *, int enable);
int	oce_get_link_status(struct oce_softc *);

void	oce_macaddr_set(struct oce_softc *);
int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);

int	oce_new_rq(struct oce_softc *, struct oce_rq *);
int	oce_new_wq(struct oce_softc *, struct oce_wq *);
int	oce_new_mq(struct oce_softc *, struct oce_mq *);
int	oce_new_eq(struct oce_softc *, struct oce_eq *);
int	oce_new_cq(struct oce_softc *, struct oce_cq *);

int	oce_init_stats(struct oce_softc *);
int	oce_update_stats(struct oce_softc *);
int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);

struct pool *oce_pkt_pool;

struct cfdriver oce_cd = {
	NULL, "oce", DV_IFNET
};

const struct cfattach oce_ca = {
	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
};

const struct pci_matchid oce_devices[] = {
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
};

int
oce_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
}

void
oce_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct oce_softc *sc = (struct oce_softc *)self;
	const char *intrstr = NULL;
	pci_intr_handle_t ih;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SERVERENGINES_BE2:
	case PCI_PRODUCT_SERVERENGINES_OCBE2:
		SET(sc->sc_flags, OCE_F_BE2);
		break;
	case PCI_PRODUCT_SERVERENGINES_BE3:
	case PCI_PRODUCT_SERVERENGINES_OCBE3:
		SET(sc->sc_flags, OCE_F_BE3);
		break;
	case PCI_PRODUCT_EMULEX_XE201:
		SET(sc->sc_flags, OCE_F_XE201);
		break;
	}

	sc->sc_dmat = pa->pa_dmat;
	if (oce_pci_alloc(sc, pa))
		return;

	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;

	/* create the bootstrap mailbox */
	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
		printf(": failed to allocate mailbox memory\n");
		return;
	}
	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
		printf(": failed to allocate payload memory\n");
		goto fail_1;
	}

	if (oce_init_fw(sc))
		goto fail_2;

	if (oce_mbox_init(sc)) {
		printf(": failed to initialize mailbox\n");
		goto fail_2;
	}

	if (oce_get_fw_config(sc)) {
		printf(": failed to get firmware configuration\n");
		goto fail_2;
	}

	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
		if (oce_check_native_mode(sc))
			goto fail_2;
	}

	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
		printf(": failed to fetch MAC address\n");
		goto fail_2;
	}
	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);

	if (oce_pkt_pool == NULL) {
		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (oce_pkt_pool == NULL) {
			printf(": unable to allocate descriptor pool\n");
			goto fail_2;
		}
		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
		    0, "ocepkts", NULL);
	}

	/* We allocate a single interrupt resource */
	sc->sc_nintr = 1;
	if (pci_intr_map_msi(pa, &ih) != 0 &&
	    pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto fail_2;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt\n");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_2;
	}
	printf(": %s", intrstr);

	if (oce_init_stats(sc))
		goto fail_3;

	if (oce_init_queues(sc))
		goto fail_3;

	oce_attach_ifp(sc);

#ifdef OCE_LRO
	if (oce_init_lro(sc))
		goto fail_4;
#endif

	timeout_set(&sc->sc_tick, oce_tick, sc);
	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);

	config_mountroot(self, oce_attachhook);

	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	return;

#ifdef OCE_LRO
fail_4:
	oce_free_lro(sc);
	ether_ifdetach(&sc->sc_ac.ac_if);
	if_detach(&sc->sc_ac.ac_if);
	oce_release_queues(sc);
#endif
fail_3:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail_2:
	oce_dma_free(sc, &sc->sc_pld);
fail_1:
	oce_dma_free(sc, &sc->sc_mbx);
}

int
oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype, reg;

	/* setup the device config region */
	if (ISSET(sc->sc_flags, OCE_F_BE2))
		reg = OCE_BAR_CFG_BE2;
	else
		reg = OCE_BAR_CFG;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
	    IS_BE(sc) ? 0 : 32768)) {
		printf(": can't find cfg mem space\n");
		return (ENXIO);
	}

	/*
	 * Read the SLI_INTF register and determine whether we
	 * can use this port and its features
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
		printf(": invalid signature\n");
		goto fail_1;
	}
	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
		printf(": unsupported SLI revision\n");
		goto fail_1;
	}
	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
		SET(sc->sc_flags, OCE_F_RESET_RQD);

	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
	if (IS_BE(sc)) {
		/* set up CSR region */
		reg = OCE_BAR_CSR;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
			printf(": can't find csr mem space\n");
			goto fail_1;
		}

		/* set up DB doorbell region */
		reg = OCE_BAR_DB;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
			printf(": can't find db mem space\n");
			goto fail_2;
		}
	} else {
		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
	}

	return (0);

fail_2:
	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
fail_1:
	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
	return (ENXIO);
}

static inline uint32_t
oce_read_cfg(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
}

static inline uint32_t
oce_read_csr(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
}

static inline uint32_t
oce_read_db(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
}

static inline void
oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_intr_enable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
}

static inline void
oce_intr_disable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
}

void
oce_attachhook(struct device *self)
{
	struct oce_softc *sc = (struct oce_softc *)self;

	oce_get_link_status(sc);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	/*
	 * We need MCC async events, so enable interrupts and arm the
	 * first EQ; the other EQs will be armed once the interface is up.
	 */
	oce_intr_enable(sc);
	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);

	/*
	 * Send the first MCC command; after that we get gratuitous
	 * MCC notifications from the firmware.
	 */
	oce_first_mcc(sc);
}

void
oce_attach_ifp(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
	    oce_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = oce_ioctl;
	ifp->if_start = oce_start;
	ifp->if_watchdog = oce_watchdog;
	ifp->if_hardmtu = OCE_MAX_MTU;
	ifp->if_softc = sc;
	ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_size - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef OCE_TSO
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
#ifdef OCE_LRO
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct oce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			oce_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				oce_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				oce_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	case SIOCGIFRXR:
		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			oce_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr, ifr1;
	struct oce_rq *rq;
	int error, i;
	u_int n = 0;

	if (sc->sc_nrq > 1) {
		ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	} else
		ifr = &ifr1;

	OCE_RQ_FOREACH(sc, rq, i) {
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
		ifr[n].ifr_info = rq->rxring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);

	if (sc->sc_nrq > 1)
		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
	return (error);
}


void
oce_iff(struct oce_softc *sc)
{
	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int naddr = 0, promisc = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;

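	/*
	 * Fall back to hardware promiscuous mode if an address range is
	 * requested or the multicast list would overflow the filter table.
	 */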
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
		ifp->if_flags |= IFF_ALLMULTI;
		promisc = 1;
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
		}
		oce_update_mcast(sc, multi, naddr);
	}

	oce_set_promisc(sc, promisc);
}

void
oce_link_status(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state = LINK_STATE_DOWN;

	ifp->if_baudrate = 0;
	if (sc->sc_link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->sc_link_speed) {
		case 1:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case 2:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case 3:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case 4:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		}
	}
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct oce_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	if (!sc->sc_link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->sc_link_speed) {
	case 1: /* 10 Mbps */
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	case 2: /* 100 Mbps */
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case 3: /* 1 Gbps */
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case 4: /* 10 Gbps */
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		break;
	}

	if (sc->sc_fc & IFM_ETH_RXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (sc->sc_fc & IFM_ETH_TXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}

int
oce_media_change(struct ifnet *ifp)
{
	return (0);
}

void
oce_tick(void *arg)
{
	struct oce_softc *sc = arg;
	int s;

	s = splnet();

	if (oce_update_stats(sc) == 0)
		timeout_add_sec(&sc->sc_tick, 1);

	splx(s);
}

void
oce_init(void *arg)
{
	struct oce_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i;

	oce_stop(sc);

	DELAY(10);

	oce_macaddr_set(sc);

	oce_iff(sc);

	/* Enable VLAN promiscuous mode */
	if (oce_config_vlan(sc, NULL, 0, 1, 1))
		goto error;

	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
		goto error;

	OCE_RQ_FOREACH(sc, rq, i) {
		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN;
		if (oce_new_rq(sc, rq)) {
			printf("%s: failed to create rq\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
		rq->ring->index = 0;

		/* oce splits jumbos into 2k chunks... */
		if_rxr_init(&rq->rxring, 8, rq->nitems);

		if (!oce_alloc_rx_bufs(rq)) {
			printf("%s: failed to allocate rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}

#ifdef OCE_RSS
	/* RSS config */
	if (sc->sc_rss_enable) {
		if (oce_config_rss(sc, 1)) {
			printf("%s: failed to configure RSS\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}
#endif

	OCE_RQ_FOREACH(sc, rq, i)
		oce_arm_cq(rq->cq, 0, TRUE);

	OCE_WQ_FOREACH(sc, wq, i)
		oce_arm_cq(wq->cq, 0, TRUE);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	OCE_EQ_FOREACH(sc, eq, i)
		oce_arm_eq(eq, 0, TRUE, FALSE);

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	oce_intr_enable(sc);

	return;
error:
	oce_stop(sc);
}

void
oce_stop(struct oce_softc *sc)
{
	struct mbx_delete_nic_rq cmd;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;
	int i;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_rxrefill);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Stop interrupts and finish any pending bottom halves */
	oce_intr_disable(sc);

	/* Invalidate any pending cq and eq entries */
	OCE_EQ_FOREACH(sc, eq, i)
		oce_drain_eq(eq);
	OCE_RQ_FOREACH(sc, rq, i) {
		/* destroy the receive queue in the firmware */
		memset(&cmd, 0, sizeof(cmd));
		cmd.params.req.rq_id = htole16(rq->id);
		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
		DELAY(1000);
		oce_drain_rq(rq);
		oce_free_posted_rxbuf(rq);
	}
	OCE_WQ_FOREACH(sc, wq, i)
		oce_drain_wq(wq);
}

void
oce_watchdog(struct ifnet *ifp)
{
	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);

	oce_init(ifp->if_softc);

	ifp->if_oerrors++;
}

void
oce_start(struct ifnet *ifp)
{
	struct oce_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int pkts = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (oce_encap(sc, &m, 0)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}

	/* Set a timeout in case the chip goes out to lunch */
	if (pkts)
		ifp->if_timer = 5;
}

int
oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
{
	struct mbuf *m = *mpp;
	struct oce_wq *wq = sc->sc_wq[wqidx];
	struct oce_pkt *pkt = NULL;
	struct oce_nic_hdr_wqe *nhe;
	struct oce_nic_frag_wqe *nfe;
	int i, nwqe, err;

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
		m = oce_tso(sc, mpp);
		if (m == NULL)
			goto error;
	}
#endif

	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
		goto error;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		if (m_defrag(m, M_DONTWAIT) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
			BUS_DMA_NOWAIT))
			goto error;
		*mpp = m;
	} else if (err != 0)
		goto error;

	pkt->nsegs = pkt->map->dm_nsegs;

	nwqe = pkt->nsegs + 1;
	if (IS_BE(sc)) {
		/* BE2 and BE3 require an even number of WQEs */
		if (nwqe & 1)
			nwqe++;
	}

	/* Fail if there's not enough free WQEs */
	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		goto error;
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	pkt->mbuf = m;

	/* TX work queue entry for the header */
	nhe = oce_ring_get(wq->ring);
	memset(nhe, 0, sizeof(*nhe));

	nhe->u0.s.complete = 1;
	nhe->u0.s.event = 1;
	nhe->u0.s.crc = 1;
	nhe->u0.s.forward = 0;
	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.num_wqe = nwqe;
	nhe->u0.s.total_length = m->m_pkthdr.len;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG) {
		nhe->u0.s.vlan = 1; /* Vlan present */
		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (m->m_pkthdr.tso_segsz) {
			nhe->u0.s.lso = 1;
			nhe->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
		}
		if (!IS_BE(sc))
			nhe->u0.s.ipcs = 1;
	}
#endif

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	wq->ring->nused++;

	/* TX work queue entries for data chunks */
	for (i = 0; i < pkt->nsegs; i++) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
		wq->ring->nused++;
	}
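	/*
	 * If padding was added for BE2/BE3 above, post one zeroed dummy
	 * fragment WQE so that an even number of entries is consumed.
	 */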
	if (nwqe > (pkt->nsegs + 1)) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		wq->ring->nused++;
		pkt->nsegs++;
	}

	oce_pkt_put(&wq->pkt_list, pkt);

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

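	/*
	 * Ring the TX doorbell: the WQ id goes in the low bits, the
	 * number of newly posted WQEs in bits 16 and up.
	 */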
	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));

	return (0);

error:
	if (pkt)
		oce_pkt_put(&wq->pkt_free, pkt);
	m_freem(*mpp);
	*mpp = NULL;
	return (1);
}

#ifdef OCE_TSO
struct mbuf *
oce_tso(struct oce_softc *sc, struct mbuf **mpp)
{
	struct mbuf *m;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_DONTWAIT);
		if (!m)
			return (NULL);
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) +
		    (th->th_off << 2);
		break;
#endif
	default:
		return (NULL);
	}

	m = m_pullup(m, total_len);
	if (!m)
		return (NULL);
	*mpp = m;
	return (m);
}
#endif /* OCE_TSO */

int
oce_intr(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_eq *eq = sc->sc_eq[0];
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, neqe = 0;

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);

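	/* Consume every pending EQE; each one signals work on some CQ. */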
	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
		eqe->evnt = 0;
		neqe++;
	}

	/* Spurious? */
	if (!neqe) {
		oce_arm_eq(eq, 0, TRUE, FALSE);
		return (0);
	}

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(eq, neqe, FALSE, TRUE);

	/* Process TX, RX and MCC completion queues */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_intr)(cq->cb_arg);
		oce_arm_cq(cq, 0, TRUE);
	}

	oce_arm_eq(eq, 0, TRUE, FALSE);
	return (1);
}

/* Handle the Completion Queue for transmit */
void
oce_intr_wq(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	struct oce_softc *sc = wq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
		oce_txeof(wq);
		WQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}
	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

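	/* Restart transmit once at least half of the ring has drained. */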
	if (ifq_is_oactive(&ifp->if_snd)) {
		if (wq->ring->nused < (wq->ring->nitems / 2)) {
			ifq_clr_oactive(&ifp->if_snd);
			oce_start(ifp);
		}
	}
	if (wq->ring->nused == 0)
		ifp->if_timer = 0;

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_txeof(struct oce_wq *wq)
{
	struct oce_softc *sc = wq->sc;
	struct oce_pkt *pkt;
	struct mbuf *m;

	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
		printf("%s: missing descriptor in txeof\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	wq->ring->nused -= pkt->nsegs + 1;
	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkt->map);

	m = pkt->mbuf;
	m_freem(m);
	pkt->mbuf = NULL;
	oce_pkt_put(&wq->pkt_free, pkt);
}

/* Handle the Completion Queue for receive */
void
oce_intr_rq(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	struct oce_softc *sc = rq->sc;
	struct oce_nic_rx_cqe *cqe;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int maxrx, ncqe = 0;

	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
		if (cqe->u0.s.error == 0) {
			if (cqe->u0.s.pkt_size == 0)
				/* partial DMA workaround for Lancer */
				oce_rxeoc(rq, cqe);
			else
				oce_rxeof(rq, cqe);
		} else {
			ifp->if_ierrors++;
			if (IS_XE201(sc))
				/* Lancer A0 no buffer workaround */
				oce_rxeoc(rq, cqe);
			else
				/* Post L3/L4 errors to stack. */
				oce_rxeof(rq, cqe);
		}
#ifdef OCE_LRO
		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
			oce_flush_lro(rq);
#endif
		RQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

#ifdef OCE_LRO
	if (IF_LRO_ENABLED(ifp))
		oce_flush_lro(rq);
#endif

	if (ncqe) {
		oce_arm_cq(cq, ncqe, FALSE);
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 1);
	}
}

void
oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt = NULL;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m = NULL, *tail = NULL;
	int i, len, frag_len;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;

	/* Get vlan_tag value */
	if (IS_BE(sc))
		vtag = ntohs(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

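	/*
	 * A received frame may span several RX buffers; chain the
	 * fragments into a single mbuf chain, filling in the packet
	 * header from the first fragment.
	 */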
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeof\n",
			    sc->sc_dev.dv_xname);
			goto exit;
		}

		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);

		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
		pkt->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pkt->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pkt->mbuf;
			tail = pkt->mbuf;
		} else {
			/* first fragment, fill out most of the header */
			pkt->mbuf->m_pkthdr.len = len;
			pkt->mbuf->m_pkthdr.csum_flags = 0;
			if (cqe->u0.s.ip_cksum_pass) {
				if (!cqe->u0.s.ip_ver) { /* IPV4 */
					pkt->mbuf->m_pkthdr.csum_flags =
					    M_IPV4_CSUM_IN_OK;
				}
			}
			if (cqe->u0.s.l4_cksum_pass) {
				pkt->mbuf->m_pkthdr.csum_flags |=
				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
			}
			m = tail = pkt->mbuf;
		}
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		len -= frag_len;
	}

	if (m) {
		if (!oce_port_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

#if NVLAN > 0
		/* This determines if the vlan tag is valid */
		if (oce_vtp_valid(sc, cqe)) {
			if (sc->sc_fmode & FNM_FLEX10_MODE) {
				/* FLEX10: if QnQ is not set, ignore the VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK))  {
				/*
				 * In UMC mode the PVID is generally stripped,
				 * but in some cases frames still arrive with
				 * it, so if pvid == vlan, ignore the tag.
				 */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
#endif

#ifdef OCE_LRO
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto exit;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		ml_enqueue(&ml, m);
	}
exit:
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&rq->rxring);
}

void
oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	int i, num_frags = cqe->u0.s.num_fragments;

	if (IS_XE201(sc) && cqe->u0.s.error) {
		/*
		 * Lancer A0 workaround:
		 * num_frags will be 1 more than actual in case of error
		 */
		if (num_frags)
			num_frags--;
	}
	for (i = 0; i < num_frags; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeoc\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);
		m_freem(pkt->mbuf);
		oce_pkt_put(&rq->pkt_free, pkt);
	}
}

int
oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		return (cqe_v1->u0.s.vlan_tag_present);
	}
	return (cqe->u0.s.vlan_tag_present);
}

int
oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		if (sc->sc_port != cqe_v1->u0.s.port)
			return (0);
	}
	return (1);
}

#ifdef OCE_LRO
void
oce_flush_lro(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;

	if (!IF_LRO_ENABLED(ifp))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;
}

int
oce_init_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			printf("%s: LRO init failed\n",
			    sc->sc_dev.dv_xname);
			return (rc);
		}
		lro->ifp = &sc->sc_ac.ac_if;
	}

	return (rc);
}

void
oce_free_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif /* OCE_LRO */

int
oce_get_buf(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	struct oce_nic_rqe *rqe;

	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
		return (0);

	pkt->mbuf = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (pkt->mbuf == NULL) {
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
#ifdef __STRICT_ALIGNMENT
	m_adj(pkt->mbuf, ETHER_ALIGN);
#endif

	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
	    BUS_DMA_NOWAIT)) {
		m_freem(pkt->mbuf);
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	rqe = oce_ring_get(rq->ring);
	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	oce_pkt_put(&rq->pkt_list, pkt);

	return (1);
}

int
oce_alloc_rx_bufs(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	int i, nbufs = 0;
	u_int slots;

	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
		if (oce_get_buf(rq) == 0)
			break;

		nbufs++;
	}
	if_rxr_put(&rq->rxring, slots);

	if (!nbufs)
		return (0);
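	/*
	 * Tell the hardware how many new RQEs were posted: the RXULP
	 * doorbell carries the count in bits 24 and up, so post in
	 * chunks of OCE_MAX_RQ_POSTS until the remainder fits.
	 */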
	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
		oce_write_db(sc, PD_RXULP_DB, rq->id |
		    (OCE_MAX_RQ_POSTS << 24));
		nbufs -= OCE_MAX_RQ_POSTS;
	}
	if (nbufs > 0)
		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
	return (1);
}

void
oce_refill_rx(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_rq *rq;
	int i, s;

	s = splnet();
	OCE_RQ_FOREACH(sc, rq, i) {
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 5);
	}
	splx(s);
}

/* Handle the Completion Queue for the Mailbox/Async notifications */
void
oce_intr_mq(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	struct oce_softc *sc = mq->sc;
	struct oce_cq *cq = mq->cq;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	int evtype, optype, ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
		if (cqe->u0.s.async_event) {
			evtype = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evtype == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				oce_link_event(sc, acqe);
			} else if ((evtype == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->sc_pvid =
					    gcqe->tag & VLAN_VID_MASK;
				else
					sc->sc_pvid = 0;
			}
		}
		MQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	    ASYNC_EVENT_LINK_UP);
	/* Update speed */
	sc->sc_link_speed = acqe->u0.s.speed;
	oce_link_status(sc);
}

int
oce_init_queues(struct oce_softc *sc)
{
	struct oce_wq *wq;
	struct oce_rq *rq;
	int i;

	sc->sc_nrq = 1;
	sc->sc_nwq = 1;

	/* Create network interface on card */
	if (oce_create_iface(sc, sc->sc_macaddr))
		goto error;

	/* create all of the event queues */
	for (i = 0; i < sc->sc_nintr; i++) {
		sc->sc_eq[i] = oce_create_eq(sc);
		if (!sc->sc_eq[i])
			goto error;
	}

	/* alloc tx queues */
	OCE_WQ_FOREACH(sc, wq, i) {
		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
		if (!sc->sc_wq[i])
			goto error;
	}

	/* alloc rx queues */
	OCE_RQ_FOREACH(sc, rq, i) {
		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
		    i > 0 ? sc->sc_rss_enable : 0);
		if (!sc->sc_rq[i])
			goto error;
	}

	/* alloc mailbox queue */
	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
	if (!sc->sc_mq)
		goto error;

	return (0);
error:
	oce_release_queues(sc);
	return (1);
}

void
oce_release_queues(struct oce_softc *sc)
{
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;
	int i;

	OCE_RQ_FOREACH(sc, rq, i) {
		if (rq)
			oce_destroy_rq(sc->sc_rq[i]);
	}

	OCE_WQ_FOREACH(sc, wq, i) {
		if (wq)
			oce_destroy_wq(sc->sc_wq[i]);
	}

	if (sc->sc_mq)
		oce_destroy_mq(sc->sc_mq);

	OCE_EQ_FOREACH(sc, eq, i) {
		if (eq)
			oce_destroy_eq(sc->sc_eq[i]);
	}
}
1965 
1966 /**
1967  * @brief 		Function to create a WQ for NIC Tx
1968  * @param sc 		software handle to the device
1969  * @returns		the pointer to the WQ created or NULL on failure
1970  */
1971 struct oce_wq *
1972 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1973 {
1974 	struct oce_wq *wq;
1975 	struct oce_cq *cq;
1976 	struct oce_pkt *pkt;
1977 	int i;
1978 
1979 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1980 		return (NULL);
1981 
1982 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1983 	if (!wq)
1984 		return (NULL);
1985 
1986 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1987 	if (!wq->ring) {
1988 		free(wq, M_DEVBUF, 0);
1989 		return (NULL);
1990 	}
1991 
1992 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
1993 	    1, 0, 3);
1994 	if (!cq) {
1995 		oce_destroy_ring(sc, wq->ring);
1996 		free(wq, M_DEVBUF, 0);
1997 		return (NULL);
1998 	}
1999 
2000 	wq->id = -1;
2001 	wq->sc = sc;
2002 
2003 	wq->cq = cq;
2004 	wq->nitems = sc->sc_tx_ring_size;
2005 
2006 	SIMPLEQ_INIT(&wq->pkt_free);
2007 	SIMPLEQ_INIT(&wq->pkt_list);
2008 
2009 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2010 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2011 		    PAGE_SIZE);
2012 		if (pkt == NULL) {
2013 			oce_destroy_wq(wq);
2014 			return (NULL);
2015 		}
2016 		oce_pkt_put(&wq->pkt_free, pkt);
2017 	}
2018 
2019 	if (oce_new_wq(sc, wq)) {
2020 		oce_destroy_wq(wq);
2021 		return (NULL);
2022 	}
2023 
2024 	eq->cq[eq->cq_valid] = cq;
2025 	eq->cq_valid++;
2026 	cq->cb_arg = wq;
2027 	cq->cq_intr = oce_intr_wq;
2028 
2029 	return (wq);
2030 }
2031 
2032 void
2033 oce_drain_wq(struct oce_wq *wq)
2034 {
2035 	struct oce_cq *cq = wq->cq;
2036 	struct oce_nic_tx_cqe *cqe;
2037 	int ncqe = 0;
2038 
2039 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2040 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2041 		WQ_CQE_INVALIDATE(cqe);
2042 		ncqe++;
2043 	}
2044 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2045 	oce_arm_cq(cq, ncqe, FALSE);
2046 }
2047 
2048 void
2049 oce_destroy_wq(struct oce_wq *wq)
2050 {
2051 	struct mbx_delete_nic_wq cmd;
2052 	struct oce_softc *sc = wq->sc;
2053 	struct oce_pkt *pkt;
2054 
2055 	if (wq->id >= 0) {
2056 		memset(&cmd, 0, sizeof(cmd));
2057 		cmd.params.req.wq_id = htole16(wq->id);
2058 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2059 		    &cmd, sizeof(cmd));
2060 	}
2061 	if (wq->cq != NULL)
2062 		oce_destroy_cq(wq->cq);
2063 	if (wq->ring != NULL)
2064 		oce_destroy_ring(sc, wq->ring);
2065 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2066 		oce_pkt_free(sc, pkt);
2067 	free(wq, M_DEVBUF, 0);
2068 }
2069 
2070 /**
2071  * @brief 		function to allocate receive queue resources
2072  * @param sc		software handle to the device
2073  * @param eq		pointer to associated event queue
2074  * @param rss		is-rss-queue flag
2075  * @returns		the pointer to the RQ created or NULL on failure
2076  */
2077 struct oce_rq *
2078 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2079 {
2080 	struct oce_rq *rq;
2081 	struct oce_cq *cq;
2082 	struct oce_pkt *pkt;
2083 	int i;
2084 
2085 	/* Hardware doesn't support any other value */
2086 	if (sc->sc_rx_ring_size != 1024)
2087 		return (NULL);
2088 
2089 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2090 	if (!rq)
2091 		return (NULL);
2092 
2093 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2094 	    sizeof(struct oce_nic_rqe), 2);
2095 	if (!rq->ring) {
2096 		free(rq, M_DEVBUF, 0);
2097 		return (NULL);
2098 	}
2099 
2100 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2101 	    1, 0, 3);
2102 	if (!cq) {
2103 		oce_destroy_ring(sc, rq->ring);
2104 		free(rq, M_DEVBUF, 0);
2105 		return (NULL);
2106 	}
2107 
2108 	rq->id = -1;
2109 	rq->sc = sc;
2110 
2111 	rq->nitems = sc->sc_rx_ring_size;
2112 	rq->fragsize = OCE_RX_BUF_SIZE;
2113 	rq->rss = rss;
2114 
2115 	SIMPLEQ_INIT(&rq->pkt_free);
2116 	SIMPLEQ_INIT(&rq->pkt_list);
2117 
2118 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2119 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2120 		if (pkt == NULL) {
2121 			oce_destroy_rq(rq);
2122 			return (NULL);
2123 		}
2124 		oce_pkt_put(&rq->pkt_free, pkt);
2125 	}
2126 
2127 	rq->cq = cq;
2128 	eq->cq[eq->cq_valid] = cq;
2129 	eq->cq_valid++;
2130 	cq->cb_arg = rq;
2131 	cq->cq_intr = oce_intr_rq;
2132 
2133 	/* RX queue is created in oce_init */
2134 
2135 	return (rq);
2136 }
2137 
2138 void
2139 oce_drain_rq(struct oce_rq *rq)
2140 {
2141 	struct oce_nic_rx_cqe *cqe;
2142 	struct oce_cq *cq = rq->cq;
2143 	int ncqe = 0;
2144 
2145 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2146 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2147 		RQ_CQE_INVALIDATE(cqe);
2148 		ncqe++;
2149 	}
2150 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2151 	oce_arm_cq(cq, ncqe, FALSE);
2152 }
2153 
2154 void
2155 oce_destroy_rq(struct oce_rq *rq)
2156 {
2157 	struct mbx_delete_nic_rq cmd;
2158 	struct oce_softc *sc = rq->sc;
2159 	struct oce_pkt *pkt;
2160 
2161 	if (rq->id >= 0) {
2162 		memset(&cmd, 0, sizeof(cmd));
2163 		cmd.params.req.rq_id = htole16(rq->id);
2164 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2165 		    &cmd, sizeof(cmd));
2166 	}
2167 	if (rq->cq != NULL)
2168 		oce_destroy_cq(rq->cq);
2169 	if (rq->ring != NULL)
2170 		oce_destroy_ring(sc, rq->ring);
2171 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2172 		oce_pkt_free(sc, pkt);
2173 	free(rq, M_DEVBUF, 0);
2174 }
2175 
2176 struct oce_eq *
2177 oce_create_eq(struct oce_softc *sc)
2178 {
2179 	struct oce_eq *eq;
2180 
2181 	/* allocate an eq */
2182 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2183 	if (eq == NULL)
2184 		return (NULL);
2185 
2186 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2187 	if (!eq->ring) {
2188 		free(eq, M_DEVBUF, 0);
2189 		return (NULL);
2190 	}
2191 
2192 	eq->id = -1;
2193 	eq->sc = sc;
2194 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2195 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2196 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2197 
2198 	if (oce_new_eq(sc, eq)) {
2199 		oce_destroy_ring(sc, eq->ring);
2200 		free(eq, M_DEVBUF, 0);
2201 		return (NULL);
2202 	}
2203 
2204 	return (eq);
2205 }
2206 
2207 /**
2208  * @brief		Function to arm an EQ so that it can generate events
2209  * @param eq		pointer to event queue structure
2210  * @param neqe		number of processed EQEs to acknowledge
2211  * @param rearm		rearm bit enable/disable
2212  * @param clearint	bit to clear the interrupt condition because of which
2213  *			EQEs are generated
2214  */
2215 static inline void
2216 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2217 {
2218 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2219 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2220 }
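
/*
 * Illustrative sketch, not driver code: how the doorbell word built by
 * oce_arm_eq() above decomposes, assuming eq->id occupies the low bits
 * as the shifts used suggest:
 *
 *	eq->id | PD_EQ_DB_EVENT		EQ id plus the EQ/CQ doorbell select
 *	clearint << 9			clear the interrupt condition
 *	neqe << 16			number of EQEs being acknowledged
 *	rearm << 29			rearm the queue for further events
 */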
2221 
2222 void
2223 oce_drain_eq(struct oce_eq *eq)
2224 {
2225 	struct oce_eqe *eqe;
2226 	int neqe = 0;
2227 
2228 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2229 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2230 		eqe->evnt = 0;
2231 		neqe++;
2232 	}
2233 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2234 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2235 }
2236 
2237 void
2238 oce_destroy_eq(struct oce_eq *eq)
2239 {
2240 	struct mbx_destroy_common_eq cmd;
2241 	struct oce_softc *sc = eq->sc;
2242 
2243 	if (eq->id >= 0) {
2244 		memset(&cmd, 0, sizeof(cmd));
2245 		cmd.params.req.id = htole16(eq->id);
2246 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2247 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2248 	}
2249 	if (eq->ring != NULL)
2250 		oce_destroy_ring(sc, eq->ring);
2251 	free(eq, M_DEVBUF, 0);
2252 }
2253 
2254 struct oce_mq *
2255 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2256 {
2257 	struct oce_mq *mq = NULL;
2258 	struct oce_cq *cq;
2259 
2260 	/* allocate the mq */
2261 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2262 	if (!mq)
2263 		return (NULL);
2264 
2265 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2266 	if (!mq->ring) {
2267 		free(mq, M_DEVBUF, 0);
2268 		return (NULL);
2269 	}
2270 
2271 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2272 	    1, 0, 0);
2273 	if (!cq) {
2274 		oce_destroy_ring(sc, mq->ring);
2275 		free(mq, M_DEVBUF, 0);
2276 		return (NULL);
2277 	}
2278 
2279 	mq->id = -1;
2280 	mq->sc = sc;
2281 	mq->cq = cq;
2282 
2283 	mq->nitems = 128;
2284 
2285 	if (oce_new_mq(sc, mq)) {
2286 		oce_destroy_cq(mq->cq);
2287 		oce_destroy_ring(sc, mq->ring);
2288 		free(mq, M_DEVBUF, 0);
2289 		return (NULL);
2290 	}
2291 
2292 	eq->cq[eq->cq_valid] = cq;
2293 	eq->cq_valid++;
2294 	mq->cq->eq = eq;
2295 	mq->cq->cb_arg = mq;
2296 	mq->cq->cq_intr = oce_intr_mq;
2297 
2298 	return (mq);
2299 }
2300 
2301 void
2302 oce_drain_mq(struct oce_mq *mq)
2303 {
2304 	struct oce_cq *cq = mq->cq;
2305 	struct oce_mq_cqe *cqe;
2306 	int ncqe = 0;
2307 
2308 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2309 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2310 		MQ_CQE_INVALIDATE(cqe);
2311 		ncqe++;
2312 	}
2313 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2314 	oce_arm_cq(cq, ncqe, FALSE);
2315 }
2316 
2317 void
2318 oce_destroy_mq(struct oce_mq *mq)
2319 {
2320 	struct mbx_destroy_common_mq cmd;
2321 	struct oce_softc *sc = mq->sc;
2322 
2323 	if (mq->id >= 0) {
2324 		memset(&cmd, 0, sizeof(cmd));
2325 		cmd.params.req.id = htole16(mq->id);
2326 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2327 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2328 	}
2329 	if (mq->ring != NULL)
2330 		oce_destroy_ring(sc, mq->ring);
2331 	if (mq->cq != NULL)
2332 		oce_destroy_cq(mq->cq);
2333 	free(mq, M_DEVBUF, 0);
2334 }
2335 
2336 /**
2337  * @brief		Function to create a completion queue
2338  * @param sc		software handle to the device
2339  * @param eq		optional eq to be associated with the cq
2340  * @param nitems	length of completion queue
2341  * @param isize		size of completion queue items
2342  * @param eventable	whether the cq can generate events
2343  * @param nodelay	no delay flag
2344  * @param ncoalesce	coalescing watermark
2345  * @returns 		pointer to the cq created, NULL on failure
2346  */
2347 struct oce_cq *
2348 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2349     int eventable, int nodelay, int ncoalesce)
2350 {
2351 	struct oce_cq *cq = NULL;
2352 
2353 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2354 	if (!cq)
2355 		return (NULL);
2356 
2357 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2358 	if (!cq->ring) {
2359 		free(cq, M_DEVBUF, 0);
2360 		return (NULL);
2361 	}
2362 
2363 	cq->sc = sc;
2364 	cq->eq = eq;
2365 	cq->nitems = nitems;
2366 	cq->nodelay = nodelay;
2367 	cq->ncoalesce = ncoalesce;
2368 	cq->eventable = eventable;
2369 
2370 	if (oce_new_cq(sc, cq)) {
2371 		oce_destroy_ring(sc, cq->ring);
2372 		free(cq, M_DEVBUF, 0);
2373 		return (NULL);
2374 	}
2375 
2376 	sc->sc_cq[sc->sc_ncq++] = cq;
2377 
2378 	return (cq);
2379 }
2380 
2381 void
2382 oce_destroy_cq(struct oce_cq *cq)
2383 {
2384 	struct mbx_destroy_common_cq cmd;
2385 	struct oce_softc *sc = cq->sc;
2386 
2387 	if (cq->id >= 0) {
2388 		memset(&cmd, 0, sizeof(cmd));
2389 		cmd.params.req.id = htole16(cq->id);
2390 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2391 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2392 	}
2393 	if (cq->ring != NULL)
2394 		oce_destroy_ring(sc, cq->ring);
2395 	free(cq, M_DEVBUF, 0);
2396 }
2397 
2398 /**
2399  * @brief		Function to arm a CQ with CQEs
2400  * @param cq		pointer to the completion queue structure
2401  * @param ncqe		number of processed CQEs to acknowledge
2402  * @param rearm		rearm bit enable/disable
2403  */
2404 static inline void
2405 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2406 {
2407 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2408 }
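
/*
 * Illustrative sketch, not driver code: the PD_CQ_DB word assembled by
 * oce_arm_cq() above, assuming cq->id sits in the low bits:
 *
 *	cq->id				completion queue id
 *	ncqe << 16			number of CQEs being acknowledged
 *	rearm << 29			rearm the queue
 */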
2409 
2410 void
2411 oce_free_posted_rxbuf(struct oce_rq *rq)
2412 {
2413 	struct oce_softc *sc = rq->sc;
2414 	struct oce_pkt *pkt;
2415 
2416 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2417 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2418 		    BUS_DMASYNC_POSTREAD);
2419 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2420 		if (pkt->mbuf != NULL) {
2421 			m_freem(pkt->mbuf);
2422 			pkt->mbuf = NULL;
2423 		}
2424 		oce_pkt_put(&rq->pkt_free, pkt);
2425 		if_rxr_put(&rq->rxring, 1);
2426 	}
2427 }
2428 
2429 int
2430 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2431 {
2432 	int rc;
2433 
2434 	memset(dma, 0, sizeof(struct oce_dma_mem));
2435 
2436 	dma->tag = sc->sc_dmat;
2437 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2438 	    &dma->map);
2439 	if (rc != 0) {
2440 		printf("%s: failed to allocate DMA handle\n",
2441 		    sc->sc_dev.dv_xname);
2442 		goto fail_0;
2443 	}
2444 
2445 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2446 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2447 	if (rc != 0) {
2448 		printf("%s: failed to allocate DMA memory\n",
2449 		    sc->sc_dev.dv_xname);
2450 		goto fail_1;
2451 	}
2452 
2453 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2454 	    &dma->vaddr, BUS_DMA_NOWAIT);
2455 	if (rc != 0) {
2456 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2457 		goto fail_2;
2458 	}
2459 
2460 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2461 	    BUS_DMA_NOWAIT);
2462 	if (rc != 0) {
2463 		printf("%s: failed to load DMA memory\n", sc->sc_dev.dv_xname);
2464 		goto fail_3;
2465 	}
2466 
2467 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2468 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2469 
2470 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2471 	dma->size = size;
2472 
2473 	return (0);
2474 
2475 fail_3:
2476 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2477 fail_2:
2478 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2479 fail_1:
2480 	bus_dmamap_destroy(dma->tag, dma->map);
2481 fail_0:
2482 	return (rc);
2483 }
2484 
2485 void
2486 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2487 {
2488 	if (dma->tag == NULL)
2489 		return;
2490 
2491 	if (dma->map != NULL) {
2492 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2493 		bus_dmamap_unload(dma->tag, dma->map);
2494 
2495 		if (dma->vaddr != 0) {
2496 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2497 			dma->vaddr = 0;
2498 		}
2499 
2500 		bus_dmamap_destroy(dma->tag, dma->map);
2501 		dma->map = NULL;
2502 		dma->tag = NULL;
2503 	}
2504 }
2505 
2506 struct oce_ring *
2507 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2508 {
2509 	struct oce_dma_mem *dma;
2510 	struct oce_ring *ring;
2511 	bus_size_t size = nitems * isize;
2512 	int rc;
2513 
2514 	if (size > maxsegs * PAGE_SIZE)
2515 		return (NULL);
2516 
2517 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2518 	if (ring == NULL)
2519 		return (NULL);
2520 
2521 	ring->isize = isize;
2522 	ring->nitems = nitems;
2523 
2524 	dma = &ring->dma;
2525 	dma->tag = sc->sc_dmat;
2526 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2527 	    BUS_DMA_NOWAIT, &dma->map);
2528 	if (rc != 0) {
2529 		printf("%s: failed to allocate DMA handle\n",
2530 		    sc->sc_dev.dv_xname);
2531 		goto fail_0;
2532 	}
2533 
2534 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2535 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2536 	if (rc != 0) {
2537 		printf("%s: failed to allocate DMA memory\n",
2538 		    sc->sc_dev.dv_xname);
2539 		goto fail_1;
2540 	}
2541 
2542 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2543 	    &dma->vaddr, BUS_DMA_NOWAIT);
2544 	if (rc != 0) {
2545 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2546 		goto fail_2;
2547 	}
2548 
2549 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2550 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2551 
2552 	dma->paddr = 0;
2553 	dma->size = size;
2554 
2555 	return (ring);
2556 
2557 fail_2:
2558 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2559 fail_1:
2560 	bus_dmamap_destroy(dma->tag, dma->map);
2561 fail_0:
2562 	free(ring, M_DEVBUF, 0);
2563 	return (NULL);
2564 }
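
/*
 * Sizing note (illustrative arithmetic, assuming 4 KB pages): a ring
 * must fit within maxsegs pages.  For example, the event queue created
 * by oce_create_eq() with EQ_LEN_1024 entries of EQE_SIZE_4 (4 byte)
 * items needs 4096 bytes, a single page, well under its limit of 8.
 */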
2565 
2566 void
2567 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2568 {
2569 	oce_dma_free(sc, &ring->dma);
2570 	free(ring, M_DEVBUF, 0);
2571 }
2572 
2573 int
2574 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2575     struct oce_pa *pa, int maxsegs)
2576 {
2577 	struct oce_dma_mem *dma = &ring->dma;
2578 	int i;
2579 
2580 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2581 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2582 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2583 		return (0);
2584 	}
2585 
2586 	if (dma->map->dm_nsegs > maxsegs) {
2587 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2588 		return (0);
2589 	}
2590 
2591 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2592 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2593 
2594 	for (i = 0; i < dma->map->dm_nsegs; i++)
2595 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2596 
2597 	return (dma->map->dm_nsegs);
2598 }
2599 
2600 static inline void *
2601 oce_ring_get(struct oce_ring *ring)
2602 {
2603 	int index = ring->index;
2604 
2605 	if (++ring->index == ring->nitems)
2606 		ring->index = 0;
2607 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2608 }
2609 
2610 static inline void *
2611 oce_ring_first(struct oce_ring *ring)
2612 {
2613 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2614 }
2615 
2616 static inline void *
2617 oce_ring_next(struct oce_ring *ring)
2618 {
2619 	if (++ring->index == ring->nitems)
2620 		ring->index = 0;
2621 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2622 }
2623 
2624 struct oce_pkt *
2625 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2626 {
2627 	struct oce_pkt *pkt;
2628 
2629 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2630 		return (NULL);
2631 
2632 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2633 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2634 		pool_put(oce_pkt_pool, pkt);
2635 		return (NULL);
2636 	}
2637 
2638 	return (pkt);
2639 }
2640 
2641 void
2642 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2643 {
2644 	if (pkt->map) {
2645 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2646 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2647 	}
2648 	pool_put(oce_pkt_pool, pkt);
2649 }
2650 
2651 static inline struct oce_pkt *
2652 oce_pkt_get(struct oce_pkt_list *lst)
2653 {
2654 	struct oce_pkt *pkt;
2655 
2656 	pkt = SIMPLEQ_FIRST(lst);
2657 	if (pkt == NULL)
2658 		return (NULL);
2659 
2660 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2661 
2662 	return (pkt);
2663 }
2664 
2665 static inline void
2666 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2667 {
2668 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2669 }
2670 
2671 /**
2672  * @brief Wait for FW to become ready and reset it
2673  * @param sc		software handle to the device
2674  */
2675 int
2676 oce_init_fw(struct oce_softc *sc)
2677 {
2678 	struct ioctl_common_function_reset cmd;
2679 	uint32_t reg;
2680 	int err = 0, tmo = 60000;
2681 
2682 	/* read semaphore CSR */
2683 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2684 
2685 	/* if host is ready then wait for fw ready else send POST */
2686 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2687 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2688 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2689 	}
2690 
2691 	/* wait for FW to become ready */
2692 	for (;;) {
2693 		if (--tmo == 0)
2694 			break;
2695 
2696 		DELAY(1000);
2697 
2698 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2699 		if (reg & MPU_EP_SEM_ERROR) {
2700 			printf(": POST failed: %#x\n", reg);
2701 			return (ENXIO);
2702 		}
2703 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2704 			/* reset FW */
2705 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2706 				memset(&cmd, 0, sizeof(cmd));
2707 				err = oce_cmd(sc, SUBSYS_COMMON,
2708 				    OPCODE_COMMON_FUNCTION_RESET,
2709 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2710 			}
2711 			return (err);
2712 		}
2713 	}
2714 
2715 	printf(": POST timed out: %#x\n", reg);
2716 
2717 	return (ENXIO);
2718 }
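
/*
 * Timing note (illustrative): the POST poll above runs up to 60000
 * iterations with DELAY(1000), i.e. 1 ms per step, so the firmware
 * gets roughly 60 seconds to reach POST_STAGE_ARMFW_READY before the
 * driver gives up with ENXIO.
 */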
2719 
2720 static inline int
2721 oce_mbox_wait(struct oce_softc *sc)
2722 {
2723 	int i;
2724 
2725 	for (i = 0; i < 20000; i++) {
2726 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2727 			return (0);
2728 		DELAY(100);
2729 	}
2730 	return (ETIMEDOUT);
2731 }
2732 
2733 /**
2734  * @brief Mailbox dispatch
2735  * @param sc		software handle to the device
2736  */
2737 int
2738 oce_mbox_dispatch(struct oce_softc *sc)
2739 {
2740 	uint32_t pa, reg;
2741 	int err;
2742 
2743 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2744 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2745 
2746 	if ((err = oce_mbox_wait(sc)) != 0)
2747 		goto out;
2748 
2749 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2750 
2751 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2752 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2753 
2754 	if ((err = oce_mbox_wait(sc)) != 0)
2755 		goto out;
2756 
2757 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2758 
2759 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2760 
2761 	if ((err = oce_mbox_wait(sc)) != 0)
2762 		goto out;
2763 
2764 out:
2765 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2766 	return (err);
2767 }
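
/*
 * Worked example, illustrative only: the bootstrap mailbox address is
 * delivered in two doorbell writes.  For a hypothetical 16-byte aligned
 * DVA of 0x456789000, the first write carries the high bits,
 * 0x456789000 >> 34 == 0x1, and the second the low bits,
 * (0x456789000 >> 4) & 0x3fffffff == 0x05678900, each shifted into
 * position by PD_MPU_MBOX_DB_ADDR_SHIFT.
 */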
2768 
2769 /**
2770  * @brief Function to initialize the hw with host endian information
2771  * @param sc		software handle to the device
2772  * @returns		0 on success, ETIMEDOUT on failure
2773  */
2774 int
2775 oce_mbox_init(struct oce_softc *sc)
2776 {
2777 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2778 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2779 
2780 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2781 		return (0);
2782 
2783 	/* Endian Signature */
2784 	*ptr++ = 0xff;
2785 	*ptr++ = 0x12;
2786 	*ptr++ = 0x34;
2787 	*ptr++ = 0xff;
2788 	*ptr++ = 0xff;
2789 	*ptr++ = 0x56;
2790 	*ptr++ = 0x78;
2791 	*ptr = 0xff;
2792 
2793 	return (oce_mbox_dispatch(sc));
2794 }
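
/*
 * Illustration: in memory the signature written above reads
 * ff 12 34 ff ff 56 78 ff.  The asymmetric middle bytes (0x12 0x34,
 * 0x56 0x78) presumably let the firmware infer the host's byte order
 * before it parses any structured command.
 */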
2795 
2796 int
2797 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2798     void *payload, int length)
2799 {
2800 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2801 	struct oce_mbx *mbx = &bmbx->mbx;
2802 	struct mbx_hdr *hdr;
2803 	caddr_t epayload = NULL;
2804 	int err;
2805 
2806 	if (length > OCE_MBX_PAYLOAD)
2807 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2808 	if (length > OCE_MAX_PAYLOAD)
2809 		return (EINVAL);
2810 
2811 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2812 
2813 	memset(mbx, 0, sizeof(struct oce_mbx));
2814 
2815 	mbx->payload_length = length;
2816 
2817 	if (epayload) {
2818 		mbx->flags = OCE_MBX_F_SGE;
2819 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2820 		memcpy(epayload, payload, length);
2821 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2822 		mbx->pld.sgl[0].length = length;
2823 		hdr = (struct mbx_hdr *)epayload;
2824 	} else {
2825 		mbx->flags = OCE_MBX_F_EMBED;
2826 		memcpy(mbx->pld.data, payload, length);
2827 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2828 	}
2829 
2830 	hdr->subsys = subsys;
2831 	hdr->opcode = opcode;
2832 	hdr->version = version;
2833 	hdr->length = length - sizeof(*hdr);
2834 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2835 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2836 	else
2837 		hdr->timeout = OCE_MBX_TIMEOUT;
2838 
2839 	if (epayload)
2840 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2841 
2842 	err = oce_mbox_dispatch(sc);
2843 	if (err == 0) {
2844 		if (epayload) {
2845 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2846 			memcpy(payload, epayload, length);
2847 		} else
2848 			memcpy(payload, &mbx->pld.data, length);
2849 	} else
2850 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2851 		    "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2852 		    opcode, version, epayload ? "ext " : "",
2853 		    length);
2854 	return (err);
2855 }
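
/*
 * Usage sketch (illustrative): payloads up to OCE_MBX_PAYLOAD are
 * embedded directly in the bootstrap mailbox; anything larger, up to
 * OCE_MAX_PAYLOAD, is passed via a single SGE pointing at sc_pld.  A
 * typical embedded call looks like:
 *
 *	struct mbx_get_common_fw_version cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FW_VERSION,
 *	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
 */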
2856 
2857 /**
2858  * @brief	Firmware will send gracious notifications during
2859  *		attach only after sending first mcc command. We
2860  *		use MCC queue only for getting async and mailbox
2861  *		for sending cmds. So to get gracious notifications
2862  *		at least send one dummy command on mcc.
2863  */
2864 void
2865 oce_first_mcc(struct oce_softc *sc)
2866 {
2867 	struct oce_mbx *mbx;
2868 	struct oce_mq *mq = sc->sc_mq;
2869 	struct mbx_hdr *hdr;
2870 	struct mbx_get_common_fw_version *cmd;
2871 
2872 	mbx = oce_ring_get(mq->ring);
2873 	memset(mbx, 0, sizeof(struct oce_mbx));
2874 
2875 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2876 
2877 	hdr = &cmd->hdr;
2878 	hdr->subsys = SUBSYS_COMMON;
2879 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2880 	hdr->version = OCE_MBX_VER_V0;
2881 	hdr->timeout = OCE_MBX_TIMEOUT;
2882 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2883 
2884 	mbx->flags = OCE_MBX_F_EMBED;
2885 	mbx->payload_length = sizeof(*cmd);
2886 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2887 	    BUS_DMASYNC_PREWRITE);
2888 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2889 }
2890 
2891 int
2892 oce_get_fw_config(struct oce_softc *sc)
2893 {
2894 	struct mbx_common_query_fw_config cmd;
2895 	int err;
2896 
2897 	memset(&cmd, 0, sizeof(cmd));
2898 
2899 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2900 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2901 	if (err)
2902 		return (err);
2903 
2904 	sc->sc_port = cmd.params.rsp.port_id;
2905 	sc->sc_fmode = cmd.params.rsp.function_mode;
2906 
2907 	return (0);
2908 }
2909 
2910 int
2911 oce_check_native_mode(struct oce_softc *sc)
2912 {
2913 	struct mbx_common_set_function_cap cmd;
2914 	int err;
2915 
2916 	memset(&cmd, 0, sizeof(cmd));
2917 
2918 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2919 	    CAP_BE3_NATIVE_ERX_API;
2920 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2921 
2922 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2923 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2924 	if (err)
2925 		return (err);
2926 
2927 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2928 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2929 
2930 	return (0);
2931 }
2932 
2933 /**
2934  * @brief Function for creating a network interface.
2935  * @param sc		software handle to the device
2936  * @returns		0 on success, error otherwise
2937  */
2938 int
2939 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2940 {
2941 	struct mbx_create_common_iface cmd;
2942 	uint32_t caps, caps_en;
2943 	int err = 0;
2944 
2945 	/* interface capabilities to give device when creating interface */
2946 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2947 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2948 	    MBX_RX_IFACE_RSS;
2949 
2950 	/* capabilities to enable by default (others set dynamically) */
2951 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2952 
2953 	if (!IS_XE201(sc)) {
2954 		/* LANCER A0 workaround */
2955 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2956 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2957 	}
2958 
2959 	/* enable capabilities controlled via driver startup parameters */
2960 	if (sc->sc_rss_enable)
2961 		caps_en |= MBX_RX_IFACE_RSS;
2962 
2963 	memset(&cmd, 0, sizeof(cmd));
2964 
2965 	cmd.params.req.version = 0;
2966 	cmd.params.req.cap_flags = htole32(caps);
2967 	cmd.params.req.enable_flags = htole32(caps_en);
2968 	if (macaddr != NULL) {
2969 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2970 		cmd.params.req.mac_invalid = 0;
2971 	} else
2972 		cmd.params.req.mac_invalid = 1;
2973 
2974 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2975 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2976 	if (err)
2977 		return (err);
2978 
2979 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2980 
2981 	if (macaddr != NULL)
2982 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2983 
2984 	return (0);
2985 }
2986 
2987 /**
2988  * @brief Function to send the mbx command to configure vlan
2989  * @param sc 		software handle to the device
2990  * @param vtags		array of vlan tags
2991  * @param nvtags	number of elements in array
2992  * @param untagged	boolean TRUE/FALSE
2993  * @param promisc	flag to enable/disable VLAN promiscuous mode
2994  * @returns		0 on success, EIO on failure
2995  */
2996 int
2997 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
2998     int untagged, int promisc)
2999 {
3000 	struct mbx_common_config_vlan cmd;
3001 
3002 	memset(&cmd, 0, sizeof(cmd));
3003 
3004 	cmd.params.req.if_id = sc->sc_if_id;
3005 	cmd.params.req.promisc = promisc;
3006 	cmd.params.req.untagged = untagged;
3007 	cmd.params.req.num_vlans = nvtags;
3008 
3009 	if (!promisc)
3010 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3011 			nvtags * sizeof(struct normal_vlan));
3012 
3013 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3014 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3015 }
3016 
3017 /**
3018  * @brief Function to set flow control capability in the hardware
3019  * @param sc 		software handle to the device
3020  * @param flags		flow control flags to set
3021  * @returns		0 on success, EIO on failure
3022  */
3023 int
3024 oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3025 {
3026 	struct mbx_common_get_set_flow_control cmd;
3027 	int err;
3028 
3029 	memset(&cmd, 0, sizeof(cmd));
3030 
3031 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3032 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3033 
3034 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3035 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3036 	if (err)
3037 		return (err);
3038 
3039 	memset(&cmd, 0, sizeof(cmd));
3040 
3041 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3042 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3043 	if (err)
3044 		return (err);
3045 
3046 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3047 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3048 
3049 	return (0);
3050 }
3051 
3052 #ifdef OCE_RSS
3053 /**
3054  * @brief Function to configure RSS in the hardware
3055  * @param sc 		software handle to the device
3056  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3057  * @returns		0 on success, EIO on failure
3058  */
3059 int
3060 oce_config_rss(struct oce_softc *sc, int enable)
3061 {
3062 	struct mbx_config_nic_rss cmd;
3063 	uint8_t *tbl = &cmd.params.req.cputable;
3064 	int i, j;
3065 
3066 	memset(&cmd, 0, sizeof(cmd));
3067 
3068 	if (enable)
3069 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3070 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3071 	cmd.params.req.flush = OCE_FLUSH;
3072 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3073 
3074 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3075 
3076 	/*
3077 	 * Initialize the RSS CPU indirection table.
3078 	 *
3079 	 * The table is used to choose the queue to place incoming packets.
3080 	 * Incoming packets are hashed.  The lowest bits in the hash result
3081 	 * are used as the index into the CPU indirection table.
3082 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3083 	 * create.  Based on the CPU ID, the receive completion is routed to
3084 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3085 	 * on the default (0) CQ).
3086 	 */
3087 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3088 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3089 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3090 	}
3091 	if (i > 0)
3092 		cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3093 	else
3094 		return (ENXIO);
3095 
3096 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3097 	    &cmd, sizeof(cmd)));
3098 }
3099 #endif	/* OCE_RSS */
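
/*
 * Worked example (illustrative): with four RSS queues the indirection
 * table gets four entries and cpu_tbl_sz_log2 = ilog2(4) = 2, so the
 * two lowest bits of a packet's RSS hash select the completion queue.
 */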
3100 
3101 /**
3102  * @brief Function for hardware update multicast filter
3103  * @param sc		software handle to the device
3104  * @param multi		table of multicast addresses
3105  * @param naddr		number of multicast addresses in the table
3106  */
3107 int
3108 oce_update_mcast(struct oce_softc *sc,
3109     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3110 {
3111 	struct mbx_set_common_iface_multicast cmd;
3112 
3113 	memset(&cmd, 0, sizeof(cmd));
3114 
3115 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3116 	cmd.params.req.num_mac = htole16(naddr);
3117 	cmd.params.req.if_id = sc->sc_if_id;
3118 
3119 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3120 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3121 }
3122 
3123 /**
3124  * @brief RXF function to enable/disable device promiscuous mode
3125  * @param sc		software handle to the device
3126  * @param enable	enable/disable flag
3127  * @returns		0 on success, EIO on failure
3128  * @note
3129  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated on Lancer.
3130  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3131  */
3132 int
3133 oce_set_promisc(struct oce_softc *sc, int enable)
3134 {
3135 	struct mbx_set_common_iface_rx_filter cmd;
3136 	struct iface_rx_filter_ctx *req;
3137 
3138 	memset(&cmd, 0, sizeof(cmd));
3139 
3140 	req = &cmd.params.req;
3141 	req->if_id = sc->sc_if_id;
3142 
3143 	if (enable)
3144 		req->iface_flags = req->iface_flags_mask =
3145 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3146 
3147 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3148 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3149 }
3150 
3151 /**
3152  * @brief Function to query the link status from the hardware
3153  * @param sc 		software handle to the device
3154  * @note		link attributes are updated in the softc
3155  * @returns		0 on success, EIO on failure
3156  */
3157 int
3158 oce_get_link_status(struct oce_softc *sc)
3159 {
3160 	struct mbx_query_common_link_config cmd;
3161 	int err;
3162 
3163 	memset(&cmd, 0, sizeof(cmd));
3164 
3165 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3166 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3167 	if (err)
3168 		return (err);
3169 
3170 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3171 	    NTWK_LOGICAL_LINK_UP);
3172 
3173 	if (cmd.params.rsp.mac_speed < 5)
3174 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3175 	else
3176 		sc->sc_link_speed = 0;
3177 
3178 	return (0);
3179 }
3180 
3181 void
3182 oce_macaddr_set(struct oce_softc *sc)
3183 {
3184 	uint32_t old_pmac_id = sc->sc_pmac_id;
3185 	int status = 0;
3186 
3187 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3188 		return;
3189 
3190 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3191 	if (!status)
3192 		status = oce_macaddr_del(sc, old_pmac_id);
3193 	else
3194 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3195 }
3196 
3197 int
3198 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3199 {
3200 	struct mbx_query_common_iface_mac cmd;
3201 	int err;
3202 
3203 	memset(&cmd, 0, sizeof(cmd));
3204 
3205 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3206 	cmd.params.req.permanent = 1;
3207 
3208 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3209 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3210 	if (err == 0)
3211 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3212 		    ETHER_ADDR_LEN);
3213 	return (err);
3214 }
3215 
3216 int
3217 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3218 {
3219 	struct mbx_add_common_iface_mac cmd;
3220 	int err;
3221 
3222 	memset(&cmd, 0, sizeof(cmd));
3223 
3224 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3225 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3226 
3227 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3228 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3229 	if (err == 0)
3230 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3231 	return (err);
3232 }
3233 
3234 int
3235 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3236 {
3237 	struct mbx_del_common_iface_mac cmd;
3238 
3239 	memset(&cmd, 0, sizeof(cmd));
3240 
3241 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3242 	cmd.params.req.pmac_id = htole32(pmac);
3243 
3244 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3245 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3246 }
3247 
3248 int
3249 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3250 {
3251 	struct mbx_create_nic_rq cmd;
3252 	int err, npages;
3253 
3254 	memset(&cmd, 0, sizeof(cmd));
3255 
3256 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3257 	    nitems(cmd.params.req.pages));
3258 	if (!npages) {
3259 		printf("%s: failed to load the rq ring\n", __func__);
3260 		return (1);
3261 	}
3262 
3263 	if (IS_XE201(sc)) {
3264 		cmd.params.req.frag_size = rq->fragsize / 2048;
3265 		cmd.params.req.page_size = 1;
3266 	} else
3267 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3268 	cmd.params.req.num_pages = npages;
3269 	cmd.params.req.cq_id = rq->cq->id;
3270 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3271 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3272 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3273 
3274 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3275 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3276 	    sizeof(cmd));
3277 	if (err)
3278 		return (err);
3279 
3280 	rq->id = letoh16(cmd.params.rsp.rq_id);
3281 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3282 
3283 	return (0);
3284 }
3285 
3286 int
3287 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3288 {
3289 	struct mbx_create_nic_wq cmd;
3290 	int err, npages;
3291 
3292 	memset(&cmd, 0, sizeof(cmd));
3293 
3294 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3295 	    nitems(cmd.params.req.pages));
3296 	if (!npages) {
3297 		printf("%s: failed to load the wq ring\n", __func__);
3298 		return (1);
3299 	}
3300 
3301 	if (IS_XE201(sc))
3302 		cmd.params.req.if_id = sc->sc_if_id;
3303 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3304 	cmd.params.req.num_pages = npages;
3305 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3306 	cmd.params.req.cq_id = htole16(wq->cq->id);
3307 	cmd.params.req.ulp_num = 1;
3308 
3309 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3310 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3311 	    sizeof(cmd));
3312 	if (err)
3313 		return (err);
3314 
3315 	wq->id = letoh16(cmd.params.rsp.wq_id);
3316 
3317 	return (0);
3318 }
3319 
3320 int
3321 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3322 {
3323 	struct mbx_create_common_mq_ex cmd;
3324 	union oce_mq_ext_ctx *ctx;
3325 	int err, npages;
3326 
3327 	memset(&cmd, 0, sizeof(cmd));
3328 
3329 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3330 	    nitems(cmd.params.req.pages));
3331 	if (!npages) {
3332 		printf("%s: failed to load the mq ring\n", __func__);
3333 		return (-1);
3334 	}
3335 
3336 	ctx = &cmd.params.req.context;
3337 	ctx->v0.num_pages = npages;
3338 	ctx->v0.cq_id = mq->cq->id;
3339 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3340 	ctx->v0.valid = 1;
3341 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
3342 	/* Subscribe to all async events, incl. Link State (bit 1) and Group 5 (bit 5) */
3343 
3344 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3345 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3346 	if (err)
3347 		return (err);
3348 
3349 	mq->id = letoh16(cmd.params.rsp.mq_id);
3350 
3351 	return (0);
3352 }
3353 
3354 int
3355 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3356 {
3357 	struct mbx_create_common_eq cmd;
3358 	int err, npages;
3359 
3360 	memset(&cmd, 0, sizeof(cmd));
3361 
3362 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3363 	    nitems(cmd.params.req.pages));
3364 	if (!npages) {
3365 		printf("%s: failed to load the eq ring\n", __func__);
3366 		return (-1);
3367 	}
3368 
3369 	cmd.params.req.ctx.num_pages = htole16(npages);
3370 	cmd.params.req.ctx.valid = 1;
3371 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3372 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3373 	cmd.params.req.ctx.armed = 0;
3374 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3375 
3376 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3377 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3378 	if (err)
3379 		return (err);
3380 
3381 	eq->id = letoh16(cmd.params.rsp.eq_id);
3382 
3383 	return (0);
3384 }
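
/*
 * Illustrative arithmetic: the context "count" field encodes the ring
 * length as a power-of-two multiple of 256 entries, so for the EQ
 * created with EQ_LEN_1024 items, count = ilog2(1024 / 256) = 2.  The
 * CQ contexts built in oce_new_cq() below use the same encoding.
 */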
3385 
3386 int
3387 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3388 {
3389 	struct mbx_create_common_cq cmd;
3390 	union oce_cq_ctx *ctx;
3391 	int err, npages;
3392 
3393 	memset(&cmd, 0, sizeof(cmd));
3394 
3395 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3396 	    nitems(cmd.params.req.pages));
3397 	if (!npages) {
3398 		printf("%s: failed to load the cq ring\n", __func__);
3399 		return (-1);
3400 	}
3401 
3402 	ctx = &cmd.params.req.cq_ctx;
3403 
3404 	if (IS_XE201(sc)) {
3405 		ctx->v2.num_pages = htole16(npages);
3406 		ctx->v2.page_size = 1; /* for 4K */
3407 		ctx->v2.eventable = cq->eventable;
3408 		ctx->v2.valid = 1;
3409 		ctx->v2.count = ilog2(cq->nitems / 256);
3410 		ctx->v2.nodelay = cq->nodelay;
3411 		ctx->v2.coalesce_wm = cq->ncoalesce;
3412 		ctx->v2.armed = 0;
3413 		ctx->v2.eq_id = cq->eq->id;
3414 		if (ctx->v2.count == 3) {
3415 			if (cq->nitems > (4*1024)-1)
3416 				ctx->v2.cqe_count = (4*1024)-1;
3417 			else
3418 				ctx->v2.cqe_count = cq->nitems;
3419 		}
3420 	} else {
3421 		ctx->v0.num_pages = htole16(npages);
3422 		ctx->v0.eventable = cq->eventable;
3423 		ctx->v0.valid = 1;
3424 		ctx->v0.count = ilog2(cq->nitems / 256);
3425 		ctx->v0.nodelay = cq->nodelay;
3426 		ctx->v0.coalesce_wm = cq->ncoalesce;
3427 		ctx->v0.armed = 0;
3428 		ctx->v0.eq_id = cq->eq->id;
3429 	}
3430 
3431 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3432 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3433 	    sizeof(cmd));
3434 	if (err)
3435 		return (err);
3436 
3437 	cq->id = letoh16(cmd.params.rsp.cq_id);
3438 
3439 	return (0);
3440 }
3441 
3442 int
3443 oce_init_stats(struct oce_softc *sc)
3444 {
3445 	union cmd {
3446 		struct mbx_get_nic_stats_v0	_be2;
3447 		struct mbx_get_nic_stats	_be3;
3448 		struct mbx_get_pport_stats	_xe201;
3449 	};
3450 
3451 	sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3452 	if (sc->sc_statcmd == NULL) {
3453 		printf("%s: failed to allocate statistics command block\n",
3454 		    sc->sc_dev.dv_xname);
3455 		return (-1);
3456 	}
3457 	return (0);
3458 }
3459 
3460 int
3461 oce_update_stats(struct oce_softc *sc)
3462 {
3463 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3464 	uint64_t rxe, txe;
3465 	int err;
3466 
3467 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3468 		err = oce_stats_be2(sc, &rxe, &txe);
3469 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3470 		err = oce_stats_be3(sc, &rxe, &txe);
3471 	else
3472 		err = oce_stats_xe(sc, &rxe, &txe);
3473 	if (err)
3474 		return (err);
3475 
3476 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3477 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3478 	sc->sc_rx_errors = rxe;
3479 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3480 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3481 	sc->sc_tx_errors = txe;
3482 
3483 	return (0);
3484 }
3485 
3486 int
3487 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3488 {
3489 	struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3490 	struct oce_pmem_stats *ms;
3491 	struct oce_rxf_stats_v0 *rs;
3492 	struct oce_port_rxf_stats_v0 *ps;
3493 	int err;
3494 
3495 	memset(cmd, 0, sizeof(*cmd));
3496 
3497 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3498 	    cmd, sizeof(*cmd));
3499 	if (err)
3500 		return (err);
3501 
3502 	ms = &cmd->params.rsp.stats.pmem;
3503 	rs = &cmd->params.rsp.stats.rxf;
3504 	ps = &rs->port[sc->sc_port];
3505 
3506 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3507 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3508 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3509 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3510 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3511 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3512 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3513 	    ps->rx_alignment_symbol_errors;
3514 	if (sc->sc_if_id)
3515 		*rxe += rs->port1_jabber_events;
3516 	else
3517 		*rxe += rs->port0_jabber_events;
3518 	*rxe += ms->eth_red_drops;
3519 
3520 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3521 
3522 	return (0);
3523 }
3524 
3525 int
3526 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3527 {
3528 	struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3529 	struct oce_pmem_stats *ms;
3530 	struct oce_rxf_stats_v1 *rs;
3531 	struct oce_port_rxf_stats_v1 *ps;
3532 	int err;
3533 
3534 	memset(cmd, 0, sizeof(*cmd));
3535 
3536 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3537 	    cmd, sizeof(*cmd));
3538 	if (err)
3539 		return (err);
3540 
3541 	ms = &cmd->params.rsp.stats.pmem;
3542 	rs = &cmd->params.rsp.stats.rxf;
3543 	ps = &rs->port[sc->sc_port];
3544 
3545 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3546 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3547 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3548 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3549 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3550 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3551 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3552 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3553 	*rxe += ms->eth_red_drops;
3554 
3555 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3556 
3557 	return (0);
3558 }
3559 
3560 int
3561 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3562 {
3563 	struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3564 	struct oce_pport_stats *pps;
3565 	int err;
3566 
3567 	memset(cmd, 0, sizeof(*cmd));
3568 
3569 	cmd->params.req.reset_stats = 0;
3570 	cmd->params.req.port_number = sc->sc_if_id;
3571 
3572 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3573 	    OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3574 	if (err)
3575 		return (err);
3576 
3577 	pps = &cmd->params.rsp.pps;
3578 
3579 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3580 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3581 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3582 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3583 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3584 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3585 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3586 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3587 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3588 
3589 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3590 
3591 	return (0);
3592 }
3593