xref: /openbsd/sys/dev/pci/if_myx.c (revision d89ec533)
1 /*	$OpenBSD: if_myx.c,v 1.115 2021/02/08 08:18:45 mpi Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21  */
22 
23 #include "bpfilter.h"
24 #include "kstat.h"
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/malloc.h>
33 #include <sys/pool.h>
34 #include <sys/timeout.h>
35 #include <sys/device.h>
36 #include <sys/proc.h>
37 #include <sys/queue.h>
38 #include <sys/rwlock.h>
39 #include <sys/kstat.h>
40 
41 #include <machine/bus.h>
42 #include <machine/intr.h>
43 
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 
48 #if NBPFILTER > 0
49 #include <net/bpf.h>
50 #endif
51 
52 #include <netinet/in.h>
53 #include <netinet/if_ether.h>
54 
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcivar.h>
57 #include <dev/pci/pcidevs.h>
58 
59 #include <dev/pci/if_myxreg.h>
60 
#ifdef MYX_DEBUG
/*
 * Debug message categories for DPRINTF.
 *
 * NOTE(review): MYXDBG_CMD (2<<0 == 2) and MYXDBG_INTR (3<<0 == 3) are
 * not distinct single-bit flags; 3 overlaps the INIT and CMD bits.  This
 * is harmless with the default myx_debug = MYXDBG_ALL, but worth
 * confirming if selective debugging is ever wanted.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(2<<0)	/* commands */
#define MYXDBG_INTR	(3<<0)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

/* Autoconf device name, e.g. "myx0", for diagnostics. */
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
76 
/*
 * A single contiguous DMA-safe allocation: the dmamap, its backing
 * segment and the kernel mapping of it (see myx_dmamem_alloc/_free).
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded dmamap */
	bus_dma_segment_t	 mxm_seg;	/* backing segment */
	int			 mxm_nsegs;	/* segment count (allocated with nsegs=1) */
	size_t			 mxm_size;	/* size of the allocation in bytes */
	caddr_t			 mxm_kva;	/* kernel virtual address of the mapping */
};
84 
85 struct pool *myx_mcl_pool;
86 
/* Per-descriptor state for tx and rx rings: the mbuf and its dmamap. */
struct myx_slot {
	bus_dmamap_t		 ms_map;	/* dmamap the mbuf is loaded into */
	struct mbuf		*ms_m;		/* mbuf owned by this slot, or NULL */
};
91 
/*
 * One receive ring.  The hardware has two: MYX_RXSMALL for MCLBYTES
 * clusters and MYX_RXBIG for jumbo-sized clusters; each ring refills
 * itself via the mrr_refill timeout when allocation fails.
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* backpointer for the refill timeout */
	struct timeout		 mrr_refill;	/* retry timer for mbuf allocation */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting for SIOCGIFRXR */
	struct myx_slot		*mrr_slots;	/* per-descriptor mbuf/dmamap state */
	u_int32_t		 mrr_offset;	/* device offset of this ring's descriptors */
	u_int			 mrr_running;	/* nonzero while the ring is active */
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void); /* cluster allocator (small or big) */
};
103 
/* Interface life-cycle state, used to coordinate up/down with interrupts. */
enum myx_state {
	MYX_S_OFF = 0,		/* not initialized */
	MYX_S_RUNNING,		/* interface is up and passing traffic */
	MYX_S_DOWN		/* shutdown in progress */
};
109 
/* Per-device softc. */
struct myx_softc {
	struct device		 sc_dev;	/* must be first: autoconf glue */
	struct arpcom		 sc_ac;		/* ethernet state, incl. ifnet */

	/* PCI attachment */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	/* bus_space mapping of BAR0 (registers + SRAM window) */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	struct myx_dmamem	 sc_zerodma;	/* zero pad source for short frames */
	struct myx_dmamem	 sc_cmddma;	/* command/response mailbox page */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target (see myx_rdma) */

	/* device status block, DMAed to by the firmware */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* nonzero if using INTx, not MSI */
	void			*sc_irqh;	/* established interrupt handle */
	/* firmware-reported register offsets for interrupt handling */
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	/* interrupt (completion) queue */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	/* transmit state */
	bus_size_t		 sc_tx_boundary;	/* dma boundary, 4096 or 2048 */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;

	struct rwlock		 sc_sff_lock;	/* serializes SIOCGIFSFFPAGE */

#if NKSTAT > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_kstat;
#endif
};
171 
172 #define MYX_RXSMALL_SIZE	MCLBYTES
173 #define MYX_RXBIG_SIZE		(MYX_MTU - \
174     (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
175 
176 int	 myx_match(struct device *, void *, void *);
177 void	 myx_attach(struct device *, struct device *, void *);
178 int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
179 int	 myx_query(struct myx_softc *sc, char *, size_t);
180 u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
181 void	 myx_attachhook(struct device *);
182 int	 myx_loadfirmware(struct myx_softc *, const char *);
183 int	 myx_probe_firmware(struct myx_softc *);
184 
185 void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
186 void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
187 
188 #if defined(__LP64__)
189 #define _myx_bus_space_write bus_space_write_raw_region_8
190 typedef u_int64_t myx_bus_t;
191 #else
192 #define _myx_bus_space_write bus_space_write_raw_region_4
193 typedef u_int32_t myx_bus_t;
194 #endif
195 #define myx_bus_space_write(_sc, _o, _a, _l) \
196     _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
197 
198 int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
199 int	 myx_boot(struct myx_softc *, u_int32_t);
200 
201 int	 myx_rdma(struct myx_softc *, u_int);
202 int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
203 	    bus_size_t, u_int align);
204 void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
205 int	 myx_media_change(struct ifnet *);
206 void	 myx_media_status(struct ifnet *, struct ifmediareq *);
207 void	 myx_link_state(struct myx_softc *, u_int32_t);
208 void	 myx_watchdog(struct ifnet *);
209 int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
210 int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
211 void	 myx_up(struct myx_softc *);
212 void	 myx_iff(struct myx_softc *);
213 void	 myx_down(struct myx_softc *);
214 int	 myx_get_sffpage(struct myx_softc *, struct if_sffpage *);
215 
216 void	 myx_start(struct ifqueue *);
217 void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
218 	    u_int32_t, u_int);
219 int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
220 int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
221 int	 myx_intr(void *);
222 void	 myx_rxeof(struct myx_softc *);
223 void	 myx_txeof(struct myx_softc *, u_int32_t);
224 
225 int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
226 			    struct mbuf *(*)(void));
227 struct mbuf *		myx_mcl_small(void);
228 struct mbuf *		myx_mcl_big(void);
229 
230 int			myx_rx_init(struct myx_softc *, int, bus_size_t);
231 int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
232 void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
233 void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
234 
235 int			myx_tx_init(struct myx_softc *, bus_size_t);
236 void			myx_tx_empty(struct myx_softc *);
237 void			myx_tx_free(struct myx_softc *);
238 
239 void			myx_refill(void *);
240 
241 #if NKSTAT > 0
242 void			myx_kstat_attach(struct myx_softc *);
243 void			myx_kstat_start(struct myx_softc *);
244 void			myx_kstat_stop(struct myx_softc *);
245 #endif
246 
/* Autoconf glue and the PCI IDs this driver attaches to. */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
258 
259 int
260 myx_match(struct device *parent, void *match, void *aux)
261 {
262 	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
263 }
264 
/*
 * Attach: map BAR0, read the MAC address and part number out of the
 * firmware string table, hook up the interrupt, and defer the rest of
 * initialization (firmware load needs the filesystem) to
 * myx_attachhook() at mountroot time.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	rw_init(&sc->sc_sff_lock, "myxsff");

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Wire both rx rings to their cluster allocators and refill timers. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt: prefer MSI, fall back to INTx. */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Non-fatal: the device still works with default PCIe settings. */
	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
326 
/*
 * Tune the PCIe Device Control register: request a 4096-byte maximum
 * payload size ((fls(4096) - 8) == 5, the MPS field encoding for 4096)
 * and enable relaxed ordering (ERO).  Returns -1 if the device has no
 * PCIe capability, 0 otherwise.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	/* Only rewrite the register if the current settings differ. */
	if ((dcsr & mask) != dc) {
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
349 
350 u_int
351 myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
352 {
353 	u_int		i, j;
354 	u_int8_t	digit;
355 
356 	memset(lladdr, 0, ETHER_ADDR_LEN);
357 	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
358 		if (mac[i] >= '0' && mac[i] <= '9')
359 			digit = mac[i] - '0';
360 		else if (mac[i] >= 'A' && mac[i] <= 'F')
361 			digit = mac[i] - 'A' + 10;
362 		else if (mac[i] >= 'a' && mac[i] <= 'f')
363 			digit = mac[i] - 'a' + 10;
364 		else
365 			continue;
366 		if ((j & 1) == 0)
367 			digit <<= 4;
368 		lladdr[j++/2] |= digit;
369 	}
370 
371 	return (i);
372 }
373 
/*
 * Walk the firmware's "specs" string table in card SRAM to recover the
 * MAC address ("MAC=...") and the part number ("PC=...").  Returns
 * non-zero only if the firmware header pointer lies outside the mapped
 * register window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* The big-endian header offset lives at a fixed SRAM location. */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/*
	 * The table is a sequence of NUL-terminated "KEY=value" strings,
	 * terminated by an empty string.
	 */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* Advance to the NUL ending the current string. */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
415 
/*
 * Load a firmware image from the filesystem, validate its header
 * (size, type, version string), copy it into the card's SRAM in
 * 256-byte chunks and boot it.  Returns 0 on success, 1 on any
 * failure (error already printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* The image embeds a big-endian pointer to its own header. */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* Refuse anything that is not an ethernet firmware we know. */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
470 
/*
 * Deferred attach, run at mountroot time so loadfirmware(9) can read
 * the firmware from the filesystem: set up the shared mbuf cluster
 * pool, allocate the command mailbox, load and select firmware,
 * establish the interrupt and finally attach the network interface.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);

		m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY,
		    "myxmcl");
		pool_cache_init(myx_mcl_pool);
	}

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* Default for the aligned firmware; myx_probe_firmware may lower it. */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

#if NKSTAT > 0
	myx_kstat_attach(sc);
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real send queue length is set in myx_up once the ring size is known */
	ifq_set_maxlen(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
556 
/*
 * Decide whether the already-loaded aligned firmware can stay, or
 * whether the unaligned variant must be loaded instead.  Narrow PCIe
 * links (width <= 4) always take the aligned firmware; otherwise run
 * the firmware's unaligned DMA read/write/read-write self-tests against
 * a scratch buffer and fall back to MYXFW_UNALIGNED if any test fails.
 * Returns 0 on success, 1 on a fatal error.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Negotiated link width is bits 25:20 of the LCSR. */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 encodes the transfer size and test direction. */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	/* The unaligned firmware requires a smaller tx dma boundary. */
	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
637 
/*
 * Read len bytes from device memory at off into ptr, with a read
 * barrier first so the raw region read observes current device state.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
645 
/*
 * Write len bytes from ptr to device memory at off, then issue a write
 * barrier so the data is pushed out before subsequent accesses.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
653 
/*
 * Allocate, map and load a single-segment, zeroed DMA buffer of the
 * given size and alignment into mxm.  Returns 0 on success, 1 on
 * failure with all partially acquired resources released (standard
 * goto-unwind in reverse acquisition order).
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
685 
/*
 * Release a buffer obtained from myx_dmamem_alloc: unload, unmap, free
 * and destroy, in strict reverse order of acquisition.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
694 
/*
 * Issue a command to the firmware through the MYX_CMD mailbox and
 * busy-wait (up to ~20ms in 1ms steps) for the response to be DMAed
 * into the shared command page.  On MYXCMD_OK the 32-bit response
 * payload is returned via *r when r is non-NULL.  Returns the raw
 * firmware result code (0xffffffff on timeout).
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;

	/* Tell the firmware where to DMA its response. */
	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* Prime the result with a sentinel the firmware will overwrite. */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): cmd %u completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmd, i, result, data, data);

	if (result == MYXCMD_OK) {
		if (r != NULL)
			*r = data;
	}

	return (result);
}
740 
/*
 * Hand off to the firmware image previously written to SRAM: send a
 * boot command through the MYX_BOOT mailbox and busy-wait (up to
 * ~200ms) for the card to acknowledge by writing 0xffffffff into the
 * shared status word.  The first 8 bytes of the image are skipped
 * (bc_copyto/bc_length).  Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	/* The card acks by DMAing 0xffffffff over this word. */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
784 
/*
 * Enable or disable the firmware's dummy RDMA engine through the
 * MYX_RDMA mailbox, busy-waiting (up to ~20ms) for the 0xffffffff
 * acknowledgement in the shared status word.  Returns 0 on success,
 * 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	/* The card acks by DMAing 0xffffffff over this word. */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
835 
/*
 * ifmedia change callback.  The media on this hardware is fixed, so
 * there is nothing to reconfigure; report success unconditionally.
 */
int
myx_media_change(struct ifnet *ifp)
{
	(void)ifp;

	return (0);
}
842 
/*
 * ifmedia status callback: report the link state from the
 * firmware-maintained status block.  The DMA map is synced around the
 * read of ms_linkstate so the CPU sees the device's latest update.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	/* No valid status while the interface is down. */
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Propagate any link change to the stack as a side effect. */
	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
872 
/*
 * Translate the firmware's big-endian link status word into an
 * if_link_state and notify the stack only when the state actually
 * changes, updating if_baudrate (10Gb/s when up, 0 when down).
 */
void
myx_link_state(struct myx_softc *sc, u_int32_t sts)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	int			 link_state = LINK_STATE_DOWN;

	if (betoh32(sts) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}
888 
/*
 * Interface watchdog callback; no transmit-timeout recovery is
 * implemented for this hardware.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
894 
/*
 * Interface ioctl handler.  Runs at splnet; handles up/down
 * transitions, media, rx-ring info and SFF (SFP+ module) page reads,
 * deferring everything else to ether_ioctl().  ENETRESET from any
 * handler collapses into a multicast-filter reload via myx_iff().
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize slow i2c access; interruptible by signals. */
		error = rw_enter(&sc->sc_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = myx_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&sc->sc_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
953 
954 int
955 myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
956 {
957 	struct if_rxring_info ifr[2];
958 
959 	memset(ifr, 0, sizeof(ifr));
960 
961 	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
962 	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
963 	strlcpy(ifr[0].ifr_name, "small", sizeof(ifr[0].ifr_name));
964 
965 	ifr[1].ifr_size = MYX_RXBIG_SIZE;
966 	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
967 	strlcpy(ifr[1].ifr_name, "large", sizeof(ifr[1].ifr_name));
968 
969 	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
970 }
971 
/*
 * Read one byte from an i2c device (e.g. an SFP+ module) via firmware
 * commands: kick off a 1-byte MYXCMD_I2C_READ at (addr, off), then
 * poll MYXCMD_I2C_BYTE up to 50 times with 1ms delays until the
 * firmware stops reporting busy.  Returns 0 with the byte in *byte,
 * EIO on a firmware error, or EBUSY on timeout.
 */
static int
myx_i2c_byte(struct myx_softc *sc, uint8_t addr, uint8_t off, uint8_t *byte)
{
	struct myx_cmd		mc;
	int			result;
	uint32_t		r;
	unsigned int		ms;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(0); /* get 1 byte */
	mc.mc_data1 = htobe32((addr << 8) | off);
	result = myx_cmd(sc, MYXCMD_I2C_READ, &mc, NULL);
	if (result != 0)
		return (EIO);

	for (ms = 0; ms < 50; ms++) {
		memset(&mc, 0, sizeof(mc));
		mc.mc_data0 = htobe32(off);
		result = myx_cmd(sc, MYXCMD_I2C_BYTE, &mc, &r);
		switch (result) {
		case MYXCMD_OK:
			*byte = r;
			return (0);
		case MYXCMD_ERR_BUSY:
			/* transfer still in progress; retry after 1ms */
			break;
		default:
			return (EIO);
		}

		delay(1000);
	}

	return (EBUSY);
}
1006 
/*
 * SIOCGIFSFFPAGE handler: read a full SFF page from the transceiver
 * byte by byte over i2c.  For the EEPROM address, first verify that
 * the module's page-select byte (offset 127) already matches the
 * requested page, since the firmware interface offers no way to
 * switch pages; return ENXIO otherwise.
 */
int
myx_get_sffpage(struct myx_softc *sc, struct if_sffpage *sff)
{
	unsigned int		i;
	int			result;

	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
		uint8_t page;

		result = myx_i2c_byte(sc, IFSFF_ADDR_EEPROM, 127, &page);
		if (result != 0)
			return (result);

		if (page != sff->sff_page)
			return (ENXIO);
	}

	for (i = 0; i < sizeof(sff->sff_data); i++) {
		result = myx_i2c_byte(sc, sff->sff_addr,
		    i, &sff->sff_data[i]);
		if (result != 0)
			return (result);
	}

	return (0);
}
1033 
/*
 * Bring the interface up: reset the firmware, allocate every DMA
 * resource the NIC needs (zero pad, command pad, interrupt queue,
 * status block, tx/rx rings), tell the firmware where they live, and
 * start the device.  On any failure the goto ladder at the bottom
 * unwinds exactly the resources set up so far, in reverse order.
 */
void
myx_up(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		mc;
	bus_dmamap_t		map;
	size_t			size;
	u_int			maxpkt;
	u_int32_t		r;

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		return;
	}

	/*
	 * The zero pad is a block of zeroes the chip DMAs from to pad
	 * runt tx frames (see myx_write_txd_tail()).
	 */
	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
	    64, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate zero pad memory\n",
		    DEVNAME(sc));
		return;
	}
	memset(sc->sc_zerodma.mxm_kva, 0, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate pad DMA memory\n",
		    DEVNAME(sc));
		goto free_zero;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
		goto free_pad;
	}

	/* The firmware dictates the ring geometry; query it. */
	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_tx_ring_prod = 0;
	sc->sc_tx_ring_cons = 0;
	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
	sc->sc_tx_count = 0;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count - 1);

	/* Allocate Interrupt Queue */

	/* twice the rx capacity so rx completions can never overrun it */
	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
	sc->sc_intrq_idx = 0;

	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
	    size, MYXALIGN_DATA) != 0) {
		goto free_pad;
	}
	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
	map = sc->sc_intrq_dma.mxm_map;
	memset(sc->sc_intrq, 0, size);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(size);
	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
		printf("%s: failed to set intrq size\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
		printf("%s: failed to set intrq address\n", DEVNAME(sc));
		goto free_intrq;
	}

	/*
	 * get interrupt offsets
	 */

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
	    &sc->sc_irqclaimoff) != 0) {
		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
	    &sc->sc_irqdeassertoff) != 0) {
		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
	    &sc->sc_irqcoaloff) != 0) {
		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Set an appropriate interrupt coalescing period */
	r = htobe32(MYX_IRQCOALDELAY);
	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));

	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
		printf("%s: failed to configure flow control\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Fetch the device-memory offsets of the tx and two rx rings. */
	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
	    &sc->sc_tx_ring_offset) != 0) {
		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Allocate Interrupt Data */
	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
		printf("%s: failed to allocate status DMA memory\n",
		    DEVNAME(sc));
		goto free_intrq;
	}
	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
	map = sc->sc_sts_dma.mxm_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(sizeof(struct myx_status));
	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
		goto free_sts;
	}

	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(maxpkt);
	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
		goto free_sts;
	}

	if (myx_tx_init(sc, maxpkt) != 0)
		goto free_sts;

	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
		goto free_tx_ring;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
		goto free_rx_ring_small;

	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
		goto empty_rx_ring_small;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
		goto free_rx_ring_big;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set small buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(16384);
	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set big buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	/* flip to RUNNING before IFUP so the intr handler accepts events */
	sc->sc_state = MYX_S_RUNNING;

	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
		printf("%s: failed to start the device\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	myx_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);
	ifq_restart(&ifp->if_snd);

#if NKSTAT > 0
	timeout_add_sec(&sc->sc_kstat_tmo, 1);
#endif

	return;

empty_rx_ring_big:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
free_rx_ring_big:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
empty_rx_ring_small:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_rx_ring_small:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_tx_ring:
	myx_tx_free(sc);
free_sts:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_sts_dma);
free_intrq:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);
free_pad:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &sc->sc_paddma);

	/* reset so the firmware stops touching the memory we just freed */
	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}
free_zero:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1300 
1301 int
1302 myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1303 {
1304 	struct myx_cmd		 mc;
1305 
1306 	memset(&mc, 0, sizeof(mc));
1307 	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1308 	    addr[2] << 8 | addr[3]);
1309 	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1310 
1311 	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1312 		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1313 		return (-1);
1314 	}
1315 	return (0);
1316 }
1317 
1318 void
1319 myx_iff(struct myx_softc *sc)
1320 {
1321 	struct myx_cmd		mc;
1322 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1323 	struct ether_multi	*enm;
1324 	struct ether_multistep	step;
1325 	u_int8_t *addr;
1326 
1327 	CLR(ifp->if_flags, IFF_ALLMULTI);
1328 
1329 	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1330 	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1331 		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1332 		return;
1333 	}
1334 
1335 	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1336 		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1337 		return;
1338 	}
1339 
1340 	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1341 		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1342 		return;
1343 	}
1344 
1345 	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1346 	    sc->sc_ac.ac_multirangecnt > 0) {
1347 		SET(ifp->if_flags, IFF_ALLMULTI);
1348 		return;
1349 	}
1350 
1351 	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1352 	while (enm != NULL) {
1353 		addr = enm->enm_addrlo;
1354 
1355 		memset(&mc, 0, sizeof(mc));
1356 		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1357 		    addr[2] << 8 | addr[3]);
1358 		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1359 		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1360 			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1361 			return;
1362 		}
1363 
1364 		ETHER_NEXT_MULTI(step, enm);
1365 	}
1366 
1367 	memset(&mc, 0, sizeof(mc));
1368 	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1369 		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1370 		return;
1371 	}
1372 }
1373 
/*
 * Bring the interface down: ask the firmware for IFDOWN, wait for the
 * interrupt handler to observe the resulting link-down status update
 * and flip sc_state to MYX_S_OFF, then reset the chip and release all
 * ring and DMA resources allocated by myx_up().
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* snapshot the linkdown counter so myx_intr() can detect the bump */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* publish the state change before issuing IFDOWN */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* myx_intr() sets MYX_S_OFF and wakes us when the chip is quiet */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown", 0);
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

#if NKSTAT > 0
	myx_kstat_stop(sc);
	sc->sc_sts = NULL;
#endif

	/* the sleep/sync dance above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1452 
/*
 * Write all tx descriptors for a packet except the first one: one
 * descriptor per DMA segment after segment 0, plus (for runt frames)
 * an extra descriptor pointing at the shared zero pad so the wire
 * frame reaches the 60-byte minimum.  The first descriptor is written
 * separately by the caller, after these, so the chip never sees a
 * partially posted packet.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = ms->ms_map;
	int				i;

	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		/* i is one past the last data segment here */
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1485 
/*
 * Transmit start routine.  Dequeues packets, DMA-loads them into tx
 * slots, then posts descriptors to the NIC in two passes: all packets
 * after the first are written in full, and the first packet's leading
 * descriptor is written last (its final word after a write barrier) so
 * the chip cannot start on the batch until every descriptor is visible.
 */
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet			*ifp = ifq->ifq_if;
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	/* first pass: dequeue and DMA-load as many packets as will fit */
	for (;;) {
		/* +1 for the possible runt pad descriptor */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was loaded */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* second pass: post every packet except the first to the ring */
	for (;;) {
		/* skip over the slots used by the previous packet */
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its last word */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* the final word makes the descriptor valid to the chip */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1618 
/*
 * DMA-load an mbuf chain into a tx slot.  If the chain has more
 * segments than the map allows, defragment it once and retry.
 * Returns 0 on success (slot owns the mbuf), 1 on failure (caller
 * still owns and must free the mbuf).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1642 
/*
 * Interrupt handler.  Consumes the DMA'd status block: deasserts a
 * legacy INTx if needed, harvests tx completions and rx events, claims
 * the interrupt back to the firmware, and handles link/state updates
 * (including the MYX_S_DOWN -> MYX_S_OFF handshake with myx_down()).
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		/* not our interrupt; rearm the mapping and bail */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	if (sc->sc_intx) {
		/* legacy INTx needs an explicit deassert write */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* spin until the firmware stops updating the status block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	/* claim the interrupt back to the firmware */
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			/* IFDOWN took effect: release myx_down() */
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1717 
1718 void
1719 myx_refill(void *xmrr)
1720 {
1721 	struct myx_rx_ring *mrr = xmrr;
1722 	struct myx_softc *sc = mrr->mrr_softc;
1723 
1724 	myx_rx_fill(sc, mrr);
1725 
1726 	if (mrr->mrr_prod == mrr->mrr_cons)
1727 		timeout_add(&mrr->mrr_refill, 1);
1728 }
1729 
/*
 * Reclaim completed tx slots.  done_count is the firmware's running
 * transmit-done counter; slots are retired until the driver's own
 * counter catches up.  Also advances the ring-descriptor consumer by
 * the number of descriptors each packet used (segments + runt pad).
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* descriptors consumed: one per segment, +1 for runt pad */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* freed ring space may unblock a stalled transmit queue */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1765 
/*
 * Harvest received packets.  The firmware posts (length) entries into
 * the interrupt queue; each entry identifies a filled buffer on the
 * small or big rx ring depending on the length.  Collected mbufs are
 * handed to the stack in one batch, then the rings are replenished.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;
	int livelocked;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates the queue of completed entries */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it reads as empty next time around */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* length tells us which ring the firmware took it from */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* the chip wrote ETHER_ALIGN bytes in for IP alignment */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if (livelocked)
			if_rxr_livelocked(&mrr->mrr_rxr);

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* couldn't refill anything; fall back to the timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}
}
1830 
/*
 * Fill up to `slots` rx slots with fresh buffers and post their
 * descriptors to the NIC.  The first slot's descriptor is deliberately
 * written last (after a write barrier) so the chip only sees the batch
 * once every descriptor is in place.  Returns the number of requested
 * slots that could NOT be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1876 
/*
 * Set up one rx ring: allocate the slot array, create a DMA map per
 * slot, and poison every on-chip descriptor with all-ones (an invalid
 * address) so the NIC won't DMA into unposted slots.  Returns 0 or an
 * errno; on failure all maps created so far are destroyed.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	/* 0xff... is an invalid descriptor; mark every slot unposted */
	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ms->ms_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	/* keep 2 slots in reserve for the livelock accounting */
	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
	return (rv);
}
1918 
1919 int
1920 myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1921 {
1922 	u_int slots;
1923 
1924 	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1925 	if (slots == 0)
1926 		return (1);
1927 
1928 	slots = myx_rx_fill_slots(sc, mrr, slots);
1929 	if (slots > 0)
1930 		if_rxr_put(&mrr->mrr_rxr, slots);
1931 
1932 	return (0);
1933 }
1934 
1935 void
1936 myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1937 {
1938 	struct myx_slot *ms;
1939 
1940 	while (mrr->mrr_cons != mrr->mrr_prod) {
1941 		ms = &mrr->mrr_slots[mrr->mrr_cons];
1942 
1943 		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1944 			mrr->mrr_cons = 0;
1945 
1946 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1947 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1948 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1949 		m_freem(ms->ms_m);
1950 	}
1951 
1952 	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1953 }
1954 
1955 void
1956 myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1957 {
1958 	struct myx_slot *ms;
1959 	int i;
1960 
1961 	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1962 		ms = &mrr->mrr_slots[i];
1963 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1964 	}
1965 
1966 	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1967 }
1968 
1969 struct mbuf *
1970 myx_mcl_small(void)
1971 {
1972 	struct mbuf *m;
1973 
1974 	m = MCLGETL(NULL, M_DONTWAIT, MYX_RXSMALL_SIZE);
1975 	if (m == NULL)
1976 		return (NULL);
1977 
1978 	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1979 
1980 	return (m);
1981 }
1982 
1983 struct mbuf *
1984 myx_mcl_big(void)
1985 {
1986 	struct mbuf *m;
1987 	void *mcl;
1988 
1989 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1990 	if (m == NULL)
1991 		return (NULL);
1992 
1993 	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1994 	if (mcl == NULL) {
1995 		m_free(m);
1996 		return (NULL);
1997 	}
1998 
1999 	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
2000 	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
2001 
2002 	return (m);
2003 }
2004 
2005 int
2006 myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
2007     struct mbuf *(*mclget)(void))
2008 {
2009 	struct mbuf *m;
2010 	int rv;
2011 
2012 	m = (*mclget)();
2013 	if (m == NULL)
2014 		return (ENOMEM);
2015 
2016 	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
2017 	if (rv != 0) {
2018 		m_freem(m);
2019 		return (rv);
2020 	}
2021 
2022 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2023 	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2024 
2025 	ms->ms_m = m;
2026 
2027 	return (0);
2028 }
2029 
/*
 * Set up the tx ring: allocate the slot array and create one DMA map
 * per slot, each limited to sc_tx_nsegs segments of at most `size`
 * bytes within the chip's tx boundary.  Returns 0 or an errno; on
 * failure all maps created so far are destroyed.
 */
int
myx_tx_init(struct myx_softc *sc, bus_size_t size)
{
	struct myx_slot *ms;
	int rv;
	int i;

	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (sc->sc_tx_slots == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		ms = &sc->sc_tx_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
		    sc->sc_tx_boundary, sc->sc_tx_boundary,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ms->ms_map);
		if (rv != 0)
			goto destroy;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &sc->sc_tx_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
	return (rv);
}
2064 
2065 void
2066 myx_tx_empty(struct myx_softc *sc)
2067 {
2068 	struct myx_slot *ms;
2069 	u_int cons = sc->sc_tx_cons;
2070 	u_int prod = sc->sc_tx_prod;
2071 
2072 	while (cons != prod) {
2073 		ms = &sc->sc_tx_slots[cons];
2074 
2075 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2076 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2077 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2078 		m_freem(ms->ms_m);
2079 
2080 		if (++cons >= sc->sc_tx_ring_count)
2081 			cons = 0;
2082 	}
2083 
2084 	sc->sc_tx_cons = cons;
2085 }
2086 
2087 void
2088 myx_tx_free(struct myx_softc *sc)
2089 {
2090 	struct myx_slot *ms;
2091 	int i;
2092 
2093 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2094 		ms = &sc->sc_tx_slots[i];
2095 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2096 	}
2097 
2098 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2099 }
2100 
2101 #if NKSTAT > 0
/*
 * Indexes into the kstat counter array; must stay in sync with the
 * myx_counters[] table below.
 */
enum myx_counters {
	myx_stat_dropped_pause,
	myx_stat_dropped_ucast_filtered,
	myx_stat_dropped_bad_crc32,
	myx_stat_dropped_bad_phy,
	myx_stat_dropped_mcast_filtered,
	myx_stat_send_done,
	myx_stat_dropped_link_overflow,
	myx_stat_dropped_link,
	myx_stat_dropped_runt,
	myx_stat_dropped_overrun,
	myx_stat_dropped_no_small_bufs,
	myx_stat_dropped_no_large_bufs,

	myx_ncounters,
};
2118 
/* maps a kstat name to its byte offset within struct myx_status */
struct myx_counter {
	const char		*mc_name;
	unsigned int		 mc_offset;
};
2123 
#define MYX_C_OFF(_f)	offsetof(struct myx_status, _f)

/*
 * Status-block fields exported as kstats, indexed by enum myx_counters.
 *
 * NOTE(review): the "bad crc32" entry reads ms_dropped_pause, the same
 * field as "pause drops" — presumably a copy-paste slip for a crc32
 * drop field in struct myx_status; verify against if_myxreg.h.
 */
static const struct myx_counter myx_counters[myx_ncounters] = {
	{ "pause drops",	MYX_C_OFF(ms_dropped_pause), },
	{ "ucast filtered",	MYX_C_OFF(ms_dropped_unicast), },
	{ "bad crc32",		MYX_C_OFF(ms_dropped_pause), },
	{ "bad phy",		MYX_C_OFF(ms_dropped_phyerr), },
	{ "mcast filtered",	MYX_C_OFF(ms_dropped_mcast), },
	{ "tx done",		MYX_C_OFF(ms_txdonecnt), },
	{ "rx discards",	MYX_C_OFF(ms_dropped_linkoverflow), },
	{ "rx errors",		MYX_C_OFF(ms_dropped_linkerror), },
	{ "rx undersize",	MYX_C_OFF(ms_dropped_runt), },
	{ "rx oversize",	MYX_C_OFF(ms_dropped_overrun), },
	{ "small discards",	MYX_C_OFF(ms_dropped_smallbufunderrun), },
	{ "large discards",	MYX_C_OFF(ms_dropped_bigbufunderrun), },
};
2140 
/* exported kstat data: one 64-bit kv per counter plus rdma tags */
struct myx_kstats {
	struct kstat_kv		mk_counters[myx_ncounters];
	struct kstat_kv		mk_rdma_tags_available;
};

/* one raw 32-bit snapshot of the hardware counters */
struct myx_kstat_cache {
	uint32_t		mkc_counters[myx_ncounters];
};

/*
 * Double-buffered snapshots: mks_gen flips between the two caches so
 * myx_kstat_read() can accumulate 32-bit deltas into 64-bit totals
 * across hardware counter wraps.
 */
struct myx_kstat_state {
	struct myx_kstat_cache	mks_caches[2];
	unsigned int		mks_gen;
};
2154 
/*
 * kstat read callback.  Takes a fresh 32-bit snapshot of the hardware
 * counters from the DMA'd status block and adds the delta against the
 * previous snapshot into the exported 64-bit totals, so counter wraps
 * between reads are handled.  Called with sc_kstat_mtx held.
 */
int
myx_kstat_read(struct kstat *ks)
{
	struct myx_softc *sc = ks->ks_softc;
	struct myx_kstats *mk = ks->ks_data;
	struct myx_kstat_state *mks = ks->ks_ptr;
	unsigned int gen = (mks->mks_gen++ & 1);
	struct myx_kstat_cache *omkc = &mks->mks_caches[gen];
	struct myx_kstat_cache *nmkc = &mks->mks_caches[!gen];
	unsigned int i = 0;

	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;

	/* the interface is down; the status block is gone */
	if (sc->sc_sts == NULL)
		return (0); /* counters are valid, just not updated */

	getnanouptime(&ks->ks_updated);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; i < myx_ncounters; i++) {
		const struct myx_counter *mc = &myx_counters[i];
		nmkc->mkc_counters[i] =
		    bemtoh32((uint32_t *)((uint8_t *)sts + mc->mc_offset));
	}

	kstat_kv_u32(&mk->mk_rdma_tags_available) =
	    bemtoh32(&sts->ms_rdmatags_available);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* accumulate new-minus-old deltas into the 64-bit totals */
	for (i = 0; i < myx_ncounters; i++) {
		kstat_kv_u64(&mk->mk_counters[i]) +=
		    nmkc->mkc_counters[i] - omkc->mkc_counters[i];
	}

	return (0);
}
2194 
2195 void
2196 myx_kstat_tick(void *arg)
2197 {
2198 	struct myx_softc *sc = arg;
2199 
2200 	if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
2201 		return;
2202 
2203 	timeout_add_sec(&sc->sc_kstat_tmo, 4);
2204 
2205 	if (!mtx_enter_try(&sc->sc_kstat_mtx))
2206 		return;
2207 
2208 	myx_kstat_read(sc->sc_kstat);
2209 
2210 	mtx_leave(&sc->sc_kstat_mtx);
2211 }
2212 
2213 void
2214 myx_kstat_start(struct myx_softc *sc)
2215 {
2216 	if (sc->sc_kstat == NULL)
2217 		return;
2218 
2219 	myx_kstat_tick(sc);
2220 }
2221 
2222 void
2223 myx_kstat_stop(struct myx_softc *sc)
2224 {
2225 	struct myx_kstat_state *mks;
2226 
2227 	if (sc->sc_kstat == NULL)
2228 		return;
2229 
2230 	timeout_del_barrier(&sc->sc_kstat_tmo);
2231 
2232 	mks = sc->sc_kstat->ks_ptr;
2233 
2234 	mtx_enter(&sc->sc_kstat_mtx);
2235 	memset(mks, 0, sizeof(*mks));
2236 	mtx_leave(&sc->sc_kstat_mtx);
2237 }
2238 
2239 void
2240 myx_kstat_attach(struct myx_softc *sc)
2241 {
2242 	struct kstat *ks;
2243 	struct myx_kstats *mk;
2244 	struct myx_kstat_state *mks;
2245 	unsigned int i;
2246 
2247 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
2248 	timeout_set(&sc->sc_kstat_tmo, myx_kstat_tick, sc);
2249 
2250 	ks = kstat_create(DEVNAME(sc), 0, "myx-stats", 0, KSTAT_T_KV, 0);
2251 	if (ks == NULL)
2252 		return;
2253 
2254 	mk = malloc(sizeof(*mk), M_DEVBUF, M_WAITOK|M_ZERO);
2255 	for (i = 0; i < myx_ncounters; i++) {
2256 		const struct myx_counter *mc = &myx_counters[i];
2257 
2258 		kstat_kv_unit_init(&mk->mk_counters[i], mc->mc_name,
2259 		    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS);
2260 	}
2261 	kstat_kv_init(&mk->mk_rdma_tags_available, "rdma tags free",
2262 	    KSTAT_KV_T_UINT32);
2263 
2264 	mks = malloc(sizeof(*mks), M_DEVBUF, M_WAITOK|M_ZERO);
2265 	/* these start at 0 */
2266 
2267 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
2268 	ks->ks_data = mk;
2269 	ks->ks_datalen = sizeof(*mk);
2270 	ks->ks_read = myx_kstat_read;
2271 	ks->ks_ptr = mks;
2272 
2273 	ks->ks_softc = sc;
2274 	sc->sc_kstat = ks;
2275 	kstat_install(ks);
2276 }
2277 #endif /* NKSTAT > 0 */
2278