1 /* $OpenBSD: if_myx.c,v 1.120 2024/05/24 06:02:56 jsg Exp $ */
2
3 /*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23 #include "bpfilter.h"
24 #include "kstat.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/socket.h>
31 #include <sys/malloc.h>
32 #include <sys/pool.h>
33 #include <sys/timeout.h>
34 #include <sys/device.h>
35 #include <sys/rwlock.h>
36 #include <sys/kstat.h>
37
38 #include <machine/bus.h>
39 #include <machine/intr.h>
40
41 #include <net/if.h>
42 #include <net/if_dl.h>
43 #include <net/if_media.h>
44
45 #if NBPFILTER > 0
46 #include <net/bpf.h>
47 #endif
48
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 #include <dev/pci/pcidevs.h>
55
56 #include <dev/pci/if_myxreg.h>
57
58 #ifdef MYX_DEBUG
59 #define MYXDBG_INIT (1<<0) /* chipset initialization */
60 #define MYXDBG_CMD (2<<0) /* commands */
61 #define MYXDBG_INTR (3<<0) /* interrupts */
62 #define MYXDBG_ALL 0xffff /* enable all debugging messages */
63 int myx_debug = MYXDBG_ALL;
64 #define DPRINTF(_lvl, _arg...) do { \
65 if (myx_debug & (_lvl)) \
66 printf(_arg); \
67 } while (0)
68 #else
69 #define DPRINTF(_lvl, arg...)
70 #endif
71
72 #define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
73
/*
 * A single DMA-safe memory allocation: the dmamap, its backing
 * segment, and the kernel virtual mapping of that segment.
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* bus dmamap for the region */
	bus_dma_segment_t	 mxm_seg;	/* backing physical segment */
	int			 mxm_nsegs;	/* segment count (always 1 here) */
	size_t			 mxm_size;	/* size of the region in bytes */
	caddr_t			 mxm_kva;	/* kernel virtual address */
};
81
/* cluster pool shared by all myx(4) instances, sized for big rx buffers */
struct pool *myx_mcl_pool;
83
/* One ring slot: the dmamap and the mbuf currently loaded into it. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
88
/*
 * State for one receive ring; there is one ring for small and one for
 * big clusters (see MYX_RXSMALL/MYX_RXBIG).
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* back pointer to the device */
	struct timeout		 mrr_refill;	/* deferred refill (myx_refill) */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting */
	struct myx_slot		*mrr_slots;	/* per-descriptor slot array */
	u_int32_t		 mrr_offset;	/* chip offset for this ring */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void); /* cluster allocator for this ring's buffer size */
};
100
/* interface run state (stored volatile in the softc) */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
106
/*
 * Per-device softc, laid out roughly in setup order: PCI glue, DMA
 * resources, interrupt state, rx/tx rings, media, optional kstats.
 */
struct myx_softc {
	struct device		 sc_dev;	/* generic device glue; must be first */
	struct arpcom		 sc_ac;		/* ethernet common: ifnet + lladdr */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of the mapped BAR */

	struct myx_dmamem	 sc_zerodma;	/* 64 byte zero pad (see myx_up) */
	struct myx_dmamem	 sc_cmddma;	/* command/boot response buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target (see myx_rdma) */

	struct myx_dmamem	 sc_sts_dma;	/* backing store for sc_sts */
	volatile struct myx_status	*sc_sts;	/* status block the chip DMAs to */

	int			 sc_intx;	/* using INTx instead of MSI */
	void			*sc_irqh;	/* established interrupt handle */
	u_int32_t		 sc_irqcoaloff;		/* irq coalescing reg offset */
	u_int32_t		 sc_irqclaimoff;	/* irq claim reg offset */
	u_int32_t		 sc_irqdeassertoff;	/* irq deassert reg offset */

	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;	/* interrupt/completion queue */
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	bus_size_t		 sc_tx_boundary; /* 4096 for aligned fw, 2048 for unaligned */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;

	struct rwlock		 sc_sff_lock;	/* serializes SIOCGIFSFFPAGE */

#if NKSTAT > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_kstat;
#endif
};
168
169 #define MYX_RXSMALL_SIZE MCLBYTES
170 #define MYX_RXBIG_SIZE (MYX_MTU - \
171 (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
172
173 int myx_match(struct device *, void *, void *);
174 void myx_attach(struct device *, struct device *, void *);
175 int myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
176 int myx_query(struct myx_softc *sc, char *, size_t);
177 u_int myx_ether_aton(char *, u_int8_t *, u_int);
178 void myx_attachhook(struct device *);
179 int myx_loadfirmware(struct myx_softc *, const char *);
180 int myx_probe_firmware(struct myx_softc *);
181
182 void myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
183 void myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
184
185 #if defined(__LP64__)
186 #define _myx_bus_space_write bus_space_write_raw_region_8
187 typedef u_int64_t myx_bus_t;
188 #else
189 #define _myx_bus_space_write bus_space_write_raw_region_4
190 typedef u_int32_t myx_bus_t;
191 #endif
192 #define myx_bus_space_write(_sc, _o, _a, _l) \
193 _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
194
195 int myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
196 int myx_boot(struct myx_softc *, u_int32_t);
197
198 int myx_rdma(struct myx_softc *, u_int);
199 int myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
200 bus_size_t, u_int align);
201 void myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
202 int myx_media_change(struct ifnet *);
203 void myx_media_status(struct ifnet *, struct ifmediareq *);
204 void myx_link_state(struct myx_softc *, u_int32_t);
205 void myx_watchdog(struct ifnet *);
206 int myx_ioctl(struct ifnet *, u_long, caddr_t);
207 int myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
208 void myx_up(struct myx_softc *);
209 void myx_iff(struct myx_softc *);
210 void myx_down(struct myx_softc *);
211 int myx_get_sffpage(struct myx_softc *, struct if_sffpage *);
212
213 void myx_start(struct ifqueue *);
214 void myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
215 u_int32_t, u_int);
216 int myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
217 int myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
218 int myx_intr(void *);
219 void myx_rxeof(struct myx_softc *);
220 void myx_txeof(struct myx_softc *, u_int32_t);
221
222 int myx_buf_fill(struct myx_softc *, struct myx_slot *,
223 struct mbuf *(*)(void));
224 struct mbuf * myx_mcl_small(void);
225 struct mbuf * myx_mcl_big(void);
226
227 int myx_rx_init(struct myx_softc *, int, bus_size_t);
228 int myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
229 void myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
230 void myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
231
232 int myx_tx_init(struct myx_softc *, bus_size_t);
233 void myx_tx_empty(struct myx_softc *);
234 void myx_tx_free(struct myx_softc *);
235
236 void myx_refill(void *);
237
238 #if NKSTAT > 0
239 void myx_kstat_attach(struct myx_softc *);
240 void myx_kstat_start(struct myx_softc *);
241 void myx_kstat_stop(struct myx_softc *);
242 #endif
243
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
const struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};
250
/* PCI IDs of the supported Lanai-Z8E chips */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
255
256 int
myx_match(struct device * parent,void * match,void * aux)257 myx_match(struct device *parent, void *match, void *aux)
258 {
259 return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
260 }
261
/*
 * Map the device, read its MAC address and part number, and hook up
 * the interrupt.  The rest of the setup needs firmware loaded from
 * disk and is deferred to myx_attachhook() via config_mountroot().
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	rw_init(&sc->sc_sff_lock, "myxsff");

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* set up both rx rings and their refill timeouts */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt: prefer MSI, fall back to INTx */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* failure here is not fatal, just suboptimal */
	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
323
324 int
myx_pcie_dc(struct myx_softc * sc,struct pci_attach_args * pa)325 myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
326 {
327 pcireg_t dcsr;
328 pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
329 pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
330 int reg;
331
332 if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
333 ®, NULL) == 0)
334 return (-1);
335
336 reg += PCI_PCIE_DCSR;
337 dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
338 if ((dcsr & mask) != dc) {
339 CLR(dcsr, mask);
340 SET(dcsr, dc);
341 pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
342 }
343
344 return (0);
345 }
346
347 u_int
myx_ether_aton(char * mac,u_int8_t * lladdr,u_int maxlen)348 myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
349 {
350 u_int i, j;
351 u_int8_t digit;
352
353 memset(lladdr, 0, ETHER_ADDR_LEN);
354 for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
355 if (mac[i] >= '0' && mac[i] <= '9')
356 digit = mac[i] - '0';
357 else if (mac[i] >= 'A' && mac[i] <= 'F')
358 digit = mac[i] - 'A' + 10;
359 else if (mac[i] >= 'a' && mac[i] <= 'f')
360 digit = mac[i] - 'a' + 10;
361 else
362 continue;
363 if ((j & 1) == 0)
364 digit <<= 4;
365 lladdr[j++/2] |= digit;
366 }
367
368 return (i);
369 }
370
/*
 * Read the firmware "specs" strings out of the chip's SRAM and parse
 * them for the MAC address ("MAC=") and part number ("PC=").  Returns
 * non-zero only if the specs header lies outside the mapped BAR.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t offset;
	u_int8_t strings[MYX_STRING_SPECS_SIZE];
	u_int i, len, maxlen;

	/* the word at MYX_HEADER_POS holds the offset of the header */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/*
	 * The specs area is a sequence of NUL-terminated "KEY=value"
	 * strings; an empty string ends the list.  The inner loop
	 * advances i to the end of the current string and the outer
	 * loop's i++ then steps over its NUL.
	 */
	for (i = 0; i < len; i++) {
		maxlen = len - i;	/* bytes left; last string may be unterminated */
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
412
/*
 * Load the named firmware image from disk, sanity-check it, copy it
 * into the chip's SRAM and boot the chip on it.  Returns non-zero on
 * any failure (an error message has already been printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr hdr;
	u_int8_t	*fw;
	size_t		 fwlen;
	u_int32_t	 offset;
	u_int		 i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image stores the offset of its own header */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* only accept ethernet firmware of the version we were built for */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM in 256 byte chunks */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
467
/*
 * Deferred attach, run once the root filesystem is mounted so firmware
 * can be read from disk: set up the shared cluster pool, command DMA
 * memory, firmware, interrupt handler and finally the network
 * interface itself.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);

		m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY,
		    "myxmcl");
		pool_cache_init(myx_mcl_pool);
	}

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* default for the aligned firmware; myx_probe_firmware may lower it */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

#if NKSTAT > 0
	myx_kstat_attach(sc);
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* the real send queue length is set in myx_up() once the
	 * tx ring size is known */
	ifq_init_maxlen(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
553
/*
 * Decide whether the already-loaded aligned firmware is usable.  On
 * narrow (x4 or less) PCIe links it always is.  Otherwise run the
 * chip's unaligned DMA read/write tests against a scratch buffer; if
 * any test fails, fall back to the unaligned firmware and its smaller
 * 2048 byte tx boundary.  Returns non-zero only on hard failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		width = (csr >> 20) & 0x3f;	/* negotiated link width */

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): mc_data2 appears to encode the buffer size and
	 * a test-mode selector (0x10000 read, 0x1 write, 0x10001 both,
	 * matching the printfs below) -- confirm against firmware docs.
	 */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
634
/*
 * Copy len bytes of chip memory at off into ptr, issuing a read
 * barrier first so the access is ordered after preceding operations.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
642
/*
 * Copy len bytes from ptr into chip memory at off, followed by a write
 * barrier so the data is ordered before subsequent operations.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
650
/*
 * Allocate size bytes of zeroed, single-segment DMA memory with the
 * requested alignment, map it into kernel virtual memory and load it
 * into a dmamap.  Returns non-zero on failure with all partial work
 * undone via the unwind labels.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
682
/*
 * Tear down an allocation made by myx_dmamem_alloc(), in reverse
 * order of setup.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
691
/*
 * Issue a command to the firmware and poll the shared command DMA
 * buffer for its response (1ms intervals, up to 20ms).  On MYXCMD_OK
 * the response data word is stored through r if r is non-NULL.
 * Returns the firmware's result code; 0xffffffff means no response
 * arrived in time.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	struct myx_response *mr;
	u_int i;
	u_int32_t result, data;

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* preset the result so we can tell when the chip has replied */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		/* resync before each look at the chip-written response */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): cmd %u completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmd, i, result, data, data);

	if (result == MYXCMD_OK) {
		if (r != NULL)
			*r = data;
	}

	return (result);
}
737
/*
 * Ask the chip's bootstrap to move the freshly uploaded firmware into
 * place (length - 8 bytes copied to offset 8, then jump to offset 0).
 * The chip acknowledges by writing 0xffffffff into the command buffer;
 * poll for that at 1ms intervals, up to 200ms.  Returns non-zero on
 * timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd bc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	u_int32_t *status;
	u_int i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	/* clear the ack word before kicking off the boot */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
781
/*
 * Switch the chip's dummy RDMA on or off.  Acknowledged the same way
 * as myx_boot(): the chip writes 0xffffffff into the command buffer;
 * poll for that at 1ms intervals, up to 20ms.  Returns non-zero on
 * timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd rc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t pad = sc->sc_paddma.mxm_map;
	u_int32_t *status;
	int ret = 1;
	u_int i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	/* clear the ack word before sending the command */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
832
int
myx_media_change(struct ifnet *ifp)
{
	/* The media is fixed; accept the request without doing anything. */
	return (0);
}
839
/*
 * ifmedia status callback.  The link bit comes from the status block
 * the chip DMAs into host memory; when up, the media is reported as
 * full-duplex with rx/tx pause.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc *sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* sync so we observe the chip's most recent status block write */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
869
870 void
myx_link_state(struct myx_softc * sc,u_int32_t sts)871 myx_link_state(struct myx_softc *sc, u_int32_t sts)
872 {
873 struct ifnet *ifp = &sc->sc_ac.ac_if;
874 int link_state = LINK_STATE_DOWN;
875
876 if (betoh32(sts) == MYXSTS_LINKUP)
877 link_state = LINK_STATE_FULL_DUPLEX;
878 if (ifp->if_link_state != link_state) {
879 ifp->if_link_state = link_state;
880 if_link_state_change(ifp);
881 ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
882 IF_Gbps(10) : 0;
883 }
884 }
885
void
myx_watchdog(struct ifnet *ifp)
{
	/* intentionally empty */
}
891
/*
 * Interface ioctl handler.  Runs at splnet; SIOCGIFSFFPAGE
 * additionally takes sc_sff_lock since the transceiver EEPROM is read
 * one byte at a time and each byte may take many milliseconds.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc *sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		error = rw_enter(&sc->sc_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = myx_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&sc->sc_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		/* reprogram the hardware filter while staying up */
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
950
951 int
myx_rxrinfo(struct myx_softc * sc,struct if_rxrinfo * ifri)952 myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
953 {
954 struct if_rxring_info ifr[2];
955
956 memset(ifr, 0, sizeof(ifr));
957
958 ifr[0].ifr_size = MYX_RXSMALL_SIZE;
959 ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
960 strlcpy(ifr[0].ifr_name, "small", sizeof(ifr[0].ifr_name));
961
962 ifr[1].ifr_size = MYX_RXBIG_SIZE;
963 ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
964 strlcpy(ifr[1].ifr_name, "large", sizeof(ifr[1].ifr_name));
965
966 return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
967 }
968
/*
 * Read one byte at offset off from i2c device addr via the firmware:
 * MYXCMD_I2C_READ starts the transfer, then MYXCMD_I2C_BYTE is polled
 * (1ms intervals, up to 50ms) until the firmware has latched the byte.
 * Returns 0 on success, EIO on a command error, EBUSY if the firmware
 * stayed busy.
 */
static int
myx_i2c_byte(struct myx_softc *sc, uint8_t addr, uint8_t off, uint8_t *byte)
{
	struct myx_cmd mc;
	int result;
	uint32_t r;
	unsigned int ms;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(0); /* get 1 byte */
	mc.mc_data1 = htobe32((addr << 8) | off);
	result = myx_cmd(sc, MYXCMD_I2C_READ, &mc, NULL);
	if (result != 0)
		return (EIO);

	for (ms = 0; ms < 50; ms++) {
		memset(&mc, 0, sizeof(mc));
		mc.mc_data0 = htobe32(off);
		result = myx_cmd(sc, MYXCMD_I2C_BYTE, &mc, &r);
		switch (result) {
		case MYXCMD_OK:
			*byte = r;
			return (0);
		case MYXCMD_ERR_BUSY:
			/* transfer still in progress; retry after 1ms */
			break;
		default:
			return (EIO);
		}

		delay(1000);
	}

	return (EBUSY);
}
1003
/*
 * Copy a full SFF page out of the transceiver's i2c EEPROM one byte
 * at a time.  For the 0xa0 EEPROM address, the byte at offset 127 is
 * read first and compared against the requested page (presumably the
 * SFF-8472 page select register -- there is no code here to switch
 * pages), failing with ENXIO on a mismatch.
 */
int
myx_get_sffpage(struct myx_softc *sc, struct if_sffpage *sff)
{
	unsigned int i;
	int result;

	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
		uint8_t page;

		result = myx_i2c_byte(sc, IFSFF_ADDR_EEPROM, 127, &page);
		if (result != 0)
			return (result);

		if (page != sff->sff_page)
			return (ENXIO);
	}

	for (i = 0; i < sizeof(sff->sff_data); i++) {
		result = myx_i2c_byte(sc, sff->sff_addr,
		    i, &sff->sff_data[i]);
		if (result != 0)
			return (result);
	}

	return (0);
}
1030
1031 void
myx_up(struct myx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_cmd mc;
	bus_dmamap_t map;
	size_t size;
	u_int maxpkt;
	u_int32_t r;

	/*
	 * Bring the interface up: reset the firmware, allocate and program
	 * all DMA resources (zero pad, pad, interrupt queue, status block,
	 * tx/rx rings), then tell the chip to start.  On failure, resources
	 * are released in reverse order of acquisition via the goto chain
	 * at the bottom.
	 */
	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		return;
	}

	/* 64 bytes of zeros, used by the tx path to pad runt frames */
	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
	    64, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate zero pad memory\n",
		    DEVNAME(sc));
		return;
	}
	memset(sc->sc_zerodma.mxm_kva, 0, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate pad DMA memory\n",
		    DEVNAME(sc));
		goto free_zero;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
		goto free_pad;
	}

	/* ring sizes come from the firmware in bytes; convert to slots */
	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_tx_ring_prod = 0;
	sc->sc_tx_ring_cons = 0;
	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
	sc->sc_tx_count = 0;
	ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_count - 1);

	/* Allocate Interrupt Queue */

	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
	sc->sc_intrq_idx = 0;

	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
	    size, MYXALIGN_DATA) != 0) {
		goto free_pad;
	}
	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
	map = sc->sc_intrq_dma.mxm_map;
	memset(sc->sc_intrq, 0, size);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(size);
	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
		printf("%s: failed to set intrq size\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
		printf("%s: failed to set intrq address\n", DEVNAME(sc));
		goto free_intrq;
	}

	/*
	 * get interrupt offsets
	 */

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
	    &sc->sc_irqclaimoff) != 0) {
		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
	    &sc->sc_irqdeassertoff) != 0) {
		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
	    &sc->sc_irqcoaloff) != 0) {
		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Set an appropriate interrupt coalescing period */
	r = htobe32(MYX_IRQCOALDELAY);
	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));

	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
		printf("%s: failed to configure flow control\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* fetch the offsets of the rings inside the chip's memory window */
	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
	    &sc->sc_tx_ring_offset) != 0) {
		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Allocate Interrupt Data */
	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
		printf("%s: failed to allocate status DMA memory\n",
		    DEVNAME(sc));
		goto free_intrq;
	}
	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
	map = sc->sc_sts_dma.mxm_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(sizeof(struct myx_status));
	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
		goto free_sts;
	}

	/* largest frame the chip has to accept: mtu + ethernet + vlan tag */
	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(maxpkt);
	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
		goto free_sts;
	}

	if (myx_tx_init(sc, maxpkt) != 0)
		goto free_sts;

	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
		goto free_tx_ring;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
		goto free_rx_ring_small;

	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
		goto empty_rx_ring_small;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
		goto free_rx_ring_big;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set small buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(16384);
	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set big buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	/* mark running before SET_IFUP so the interrupt handler is live */
	sc->sc_state = MYX_S_RUNNING;

	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
		printf("%s: failed to start the device\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	myx_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);
	ifq_restart(&ifp->if_snd);

#if NKSTAT > 0
	timeout_add_sec(&sc->sc_kstat_tmo, 1);
#endif

	return;

empty_rx_ring_big:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
free_rx_ring_big:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
empty_rx_ring_small:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_rx_ring_small:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_tx_ring:
	myx_tx_free(sc);
free_sts:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_sts_dma);
free_intrq:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);
free_pad:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &sc->sc_paddma);

	/* reset so the firmware stops DMAing to the memory we just freed */
	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}
free_zero:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1297
1298 int
myx_setlladdr(struct myx_softc * sc,u_int32_t cmd,u_int8_t * addr)1299 myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1300 {
1301 struct myx_cmd mc;
1302
1303 memset(&mc, 0, sizeof(mc));
1304 mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1305 addr[2] << 8 | addr[3]);
1306 mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1307
1308 if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1309 printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1310 return (-1);
1311 }
1312 return (0);
1313 }
1314
1315 void
myx_iff(struct myx_softc * sc)1316 myx_iff(struct myx_softc *sc)
1317 {
1318 struct myx_cmd mc;
1319 struct ifnet *ifp = &sc->sc_ac.ac_if;
1320 struct ether_multi *enm;
1321 struct ether_multistep step;
1322 u_int8_t *addr;
1323
1324 CLR(ifp->if_flags, IFF_ALLMULTI);
1325
1326 if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1327 MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1328 printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1329 return;
1330 }
1331
1332 if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1333 printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1334 return;
1335 }
1336
1337 if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1338 printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1339 return;
1340 }
1341
1342 if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1343 sc->sc_ac.ac_multirangecnt > 0) {
1344 SET(ifp->if_flags, IFF_ALLMULTI);
1345 return;
1346 }
1347
1348 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1349 while (enm != NULL) {
1350 addr = enm->enm_addrlo;
1351
1352 memset(&mc, 0, sizeof(mc));
1353 mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1354 addr[2] << 8 | addr[3]);
1355 mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1356 if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1357 printf("%s: failed to join mcast group\n", DEVNAME(sc));
1358 return;
1359 }
1360
1361 ETHER_NEXT_MULTI(step, enm);
1362 }
1363
1364 memset(&mc, 0, sizeof(mc));
1365 if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1366 printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1367 return;
1368 }
1369 }
1370
void
myx_down(struct myx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	struct myx_cmd mc;
	int s;
	int ring;

	/*
	 * Stop the interface: ask the firmware to go down, wait for
	 * myx_intr() to observe the final status update and move
	 * sc_state to MYX_S_OFF, then tear down all rings and DMA memory.
	 */
	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * Snapshot the link counter; myx_intr() compares it against the
	 * status block to recognise the shutdown status update.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_state = MYX_S_DOWN;
	membar_producer();	/* publish the state before the command */

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* sleep until the interrupt handler flips the state to OFF */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(0, sc->sc_state != MYX_S_OFF);
	}

	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

#if NKSTAT > 0
	myx_kstat_stop(sc);
	sc->sc_sts = NULL;
#endif

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1448
1449 void
myx_write_txd_tail(struct myx_softc * sc,struct myx_slot * ms,u_int8_t flags,u_int32_t offset,u_int idx)1450 myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
1451 u_int32_t offset, u_int idx)
1452 {
1453 struct myx_tx_desc txd;
1454 bus_dmamap_t zmap = sc->sc_zerodma.mxm_map;
1455 bus_dmamap_t map = ms->ms_map;
1456 int i;
1457
1458 for (i = 1; i < map->dm_nsegs; i++) {
1459 memset(&txd, 0, sizeof(txd));
1460 txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1461 txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1462 txd.tx_flags = flags;
1463
1464 myx_bus_space_write(sc,
1465 offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1466 &txd, sizeof(txd));
1467 }
1468
1469 /* pad runt frames */
1470 if (map->dm_mapsize < 60) {
1471 memset(&txd, 0, sizeof(txd));
1472 txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1473 txd.tx_length = htobe16(60 - map->dm_mapsize);
1474 txd.tx_flags = flags;
1475
1476 myx_bus_space_write(sc,
1477 offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1478 &txd, sizeof(txd));
1479 }
1480 }
1481
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct myx_tx_desc txd;
	struct myx_softc *sc = ifp->if_softc;
	struct myx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int32_t offset = sc->sc_tx_ring_offset;
	u_int idx, cons, prod;
	u_int free, used;
	u_int8_t flags;

	/*
	 * Transmit start routine.  Pass 1 dequeues packets and loads their
	 * DMA maps; pass 2 writes descriptors for every packet except the
	 * first; finally the first packet's descriptors are written, with
	 * the last word of its first descriptor written last of all, so
	 * the firmware never sees an incomplete descriptor chain.
	 */
	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	for (;;) {
		/* worst case descriptor usage for one more packet */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* runt frames consume one extra descriptor for padding */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was loaded */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* post every packet except the first one */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all but the last word of the first descriptor... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ...then the final word, completing the chain for the firmware */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1614
1615 int
myx_load_mbuf(struct myx_softc * sc,struct myx_slot * ms,struct mbuf * m)1616 myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
1617 {
1618 bus_dma_tag_t dmat = sc->sc_dmat;
1619 bus_dmamap_t dmap = ms->ms_map;
1620
1621 switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1622 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1623 case 0:
1624 break;
1625
1626 case EFBIG: /* mbuf chain is too fragmented */
1627 if (m_defrag(m, M_DONTWAIT) == 0 &&
1628 bus_dmamap_load_mbuf(dmat, dmap, m,
1629 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1630 break;
1631 default:
1632 return (1);
1633 }
1634
1635 ms->ms_m = m;
1636 return (0);
1637 }
1638
int
myx_intr(void *arg)
{
	struct myx_softc *sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state state;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t data;
	u_int8_t valid = 0;

	/*
	 * Interrupt handler.  The firmware posts events by DMAing into the
	 * status block; ms_isvalid flags a pending update.  Returns 1 if
	 * the interrupt was ours, 0 otherwise.
	 */
	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		/* not our interrupt */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * Resync until ms_isvalid stays clear, so a status update that
	 * raced with the clear above is not lost.
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt with the firmware */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/*
		 * During shutdown the ms_linkdown change signals that the
		 * firmware finished going down; wake myx_down().
		 */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1713
1714 void
myx_refill(void * xmrr)1715 myx_refill(void *xmrr)
1716 {
1717 struct myx_rx_ring *mrr = xmrr;
1718 struct myx_softc *sc = mrr->mrr_softc;
1719
1720 myx_rx_fill(sc, mrr);
1721
1722 if (mrr->mrr_prod == mrr->mrr_cons)
1723 timeout_add(&mrr->mrr_refill, 1);
1724 }
1725
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	/*
	 * Reclaim completed tx packets.  done_count is the firmware's
	 * running count of finished packets; free slots until our own
	 * running counter (sc_tx_count) catches up with it.
	 */
	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* account descriptors: segments plus pad for runts */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* we made room; kick the queue if it was stalled */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1761
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;
	int livelocked;

	/*
	 * Harvest received packets.  The firmware reports each receive as
	 * a length in the interrupt queue; a zero length terminates the
	 * batch.  The length also tells us which ring (small or big) the
	 * packet came from.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it reads as "empty" next time around */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the alignment pad so the IP header is aligned */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* hand the batch to the stack, then replenish the used slots */
	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if (livelocked)
			if_rxr_livelocked(&mrr->mrr_rxr);

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring completely empty: retry from a timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}
}
1826
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/*
	 * Post up to "slots" fresh rx buffers.  The first slot's buffer is
	 * allocated immediately but its descriptor is written last, so the
	 * firmware cannot start consuming a partially posted run.
	 * Returns the number of slots that could NOT be filled.
	 */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1872
1873 int
myx_rx_init(struct myx_softc * sc,int ring,bus_size_t size)1874 myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1875 {
1876 struct myx_rx_desc rxd;
1877 struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1878 struct myx_slot *ms;
1879 u_int32_t offset = mrr->mrr_offset;
1880 int rv;
1881 int i;
1882
1883 mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
1884 M_DEVBUF, M_WAITOK);
1885 if (mrr->mrr_slots == NULL)
1886 return (ENOMEM);
1887
1888 memset(&rxd, 0xff, sizeof(rxd));
1889 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1890 ms = &mrr->mrr_slots[i];
1891 rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1892 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1893 &ms->ms_map);
1894 if (rv != 0)
1895 goto destroy;
1896
1897 myx_bus_space_write(sc, offset + i * sizeof(rxd),
1898 &rxd, sizeof(rxd));
1899 }
1900
1901 if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1902 mrr->mrr_prod = mrr->mrr_cons = 0;
1903
1904 return (0);
1905
1906 destroy:
1907 while (i-- > 0) {
1908 ms = &mrr->mrr_slots[i];
1909 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1910 }
1911 free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1912 return (rv);
1913 }
1914
1915 int
myx_rx_fill(struct myx_softc * sc,struct myx_rx_ring * mrr)1916 myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1917 {
1918 u_int slots;
1919
1920 slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1921 if (slots == 0)
1922 return (1);
1923
1924 slots = myx_rx_fill_slots(sc, mrr, slots);
1925 if (slots > 0)
1926 if_rxr_put(&mrr->mrr_rxr, slots);
1927
1928 return (0);
1929 }
1930
1931 void
myx_rx_empty(struct myx_softc * sc,struct myx_rx_ring * mrr)1932 myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1933 {
1934 struct myx_slot *ms;
1935
1936 while (mrr->mrr_cons != mrr->mrr_prod) {
1937 ms = &mrr->mrr_slots[mrr->mrr_cons];
1938
1939 if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1940 mrr->mrr_cons = 0;
1941
1942 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1943 ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1944 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1945 m_freem(ms->ms_m);
1946 }
1947
1948 if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1949 }
1950
1951 void
myx_rx_free(struct myx_softc * sc,struct myx_rx_ring * mrr)1952 myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1953 {
1954 struct myx_slot *ms;
1955 int i;
1956
1957 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1958 ms = &mrr->mrr_slots[i];
1959 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1960 }
1961
1962 free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1963 }
1964
1965 struct mbuf *
myx_mcl_small(void)1966 myx_mcl_small(void)
1967 {
1968 struct mbuf *m;
1969
1970 m = MCLGETL(NULL, M_DONTWAIT, MYX_RXSMALL_SIZE);
1971 if (m == NULL)
1972 return (NULL);
1973
1974 m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1975
1976 return (m);
1977 }
1978
1979 struct mbuf *
myx_mcl_big(void)1980 myx_mcl_big(void)
1981 {
1982 struct mbuf *m;
1983 void *mcl;
1984
1985 MGETHDR(m, M_DONTWAIT, MT_DATA);
1986 if (m == NULL)
1987 return (NULL);
1988
1989 mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1990 if (mcl == NULL) {
1991 m_free(m);
1992 return (NULL);
1993 }
1994
1995 MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
1996 m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1997
1998 return (m);
1999 }
2000
2001 int
myx_buf_fill(struct myx_softc * sc,struct myx_slot * ms,struct mbuf * (* mclget)(void))2002 myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
2003 struct mbuf *(*mclget)(void))
2004 {
2005 struct mbuf *m;
2006 int rv;
2007
2008 m = (*mclget)();
2009 if (m == NULL)
2010 return (ENOMEM);
2011
2012 rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
2013 if (rv != 0) {
2014 m_freem(m);
2015 return (rv);
2016 }
2017
2018 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2019 ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2020
2021 ms->ms_m = m;
2022
2023 return (0);
2024 }
2025
2026 int
myx_tx_init(struct myx_softc * sc,bus_size_t size)2027 myx_tx_init(struct myx_softc *sc, bus_size_t size)
2028 {
2029 struct myx_slot *ms;
2030 int rv;
2031 int i;
2032
2033 sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
2034 M_DEVBUF, M_WAITOK);
2035 if (sc->sc_tx_slots == NULL)
2036 return (ENOMEM);
2037
2038 for (i = 0; i < sc->sc_tx_ring_count; i++) {
2039 ms = &sc->sc_tx_slots[i];
2040 rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
2041 sc->sc_tx_boundary, sc->sc_tx_boundary,
2042 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2043 &ms->ms_map);
2044 if (rv != 0)
2045 goto destroy;
2046 }
2047
2048 sc->sc_tx_prod = sc->sc_tx_cons = 0;
2049
2050 return (0);
2051
2052 destroy:
2053 while (i-- > 0) {
2054 ms = &sc->sc_tx_slots[i];
2055 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2056 }
2057 free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2058 return (rv);
2059 }
2060
2061 void
myx_tx_empty(struct myx_softc * sc)2062 myx_tx_empty(struct myx_softc *sc)
2063 {
2064 struct myx_slot *ms;
2065 u_int cons = sc->sc_tx_cons;
2066 u_int prod = sc->sc_tx_prod;
2067
2068 while (cons != prod) {
2069 ms = &sc->sc_tx_slots[cons];
2070
2071 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2072 ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2073 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2074 m_freem(ms->ms_m);
2075
2076 if (++cons >= sc->sc_tx_ring_count)
2077 cons = 0;
2078 }
2079
2080 sc->sc_tx_cons = cons;
2081 }
2082
2083 void
myx_tx_free(struct myx_softc * sc)2084 myx_tx_free(struct myx_softc *sc)
2085 {
2086 struct myx_slot *ms;
2087 int i;
2088
2089 for (i = 0; i < sc->sc_tx_ring_count; i++) {
2090 ms = &sc->sc_tx_slots[i];
2091 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2092 }
2093
2094 free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2095 }
2096
2097 #if NKSTAT > 0
/*
 * Indexes into myx_counters[] and mk_counters[]; one entry per counter
 * exported from the firmware status block.
 */
enum myx_counters {
	myx_stat_dropped_pause,
	myx_stat_dropped_ucast_filtered,
	myx_stat_dropped_bad_crc32,
	myx_stat_dropped_bad_phy,
	myx_stat_dropped_mcast_filtered,
	myx_stat_send_done,
	myx_stat_dropped_link_overflow,
	myx_stat_dropped_link,
	myx_stat_dropped_runt,
	myx_stat_dropped_overrun,
	myx_stat_dropped_no_small_bufs,
	myx_stat_dropped_no_large_bufs,

	myx_ncounters,	/* must stay last: total number of counters */
};
2114
/* name and status-block location of one exported counter */
struct myx_counter {
	const char *mc_name;		/* kstat key name */
	unsigned int mc_offset;		/* byte offset in struct myx_status */
};
2119
#define MYX_C_OFF(_f) offsetof(struct myx_status, _f)

/*
 * Counter names and their offsets in the firmware status block.
 * Order must match enum myx_counters above.
 */
static const struct myx_counter myx_counters[myx_ncounters] = {
	{ "pause drops", MYX_C_OFF(ms_dropped_pause), },
	{ "ucast filtered", MYX_C_OFF(ms_dropped_unicast), },
	/*
	 * NOTE(review): "bad crc32" reads ms_dropped_pause, the same field
	 * as "pause drops" above, while the enum entry is named
	 * myx_stat_dropped_bad_crc32 -- looks like a copy/paste slip;
	 * confirm the intended field against struct myx_status.
	 */
	{ "bad crc32", MYX_C_OFF(ms_dropped_pause), },
	{ "bad phy", MYX_C_OFF(ms_dropped_phyerr), },
	{ "mcast filtered", MYX_C_OFF(ms_dropped_mcast), },
	{ "tx done", MYX_C_OFF(ms_txdonecnt), },
	{ "rx discards", MYX_C_OFF(ms_dropped_linkoverflow), },
	{ "rx errors", MYX_C_OFF(ms_dropped_linkerror), },
	{ "rx undersize", MYX_C_OFF(ms_dropped_runt), },
	{ "rx oversize", MYX_C_OFF(ms_dropped_overrun), },
	{ "small discards", MYX_C_OFF(ms_dropped_smallbufunderrun), },
	{ "large discards", MYX_C_OFF(ms_dropped_bigbufunderrun), },
};
2136
/* kstat data exported to userland */
struct myx_kstats {
	struct kstat_kv mk_counters[myx_ncounters];	/* 64bit accumulators */
	struct kstat_kv mk_rdma_tags_available;
};

/* one raw 32bit snapshot of the firmware counters */
struct myx_kstat_cache {
	uint32_t mkc_counters[myx_ncounters];
};

/* double-buffered snapshots; mks_gen selects which one is "previous" */
struct myx_kstat_state {
	struct myx_kstat_cache mks_caches[2];
	unsigned int mks_gen;
};
2150
int
myx_kstat_read(struct kstat *ks)
{
	struct myx_softc *sc = ks->ks_softc;
	struct myx_kstats *mk = ks->ks_data;
	struct myx_kstat_state *mks = ks->ks_ptr;
	unsigned int gen = (mks->mks_gen++ & 1);
	struct myx_kstat_cache *omkc = &mks->mks_caches[gen];
	struct myx_kstat_cache *nmkc = &mks->mks_caches[!gen];
	unsigned int i = 0;

	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;

	/*
	 * kstat read callback.  The firmware counters are 32 bits wide; a
	 * new snapshot is taken into one of two alternating buffers and
	 * the delta against the previous snapshot is added to the 64bit
	 * exported counters, so wraps are absorbed as long as reads happen
	 * often enough (see myx_kstat_tick()).
	 */
	if (sc->sc_sts == NULL)
		return (0); /* counters are valid, just not updated */

	getnanouptime(&ks->ks_updated);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; i < myx_ncounters; i++) {
		const struct myx_counter *mc = &myx_counters[i];
		nmkc->mkc_counters[i] =
		    bemtoh32((uint32_t *)((uint8_t *)sts + mc->mc_offset));
	}

	kstat_kv_u32(&mk->mk_rdma_tags_available) =
	    bemtoh32(&sts->ms_rdmatags_available);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* accumulate the per-counter deltas into the 64bit values */
	for (i = 0; i < myx_ncounters; i++) {
		kstat_kv_u64(&mk->mk_counters[i]) +=
		    nmkc->mkc_counters[i] - omkc->mkc_counters[i];
	}

	return (0);
}
2190
2191 void
myx_kstat_tick(void * arg)2192 myx_kstat_tick(void *arg)
2193 {
2194 struct myx_softc *sc = arg;
2195
2196 if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
2197 return;
2198
2199 timeout_add_sec(&sc->sc_kstat_tmo, 4);
2200
2201 if (!mtx_enter_try(&sc->sc_kstat_mtx))
2202 return;
2203
2204 myx_kstat_read(sc->sc_kstat);
2205
2206 mtx_leave(&sc->sc_kstat_mtx);
2207 }
2208
2209 void
myx_kstat_start(struct myx_softc * sc)2210 myx_kstat_start(struct myx_softc *sc)
2211 {
2212 if (sc->sc_kstat == NULL)
2213 return;
2214
2215 myx_kstat_tick(sc);
2216 }
2217
2218 void
myx_kstat_stop(struct myx_softc * sc)2219 myx_kstat_stop(struct myx_softc *sc)
2220 {
2221 struct myx_kstat_state *mks;
2222
2223 if (sc->sc_kstat == NULL)
2224 return;
2225
2226 timeout_del_barrier(&sc->sc_kstat_tmo);
2227
2228 mks = sc->sc_kstat->ks_ptr;
2229
2230 mtx_enter(&sc->sc_kstat_mtx);
2231 memset(mks, 0, sizeof(*mks));
2232 mtx_leave(&sc->sc_kstat_mtx);
2233 }
2234
2235 void
myx_kstat_attach(struct myx_softc * sc)2236 myx_kstat_attach(struct myx_softc *sc)
2237 {
2238 struct kstat *ks;
2239 struct myx_kstats *mk;
2240 struct myx_kstat_state *mks;
2241 unsigned int i;
2242
2243 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
2244 timeout_set(&sc->sc_kstat_tmo, myx_kstat_tick, sc);
2245
2246 ks = kstat_create(DEVNAME(sc), 0, "myx-stats", 0, KSTAT_T_KV, 0);
2247 if (ks == NULL)
2248 return;
2249
2250 mk = malloc(sizeof(*mk), M_DEVBUF, M_WAITOK|M_ZERO);
2251 for (i = 0; i < myx_ncounters; i++) {
2252 const struct myx_counter *mc = &myx_counters[i];
2253
2254 kstat_kv_unit_init(&mk->mk_counters[i], mc->mc_name,
2255 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS);
2256 }
2257 kstat_kv_init(&mk->mk_rdma_tags_available, "rdma tags free",
2258 KSTAT_KV_T_UINT32);
2259
2260 mks = malloc(sizeof(*mks), M_DEVBUF, M_WAITOK|M_ZERO);
2261 /* these start at 0 */
2262
2263 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
2264 ks->ks_data = mk;
2265 ks->ks_datalen = sizeof(*mk);
2266 ks->ks_read = myx_kstat_read;
2267 ks->ks_ptr = mks;
2268
2269 ks->ks_softc = sc;
2270 sc->sc_kstat = ks;
2271 kstat_install(ks);
2272 }
2273 #endif /* NKSTAT > 0 */
2274