/*	$OpenBSD: if_myx.c,v 1.119 2023/11/10 15:51:20 bluhm Exp $	*/

/*
 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/timeout.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/kstat.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_myxreg.h>

#ifdef MYX_DEBUG
/*
 * Debug category masks for DPRINTF().
 *
 * NOTE(review): MYXDBG_CMD and MYXDBG_INTR expand to (2<<0)==2 and
 * (3<<0)==3 respectively, so MYXDBG_INTR is CMD|INTR rather than a
 * distinct bit — presumably 1<<1 and 1<<2 were intended; harmless while
 * myx_debug is MYXDBG_ALL, but confirm before relying on selective masks.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(2<<0)	/* commands */
#define MYXDBG_INTR	(3<<0)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

/* Autoconf device name, e.g. "myx0", for diagnostics. */
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/*
 * A single bus_dma allocation: map, backing segment, and the kernel
 * mapping of it.  Created by myx_dmamem_alloc(), torn down by
 * myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded DMA map */
	bus_dma_segment_t	 mxm_seg;	/* backing memory segment */
	int			 mxm_nsegs;	/* number of segments (1) */
	size_t			 mxm_size;	/* size of the allocation */
	caddr_t			 mxm_kva;	/* kernel virtual mapping */
};

/* Shared mbuf cluster pool for the big rx ring; created on first attach. */
struct pool *myx_mcl_pool;

/* One descriptor slot: the DMA map plus the mbuf currently loaded into it. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};

/*
 * Per-rx-ring state.  The chip has two receive rings: one fed with
 * small (MCLBYTES) clusters and one with big (jumbo) clusters; which
 * cluster allocator to use is selected via mrr_mclget.
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;
	struct timeout		 mrr_refill;	/* retry fill when mbufs short */
	struct if_rxring	 mrr_rxr;
	struct myx_slot		*mrr_slots;
	u_int32_t		 mrr_offset;	/* ring offset in chip memory */
	u_int			 mrr_running;
	u_int			 mrr_prod;
	u_int			 mrr_cons;
	struct mbuf		*(*mrr_mclget)(void);	/* cluster allocator */
};

/* Interface lifecycle state, written in myx_up()/myx_down(). */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};

struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of mapped BAR0 window */

	struct myx_dmamem	 sc_zerodma;	/* zero pad source buffer */
	struct myx_dmamem	 sc_cmddma;	/* command/response mailbox */
	struct myx_dmamem	 sc_paddma;

	struct myx_dmamem	 sc_sts_dma;	/* device status block */
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* 1 if using INTx, 0 for MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;		/* coalescing reg offset */
	u_int32_t		 sc_irqclaimoff;	/* irq claim reg offset */
	u_int32_t		 sc_irqdeassertoff;	/* deassert reg offset */

	struct myx_dmamem	 sc_intrq_dma;	/* interrupt/completion queue */
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	bus_size_t		 sc_tx_boundary;	/* fw-dependent tx seg boundary */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;	/* tx ring offset in chip memory */
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count;	/* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;

	struct rwlock		 sc_sff_lock;	/* serializes SFF page reads */

#if NKSTAT > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_kstat;
#endif
};

/* Receive buffer sizes for the small and big rx rings. */
#define MYX_RXSMALL_SIZE	MCLBYTES
#define MYX_RXBIG_SIZE		(MYX_MTU - \
    (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))

int	 myx_match(struct device *, void *, void *);
void	 myx_attach(struct device *, struct device *, void *);
int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
int	 myx_query(struct myx_softc *sc, char *, size_t);
u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
void	 myx_attachhook(struct device *);
int	 myx_loadfirmware(struct myx_softc *, const char *);
int	 myx_probe_firmware(struct myx_softc *);

void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);

/* Widest raw bus write available: 8 bytes on LP64, 4 bytes otherwise. */
#if defined(__LP64__)
#define _myx_bus_space_write bus_space_write_raw_region_8
typedef u_int64_t myx_bus_t;
#else
#define _myx_bus_space_write bus_space_write_raw_region_4
typedef u_int32_t myx_bus_t;
#endif
#define myx_bus_space_write(_sc, _o, _a, _l) \
    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))

int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
int	 myx_boot(struct myx_softc *, u_int32_t);

int	 myx_rdma(struct myx_softc *, u_int);
int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
	    bus_size_t, u_int align);
void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
int	 myx_media_change(struct ifnet *);
void	 myx_media_status(struct ifnet *, struct ifmediareq *);
void	 myx_link_state(struct myx_softc *, u_int32_t);
void	 myx_watchdog(struct ifnet *);
int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
void	 myx_up(struct myx_softc *);
void	 myx_iff(struct myx_softc *);
void	 myx_down(struct myx_softc *);
int	 myx_get_sffpage(struct myx_softc *, struct if_sffpage *);

void	 myx_start(struct ifqueue *);
void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
	    u_int32_t, u_int);
int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
int	 myx_intr(void *);
void	 myx_rxeof(struct myx_softc *);
void	 myx_txeof(struct myx_softc *, u_int32_t);

int	 myx_buf_fill(struct myx_softc *, struct myx_slot *,
	    struct mbuf *(*)(void));
struct mbuf *	myx_mcl_small(void);
struct mbuf *	myx_mcl_big(void);

int	 myx_rx_init(struct myx_softc *, int, bus_size_t);
int	 myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
void	 myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
void	 myx_rx_free(struct myx_softc *, struct myx_rx_ring *);

int	 myx_tx_init(struct myx_softc *, bus_size_t);
void	 myx_tx_empty(struct myx_softc *);
void	 myx_tx_free(struct myx_softc *);

void	 myx_refill(void *);

#if NKSTAT > 0
void	 myx_kstat_attach(struct myx_softc *);
void	 myx_kstat_start(struct myx_softc *);
void	 myx_kstat_stop(struct myx_softc *);
#endif

/* Autoconf glue. */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
const struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* Supported PCI devices. */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};

/*
 * Match against the Myricom vendor/device id table.
 */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}

/*
 * Map BAR0 and the interrupt, read the board identity, and defer the
 * rest of bring-up (firmware load) to myx_attachhook() at mountroot time.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	rw_init(&sc->sc_sff_lock, "myxsff");

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Wire up both rx rings with their cluster allocator and refill
	 * timeout before anything can schedule them. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; prefer MSI, fall back to INTx. */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	/* Firmware is loaded from the filesystem, so wait for mountroot. */
	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

/*
 * Program the PCIe Device Control register: set the Max Payload Size
 * field to 4096 (fls(4096)-8 == 5, shifted into the MPS field) and
 * enable relaxed ordering.  Returns -1 if the device has no PCIe
 * capability, 0 otherwise.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}

/*
 * Parse a hex MAC address string into lladdr (non-hex characters such
 * as ':' are skipped).  Returns the number of characters consumed.
 *
 * NOTE(review): the loop condition reads mac[i] before checking
 * i < maxlen, and j is not bounded against ETHER_ADDR_LEN*2; callers
 * pass NUL-terminated EEPROM strings so this appears benign — confirm.
 */
u_int
myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
{
	u_int i, j;
	u_int8_t digit;

	memset(lladdr, 0, ETHER_ADDR_LEN);
	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
		if (mac[i] >= '0' && mac[i] <= '9')
			digit = mac[i] - '0';
		else if (mac[i] >= 'A' && mac[i] <= 'F')
			digit = mac[i] - 'A' + 10;
		else if (mac[i] >= 'a' && mac[i] <= 'f')
			digit = mac[i] - 'a' + 10;
		else
			continue;
		/* high nibble first, then low nibble of each byte */
		if ((j & 1) == 0)
			digit <<= 4;
		lladdr[j++/2] |= digit;
	}

	return (i);
}

/*
 * Read the board's EEPROM string specs out of the register window and
 * extract the MAC address ("MAC=") and part number ("PC=").
 * Returns 0 on success, 1 if the header lies outside the BAR window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t offset;
	u_int8_t strings[MYX_STRING_SPECS_SIZE];
	u_int i, len, maxlen;

	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len =
	    min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/* Walk the NUL-separated "KEY=value" strings. */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the next NUL-terminated string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}

/*
 * Load a firmware image from the filesystem, validate its header and
 * version, copy it into the card's SRAM in 256-byte chunks, and boot it.
 * Returns 0 on success, 1 on any failure (error already printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr hdr;
	u_int8_t *fw;
	size_t fwlen;
	u_int32_t offset;
	u_int i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}

/*
 * Deferred attach: create the cluster pool, allocate the command
 * mailbox, load and select firmware, establish the interrupt, and
 * attach the network interface.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc *sc = (struct myx_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_cmd mc;

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);

		m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY,
		    "myxmcl");
		pool_cache_init(myx_mcl_pool);
	}

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware default; myx_probe_firmware() may lower this */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

#if NKSTAT > 0
	myx_kstat_attach(sc);
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real sendq depth is set in myx_up() once the tx ring size is known */
	ifq_init_maxlen(&ifp->if_snd, 1);
	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}

/*
 * Decide whether the already-loaded aligned firmware can be kept.  If
 * the PCIe link is wider than x4, run the chip's unaligned-DMA tests
 * against a scratch buffer; on failure fall back to the unaligned
 * firmware (and a 2048-byte tx boundary).  Returns 0 when a usable
 * firmware is running, 1 on hard failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the Link Control/Status reg */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 encodes size | test direction flags in the high bits */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}

/*
 * Read len bytes from the register window at off, with a read barrier
 * first so the read is not reordered before preceding accesses.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}

/*
 * Write len bytes into the register window at off, followed by a write
 * barrier so the data is posted before subsequent accesses.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Allocate a single zeroed, wired, contiguous DMA buffer of the given
 * size/alignment and load it into a map.  Returns 0 on success, 1 on
 * failure with everything rolled back (goto-unwind in reverse order).
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}

/*
 * Tear down a myx_dmamem_alloc() allocation.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}

/*
 * Issue a command to the firmware via the MYX_CMD window and busy-poll
 * the response written into the command DMA mailbox (up to 20ms).
 * The firmware's response overwrites mr_result; 0xffffffff means "not
 * answered yet".  Returns the firmware result code (MYXCMD_OK == 0);
 * on success *r (if non-NULL) receives the response data word.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	struct myx_response *mr;
	u_int i;
	u_int32_t result, data;

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): cmd %u completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmd, i, result, data, data);

	if (result == MYXCMD_OK) {
		if (r != NULL)
			*r = data;
	}

	return (result);
}

/*
 * Hand the freshly uploaded firmware image to the bootstrap loader via
 * the MYX_BOOT window and busy-poll the DMA'd acknowledgement (up to
 * 200ms).  The first 8 bytes of the image are skipped (bc_copyto/
 * bc_length).  Returns 0 when the firmware acknowledged the boot.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd bc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	u_int32_t *status;
	u_int i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat,
		    map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}

/*
 * Enable or disable the firmware's dummy RDMA and busy-poll the DMA'd
 * acknowledgement (up to 20ms).  Returns 0 on acknowledgement.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd rc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t pad = sc->sc_paddma.mxm_map;
	u_int32_t *status;
	int ret = 1;
	u_int i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}

/*
 * Media change handler: the 10G link has no selectable media.
 */
int
myx_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}

/*
 * Report link status from the device status block (DMA'd by the chip).
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc *sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the link state out of the DMA status block */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}

/*
 * Propagate a link state word from the status block into the ifnet,
 * notifying the stack and updating the baudrate on change.
 */
void
myx_link_state(struct myx_softc *sc, u_int32_t sts)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state = LINK_STATE_DOWN;

	if (betoh32(sts) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}

/*
 * Watchdog: intentionally a no-op.
 */
void
myx_watchdog(struct ifnet *ifp)
{
	return;
}

/*
 * Interface ioctl handler: up/down, media, rx ring info, SFF page
 * reads, and generic ether ioctls.  ENETRESET collapses into a
 * multicast filter reprogram via myx_iff().
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc *sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* serialize slow i2c transfers; interruptible */
		error = rw_enter(&sc->sc_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = myx_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&sc->sc_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Fill in rx ring accounting for SIOCGIFRXR (systat/netstat).
 */
int
myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];

	memset(ifr, 0, sizeof(ifr));

	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
	strlcpy(ifr[0].ifr_name, "small", sizeof(ifr[0].ifr_name));

	ifr[1].ifr_size = MYX_RXBIG_SIZE;
	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
	strlcpy(ifr[1].ifr_name, "large", sizeof(ifr[1].ifr_name));

	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
}

/*
 * Read one byte from an i2c device (SFP module) through the firmware:
 * kick off an I2C_READ and poll MYXCMD_I2C_BYTE until the transfer
 * completes (up to 50ms).  Returns 0/EIO/EBUSY.
 */
static int
myx_i2c_byte(struct myx_softc *sc, uint8_t addr,
    uint8_t off, uint8_t *byte)
{
	struct myx_cmd mc;
	int result;
	uint32_t r;
	unsigned int ms;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(0); /* get 1 byte */
	mc.mc_data1 = htobe32((addr << 8) | off);
	result = myx_cmd(sc, MYXCMD_I2C_READ, &mc, NULL);
	if (result != 0)
		return (EIO);

	/* poll for the byte; firmware answers ERR_BUSY until done */
	for (ms = 0; ms < 50; ms++) {
		memset(&mc, 0, sizeof(mc));
		mc.mc_data0 = htobe32(off);
		result = myx_cmd(sc, MYXCMD_I2C_BYTE, &mc, &r);
		switch (result) {
		case MYXCMD_OK:
			*byte = r;
			return (0);
		case MYXCMD_ERR_BUSY:
			break;
		default:
			return (EIO);
		}

		delay(1000);
	}

	return (EBUSY);
}

/*
 * Read a full SFF page byte-by-byte over i2c for SIOCGIFSFFPAGE.
 * For EEPROM reads, byte 127 (the page select) is checked first so we
 * do not return data from the wrong page.  Returns errno.
 */
int
myx_get_sffpage(struct myx_softc *sc, struct if_sffpage *sff)
{
	unsigned int i;
	int result;

	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
		uint8_t page;

		result = myx_i2c_byte(sc, IFSFF_ADDR_EEPROM, 127, &page);
		if (result != 0)
			return (result);

		if (page != sff->sff_page)
			return (ENXIO);
	}

	for (i = 0; i < sizeof(sff->sff_data); i++) {
		result = myx_i2c_byte(sc, sff->sff_addr,
		    i, &sff->sff_data[i]);
		if (result != 0)
			return (result);
	}

	return (0);
}

/*
 * Bring the interface up: reset the chip, allocate all DMA resources
 * (zero pad, pad, interrupt queue, status block), query ring geometry,
 * program addresses/offsets into the firmware, fill the rx rings, and
 * finally issue MYXCMD_SET_IFUP.  On any failure the goto-unwind chain
 * releases everything acquired so far, in reverse order.
 */
void
myx_up(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;
	bus_dmamap_t		 map;
	size_t			 size;
	u_int			 maxpkt;
	u_int32_t		 r;

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		return;
	}

	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
	    64, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate zero pad memory\n",
		    DEVNAME(sc));
		return;
	}
	memset(sc->sc_zerodma.mxm_kva, 0, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate pad DMA memory\n",
		    DEVNAME(sc));
		goto free_zero;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
		goto free_pad;
	}

	/* ring sizes are reported in bytes; convert to descriptor counts */
	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_tx_ring_prod = 0;
	sc->sc_tx_ring_cons = 0;
	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
	sc->sc_tx_count = 0;
	ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_count - 1);

	/* Allocate Interrupt Queue */

	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
	sc->sc_intrq_idx = 0;

	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
	    size, MYXALIGN_DATA) != 0) {
		goto free_pad;
	}
	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
	map = sc->sc_intrq_dma.mxm_map;
	memset(sc->sc_intrq, 0, size);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(size);
	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
		printf("%s: failed to set intrq size\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
		printf("%s: failed to set intrq address\n", DEVNAME(sc));
		goto free_intrq;
	}

	/*
	 * get interrupt offsets
	 */

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
	    &sc->sc_irqclaimoff) != 0) {
		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
	    &sc->sc_irqdeassertoff) != 0) {
		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
	    &sc->sc_irqcoaloff) != 0) {
		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Set an appropriate interrupt coalescing period */
	r = htobe32(MYX_IRQCOALDELAY);
	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));

	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
		printf("%s: failed to configure flow control\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
	    &sc->sc_tx_ring_offset) != 0) {
		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Allocate Interrupt Data */
	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
		printf("%s: failed to allocate status DMA memory\n",
		    DEVNAME(sc));
		goto free_intrq;
	}
	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
	map = sc->sc_sts_dma.mxm_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(sizeof(struct myx_status));
	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
		goto free_sts;
	}

	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(maxpkt);
	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
		goto free_sts;
	}

	if (myx_tx_init(sc, maxpkt) != 0)
		goto free_sts;

	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
		goto free_tx_ring;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
		goto free_rx_ring_small;

	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
		goto empty_rx_ring_small;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
		goto free_rx_ring_big;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set small buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(16384);
	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set big buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	sc->sc_state = MYX_S_RUNNING;

	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
		printf("%s: failed to start the device\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	myx_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);
	ifq_restart(&ifp->if_snd);

#if NKSTAT > 0
	timeout_add_sec(&sc->sc_kstat_tmo, 1);
#endif

	return;

	/* unwind in reverse order of acquisition; labels fall through */
empty_rx_ring_big:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
free_rx_ring_big:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
empty_rx_ring_small:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_rx_ring_small:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_tx_ring:
	myx_tx_free(sc);
free_sts:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_sts_dma);
free_intrq:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);
free_pad:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &sc->sc_paddma);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}
free_zero:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1297 sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1298 myx_dmamem_free(sc, &sc->sc_zerodma); 1299 } 1300 1301 int 1302 myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr) 1303 { 1304 struct myx_cmd mc; 1305 1306 memset(&mc, 0, sizeof(mc)); 1307 mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 | 1308 addr[2] << 8 | addr[3]); 1309 mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]); 1310 1311 if (myx_cmd(sc, cmd, &mc, NULL) != 0) { 1312 printf("%s: failed to set the lladdr\n", DEVNAME(sc)); 1313 return (-1); 1314 } 1315 return (0); 1316 } 1317 1318 void 1319 myx_iff(struct myx_softc *sc) 1320 { 1321 struct myx_cmd mc; 1322 struct ifnet *ifp = &sc->sc_ac.ac_if; 1323 struct ether_multi *enm; 1324 struct ether_multistep step; 1325 u_int8_t *addr; 1326 1327 CLR(ifp->if_flags, IFF_ALLMULTI); 1328 1329 if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ? 1330 MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) { 1331 printf("%s: failed to configure promisc mode\n", DEVNAME(sc)); 1332 return; 1333 } 1334 1335 if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) { 1336 printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc)); 1337 return; 1338 } 1339 1340 if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) { 1341 printf("%s: failed to leave all mcast groups \n", DEVNAME(sc)); 1342 return; 1343 } 1344 1345 if (ISSET(ifp->if_flags, IFF_PROMISC) || 1346 sc->sc_ac.ac_multirangecnt > 0) { 1347 SET(ifp->if_flags, IFF_ALLMULTI); 1348 return; 1349 } 1350 1351 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm); 1352 while (enm != NULL) { 1353 addr = enm->enm_addrlo; 1354 1355 memset(&mc, 0, sizeof(mc)); 1356 mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 | 1357 addr[2] << 8 | addr[3]); 1358 mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16); 1359 if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) { 1360 printf("%s: failed to join mcast group\n", DEVNAME(sc)); 1361 return; 1362 } 1363 1364 ETHER_NEXT_MULTI(step, enm); 1365 } 1366 1367 memset(&mc, 0, 
sizeof(mc)); 1368 if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) { 1369 printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc)); 1370 return; 1371 } 1372 } 1373 1374 void 1375 myx_down(struct myx_softc *sc) 1376 { 1377 struct ifnet *ifp = &sc->sc_ac.ac_if; 1378 volatile struct myx_status *sts = sc->sc_sts; 1379 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 1380 struct myx_cmd mc; 1381 int s; 1382 int ring; 1383 1384 CLR(ifp->if_flags, IFF_RUNNING); 1385 1386 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1387 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1388 sc->sc_linkdown = sts->ms_linkdown; 1389 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1390 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1391 1392 sc->sc_state = MYX_S_DOWN; 1393 membar_producer(); 1394 1395 memset(&mc, 0, sizeof(mc)); 1396 (void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL); 1397 1398 while (sc->sc_state != MYX_S_OFF) { 1399 sleep_setup(sts, PWAIT, "myxdown"); 1400 membar_consumer(); 1401 sleep_finish(0, sc->sc_state != MYX_S_OFF); 1402 } 1403 1404 s = splnet(); 1405 if (ifp->if_link_state != LINK_STATE_UNKNOWN) { 1406 ifp->if_link_state = LINK_STATE_UNKNOWN; 1407 ifp->if_baudrate = 0; 1408 if_link_state_change(ifp); 1409 } 1410 splx(s); 1411 1412 memset(&mc, 0, sizeof(mc)); 1413 if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) { 1414 printf("%s: failed to reset the device\n", DEVNAME(sc)); 1415 } 1416 1417 ifq_clr_oactive(&ifp->if_snd); 1418 ifq_barrier(&ifp->if_snd); 1419 1420 for (ring = 0; ring < 2; ring++) { 1421 struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring]; 1422 1423 timeout_del(&mrr->mrr_refill); 1424 myx_rx_empty(sc, mrr); 1425 myx_rx_free(sc, mrr); 1426 } 1427 1428 myx_tx_empty(sc); 1429 myx_tx_free(sc); 1430 1431 #if NKSTAT > 0 1432 myx_kstat_stop(sc); 1433 sc->sc_sts = NULL; 1434 #endif 1435 1436 /* the sleep shizz above already synced this dmamem */ 1437 myx_dmamem_free(sc, &sc->sc_sts_dma); 1438 1439 bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0, 1440 
sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1441 myx_dmamem_free(sc, &sc->sc_intrq_dma); 1442 1443 bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0, 1444 sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1445 myx_dmamem_free(sc, &sc->sc_paddma); 1446 1447 bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0, 1448 sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1449 myx_dmamem_free(sc, &sc->sc_zerodma); 1450 } 1451 1452 void 1453 myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags, 1454 u_int32_t offset, u_int idx) 1455 { 1456 struct myx_tx_desc txd; 1457 bus_dmamap_t zmap = sc->sc_zerodma.mxm_map; 1458 bus_dmamap_t map = ms->ms_map; 1459 int i; 1460 1461 for (i = 1; i < map->dm_nsegs; i++) { 1462 memset(&txd, 0, sizeof(txd)); 1463 txd.tx_addr = htobe64(map->dm_segs[i].ds_addr); 1464 txd.tx_length = htobe16(map->dm_segs[i].ds_len); 1465 txd.tx_flags = flags; 1466 1467 myx_bus_space_write(sc, 1468 offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count), 1469 &txd, sizeof(txd)); 1470 } 1471 1472 /* pad runt frames */ 1473 if (map->dm_mapsize < 60) { 1474 memset(&txd, 0, sizeof(txd)); 1475 txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr); 1476 txd.tx_length = htobe16(60 - map->dm_mapsize); 1477 txd.tx_flags = flags; 1478 1479 myx_bus_space_write(sc, 1480 offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count), 1481 &txd, sizeof(txd)); 1482 } 1483 } 1484 1485 void 1486 myx_start(struct ifqueue *ifq) 1487 { 1488 struct ifnet *ifp = ifq->ifq_if; 1489 struct myx_tx_desc txd; 1490 struct myx_softc *sc = ifp->if_softc; 1491 struct myx_slot *ms; 1492 bus_dmamap_t map; 1493 struct mbuf *m; 1494 u_int32_t offset = sc->sc_tx_ring_offset; 1495 u_int idx, cons, prod; 1496 u_int free, used; 1497 u_int8_t flags; 1498 1499 idx = sc->sc_tx_ring_prod; 1500 1501 /* figure out space */ 1502 free = sc->sc_tx_ring_cons; 1503 if (free <= idx) 1504 free += sc->sc_tx_ring_count; 1505 free -= idx; 1506 1507 cons = prod = 
	    sc->sc_tx_prod;

	used = 0;

	/* pass 1: dequeue and DMA-load as many packets as fit in the ring */
	for (;;) {
		/* worst case per packet: all segments plus a runt pad slot */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* actual descriptor slots consumed by this packet */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was loaded */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* pass 2: write descriptors for every packet except the first */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ?
		    1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its last word ... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ... then the final word, which hands the batch to the NIC */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}

/*
 * DMA-load an mbuf chain into the slot's map, defragmenting once if the
 * chain has more segments than the map allows.  Returns 0 on success
 * (slot takes ownership of the mbuf), 1 on failure (caller frees it).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	bus_dmamap_t dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* defrag or reload failed: fall through to the error path */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}

/*
 * Interrupt handler.  Consumes the DMA'd status block: acknowledges the
 * interrupt, completes transmits, drains the receive interrupt queue and
 * tracks link state.  Also completes the MYX_S_DOWN -> MYX_S_OFF handshake
 * with myx_down().  Returns 1 if the interrupt was ours.
 */
int
myx_intr(void *arg)
{
	struct myx_softc *sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state state;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t data;
	u_int8_t valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		/* not our interrupt */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	if (sc->sc_intx) {
		/* legacy INTx mode needs an explicit deassert write */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* re-read until the firmware stops re-validating the status block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			/* myx_down() is waiting for this transition */
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map,
	    0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}

/*
 * Timeout worker: retry filling an rx ring that went completely empty,
 * rescheduling itself until at least one buffer could be posted.
 */
void
myx_refill(void *xmrr)
{
	struct myx_rx_ring *mrr = xmrr;
	struct myx_softc *sc = mrr->mrr_softc;

	myx_rx_fill(sc, mrr);

	/* still empty (prod == cons): try again on the next tick */
	if (mrr->mrr_prod == mrr->mrr_cons)
		timeout_add(&mrr->mrr_refill, 1);
}

/*
 * Reclaim completed transmit slots.  done_count is the firmware's running
 * tx completion counter; the loop advances sc_tx_count until it catches
 * up.  Caller (myx_intr) only calls this when done_count != sc_tx_count,
 * so the do/while always has at least one completion to process.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* free the descriptor slots, including any runt pad slot */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

/*
 * Drain the receive interrupt queue: each nonzero iq_length entry is a
 * completed receive.  The length decides which ring (small or big) the
 * buffer came from; rings complete strictly in order, so the ring's cons
 * index identifies the slot.  Received mbufs are batched through
 * ifiq_input() and the consumed slots are refilled afterwards.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;
	int livelocked;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it reads as "empty" next pass */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		ring = (len <=
		    (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the 2-byte fudge that aligns the IP header */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if (livelocked)
			if_rxr_livelocked(&mrr->mrr_rxr);

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring went completely empty: schedule a refill retry */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}
}

/*
 * Post up to "slots" fresh rx buffers to the NIC.  The first slot's
 * descriptor is written last (after a write barrier) so the firmware
 * never sees a later descriptor before an earlier one.  Returns the
 * number of slots that could NOT be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills
	    > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	/* now post the first descriptor, handing the batch to the NIC */
	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}

/*
 * Allocate the slot array and per-slot DMA maps for an rx ring, and
 * initialize every on-NIC descriptor to all-ones (no buffer).  Returns 0
 * or an errno; on failure all partially created maps are destroyed.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	/* 0xff-filled descriptor marks the slot as holding no buffer */
	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ms->ms_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
	return (rv);
}

/*
 * Fill an rx ring with as many buffers as the rxr accounting allows,
 * returning unused slot reservations.  Returns 1 if no slots were
 * available at all, 0 otherwise.
 */
int
myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	u_int slots;

	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
	if (slots == 0)
		return (1);

	slots = myx_rx_fill_slots(sc, mrr, slots);
	if (slots > 0)
		if_rxr_put(&mrr->mrr_rxr, slots);

	return (0);
}

/*
 * Release every outstanding rx buffer (cons..prod) in a ring and reset
 * its rxr accounting.  Used on teardown; the hardware must be quiesced.
 */
void
myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	struct myx_slot *ms;

	while (mrr->mrr_cons != mrr->mrr_prod) {
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
		m_freem(ms->ms_m);
	}

	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
}

/*
 * Destroy all DMA maps of an rx ring and free its slot array.
 * Counterpart of myx_rx_init().
 */
void
myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	struct myx_slot *ms;
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}

	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
}

/*
 * Allocate a small receive cluster (mrr_mclget hook for the small ring).
 * Returns NULL if no mbuf/cluster is available.
 */
struct mbuf *
myx_mcl_small(void)
{
	struct mbuf *m;

	m = MCLGETL(NULL, M_DONTWAIT, MYX_RXSMALL_SIZE);
	if (m == NULL)
		return (NULL);

	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;

	return (m);
}

/*
 * Allocate a big receive cluster from the driver's private pool
 * (mrr_mclget hook for the big ring).  Returns NULL on shortage.
 */
struct mbuf *
myx_mcl_big(void)
{
	struct mbuf *m;
	void *mcl;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
	if (mcl == NULL) {
		m_free(m);
		return (NULL);
	}

	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;

	return (m);
}

/*
 * Fill one rx slot: grab a cluster via the ring's allocator, DMA-load
 * and presync it.  Returns 0 on success (slot owns the mbuf) or an
 * errno, in which case the mbuf has been freed.
 */
int
myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
    struct mbuf *(*mclget)(void))
{
	struct mbuf *m;
	int rv;

	m = (*mclget)();
	if (m == NULL)
		return (ENOMEM);

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
	if (rv != 0) {
		m_freem(m);
		return (rv);
	}

	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	ms->ms_m = m;

	return
	(0);
}

/*
 * Allocate the tx slot array and per-slot DMA maps sized for "size"
 * byte packets.  Returns 0 or an errno; partially created maps are
 * destroyed on failure.
 */
int
myx_tx_init(struct myx_softc *sc, bus_size_t size)
{
	struct myx_slot *ms;
	int rv;
	int i;

	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (sc->sc_tx_slots == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		ms = &sc->sc_tx_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
		    sc->sc_tx_boundary, sc->sc_tx_boundary,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ms->ms_map);
		if (rv != 0)
			goto destroy;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &sc->sc_tx_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
	return (rv);
}

/*
 * Free every still-loaded tx mbuf (cons..prod).  Used on teardown after
 * the chip has been reset, so no completions are outstanding.
 */
void
myx_tx_empty(struct myx_softc *sc)
{
	struct myx_slot *ms;
	u_int cons = sc->sc_tx_cons;
	u_int prod = sc->sc_tx_prod;

	while (cons != prod) {
		ms = &sc->sc_tx_slots[cons];

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	}

	sc->sc_tx_cons = cons;
}

/*
 * Destroy all tx DMA maps and free the slot array.  Counterpart of
 * myx_tx_init().
 */
void
myx_tx_free(struct myx_softc *sc)
{
	struct myx_slot *ms;
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		ms = &sc->sc_tx_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}

	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
}

#if NKSTAT > 0
/* Indexes into myx_counters[]/mk_counters[]; order matters. */
enum myx_counters {
	myx_stat_dropped_pause,
	myx_stat_dropped_ucast_filtered,
	myx_stat_dropped_bad_crc32,
	myx_stat_dropped_bad_phy,
	myx_stat_dropped_mcast_filtered,
	myx_stat_send_done,
myx_stat_dropped_link_overflow, 2109 myx_stat_dropped_link, 2110 myx_stat_dropped_runt, 2111 myx_stat_dropped_overrun, 2112 myx_stat_dropped_no_small_bufs, 2113 myx_stat_dropped_no_large_bufs, 2114 2115 myx_ncounters, 2116 }; 2117 2118 struct myx_counter { 2119 const char *mc_name; 2120 unsigned int mc_offset; 2121 }; 2122 2123 #define MYX_C_OFF(_f) offsetof(struct myx_status, _f) 2124 2125 static const struct myx_counter myx_counters[myx_ncounters] = { 2126 { "pause drops", MYX_C_OFF(ms_dropped_pause), }, 2127 { "ucast filtered", MYX_C_OFF(ms_dropped_unicast), }, 2128 { "bad crc32", MYX_C_OFF(ms_dropped_pause), }, 2129 { "bad phy", MYX_C_OFF(ms_dropped_phyerr), }, 2130 { "mcast filtered", MYX_C_OFF(ms_dropped_mcast), }, 2131 { "tx done", MYX_C_OFF(ms_txdonecnt), }, 2132 { "rx discards", MYX_C_OFF(ms_dropped_linkoverflow), }, 2133 { "rx errors", MYX_C_OFF(ms_dropped_linkerror), }, 2134 { "rx undersize", MYX_C_OFF(ms_dropped_runt), }, 2135 { "rx oversize", MYX_C_OFF(ms_dropped_overrun), }, 2136 { "small discards", MYX_C_OFF(ms_dropped_smallbufunderrun), }, 2137 { "large discards", MYX_C_OFF(ms_dropped_bigbufunderrun), }, 2138 }; 2139 2140 struct myx_kstats { 2141 struct kstat_kv mk_counters[myx_ncounters]; 2142 struct kstat_kv mk_rdma_tags_available; 2143 }; 2144 2145 struct myx_kstat_cache { 2146 uint32_t mkc_counters[myx_ncounters]; 2147 }; 2148 2149 struct myx_kstat_state { 2150 struct myx_kstat_cache mks_caches[2]; 2151 unsigned int mks_gen; 2152 }; 2153 2154 int 2155 myx_kstat_read(struct kstat *ks) 2156 { 2157 struct myx_softc *sc = ks->ks_softc; 2158 struct myx_kstats *mk = ks->ks_data; 2159 struct myx_kstat_state *mks = ks->ks_ptr; 2160 unsigned int gen = (mks->mks_gen++ & 1); 2161 struct myx_kstat_cache *omkc = &mks->mks_caches[gen]; 2162 struct myx_kstat_cache *nmkc = &mks->mks_caches[!gen]; 2163 unsigned int i = 0; 2164 2165 volatile struct myx_status *sts = sc->sc_sts; 2166 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 2167 2168 if (sc->sc_sts == NULL) 2169 
return (0); /* counters are valid, just not updated */ 2170 2171 getnanouptime(&ks->ks_updated); 2172 2173 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2174 BUS_DMASYNC_POSTREAD); 2175 for (i = 0; i < myx_ncounters; i++) { 2176 const struct myx_counter *mc = &myx_counters[i]; 2177 nmkc->mkc_counters[i] = 2178 bemtoh32((uint32_t *)((uint8_t *)sts + mc->mc_offset)); 2179 } 2180 2181 kstat_kv_u32(&mk->mk_rdma_tags_available) = 2182 bemtoh32(&sts->ms_rdmatags_available); 2183 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2184 BUS_DMASYNC_PREREAD); 2185 2186 for (i = 0; i < myx_ncounters; i++) { 2187 kstat_kv_u64(&mk->mk_counters[i]) += 2188 nmkc->mkc_counters[i] - omkc->mkc_counters[i]; 2189 } 2190 2191 return (0); 2192 } 2193 2194 void 2195 myx_kstat_tick(void *arg) 2196 { 2197 struct myx_softc *sc = arg; 2198 2199 if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)) 2200 return; 2201 2202 timeout_add_sec(&sc->sc_kstat_tmo, 4); 2203 2204 if (!mtx_enter_try(&sc->sc_kstat_mtx)) 2205 return; 2206 2207 myx_kstat_read(sc->sc_kstat); 2208 2209 mtx_leave(&sc->sc_kstat_mtx); 2210 } 2211 2212 void 2213 myx_kstat_start(struct myx_softc *sc) 2214 { 2215 if (sc->sc_kstat == NULL) 2216 return; 2217 2218 myx_kstat_tick(sc); 2219 } 2220 2221 void 2222 myx_kstat_stop(struct myx_softc *sc) 2223 { 2224 struct myx_kstat_state *mks; 2225 2226 if (sc->sc_kstat == NULL) 2227 return; 2228 2229 timeout_del_barrier(&sc->sc_kstat_tmo); 2230 2231 mks = sc->sc_kstat->ks_ptr; 2232 2233 mtx_enter(&sc->sc_kstat_mtx); 2234 memset(mks, 0, sizeof(*mks)); 2235 mtx_leave(&sc->sc_kstat_mtx); 2236 } 2237 2238 void 2239 myx_kstat_attach(struct myx_softc *sc) 2240 { 2241 struct kstat *ks; 2242 struct myx_kstats *mk; 2243 struct myx_kstat_state *mks; 2244 unsigned int i; 2245 2246 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK); 2247 timeout_set(&sc->sc_kstat_tmo, myx_kstat_tick, sc); 2248 2249 ks = kstat_create(DEVNAME(sc), 0, "myx-stats", 0, KSTAT_T_KV, 0); 2250 if (ks == NULL) 2251 return; 2252 
2253 mk = malloc(sizeof(*mk), M_DEVBUF, M_WAITOK|M_ZERO); 2254 for (i = 0; i < myx_ncounters; i++) { 2255 const struct myx_counter *mc = &myx_counters[i]; 2256 2257 kstat_kv_unit_init(&mk->mk_counters[i], mc->mc_name, 2258 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); 2259 } 2260 kstat_kv_init(&mk->mk_rdma_tags_available, "rdma tags free", 2261 KSTAT_KV_T_UINT32); 2262 2263 mks = malloc(sizeof(*mks), M_DEVBUF, M_WAITOK|M_ZERO); 2264 /* these start at 0 */ 2265 2266 kstat_set_mutex(ks, &sc->sc_kstat_mtx); 2267 ks->ks_data = mk; 2268 ks->ks_datalen = sizeof(*mk); 2269 ks->ks_read = myx_kstat_read; 2270 ks->ks_ptr = mks; 2271 2272 ks->ks_softc = sc; 2273 sc->sc_kstat = ks; 2274 kstat_install(ks); 2275 } 2276 #endif /* NKSTAT > 0 */ 2277