1 /* $OpenBSD: if_se.c,v 1.21 2020/07/10 13:26:38 patrick Exp $ */ 2 3 /*- 4 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de> 5 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com> 6 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 7 * Copyright (c) 1997, 1998, 1999 8 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by Bill Paul. 21 * 4. Neither the name of the author nor the names of any co-contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 28 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 29 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 30 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 34 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 36 * OF THE POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * SiS 190/191 PCI Ethernet NIC driver. 41 * 42 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 43 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 44 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 45 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 46 * review and very useful comments. 47 * 48 * Ported to OpenBSD by Christopher Zimmermann 2009/10 49 * 50 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 51 * Linux and Solaris drivers. 52 */ 53 54 #include "bpfilter.h" 55 56 #include <sys/param.h> 57 #include <sys/systm.h> 58 #include <sys/device.h> 59 #include <sys/ioctl.h> 60 #include <sys/kernel.h> 61 #include <sys/mbuf.h> 62 #include <sys/socket.h> 63 #include <sys/sockio.h> 64 #include <sys/timeout.h> 65 66 #include <net/if.h> 67 #include <net/if_media.h> 68 69 #include <netinet/in.h> 70 #include <netinet/if_ether.h> 71 72 #if NBPFILTER > 0 73 #include <net/bpf.h> 74 #endif 75 76 #include <dev/mii/miivar.h> 77 78 #include <dev/pci/pcidevs.h> 79 #include <dev/pci/pcireg.h> 80 #include <dev/pci/pcivar.h> 81 82 #include <dev/pci/if_sereg.h> 83 84 #define SE_RX_RING_CNT 256 /* [8, 1024] */ 85 #define SE_TX_RING_CNT 256 /* [8, 8192] */ 86 #define SE_RX_BUF_ALIGN sizeof(uint64_t) 87 88 #define SE_RX_RING_SZ (SE_RX_RING_CNT * sizeof(struct se_desc)) 89 #define SE_TX_RING_SZ (SE_TX_RING_CNT * sizeof(struct se_desc)) 90 91 struct se_list_data { 92 struct se_desc *se_rx_ring; 93 struct se_desc *se_tx_ring; 94 bus_dmamap_t se_rx_dmamap; 95 bus_dmamap_t se_tx_dmamap; 96 }; 97 98 struct se_chain_data { 99 struct mbuf *se_rx_mbuf[SE_RX_RING_CNT]; 100 struct mbuf *se_tx_mbuf[SE_TX_RING_CNT]; 101 bus_dmamap_t se_rx_map[SE_RX_RING_CNT]; 102 bus_dmamap_t se_tx_map[SE_TX_RING_CNT]; 103 uint se_rx_prod; 104 uint se_tx_prod; 105 uint se_tx_cons; 106 uint se_tx_cnt; 107 }; 108 109 struct se_softc { 110 struct device sc_dev; 111 void *sc_ih; 112 bus_space_tag_t sc_iot; 113 bus_space_handle_t sc_ioh; 114 bus_dma_tag_t sc_dmat; 115 116 struct mii_data sc_mii; 117 struct arpcom sc_ac; 118 119 struct se_list_data se_ldata; 120 struct se_chain_data se_cdata; 121 122 struct timeout sc_tick_tmo; 123 124 int sc_flags; 125 #define SE_FLAG_FASTETHER 0x0001 126 #define SE_FLAG_RGMII 0x0010 127 #define SE_FLAG_LINK 0x8000 128 }; 129 130 /* 131 * Various supported device vendors/types and their names. 132 */ 133 const struct pci_matchid se_devices[] = { 134 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 }, 135 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 } 136 }; 137 138 int se_match(struct device *, void *, void *); 139 void se_attach(struct device *, struct device *, void *); 140 int se_activate(struct device *, int); 141 142 const struct cfattach se_ca = { 143 sizeof(struct se_softc), 144 se_match, se_attach, NULL, se_activate 145 }; 146 147 struct cfdriver se_cd = { 148 0, "se", DV_IFNET 149 }; 150 151 uint32_t 152 se_miibus_cmd(struct se_softc *, uint32_t); 153 int se_miibus_readreg(struct device *, int, int); 154 void se_miibus_writereg(struct device *, int, int, int); 155 void se_miibus_statchg(struct device *); 156 157 int se_newbuf(struct se_softc *, uint); 158 void se_discard_rxbuf(struct se_softc *, uint); 159 int se_encap(struct se_softc *, struct mbuf *, uint *); 160 void se_rxeof(struct se_softc *); 161 void se_txeof(struct se_softc *); 162 int se_intr(void *); 163 void se_tick(void *); 164 void se_start(struct ifnet *); 165 int se_ioctl(struct ifnet *, u_long, caddr_t); 166 int se_init(struct ifnet *); 167 void se_stop(struct se_softc *); 168 void se_watchdog(struct ifnet *); 169 int se_ifmedia_upd(struct ifnet *); 170 void se_ifmedia_sts(struct ifnet *, struct ifmediareq *); 171 172 int se_pcib_match(struct pci_attach_args *); 173 int se_get_mac_addr_apc(struct se_softc *, uint8_t *); 174 int se_get_mac_addr_eeprom(struct se_softc *, uint8_t *); 175 uint16_t 176 se_read_eeprom(struct se_softc *, int); 177 178 void se_iff(struct se_softc *); 179 void se_reset(struct se_softc *); 180 int se_list_rx_init(struct se_softc *); 181 int se_list_rx_free(struct se_softc *); 182 int se_list_tx_init(struct se_softc *); 183 int se_list_tx_free(struct se_softc *); 184 185 /* 186 * Register space access macros. 187 */ 188 189 #define CSR_WRITE_4(sc, reg, val) \ 190 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val) 191 #define CSR_WRITE_2(sc, reg, val) \ 192 bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val) 193 #define CSR_WRITE_1(sc, reg, val) \ 194 bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val) 195 196 #define CSR_READ_4(sc, reg) \ 197 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 198 #define CSR_READ_2(sc, reg) \ 199 bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg) 200 #define CSR_READ_1(sc, reg) \ 201 bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg) 202 203 /* 204 * Read a sequence of words from the EEPROM. 205 */ 206 uint16_t 207 se_read_eeprom(struct se_softc *sc, int offset) 208 { 209 uint32_t val; 210 int i; 211 212 KASSERT(offset <= EI_OFFSET); 213 214 CSR_WRITE_4(sc, ROMInterface, 215 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 216 DELAY(500); 217 for (i = 0; i < SE_TIMEOUT; i++) { 218 val = CSR_READ_4(sc, ROMInterface); 219 if ((val & EI_REQ) == 0) 220 break; 221 DELAY(100); 222 } 223 if (i == SE_TIMEOUT) { 224 printf("%s: EEPROM read timeout: 0x%08x\n", 225 sc->sc_dev.dv_xname, val); 226 return 0xffff; 227 } 228 229 return (val & EI_DATA) >> EI_DATA_SHIFT; 230 } 231 232 int 233 se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest) 234 { 235 uint16_t val; 236 int i; 237 238 val = se_read_eeprom(sc, EEPROMSignature); 239 if (val == 0xffff || val == 0x0000) { 240 printf("%s: invalid EEPROM signature : 0x%04x\n", 241 sc->sc_dev.dv_xname, val); 242 return (EINVAL); 243 } 244 245 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 246 val = se_read_eeprom(sc, EEPROMMACAddr + i / 2); 247 dest[i + 0] = (uint8_t)val; 248 dest[i + 1] = (uint8_t)(val >> 8); 249 } 250 251 if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 252 sc->sc_flags |= SE_FLAG_RGMII; 253 return (0); 254 } 255 256 /* 257 * For SiS96x, APC CMOS RAM is used to store Ethernet address. 258 * APC CMOS RAM is accessed through ISA bridge. 259 */ 260 #if defined(__amd64__) || defined(__i386__) 261 int 262 se_pcib_match(struct pci_attach_args *pa) 263 { 264 const struct pci_matchid apc_devices[] = { 265 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 }, 266 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 }, 267 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 } 268 }; 269 270 return pci_matchbyid(pa, apc_devices, nitems(apc_devices)); 271 } 272 #endif 273 274 int 275 se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest) 276 { 277 #if defined(__amd64__) || defined(__i386__) 278 struct pci_attach_args pa; 279 pcireg_t reg; 280 bus_space_handle_t ioh; 281 int rc, i; 282 283 if (pci_find_device(&pa, se_pcib_match) == 0) { 284 printf("\n%s: couldn't find PCI-ISA bridge\n", 285 sc->sc_dev.dv_xname); 286 return EINVAL; 287 } 288 289 /* Enable port 0x78 and 0x79 to access APC registers. */ 290 reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48); 291 pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02); 292 DELAY(50); 293 (void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48); 294 295 /* XXX this abuses bus_space implementation knowledge */ 296 rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh); 297 if (rc == 0) { 298 /* Read stored Ethernet address. */ 299 for (i = 0; i < ETHER_ADDR_LEN; i++) { 300 bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i); 301 dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1); 302 } 303 bus_space_write_1(pa.pa_iot, ioh, 0, 0x12); 304 if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0) 305 sc->sc_flags |= SE_FLAG_RGMII; 306 _bus_space_unmap(pa.pa_iot, ioh, 2, NULL); 307 } else 308 rc = EINVAL; 309 310 /* Restore access to APC registers. */ 311 pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg); 312 313 return rc; 314 #endif 315 return EINVAL; 316 } 317 318 uint32_t 319 se_miibus_cmd(struct se_softc *sc, uint32_t ctrl) 320 { 321 int i; 322 uint32_t val; 323 324 CSR_WRITE_4(sc, GMIIControl, ctrl); 325 DELAY(10); 326 for (i = 0; i < SE_TIMEOUT; i++) { 327 val = CSR_READ_4(sc, GMIIControl); 328 if ((val & GMI_REQ) == 0) 329 return val; 330 DELAY(10); 331 } 332 333 return GMI_REQ; 334 } 335 336 int 337 se_miibus_readreg(struct device *self, int phy, int reg) 338 { 339 struct se_softc *sc = (struct se_softc *)self; 340 uint32_t ctrl, val; 341 342 ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) | 343 GMI_OP_RD | GMI_REQ; 344 val = se_miibus_cmd(sc, ctrl); 345 if ((val & GMI_REQ) != 0) { 346 printf("%s: PHY read timeout : %d\n", 347 sc->sc_dev.dv_xname, reg); 348 return 0; 349 } 350 return (val & GMI_DATA) >> GMI_DATA_SHIFT; 351 } 352 353 void 354 se_miibus_writereg(struct device *self, int phy, int reg, int data) 355 { 356 struct se_softc *sc = (struct se_softc *)self; 357 uint32_t ctrl, val; 358 359 ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) | 360 GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ; 361 val = se_miibus_cmd(sc, ctrl); 362 if ((val & GMI_REQ) != 0) { 363 printf("%s: PHY write timeout : %d\n", 364 sc->sc_dev.dv_xname, reg); 365 } 366 } 367 368 void 369 se_miibus_statchg(struct device *self) 370 { 371 struct se_softc *sc = (struct se_softc *)self; 372 #ifdef SE_DEBUG 373 struct ifnet *ifp = &sc->sc_ac.ac_if; 374 #endif 375 struct mii_data *mii = &sc->sc_mii; 376 uint32_t ctl, speed; 377 378 speed = 0; 379 sc->sc_flags &= ~SE_FLAG_LINK; 380 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 381 (IFM_ACTIVE | IFM_AVALID)) { 382 switch (IFM_SUBTYPE(mii->mii_media_active)) { 383 case IFM_10_T: 384 #ifdef SE_DEBUG 385 if (ifp->if_flags & IFF_DEBUG) 386 printf("%s: 10baseT link\n", ifp->if_xname); 387 #endif 388 sc->sc_flags |= SE_FLAG_LINK; 389 speed = SC_SPEED_10; 390 break; 391 case IFM_100_TX: 392 #ifdef SE_DEBUG 393 if (ifp->if_flags & IFF_DEBUG) 394 printf("%s: 100baseTX link\n", ifp->if_xname); 395 #endif 396 sc->sc_flags |= SE_FLAG_LINK; 397 speed = SC_SPEED_100; 398 break; 399 case IFM_1000_T: 400 #ifdef SE_DEBUG 401 if (ifp->if_flags & IFF_DEBUG) 402 printf("%s: 1000baseT link\n", ifp->if_xname); 403 #endif 404 if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) { 405 sc->sc_flags |= SE_FLAG_LINK; 406 speed = SC_SPEED_1000; 407 } 408 break; 409 default: 410 break; 411 } 412 } 413 if ((sc->sc_flags & SE_FLAG_LINK) == 0) { 414 #ifdef SE_DEBUG 415 if (ifp->if_flags & IFF_DEBUG) 416 printf("%s: no link\n", ifp->if_xname); 417 #endif 418 return; 419 } 420 /* Reprogram MAC to resolved speed/duplex/flow-control paramters. */ 421 ctl = CSR_READ_4(sc, StationControl); 422 ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK); 423 if (speed == SC_SPEED_1000) 424 ctl |= 0x07000000; 425 else 426 ctl |= 0x04000000; 427 #ifdef notyet 428 if ((sc->sc_flags & SE_FLAG_GMII) != 0) 429 ctl |= 0x03000000; 430 #endif 431 ctl |= speed; 432 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 433 ctl |= SC_FDX; 434 CSR_WRITE_4(sc, StationControl, ctl); 435 if ((sc->sc_flags & SE_FLAG_RGMII) != 0) { 436 CSR_WRITE_4(sc, RGMIIDelay, 0x0441); 437 CSR_WRITE_4(sc, RGMIIDelay, 0x0440); 438 } 439 } 440 441 void 442 se_iff(struct se_softc *sc) 443 { 444 struct arpcom *ac = &sc->sc_ac; 445 struct ifnet *ifp = &ac->ac_if; 446 struct ether_multi *enm; 447 struct ether_multistep step; 448 uint32_t crc, hashes[2]; 449 uint16_t rxfilt; 450 451 rxfilt = CSR_READ_2(sc, RxMacControl); 452 rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast); 453 ifp->if_flags &= ~IFF_ALLMULTI; 454 455 /* 456 * Always accept broadcast frames. 457 * Always accept frames destined to our station address. 458 */ 459 rxfilt |= AcceptBroadcast | AcceptMyPhys; 460 461 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 462 ifp->if_flags |= IFF_ALLMULTI; 463 if (ifp->if_flags & IFF_PROMISC) 464 rxfilt |= AcceptAllPhys; 465 rxfilt |= AcceptMulticast; 466 hashes[0] = hashes[1] = 0xffffffff; 467 } else { 468 rxfilt |= AcceptMulticast; 469 hashes[0] = hashes[1] = 0; 470 471 ETHER_FIRST_MULTI(step, ac, enm); 472 while (enm != NULL) { 473 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 474 475 hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 476 477 ETHER_NEXT_MULTI(step, enm); 478 } 479 } 480 481 CSR_WRITE_2(sc, RxMacControl, rxfilt); 482 CSR_WRITE_4(sc, RxHashTable, hashes[0]); 483 CSR_WRITE_4(sc, RxHashTable2, hashes[1]); 484 } 485 486 void 487 se_reset(struct se_softc *sc) 488 { 489 CSR_WRITE_4(sc, IntrMask, 0); 490 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 491 492 /* Soft reset. */ 493 CSR_WRITE_4(sc, IntrControl, 0x8000); 494 CSR_READ_4(sc, IntrControl); 495 DELAY(100); 496 CSR_WRITE_4(sc, IntrControl, 0); 497 /* Stop MAC. */ 498 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 499 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 500 501 CSR_WRITE_4(sc, IntrMask, 0); 502 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 503 504 CSR_WRITE_4(sc, GMIIControl, 0); 505 } 506 507 /* 508 * Probe for an SiS chip. Check the PCI vendor and device 509 * IDs against our list and return a device name if we find a match. 510 */ 511 int 512 se_match(struct device *parent, void *match, void *aux) 513 { 514 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 515 516 return pci_matchbyid(pa, se_devices, nitems(se_devices)); 517 } 518 519 /* 520 * Attach the interface. Do ifmedia setup and ethernet/BPF attach. 521 */ 522 void 523 se_attach(struct device *parent, struct device *self, void *aux) 524 { 525 struct se_softc *sc = (struct se_softc *)self; 526 struct arpcom *ac = &sc->sc_ac; 527 struct ifnet *ifp = &ac->ac_if; 528 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 529 uint8_t eaddr[ETHER_ADDR_LEN]; 530 const char *intrstr; 531 pci_intr_handle_t ih; 532 bus_size_t iosize; 533 bus_dma_segment_t seg; 534 struct se_list_data *ld; 535 struct se_chain_data *cd; 536 int nseg; 537 uint i; 538 int rc; 539 540 printf(": "); 541 542 /* 543 * Map control/status registers. 544 */ 545 546 rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0, 547 &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0); 548 if (rc != 0) { 549 printf("can't map i/o space\n"); 550 return; 551 } 552 553 if (pci_intr_map(pa, &ih)) { 554 printf("can't map interrupt\n"); 555 goto fail1; 556 } 557 intrstr = pci_intr_string(pa->pa_pc, ih); 558 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc, 559 self->dv_xname); 560 if (sc->sc_ih == NULL) { 561 printf("can't establish interrupt"); 562 if (intrstr != NULL) 563 printf(" at %s", intrstr); 564 printf("\n"); 565 goto fail1; 566 } 567 568 printf("%s", intrstr); 569 570 if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190)) 571 sc->sc_flags |= SE_FLAG_FASTETHER; 572 573 /* Reset the adapter. */ 574 se_reset(sc); 575 576 /* Get MAC address from the EEPROM. */ 577 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0) 578 se_get_mac_addr_apc(sc, eaddr); 579 else 580 se_get_mac_addr_eeprom(sc, eaddr); 581 printf(", address %s\n", ether_sprintf(eaddr)); 582 bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN); 583 584 /* 585 * Now do all the DMA mapping stuff 586 */ 587 588 sc->sc_dmat = pa->pa_dmat; 589 ld = &sc->se_ldata; 590 cd = &sc->se_cdata; 591 592 /* First create TX/RX busdma maps. */ 593 for (i = 0; i < SE_RX_RING_CNT; i++) { 594 rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 595 0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]); 596 if (rc != 0) { 597 printf("%s: cannot init the RX map array\n", 598 self->dv_xname); 599 goto fail2; 600 } 601 } 602 603 for (i = 0; i < SE_TX_RING_CNT; i++) { 604 rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 605 0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]); 606 if (rc != 0) { 607 printf("%s: cannot init the TX map array\n", 608 self->dv_xname); 609 goto fail2; 610 } 611 } 612 613 /* 614 * Now allocate a chunk of DMA-able memory for RX and TX ring 615 * descriptors, as a contiguous block of memory. 616 * XXX fix deallocation upon error 617 */ 618 619 /* RX */ 620 rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0, 621 &seg, 1, &nseg, BUS_DMA_NOWAIT); 622 if (rc != 0) { 623 printf("%s: no memory for RX descriptors\n", self->dv_xname); 624 goto fail2; 625 } 626 627 rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ, 628 (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT); 629 if (rc != 0) { 630 printf("%s: can't map RX descriptors\n", self->dv_xname); 631 goto fail2; 632 } 633 634 rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1, 635 SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap); 636 if (rc != 0) { 637 printf("%s: can't alloc RX DMA map\n", self->dv_xname); 638 goto fail2; 639 } 640 641 rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap, 642 (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT); 643 if (rc != 0) { 644 printf("%s: can't load RX DMA map\n", self->dv_xname); 645 bus_dmamem_unmap(sc->sc_dmat, 646 (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ); 647 bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap); 648 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 649 goto fail2; 650 } 651 652 /* TX */ 653 rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0, 654 &seg, 1, &nseg, BUS_DMA_NOWAIT); 655 if (rc != 0) { 656 printf("%s: no memory for TX descriptors\n", self->dv_xname); 657 goto fail2; 658 } 659 660 rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ, 661 (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT); 662 if (rc != 0) { 663 printf("%s: can't map TX descriptors\n", self->dv_xname); 664 goto fail2; 665 } 666 667 rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1, 668 SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap); 669 if (rc != 0) { 670 printf("%s: can't alloc TX DMA map\n", self->dv_xname); 671 goto fail2; 672 } 673 674 rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap, 675 (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT); 676 if (rc != 0) { 677 printf("%s: can't load TX DMA map\n", self->dv_xname); 678 bus_dmamem_unmap(sc->sc_dmat, 679 (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ); 680 bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap); 681 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 682 goto fail2; 683 } 684 685 timeout_set(&sc->sc_tick_tmo, se_tick, sc); 686 687 ifp = &sc->sc_ac.ac_if; 688 ifp->if_softc = sc; 689 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 690 ifp->if_ioctl = se_ioctl; 691 ifp->if_start = se_start; 692 ifp->if_watchdog = se_watchdog; 693 ifq_set_maxlen(&ifp->if_snd, SE_TX_RING_CNT - 1); 694 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 695 696 ifp->if_capabilities = IFCAP_VLAN_MTU; 697 698 /* 699 * Do MII setup. 700 */ 701 702 sc->sc_mii.mii_ifp = ifp; 703 sc->sc_mii.mii_readreg = se_miibus_readreg; 704 sc->sc_mii.mii_writereg = se_miibus_writereg; 705 sc->sc_mii.mii_statchg = se_miibus_statchg; 706 ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd, 707 se_ifmedia_sts); 708 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 709 MII_OFFSET_ANY, 0); 710 711 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 712 /* No PHY attached */ 713 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 714 0, NULL); 715 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 716 } else 717 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 718 719 /* 720 * Call MI attach routine. 721 */ 722 if_attach(ifp); 723 ether_ifattach(ifp); 724 725 return; 726 727 fail2: 728 pci_intr_disestablish(pa->pa_pc, sc->sc_ih); 729 fail1: 730 bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize); 731 } 732 733 int 734 se_activate(struct device *self, int act) 735 { 736 struct se_softc *sc = (struct se_softc *)self; 737 struct ifnet *ifp = &sc->sc_ac.ac_if; 738 int rv = 0; 739 740 switch (act) { 741 case DVACT_SUSPEND: 742 if (ifp->if_flags & IFF_RUNNING) 743 se_stop(sc); 744 rv = config_activate_children(self, act); 745 break; 746 case DVACT_RESUME: 747 if (ifp->if_flags & IFF_UP) 748 (void)se_init(ifp); 749 break; 750 default: 751 rv = config_activate_children(self, act); 752 break; 753 } 754 755 return (rv); 756 } 757 758 /* 759 * Initialize the TX descriptors. 760 */ 761 int 762 se_list_tx_init(struct se_softc *sc) 763 { 764 struct se_list_data *ld = &sc->se_ldata; 765 struct se_chain_data *cd = &sc->se_cdata; 766 767 bzero(ld->se_tx_ring, SE_TX_RING_SZ); 768 ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END); 769 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ, 770 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 771 cd->se_tx_prod = 0; 772 cd->se_tx_cons = 0; 773 cd->se_tx_cnt = 0; 774 775 return 0; 776 } 777 778 int 779 se_list_tx_free(struct se_softc *sc) 780 { 781 struct se_chain_data *cd = &sc->se_cdata; 782 uint i; 783 784 for (i = 0; i < SE_TX_RING_CNT; i++) { 785 if (cd->se_tx_mbuf[i] != NULL) { 786 bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]); 787 m_free(cd->se_tx_mbuf[i]); 788 cd->se_tx_mbuf[i] = NULL; 789 } 790 } 791 792 return 0; 793 } 794 795 /* 796 * Initialize the RX descriptors and allocate mbufs for them. 797 */ 798 int 799 se_list_rx_init(struct se_softc *sc) 800 { 801 struct se_list_data *ld = &sc->se_ldata; 802 struct se_chain_data *cd = &sc->se_cdata; 803 uint i; 804 805 bzero(ld->se_rx_ring, SE_RX_RING_SZ); 806 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ, 807 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 808 for (i = 0; i < SE_RX_RING_CNT; i++) { 809 if (se_newbuf(sc, i) != 0) 810 return ENOBUFS; 811 } 812 813 cd->se_rx_prod = 0; 814 815 return 0; 816 } 817 818 int 819 se_list_rx_free(struct se_softc *sc) 820 { 821 struct se_chain_data *cd = &sc->se_cdata; 822 uint i; 823 824 for (i = 0; i < SE_RX_RING_CNT; i++) { 825 if (cd->se_rx_mbuf[i] != NULL) { 826 bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]); 827 m_free(cd->se_rx_mbuf[i]); 828 cd->se_rx_mbuf[i] = NULL; 829 } 830 } 831 832 return 0; 833 } 834 835 /* 836 * Initialize an RX descriptor and attach an MBUF cluster. 837 */ 838 int 839 se_newbuf(struct se_softc *sc, uint i) 840 { 841 #ifdef SE_DEBUG 842 struct ifnet *ifp = &sc->sc_ac.ac_if; 843 #endif 844 struct se_list_data *ld = &sc->se_ldata; 845 struct se_chain_data *cd = &sc->se_cdata; 846 struct se_desc *desc; 847 struct mbuf *m; 848 int rc; 849 850 m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 851 if (m == NULL) { 852 #ifdef SE_DEBUG 853 if (ifp->if_flags & IFF_DEBUG) 854 printf("%s: MCLGETI failed\n", ifp->if_xname); 855 #endif 856 return ENOBUFS; 857 } 858 m->m_len = m->m_pkthdr.len = MCLBYTES; 859 m_adj(m, SE_RX_BUF_ALIGN); 860 861 rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i], 862 m, BUS_DMA_NOWAIT); 863 KASSERT(cd->se_rx_map[i]->dm_nsegs == 1); 864 if (rc != 0) { 865 m_freem(m); 866 return ENOBUFS; 867 } 868 bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0, 869 cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD); 870 871 cd->se_rx_mbuf[i] = m; 872 desc = &ld->se_rx_ring[i]; 873 desc->se_sts_size = 0; 874 desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR); 875 desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr); 876 desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len); 877 if (i == SE_RX_RING_CNT - 1) 878 desc->se_flags |= htole32(RING_END); 879 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc), 880 sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 881 882 return 0; 883 } 884 885 void 886 se_discard_rxbuf(struct se_softc *sc, uint i) 887 { 888 struct se_list_data *ld = &sc->se_ldata; 889 struct se_desc *desc; 890 891 desc = &ld->se_rx_ring[i]; 892 desc->se_sts_size = 0; 893 desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR); 894 desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN); 895 if (i == SE_RX_RING_CNT - 1) 896 desc->se_flags |= htole32(RING_END); 897 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc), 898 sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 899 } 900 901 /* 902 * A frame has been uploaded: pass the resulting mbuf chain up to 903 * the higher level protocols. 904 */ 905 void 906 se_rxeof(struct se_softc *sc) 907 { 908 struct mbuf *m; 909 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 910 struct ifnet *ifp = &sc->sc_ac.ac_if; 911 struct se_list_data *ld = &sc->se_ldata; 912 struct se_chain_data *cd = &sc->se_cdata; 913 struct se_desc *cur_rx; 914 uint32_t rxinfo, rxstat; 915 uint i; 916 917 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ, 918 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 919 for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) { 920 cur_rx = &ld->se_rx_ring[i]; 921 rxinfo = letoh32(cur_rx->se_cmdsts); 922 if ((rxinfo & RDC_OWN) != 0) 923 break; 924 rxstat = letoh32(cur_rx->se_sts_size); 925 926 /* 927 * If an error occurs, update stats, clear the 928 * status word and leave the mbuf cluster in place: 929 * it should simply get re-used next time this descriptor 930 * comes up in the ring. 931 */ 932 if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 || 933 SE_RX_NSEGS(rxstat) != 1) { 934 /* XXX We don't support multi-segment frames yet. */ 935 if (ifp->if_flags & IFF_DEBUG) 936 printf("%s: rx error %b\n", 937 ifp->if_xname, rxstat, RX_ERR_BITS); 938 se_discard_rxbuf(sc, i); 939 ifp->if_ierrors++; 940 continue; 941 } 942 943 /* No errors; receive the packet. */ 944 bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0, 945 cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD); 946 m = cd->se_rx_mbuf[i]; 947 if (se_newbuf(sc, i) != 0) { 948 se_discard_rxbuf(sc, i); 949 ifp->if_iqdrops++; 950 continue; 951 } 952 /* 953 * Account for 10 bytes auto padding which is used 954 * to align IP header on a 32bit boundary. Also note, 955 * CRC bytes are automatically removed by the hardware. 956 */ 957 m->m_data += SE_RX_PAD_BYTES; 958 m->m_pkthdr.len = m->m_len = 959 SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES; 960 961 ml_enqueue(&ml, m); 962 } 963 964 if_input(ifp, &ml); 965 966 cd->se_rx_prod = i; 967 } 968 969 /* 970 * A frame was downloaded to the chip. It's safe for us to clean up 971 * the list buffers. 972 */ 973 974 void 975 se_txeof(struct se_softc *sc) 976 { 977 struct ifnet *ifp = &sc->sc_ac.ac_if; 978 struct se_list_data *ld = &sc->se_ldata; 979 struct se_chain_data *cd = &sc->se_cdata; 980 struct se_desc *cur_tx; 981 uint32_t txstat; 982 uint i; 983 984 /* 985 * Go through our tx list and free mbufs for those 986 * frames that have been transmitted. 987 */ 988 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ, 989 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 990 for (i = cd->se_tx_cons; cd->se_tx_cnt > 0; 991 cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) { 992 cur_tx = &ld->se_tx_ring[i]; 993 txstat = letoh32(cur_tx->se_cmdsts); 994 if ((txstat & TDC_OWN) != 0) 995 break; 996 997 ifq_clr_oactive(&ifp->if_snd); 998 999 if (SE_TX_ERROR(txstat) != 0) { 1000 if (ifp->if_flags & IFF_DEBUG) 1001 printf("%s: tx error %b\n", 1002 ifp->if_xname, txstat, TX_ERR_BITS); 1003 ifp->if_oerrors++; 1004 /* TODO: better error differentiation */ 1005 } 1006 1007 if (cd->se_tx_mbuf[i] != NULL) { 1008 bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0, 1009 cd->se_tx_map[i]->dm_mapsize, 1010 BUS_DMASYNC_POSTWRITE); 1011 bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]); 1012 m_free(cd->se_tx_mbuf[i]); 1013 cd->se_tx_mbuf[i] = NULL; 1014 } 1015 1016 cur_tx->se_sts_size = 0; 1017 cur_tx->se_cmdsts = 0; 1018 cur_tx->se_ptr = 0; 1019 cur_tx->se_flags &= htole32(RING_END); 1020 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 1021 i * sizeof(*cur_tx), sizeof(*cur_tx), 1022 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1023 } 1024 1025 cd->se_tx_cons = i; 1026 if (cd->se_tx_cnt == 0) 1027 ifp->if_timer = 0; 1028 } 1029 1030 void 1031 se_tick(void *xsc) 1032 { 1033 struct se_softc *sc = xsc; 1034 struct mii_data *mii; 1035 struct ifnet *ifp = &sc->sc_ac.ac_if; 1036 int s; 1037 1038 s = splnet(); 1039 mii = &sc->sc_mii; 1040 mii_tick(mii); 1041 if ((sc->sc_flags & SE_FLAG_LINK) == 0) { 1042 se_miibus_statchg(&sc->sc_dev); 1043 if ((sc->sc_flags & SE_FLAG_LINK) != 0 && 1044 !ifq_empty(&ifp->if_snd)) 1045 se_start(ifp); 1046 } 1047 splx(s); 1048 1049 timeout_add_sec(&sc->sc_tick_tmo, 1); 1050 } 1051 1052 int 1053 se_intr(void *arg) 1054 { 1055 struct se_softc *sc = arg; 1056 struct ifnet *ifp = &sc->sc_ac.ac_if; 1057 uint32_t status; 1058 1059 status = CSR_READ_4(sc, IntrStatus); 1060 if (status == 0xffffffff || (status & SE_INTRS) == 0) { 1061 /* Not ours. */ 1062 return 0; 1063 } 1064 /* Ack interrupts/ */ 1065 CSR_WRITE_4(sc, IntrStatus, status); 1066 /* Disable further interrupts. */ 1067 CSR_WRITE_4(sc, IntrMask, 0); 1068 1069 for (;;) { 1070 if ((ifp->if_flags & IFF_RUNNING) == 0) 1071 break; 1072 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) { 1073 se_rxeof(sc); 1074 /* Wakeup Rx MAC. */ 1075 if ((status & INTR_RX_IDLE) != 0) 1076 CSR_WRITE_4(sc, RX_CTL, 1077 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1078 } 1079 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0) 1080 se_txeof(sc); 1081 status = CSR_READ_4(sc, IntrStatus); 1082 if ((status & SE_INTRS) == 0) 1083 break; 1084 /* Ack interrupts. */ 1085 CSR_WRITE_4(sc, IntrStatus, status); 1086 } 1087 1088 if ((ifp->if_flags & IFF_RUNNING) != 0) { 1089 /* Re-enable interrupts */ 1090 CSR_WRITE_4(sc, IntrMask, SE_INTRS); 1091 if (!ifq_empty(&ifp->if_snd)) 1092 se_start(ifp); 1093 } 1094 1095 return 1; 1096 } 1097 1098 /* 1099 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1100 * pointers to the fragment pointers. 1101 */ 1102 int 1103 se_encap(struct se_softc *sc, struct mbuf *m_head, uint32_t *txidx) 1104 { 1105 #ifdef SE_DEBUG 1106 struct ifnet *ifp = &sc->sc_ac.ac_if; 1107 #endif 1108 struct mbuf *m; 1109 struct se_list_data *ld = &sc->se_ldata; 1110 struct se_chain_data *cd = &sc->se_cdata; 1111 struct se_desc *desc; 1112 uint i, cnt = 0; 1113 int rc; 1114 1115 /* 1116 * If there's no way we can send any packets, return now. 1117 */ 1118 if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) { 1119 #ifdef SE_DEBUG 1120 if (ifp->if_flags & IFF_DEBUG) 1121 printf("%s: encap failed, not enough TX desc\n", 1122 ifp->if_xname); 1123 #endif 1124 return ENOBUFS; 1125 } 1126 1127 if (m_defrag(m_head, M_DONTWAIT) != 0) { 1128 #ifdef SE_DEBUG 1129 if (ifp->if_flags & IFF_DEBUG) 1130 printf("%s: m_defrag failed\n", ifp->if_xname); 1131 #endif 1132 return ENOBUFS; /* XXX should not be fatal */ 1133 } 1134 1135 /* 1136 * Start packing the mbufs in this chain into 1137 * the fragment pointers. Stop when we run out 1138 * of fragments or hit the end of the mbuf chain. 1139 */ 1140 i = *txidx; 1141 1142 for (m = m_head; m != NULL; m = m->m_next) { 1143 if (m->m_len == 0) 1144 continue; 1145 if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) { 1146 #ifdef SE_DEBUG 1147 if (ifp->if_flags & IFF_DEBUG) 1148 printf("%s: encap failed, not enough TX desc\n", 1149 ifp->if_xname); 1150 #endif 1151 return ENOBUFS; 1152 } 1153 cd->se_tx_mbuf[i] = m; 1154 rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i], 1155 m, BUS_DMA_NOWAIT); 1156 if (rc != 0) 1157 return ENOBUFS; 1158 KASSERT(cd->se_tx_map[i]->dm_nsegs == 1); 1159 bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0, 1160 cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE); 1161 1162 desc = &ld->se_tx_ring[i]; 1163 desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len); 1164 desc->se_ptr = 1165 htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr); 1166 desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len); 1167 if (i == SE_TX_RING_CNT - 1) 1168 desc->se_flags |= htole32(RING_END); 1169 desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF | 1170 TDC_CRC | TDC_PAD | TDC_BST); 1171 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 1172 i * sizeof(*desc), sizeof(*desc), 1173 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1174 1175 SE_INC(i, SE_TX_RING_CNT); 1176 cnt++; 1177 } 1178 1179 /* can't happen */ 1180 if (m != NULL) 1181 return ENOBUFS; 1182 1183 cd->se_tx_cnt += cnt; 1184 *txidx = i; 1185 1186 return 0; 1187 } 1188 1189 /* 1190 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1191 * to the mbuf data regions directly in the transmit lists. We also save a 1192 * copy of the pointers since the transmit list fragment pointers are 1193 * physical addresses. 1194 */ 1195 void 1196 se_start(struct ifnet *ifp) 1197 { 1198 struct se_softc *sc = ifp->if_softc; 1199 struct mbuf *m_head = NULL; 1200 struct se_chain_data *cd = &sc->se_cdata; 1201 uint i, queued = 0; 1202 1203 if ((sc->sc_flags & SE_FLAG_LINK) == 0 || 1204 !(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) { 1205 #ifdef SE_DEBUG 1206 if (ifp->if_flags & IFF_DEBUG) 1207 printf("%s: can't tx, flags 0x%x 0x%04x\n", 1208 ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags); 1209 #endif 1210 return; 1211 } 1212 1213 i = cd->se_tx_prod; 1214 1215 while (cd->se_tx_mbuf[i] == NULL) { 1216 m_head = ifq_deq_begin(&ifp->if_snd); 1217 if (m_head == NULL) 1218 break; 1219 1220 if (se_encap(sc, m_head, &i) != 0) { 1221 ifq_deq_rollback(&ifp->if_snd, m_head); 1222 ifq_set_oactive(&ifp->if_snd); 1223 break; 1224 } 1225 1226 /* now we are committed to transmit the packet */ 1227 ifq_deq_commit(&ifp->if_snd, m_head); 1228 queued++; 1229 1230 /* 1231 * If there's a BPF listener, bounce a copy of this frame 1232 * to him. 1233 */ 1234 #if NBPFILTER > 0 1235 if (ifp->if_bpf) 1236 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 1237 #endif 1238 } 1239 1240 if (queued > 0) { 1241 /* Transmit */ 1242 cd->se_tx_prod = i; 1243 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL); 1244 ifp->if_timer = 5; 1245 } 1246 } 1247 1248 int 1249 se_init(struct ifnet *ifp) 1250 { 1251 struct se_softc *sc = ifp->if_softc; 1252 uint16_t rxfilt; 1253 int i; 1254 1255 splassert(IPL_NET); 1256 1257 /* 1258 * Cancel pending I/O and free all RX/TX buffers. 1259 */ 1260 se_stop(sc); 1261 se_reset(sc); 1262 1263 /* Init circular RX list. */ 1264 if (se_list_rx_init(sc) == ENOBUFS) { 1265 se_stop(sc); /* XXX necessary? */ 1266 return ENOBUFS; 1267 } 1268 1269 /* Init TX descriptors. */ 1270 se_list_tx_init(sc); 1271 1272 /* 1273 * Load the address of the RX and TX lists. 1274 */ 1275 CSR_WRITE_4(sc, TX_DESC, 1276 (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr); 1277 CSR_WRITE_4(sc, RX_DESC, 1278 (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr); 1279 1280 CSR_WRITE_4(sc, TxMacControl, 0x60); 1281 CSR_WRITE_4(sc, RxWakeOnLan, 0); 1282 CSR_WRITE_4(sc, RxWakeOnLanData, 0); 1283 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + 1284 SE_RX_PAD_BYTES); 1285 1286 for (i = 0; i < ETHER_ADDR_LEN; i++) 1287 CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]); 1288 /* Configure RX MAC. */ 1289 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB; 1290 CSR_WRITE_2(sc, RxMacControl, rxfilt); 1291 1292 /* Program promiscuous mode and multicast filters. */ 1293 se_iff(sc); 1294 1295 /* 1296 * Clear and enable interrupts. 1297 */ 1298 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF); 1299 CSR_WRITE_4(sc, IntrMask, SE_INTRS); 1300 1301 /* Enable receiver and transmitter. */ 1302 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB); 1303 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1304 1305 ifp->if_flags |= IFF_RUNNING; 1306 ifq_clr_oactive(&ifp->if_snd); 1307 1308 sc->sc_flags &= ~SE_FLAG_LINK; 1309 mii_mediachg(&sc->sc_mii); 1310 timeout_add_sec(&sc->sc_tick_tmo, 1); 1311 1312 return 0; 1313 } 1314 1315 /* 1316 * Set media options. 1317 */ 1318 int 1319 se_ifmedia_upd(struct ifnet *ifp) 1320 { 1321 struct se_softc *sc = ifp->if_softc; 1322 struct mii_data *mii; 1323 1324 mii = &sc->sc_mii; 1325 sc->sc_flags &= ~SE_FLAG_LINK; 1326 if (mii->mii_instance) { 1327 struct mii_softc *miisc; 1328 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1329 mii_phy_reset(miisc); 1330 } 1331 return mii_mediachg(mii); 1332 } 1333 1334 /* 1335 * Report current media status. 1336 */ 1337 void 1338 se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1339 { 1340 struct se_softc *sc = ifp->if_softc; 1341 struct mii_data *mii; 1342 1343 mii = &sc->sc_mii; 1344 mii_pollstat(mii); 1345 ifmr->ifm_active = mii->mii_media_active; 1346 ifmr->ifm_status = mii->mii_media_status; 1347 } 1348 1349 int 1350 se_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1351 { 1352 struct se_softc *sc = ifp->if_softc; 1353 struct ifreq *ifr = (struct ifreq *) data; 1354 int s, rc = 0; 1355 1356 s = splnet(); 1357 1358 switch (command) { 1359 case SIOCSIFADDR: 1360 ifp->if_flags |= IFF_UP; 1361 if ((ifp->if_flags & IFF_RUNNING) == 0) 1362 rc = se_init(ifp); 1363 break; 1364 case SIOCSIFFLAGS: 1365 if (ifp->if_flags & IFF_UP) { 1366 if (ifp->if_flags & IFF_RUNNING) 1367 rc = ENETRESET; 1368 else 1369 rc = se_init(ifp); 1370 } else { 1371 if (ifp->if_flags & IFF_RUNNING) 1372 se_stop(sc); 1373 } 1374 break; 1375 case SIOCGIFMEDIA: 1376 case SIOCSIFMEDIA: 1377 rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1378 break; 1379 default: 1380 rc = ether_ioctl(ifp, &sc->sc_ac, command, data); 1381 break; 1382 } 1383 1384 if (rc == ENETRESET) { 1385 if (ifp->if_flags & IFF_RUNNING) 1386 se_iff(sc); 1387 rc = 0; 1388 } 1389 1390 splx(s); 1391 return rc; 1392 } 1393 1394 void 1395 se_watchdog(struct ifnet *ifp) 1396 { 1397 struct se_softc *sc = ifp->if_softc; 1398 int s; 1399 1400 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1401 ifp->if_oerrors++; 1402 1403 s = splnet(); 1404 se_init(ifp); 1405 if (!ifq_empty(&ifp->if_snd)) 1406 se_start(ifp); 1407 splx(s); 1408 } 1409 1410 /* 1411 * Stop the adapter and free any mbufs allocated to the 1412 * RX and TX lists. 1413 */ 1414 void 1415 se_stop(struct se_softc *sc) 1416 { 1417 struct ifnet *ifp = &sc->sc_ac.ac_if; 1418 1419 ifp->if_timer = 0; 1420 ifp->if_flags &= ~IFF_RUNNING; 1421 ifq_clr_oactive(&ifp->if_snd); 1422 timeout_del(&sc->sc_tick_tmo); 1423 mii_down(&sc->sc_mii); 1424 1425 CSR_WRITE_4(sc, IntrMask, 0); 1426 CSR_READ_4(sc, IntrMask); 1427 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1428 /* Stop TX/RX MAC. */ 1429 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 1430 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 1431 /* XXX Can we assume active DMA cycles gone? */ 1432 DELAY(2000); 1433 CSR_WRITE_4(sc, IntrMask, 0); 1434 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1435 1436 sc->sc_flags &= ~SE_FLAG_LINK; 1437 se_list_rx_free(sc); 1438 se_list_tx_free(sc); 1439 } 1440