1 /* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * $FreeBSD: src/sys/pci/if_dc.c,v 1.9.2.45 2003/06/08 14:31:53 mux Exp $ 33 */ 34 35 /* 36 * DEC "tulip" clone ethernet driver. 
Supports the DEC/Intel 21143 37 * series chips and several workalikes including the following: 38 * 39 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 40 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 41 * Lite-On 82c168/82c169 PNIC (www.litecom.com) 42 * ASIX Electronics AX88140A (www.asix.com.tw) 43 * ASIX Electronics AX88141 (www.asix.com.tw) 44 * ADMtek AL981 (www.admtek.com.tw) 45 * ADMtek AN985 (www.admtek.com.tw) 46 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985 47 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 48 * Accton EN1217 (www.accton.com) 49 * Xircom X3201 (www.xircom.com) 50 * Abocom FE2500 51 * Conexant LANfinity (www.conexant.com) 52 * 53 * Datasheets for the 21143 are available at developer.intel.com. 54 * Datasheets for the clone parts can be found at their respective sites. 55 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 56 * The PNIC II is essentially a Macronix 98715A chip; the only difference 57 * worth noting is that its multicast hash table is only 128 bits wide 58 * instead of 512. 59 * 60 * Written by Bill Paul <wpaul@ee.columbia.edu> 61 * Electrical Engineering Department 62 * Columbia University, New York City 63 */ 64 65 /* 66 * The Intel 21143 is the successor to the DEC 21140. It is basically 67 * the same as the 21140 but with a few new features. The 21143 supports 68 * three kinds of media attachments: 69 * 70 * o MII port, for 10Mbps and 100Mbps support and NWAY 71 * autonegotiation provided by an external PHY. 72 * o SYM port, for symbol mode 100Mbps support. 73 * o 10baseT port. 74 * o AUI/BNC port. 75 * 76 * The 100Mbps SYM port and 10baseT port can be used together in 77 * combination with the internal NWAY support to create a 10/100 78 * autosensing configuration. 79 * 80 * Note that not all tulip workalikes are handled in this driver: we only 81 * deal with those which are relatively well behaved. 
The Winbond is 82 * handled separately due to its different register offsets and the 83 * special handling needed for its various bugs. The PNIC is handled 84 * here, but I'm not thrilled about it. 85 * 86 * All of the workalike chips use some form of MII transceiver support 87 * with the exception of the Macronix chips, which also have a SYM port. 88 * The ASIX AX88140A is also documented to have a SYM port, but all 89 * the cards I've seen use an MII transceiver, probably because the 90 * AX88140A doesn't support internal NWAY. 91 */ 92 93 #include "opt_ifpoll.h" 94 95 #include <sys/param.h> 96 #include <sys/systm.h> 97 #include <sys/sockio.h> 98 #include <sys/mbuf.h> 99 #include <sys/malloc.h> 100 #include <sys/kernel.h> 101 #include <sys/interrupt.h> 102 #include <sys/socket.h> 103 #include <sys/sysctl.h> 104 #include <sys/bus.h> 105 #include <sys/rman.h> 106 107 #include <net/if.h> 108 #include <net/ifq_var.h> 109 #include <net/if_arp.h> 110 #include <net/ethernet.h> 111 #include <net/if_dl.h> 112 #include <net/if_media.h> 113 #include <net/if_poll.h> 114 #include <net/if_types.h> 115 #include <net/vlan/if_vlan_var.h> 116 117 #include <net/bpf.h> 118 119 #include <vm/vm.h> /* for vtophys */ 120 #include <vm/pmap.h> /* for vtophys */ 121 122 #include "../mii_layer/mii.h" 123 #include "../mii_layer/miivar.h" 124 125 #include <bus/pci/pcireg.h> 126 #include <bus/pci/pcivar.h> 127 128 #define DC_USEIOSPACE 129 130 #include "if_dcreg.h" 131 132 /* "controller miibus0" required. See GENERIC if you get errors here. */ 133 #include "miibus_if.h" 134 135 /* 136 * Various supported device vendors/types and their names. 
137 */ 138 static const struct dc_type dc_devs[] = { 139 { DC_VENDORID_DEC, DC_DEVICEID_21143, 140 "Intel 21143 10/100BaseTX" }, 141 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, 142 "Davicom DM9009 10/100BaseTX" }, 143 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, 144 "Davicom DM9100 10/100BaseTX" }, 145 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 146 "Davicom DM9102 10/100BaseTX" }, 147 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 148 "Davicom DM9102A 10/100BaseTX" }, 149 { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, 150 "ADMtek AL981 10/100BaseTX" }, 151 { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, 152 "ADMtek AN985 10/100BaseTX" }, 153 { DC_VENDORID_ADMTEK, DC_DEVICEID_FA511, 154 "Netgear FA511 10/100BaseTX" }, 155 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511, 156 "ADMtek ADM9511 10/100BaseTX" }, 157 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513, 158 "ADMtek ADM9513 10/100BaseTX" }, 159 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 160 "ASIX AX88140A 10/100BaseTX" }, 161 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 162 "ASIX AX88141 10/100BaseTX" }, 163 { DC_VENDORID_MX, DC_DEVICEID_98713, 164 "Macronix 98713 10/100BaseTX" }, 165 { DC_VENDORID_MX, DC_DEVICEID_98713, 166 "Macronix 98713A 10/100BaseTX" }, 167 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 168 "Compex RL100-TX 10/100BaseTX" }, 169 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 170 "Compex RL100-TX 10/100BaseTX" }, 171 { DC_VENDORID_MX, DC_DEVICEID_987x5, 172 "Macronix 98715/98715A 10/100BaseTX" }, 173 { DC_VENDORID_MX, DC_DEVICEID_987x5, 174 "Macronix 98715AEC-C 10/100BaseTX" }, 175 { DC_VENDORID_MX, DC_DEVICEID_987x5, 176 "Macronix 98725 10/100BaseTX" }, 177 { DC_VENDORID_MX, DC_DEVICEID_98727, 178 "Macronix 98727/98732 10/100BaseTX" }, 179 { DC_VENDORID_LO, DC_DEVICEID_82C115, 180 "LC82C115 PNIC II 10/100BaseTX" }, 181 { DC_VENDORID_LO, DC_DEVICEID_82C168, 182 "82c168 PNIC 10/100BaseTX" }, 183 { DC_VENDORID_LO, DC_DEVICEID_82C168, 184 "82c169 PNIC 10/100BaseTX" }, 185 { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, 186 "Accton EN1217 
10/100BaseTX" }, 187 { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, 188 "Accton EN2242 MiniPCI 10/100BaseTX" }, 189 { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, 190 "Xircom X3201 10/100BaseTX" }, 191 { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, 192 "Conexant LANfinity MiniPCI 10/100BaseTX" }, 193 { DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB, 194 "3Com OfficeConnect 10/100B" }, 195 { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, 196 "Abocom FE2500 10/100BaseTX" }, 197 { 0, 0, NULL } 198 }; 199 200 static int dc_probe (device_t); 201 static int dc_attach (device_t); 202 static int dc_detach (device_t); 203 static int dc_suspend (device_t); 204 static int dc_resume (device_t); 205 static void dc_acpi (device_t); 206 static const struct dc_type *dc_devtype (device_t); 207 static int dc_newbuf (struct dc_softc *, int, struct mbuf *); 208 static int dc_encap (struct dc_softc *, struct mbuf *, 209 u_int32_t *); 210 static void dc_pnic_rx_bug_war (struct dc_softc *, int); 211 static int dc_rx_resync (struct dc_softc *); 212 static void dc_rxeof (struct dc_softc *); 213 static void dc_txeof (struct dc_softc *); 214 static void dc_tick (void *); 215 static void dc_tx_underrun (struct dc_softc *); 216 static void dc_intr (void *); 217 static void dc_start (struct ifnet *, struct ifaltq_subque *); 218 static int dc_ioctl (struct ifnet *, u_long, caddr_t, 219 struct ucred *); 220 #ifdef IFPOLL_ENABLE 221 static void dc_npoll (struct ifnet *, struct ifpoll_info *); 222 static void dc_npoll_compat (struct ifnet *, void *, int); 223 #endif 224 static void dc_init (void *); 225 static void dc_stop (struct dc_softc *); 226 static void dc_watchdog (struct ifnet *); 227 static void dc_shutdown (device_t); 228 static int dc_ifmedia_upd (struct ifnet *); 229 static void dc_ifmedia_sts (struct ifnet *, struct ifmediareq *); 230 231 static void dc_delay (struct dc_softc *); 232 static void dc_eeprom_idle (struct dc_softc *); 233 static void dc_eeprom_putbyte (struct dc_softc *, int); 234 static void 
dc_eeprom_getword (struct dc_softc *, int, u_int16_t *); 235 static void dc_eeprom_getword_pnic 236 (struct dc_softc *, int, u_int16_t *); 237 static void dc_eeprom_getword_xircom 238 (struct dc_softc *, int, u_int16_t *); 239 static void dc_eeprom_width (struct dc_softc *); 240 static void dc_read_eeprom (struct dc_softc *, caddr_t, int, 241 int, int); 242 243 static void dc_mii_writebit (struct dc_softc *, int); 244 static int dc_mii_readbit (struct dc_softc *); 245 static void dc_mii_sync (struct dc_softc *); 246 static void dc_mii_send (struct dc_softc *, u_int32_t, int); 247 static int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *); 248 static int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *); 249 static int dc_miibus_readreg (device_t, int, int); 250 static int dc_miibus_writereg (device_t, int, int, int); 251 static void dc_miibus_statchg (device_t); 252 static void dc_miibus_mediainit (device_t); 253 254 static u_int32_t dc_crc_mask (struct dc_softc *); 255 static void dc_setcfg (struct dc_softc *, int); 256 static void dc_setfilt_21143 (struct dc_softc *); 257 static void dc_setfilt_asix (struct dc_softc *); 258 static void dc_setfilt_admtek (struct dc_softc *); 259 static void dc_setfilt_xircom (struct dc_softc *); 260 261 static void dc_setfilt (struct dc_softc *); 262 263 static void dc_reset (struct dc_softc *); 264 static int dc_list_rx_init (struct dc_softc *); 265 static int dc_list_tx_init (struct dc_softc *); 266 267 static void dc_read_srom (struct dc_softc *, int); 268 static void dc_parse_21143_srom (struct dc_softc *); 269 static void dc_decode_leaf_sia (struct dc_softc *, 270 struct dc_eblock_sia *); 271 static void dc_decode_leaf_mii (struct dc_softc *, 272 struct dc_eblock_mii *); 273 static void dc_decode_leaf_sym (struct dc_softc *, 274 struct dc_eblock_sym *); 275 static void dc_apply_fixup (struct dc_softc *, int); 276 static uint32_t dc_mchash_xircom(struct dc_softc *, const uint8_t *); 277 278 #ifdef 
DC_USEIOSPACE 279 #define DC_RES SYS_RES_IOPORT 280 #define DC_RID DC_PCI_CFBIO 281 #else 282 #define DC_RES SYS_RES_MEMORY 283 #define DC_RID DC_PCI_CFBMA 284 #endif 285 286 static device_method_t dc_methods[] = { 287 /* Device interface */ 288 DEVMETHOD(device_probe, dc_probe), 289 DEVMETHOD(device_attach, dc_attach), 290 DEVMETHOD(device_detach, dc_detach), 291 DEVMETHOD(device_suspend, dc_suspend), 292 DEVMETHOD(device_resume, dc_resume), 293 DEVMETHOD(device_shutdown, dc_shutdown), 294 295 /* bus interface */ 296 DEVMETHOD(bus_print_child, bus_generic_print_child), 297 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 298 299 /* MII interface */ 300 DEVMETHOD(miibus_readreg, dc_miibus_readreg), 301 DEVMETHOD(miibus_writereg, dc_miibus_writereg), 302 DEVMETHOD(miibus_statchg, dc_miibus_statchg), 303 DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 304 305 DEVMETHOD_END 306 }; 307 308 static driver_t dc_driver = { 309 "dc", 310 dc_methods, 311 sizeof(struct dc_softc) 312 }; 313 314 static devclass_t dc_devclass; 315 316 #ifdef __x86_64__ 317 static int dc_quick=1; 318 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, 319 &dc_quick,0,"do not mdevget in dc driver"); 320 #endif 321 322 DECLARE_DUMMY_MODULE(if_dc); 323 DRIVER_MODULE(if_dc, cardbus, dc_driver, dc_devclass, NULL, NULL); 324 DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, NULL, NULL); 325 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, NULL, NULL); 326 327 #define DC_SETBIT(sc, reg, x) \ 328 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 329 330 #define DC_CLRBIT(sc, reg, x) \ 331 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 332 333 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 334 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 335 336 static void 337 dc_delay(struct dc_softc *sc) 338 { 339 int idx; 340 341 for (idx = (300 / 33) + 1; idx > 0; idx--) 342 CSR_READ_4(sc, DC_BUSCTL); 343 } 344 345 static void 346 dc_eeprom_width(struct dc_softc *sc) 347 { 348 int i; 349 350 /* Force 
EEPROM to idle state. */ 351 dc_eeprom_idle(sc); 352 353 /* Enter EEPROM access mode. */ 354 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 355 dc_delay(sc); 356 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 357 dc_delay(sc); 358 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 359 dc_delay(sc); 360 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 361 dc_delay(sc); 362 363 for (i = 3; i--;) { 364 if (6 & (1 << i)) 365 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 366 else 367 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 368 dc_delay(sc); 369 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 370 dc_delay(sc); 371 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 372 dc_delay(sc); 373 } 374 375 for (i = 1; i <= 12; i++) { 376 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 377 dc_delay(sc); 378 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 379 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 380 dc_delay(sc); 381 break; 382 } 383 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 384 dc_delay(sc); 385 } 386 387 /* Turn off EEPROM access mode. */ 388 dc_eeprom_idle(sc); 389 390 if (i < 4 || i > 12) 391 sc->dc_romwidth = 6; 392 else 393 sc->dc_romwidth = i; 394 395 /* Enter EEPROM access mode. */ 396 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 397 dc_delay(sc); 398 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 399 dc_delay(sc); 400 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 401 dc_delay(sc); 402 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 403 dc_delay(sc); 404 405 /* Turn off EEPROM access mode. 
*/ 406 dc_eeprom_idle(sc); 407 } 408 409 static void 410 dc_eeprom_idle(struct dc_softc *sc) 411 { 412 int i; 413 414 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 415 dc_delay(sc); 416 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 417 dc_delay(sc); 418 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 419 dc_delay(sc); 420 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 421 dc_delay(sc); 422 423 for (i = 0; i < 25; i++) { 424 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 425 dc_delay(sc); 426 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 427 dc_delay(sc); 428 } 429 430 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 431 dc_delay(sc); 432 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 433 dc_delay(sc); 434 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 435 436 return; 437 } 438 439 /* 440 * Send a read command and address to the EEPROM, check for ACK. 441 */ 442 static void 443 dc_eeprom_putbyte(struct dc_softc *sc, int addr) 444 { 445 int d, i; 446 447 d = DC_EECMD_READ >> 6; 448 for (i = 3; i--; ) { 449 if (d & (1 << i)) 450 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 451 else 452 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 453 dc_delay(sc); 454 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 455 dc_delay(sc); 456 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 457 dc_delay(sc); 458 } 459 460 /* 461 * Feed in each bit and strobe the clock. 462 */ 463 for (i = sc->dc_romwidth; i--;) { 464 if (addr & (1 << i)) { 465 SIO_SET(DC_SIO_EE_DATAIN); 466 } else { 467 SIO_CLR(DC_SIO_EE_DATAIN); 468 } 469 dc_delay(sc); 470 SIO_SET(DC_SIO_EE_CLK); 471 dc_delay(sc); 472 SIO_CLR(DC_SIO_EE_CLK); 473 dc_delay(sc); 474 } 475 476 return; 477 } 478 479 /* 480 * Read a word of data stored in the EEPROM at address 'addr.' 481 * The PNIC 82c168/82c169 has its own non-standard way to read 482 * the EEPROM. 
483 */ 484 static void 485 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 486 { 487 int i; 488 u_int32_t r; 489 490 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr); 491 492 for (i = 0; i < DC_TIMEOUT; i++) { 493 DELAY(1); 494 r = CSR_READ_4(sc, DC_SIO); 495 if (!(r & DC_PN_SIOCTL_BUSY)) { 496 *dest = (u_int16_t)(r & 0xFFFF); 497 return; 498 } 499 } 500 501 return; 502 } 503 504 /* 505 * Read a word of data stored in the EEPROM at address 'addr.' 506 * The Xircom X3201 has its own non-standard way to read 507 * the EEPROM, too. 508 */ 509 static void 510 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 511 { 512 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 513 514 addr *= 2; 515 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 516 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff; 517 addr += 1; 518 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 519 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff) << 8; 520 521 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 522 } 523 524 /* 525 * Read a word of data stored in the EEPROM at address 'addr.' 526 */ 527 static void 528 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 529 { 530 int i; 531 u_int16_t word = 0; 532 533 /* Force EEPROM to idle state. */ 534 dc_eeprom_idle(sc); 535 536 /* Enter EEPROM access mode. */ 537 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 538 dc_delay(sc); 539 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 540 dc_delay(sc); 541 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 542 dc_delay(sc); 543 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 544 dc_delay(sc); 545 546 /* 547 * Send address of word we want to read. 548 */ 549 dc_eeprom_putbyte(sc, addr); 550 551 /* 552 * Start reading bits from EEPROM. 553 */ 554 for (i = 0x8000; i; i >>= 1) { 555 SIO_SET(DC_SIO_EE_CLK); 556 dc_delay(sc); 557 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 558 word |= i; 559 dc_delay(sc); 560 SIO_CLR(DC_SIO_EE_CLK); 561 dc_delay(sc); 562 } 563 564 /* Turn off EEPROM access mode. 
*/ 565 dc_eeprom_idle(sc); 566 567 *dest = word; 568 569 return; 570 } 571 572 /* 573 * Read a sequence of words from the EEPROM. 574 */ 575 static void 576 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int swap) 577 { 578 int i; 579 u_int16_t word = 0, *ptr; 580 581 for (i = 0; i < cnt; i++) { 582 if (DC_IS_PNIC(sc)) 583 dc_eeprom_getword_pnic(sc, off + i, &word); 584 else if (DC_IS_XIRCOM(sc)) 585 dc_eeprom_getword_xircom(sc, off + i, &word); 586 else 587 dc_eeprom_getword(sc, off + i, &word); 588 ptr = (u_int16_t *)(dest + (i * 2)); 589 if (swap) 590 *ptr = ntohs(word); 591 else 592 *ptr = word; 593 } 594 595 return; 596 } 597 598 /* 599 * The following two routines are taken from the Macronix 98713 600 * Application Notes pp.19-21. 601 */ 602 /* 603 * Write a bit to the MII bus. 604 */ 605 static void 606 dc_mii_writebit(struct dc_softc *sc, int bit) 607 { 608 if (bit) 609 CSR_WRITE_4(sc, DC_SIO, 610 DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT); 611 else 612 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 613 614 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 615 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 616 617 return; 618 } 619 620 /* 621 * Read a bit from the MII bus. 622 */ 623 static int 624 dc_mii_readbit(struct dc_softc *sc) 625 { 626 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR); 627 CSR_READ_4(sc, DC_SIO); 628 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 629 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 630 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 631 return(1); 632 633 return(0); 634 } 635 636 /* 637 * Sync the PHYs by setting data bit and strobing the clock 32 times. 638 */ 639 static void 640 dc_mii_sync(struct dc_softc *sc) 641 { 642 int i; 643 644 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 645 646 for (i = 0; i < 32; i++) 647 dc_mii_writebit(sc, 1); 648 649 return; 650 } 651 652 /* 653 * Clock a series of bits through the MII. 
654 */ 655 static void 656 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 657 { 658 int i; 659 660 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 661 dc_mii_writebit(sc, bits & i); 662 } 663 664 /* 665 * Read an PHY register through the MII. 666 */ 667 static int 668 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 669 { 670 int ack, i; 671 672 /* 673 * Set up frame for RX. 674 */ 675 frame->mii_stdelim = DC_MII_STARTDELIM; 676 frame->mii_opcode = DC_MII_READOP; 677 frame->mii_turnaround = 0; 678 frame->mii_data = 0; 679 680 /* 681 * Sync the PHYs. 682 */ 683 dc_mii_sync(sc); 684 685 /* 686 * Send command/address info. 687 */ 688 dc_mii_send(sc, frame->mii_stdelim, 2); 689 dc_mii_send(sc, frame->mii_opcode, 2); 690 dc_mii_send(sc, frame->mii_phyaddr, 5); 691 dc_mii_send(sc, frame->mii_regaddr, 5); 692 693 #ifdef notdef 694 /* Idle bit */ 695 dc_mii_writebit(sc, 1); 696 dc_mii_writebit(sc, 0); 697 #endif 698 699 /* Check for ack */ 700 ack = dc_mii_readbit(sc); 701 702 /* 703 * Now try reading data bits. If the ack failed, we still 704 * need to clock through 16 cycles to keep the PHY(s) in sync. 705 */ 706 if (ack) { 707 for(i = 0; i < 16; i++) { 708 dc_mii_readbit(sc); 709 } 710 goto fail; 711 } 712 713 for (i = 0x8000; i; i >>= 1) { 714 if (!ack) { 715 if (dc_mii_readbit(sc)) 716 frame->mii_data |= i; 717 } 718 } 719 720 fail: 721 722 dc_mii_writebit(sc, 0); 723 dc_mii_writebit(sc, 0); 724 725 if (ack) 726 return(1); 727 return(0); 728 } 729 730 /* 731 * Write to a PHY register through the MII. 732 */ 733 static int 734 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 735 { 736 /* 737 * Set up frame for TX. 738 */ 739 740 frame->mii_stdelim = DC_MII_STARTDELIM; 741 frame->mii_opcode = DC_MII_WRITEOP; 742 frame->mii_turnaround = DC_MII_TURNAROUND; 743 744 /* 745 * Sync the PHYs. 
746 */ 747 dc_mii_sync(sc); 748 749 dc_mii_send(sc, frame->mii_stdelim, 2); 750 dc_mii_send(sc, frame->mii_opcode, 2); 751 dc_mii_send(sc, frame->mii_phyaddr, 5); 752 dc_mii_send(sc, frame->mii_regaddr, 5); 753 dc_mii_send(sc, frame->mii_turnaround, 2); 754 dc_mii_send(sc, frame->mii_data, 16); 755 756 /* Idle bit. */ 757 dc_mii_writebit(sc, 0); 758 dc_mii_writebit(sc, 0); 759 760 return(0); 761 } 762 763 static int 764 dc_miibus_readreg(device_t dev, int phy, int reg) 765 { 766 struct dc_mii_frame frame; 767 struct dc_softc *sc; 768 int i, rval, phy_reg = 0; 769 770 sc = device_get_softc(dev); 771 bzero((char *)&frame, sizeof(frame)); 772 773 /* 774 * Note: both the AL981 and AN985 have internal PHYs, 775 * however the AL981 provides direct access to the PHY 776 * registers while the AN985 uses a serial MII interface. 777 * The AN985's MII interface is also buggy in that you 778 * can read from any MII address (0 to 31), but only address 1 779 * behaves normally. To deal with both cases, we pretend 780 * that the PHY is at MII address 1. 781 */ 782 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 783 return(0); 784 785 /* 786 * Note: the ukphy probes of the RS7112 report a PHY at 787 * MII address 0 (possibly HomePNA?) and 1 (ethernet) 788 * so we only respond to correct one. 789 */ 790 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 791 return(0); 792 793 if (sc->dc_pmode != DC_PMODE_MII) { 794 if (phy == (MII_NPHY - 1)) { 795 switch(reg) { 796 case MII_BMSR: 797 /* 798 * Fake something to make the probe 799 * code think there's a PHY here. 
800 */ 801 return(BMSR_MEDIAMASK); 802 break; 803 case MII_PHYIDR1: 804 if (DC_IS_PNIC(sc)) 805 return(DC_VENDORID_LO); 806 return(DC_VENDORID_DEC); 807 break; 808 case MII_PHYIDR2: 809 if (DC_IS_PNIC(sc)) 810 return(DC_DEVICEID_82C168); 811 return(DC_DEVICEID_21143); 812 break; 813 default: 814 return(0); 815 break; 816 } 817 } else 818 return(0); 819 } 820 821 if (DC_IS_PNIC(sc)) { 822 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 823 (phy << 23) | (reg << 18)); 824 for (i = 0; i < DC_TIMEOUT; i++) { 825 DELAY(1); 826 rval = CSR_READ_4(sc, DC_PN_MII); 827 if (!(rval & DC_PN_MII_BUSY)) { 828 rval &= 0xFFFF; 829 return(rval == 0xFFFF ? 0 : rval); 830 } 831 } 832 return(0); 833 } 834 835 if (DC_IS_COMET(sc)) { 836 switch(reg) { 837 case MII_BMCR: 838 phy_reg = DC_AL_BMCR; 839 break; 840 case MII_BMSR: 841 phy_reg = DC_AL_BMSR; 842 break; 843 case MII_PHYIDR1: 844 phy_reg = DC_AL_VENID; 845 break; 846 case MII_PHYIDR2: 847 phy_reg = DC_AL_DEVID; 848 break; 849 case MII_ANAR: 850 phy_reg = DC_AL_ANAR; 851 break; 852 case MII_ANLPAR: 853 phy_reg = DC_AL_LPAR; 854 break; 855 case MII_ANER: 856 phy_reg = DC_AL_ANER; 857 break; 858 default: 859 if_printf(&sc->arpcom.ac_if, 860 "phy_read: bad phy register %x\n", reg); 861 return(0); 862 break; 863 } 864 865 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 866 867 if (rval == 0xFFFF) 868 return(0); 869 return(rval); 870 } 871 872 frame.mii_phyaddr = phy; 873 frame.mii_regaddr = reg; 874 if (sc->dc_type == DC_TYPE_98713) { 875 phy_reg = CSR_READ_4(sc, DC_NETCFG); 876 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 877 } 878 dc_mii_readreg(sc, &frame); 879 if (sc->dc_type == DC_TYPE_98713) 880 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 881 882 return(frame.mii_data); 883 } 884 885 static int 886 dc_miibus_writereg(device_t dev, int phy, int reg, int data) 887 { 888 struct dc_softc *sc; 889 struct dc_mii_frame frame; 890 int i, phy_reg = 0; 891 892 sc = device_get_softc(dev); 893 bzero((char *)&frame, sizeof(frame)); 894 
895 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 896 return(0); 897 898 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 899 return(0); 900 901 if (DC_IS_PNIC(sc)) { 902 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 903 (phy << 23) | (reg << 10) | data); 904 for (i = 0; i < DC_TIMEOUT; i++) { 905 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 906 break; 907 } 908 return(0); 909 } 910 911 if (DC_IS_COMET(sc)) { 912 switch(reg) { 913 case MII_BMCR: 914 phy_reg = DC_AL_BMCR; 915 break; 916 case MII_BMSR: 917 phy_reg = DC_AL_BMSR; 918 break; 919 case MII_PHYIDR1: 920 phy_reg = DC_AL_VENID; 921 break; 922 case MII_PHYIDR2: 923 phy_reg = DC_AL_DEVID; 924 break; 925 case MII_ANAR: 926 phy_reg = DC_AL_ANAR; 927 break; 928 case MII_ANLPAR: 929 phy_reg = DC_AL_LPAR; 930 break; 931 case MII_ANER: 932 phy_reg = DC_AL_ANER; 933 break; 934 default: 935 if_printf(&sc->arpcom.ac_if, 936 "phy_write: bad phy register %x\n", reg); 937 return(0); 938 break; 939 } 940 941 CSR_WRITE_4(sc, phy_reg, data); 942 return(0); 943 } 944 945 frame.mii_phyaddr = phy; 946 frame.mii_regaddr = reg; 947 frame.mii_data = data; 948 949 if (sc->dc_type == DC_TYPE_98713) { 950 phy_reg = CSR_READ_4(sc, DC_NETCFG); 951 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 952 } 953 dc_mii_writereg(sc, &frame); 954 if (sc->dc_type == DC_TYPE_98713) 955 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 956 957 return(0); 958 } 959 960 static void 961 dc_miibus_statchg(device_t dev) 962 { 963 struct dc_softc *sc; 964 struct mii_data *mii; 965 struct ifmedia *ifm; 966 967 sc = device_get_softc(dev); 968 if (DC_IS_ADMTEK(sc)) 969 return; 970 971 mii = device_get_softc(sc->dc_miibus); 972 ifm = &mii->mii_media; 973 if (DC_IS_DAVICOM(sc) && 974 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 975 dc_setcfg(sc, ifm->ifm_media); 976 sc->dc_if_media = ifm->ifm_media; 977 } else { 978 dc_setcfg(sc, mii->mii_media_active); 979 sc->dc_if_media = mii->mii_media_active; 980 } 981 982 return; 983 } 984 985 /* 986 * 
Special support for DM9102A cards with HomePNA PHYs. Note: 987 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 988 * to be impossible to talk to the management interface of the DM9801 989 * PHY (its MDIO pin is not connected to anything). Consequently, 990 * the driver has to just 'know' about the additional mode and deal 991 * with it itself. *sigh* 992 */ 993 static void 994 dc_miibus_mediainit(device_t dev) 995 { 996 struct dc_softc *sc; 997 struct mii_data *mii; 998 struct ifmedia *ifm; 999 int rev; 1000 1001 rev = pci_get_revid(dev); 1002 1003 sc = device_get_softc(dev); 1004 mii = device_get_softc(sc->dc_miibus); 1005 ifm = &mii->mii_media; 1006 1007 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 1008 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); 1009 1010 return; 1011 } 1012 1013 #define DC_BITS_512 9 1014 #define DC_BITS_128 7 1015 #define DC_BITS_64 6 1016 1017 static u_int32_t 1018 dc_crc_mask(struct dc_softc *sc) 1019 { 1020 /* 1021 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1022 * chips is only 128 bits wide. 1023 */ 1024 if (sc->dc_flags & DC_128BIT_HASH) 1025 return ((1 << DC_BITS_128) - 1); 1026 1027 /* The hash table on the MX98715BEC is only 64 bits wide. */ 1028 if (sc->dc_flags & DC_64BIT_HASH) 1029 return ((1 << DC_BITS_64) - 1); 1030 1031 return ((1 << DC_BITS_512) - 1); 1032 } 1033 1034 /* 1035 * 21143-style RX filter setup routine. Filter programming is done by 1036 * downloading a special setup frame into the TX engine. 21143, Macronix, 1037 * PNIC, PNIC II and Davicom chips are programmed this way. 1038 * 1039 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1040 * address (our node address) and a 512-bit hash filter for multicast 1041 * frames. We also sneak the broadcast address into the hash filter since 1042 * we need that too. 
1043 */ 1044 static void 1045 dc_setfilt_21143(struct dc_softc *sc) 1046 { 1047 struct dc_desc *sframe; 1048 u_int32_t h, crc_mask, *sp; 1049 struct ifmultiaddr *ifma; 1050 struct ifnet *ifp; 1051 int i; 1052 1053 ifp = &sc->arpcom.ac_if; 1054 1055 i = sc->dc_cdata.dc_tx_prod; 1056 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1057 sc->dc_cdata.dc_tx_cnt++; 1058 sframe = &sc->dc_ldata->dc_tx_list[i]; 1059 sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; 1060 bzero((char *)sp, DC_SFRAME_LEN); 1061 1062 sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); 1063 sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | 1064 DC_FILTER_HASHPERF | DC_TXCTL_FINT; 1065 1066 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; 1067 1068 /* If we want promiscuous mode, set the allframes bit. */ 1069 if (ifp->if_flags & IFF_PROMISC) 1070 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1071 else 1072 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1073 1074 if (ifp->if_flags & IFF_ALLMULTI) 1075 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1076 else 1077 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1078 1079 crc_mask = dc_crc_mask(sc); 1080 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1081 if (ifma->ifma_addr->sa_family != AF_LINK) 1082 continue; 1083 h = ether_crc32_le( 1084 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1085 ETHER_ADDR_LEN) & crc_mask; 1086 sp[h >> 4] |= 1 << (h & 0xF); 1087 } 1088 1089 if (ifp->if_flags & IFF_BROADCAST) { 1090 h = ether_crc32_le(ifp->if_broadcastaddr, 1091 ETHER_ADDR_LEN) & crc_mask; 1092 sp[h >> 4] |= 1 << (h & 0xF); 1093 } 1094 1095 /* Set our MAC address */ 1096 sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0]; 1097 sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1]; 1098 sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2]; 1099 1100 sframe->dc_status = DC_TXSTAT_OWN; 1101 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1102 1103 /* 1104 * The PNIC takes an exceedingly long time to process its 1105 * setup frame; wait 
10ms after posting the setup frame 1106 * before proceeding, just so it has time to swallow its 1107 * medicine. 1108 */ 1109 DELAY(10000); 1110 1111 ifp->if_timer = 5; 1112 1113 return; 1114 } 1115 1116 static void 1117 dc_setfilt_admtek(struct dc_softc *sc) 1118 { 1119 struct ifnet *ifp; 1120 int h = 0; 1121 u_int32_t crc_mask; 1122 u_int32_t hashes[2] = { 0, 0 }; 1123 struct ifmultiaddr *ifma; 1124 1125 ifp = &sc->arpcom.ac_if; 1126 1127 /* Init our MAC address */ 1128 CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1129 CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1130 1131 /* If we want promiscuous mode, set the allframes bit. */ 1132 if (ifp->if_flags & IFF_PROMISC) 1133 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1134 else 1135 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1136 1137 if (ifp->if_flags & IFF_ALLMULTI) 1138 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1139 else 1140 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1141 1142 /* first, zot all the existing hash bits */ 1143 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1144 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1145 1146 /* 1147 * If we're already in promisc or allmulti mode, we 1148 * don't have to bother programming the multicast filter. 
 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/*
	 * Now program new ones. Centaur parts hash with the
	 * little-endian CRC masked to the chip's hash width; other
	 * ADMtek parts use the top 6 bits of the big-endian CRC.
	 */
	if (DC_IS_CENTAUR(sc))
		crc_mask = dc_crc_mask(sc);
	else
		crc_mask = 0x3f;
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (DC_IS_CENTAUR(sc)) {
			h = ether_crc32_le(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN) & crc_mask;
		} else {
			h = ether_crc32_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN);
			h = (h >> 26) & crc_mask;
		}
		/* Set the matching bit in the 64-bit MAR0/MAR1 pair. */
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);

	return;
}

/*
 * Program the receive filter on ASIX parts, which access the
 * filter registers indirectly through an index/data register pair.
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* Now program new ones: top 6 bits of the big-endian CRC. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN);
		h = (h >> 26) & 0x3f;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);

	return;
}

/*
 * Program the receive filter on the Xircom X3201 via a setup frame,
 * with the transmitter and receiver stopped while it is loaded.
 */
static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;
	KASSERT(ifp->if_flags & IFF_RUNNING,
	    ("%s is not running yet", ifp->if_xname));

	/* Stop TX/RX while the setup frame is loaded. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Grab a TX descriptor slot for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
	sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
	    DC_FILTER_HASHPERF | DC_TXCTL_FINT;

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Hash each multicast group into the setup frame bit table. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_xircom(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_xircom(sc, __DECONST(caddr_t, &etherbroadcastaddr));
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	/* Set our MAC address (words 0-2 of the setup frame). */
	sp[0] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
	sp[1] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
	sp[2] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

	/* Restart the MAC, then post the setup frame. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	sframe->dc_status = DC_TXSTAT_OWN;
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
 */
	DELAY(1000);

	ifp->if_timer = 5;
}

/*
 * Program the receive filter using whichever mechanism this
 * particular chip family requires.
 */
static void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* If the MAC is running, idle it before touching the media bits. */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		/* Poll until TX is idle and RX is stopped or waiting. */
		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if ((isr & DC_ISR_TX_IDLE) &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
			    "failed to force tx and rx to idle state\n");
		}
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG,
(DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* 100Mbps over the SYM port (symbol mode). */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			/* there's a write enable bit here that reads as 1 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* 10baseT over the SIA port. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG,
DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/* Cycle the SIA through reset, forcing 10baseT. */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart the MAC if we idled it above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);

	return;
}

/*
 * Issue a software reset and put the core registers back into a
 * known state.
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Wait for the reset bit to self-clear. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chips need the reset bit cleared by hand; zero i so
	 * the timeout warning below is suppressed for them.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i ==
DC_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	return;
}

/*
 * Match the PCI vendor/device ID against our device table. On a
 * match, the PCI revision ID is checked as well: when the revision
 * indicates a newer variant of the part, we advance to the
 * following, more specific table entry before returning.
 */
static const struct dc_type *
dc_devtype(device_t dev)
{
	const struct dc_type *t;
	u_int32_t rev;

	t = dc_devs;

	while (t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_get_revid(dev);
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			return(t);
		}
		t++;
	}

	return(NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip.
 * The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int
dc_probe(device_t dev)
{
	const struct dc_type *t;

	t = dc_devtype(dev);
	if (t != NULL) {
		struct dc_softc *sc = device_get_softc(dev);

		/* Need this info to decide on a chip type. */
		sc->dc_info = t;
		device_set_desc(dev, t->dc_name);
		return(0);
	}

	return(ENXIO);
}

/*
 * If the chip was left in a low-power state, bring it back to D0.
 * The BARs and interrupt line may be lost across the power-state
 * change, so they are saved beforehand and restored afterwards.
 */
static void
dc_acpi(device_t dev)
{
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;
		struct dc_softc *sc;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, DC_PCI_CFBIO, 4);
		membase = pci_read_config(dev, DC_PCI_CFBMA, 4);
		irq = pci_read_config(dev, DC_PCI_CFIT, 4);

		sc = device_get_softc(dev);
		/* Reset the power state. */
		if_printf(&sc->arpcom.ac_if,
		    "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data.
 */
		pci_write_config(dev, DC_PCI_CFBIO, iobase, 4);
		pci_write_config(dev, DC_PCI_CFBMA, membase, 4);
		pci_write_config(dev, DC_PCI_CFIT, irq, 4);
	}
}

/*
 * Play back the GPIO fixup sequences recorded in the SROM media info
 * for the given media type. Each entry is a 16-bit little-endian
 * value which is written into the high word of the watchdog register.
 */
static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;

	/* Find the media info record matching the requested media. */
	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	/* Reset sequence first, then the general-purpose sequence. */
	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	return;
}

/*
 * Decode a 21143 SROM SIA media leaf into a dc_mediainfo record and
 * prepend it to the softc's media list.
 */
static void
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = kmalloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO);
	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
	case DC_SIA_CODE_10BT:
		m->dc_media = IFM_10_T;
		break;

	case DC_SIA_CODE_10BT_FDX:
		m->dc_media = IFM_10_T|IFM_FDX;
		break;

	case DC_SIA_CODE_10B2:
		m->dc_media = IFM_10_2;
		break;

	case DC_SIA_CODE_10B5:
		m->dc_media = IFM_10_5;
		break;
	}
	/* The GPIO control words live at different offsets in the
	 * extended and non-extended leaf layouts. */
	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
	} else {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
	}

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;

	return;
}

/*
 * Decode a 21143 SROM SYM (100Mbps symbol mode) media leaf.
 */
static void
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = kmalloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT |
M_ZERO);
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX|IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;

	return;
}

/*
 * Decode a 21143 SROM MII media leaf. The GP sequence and the reset
 * sequence follow the fixed-size leaf header in the EEPROM image.
 */
static void
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	u_int8_t *p;
	struct dc_mediainfo *m;

	m = kmalloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO);
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	/* GP sequence immediately follows the header ... */
	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	/* ... then a one-byte length and the reset sequence. */
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return;
}

/*
 * Read the entire SROM into a malloc'ed buffer for later parsing.
 * 'bits' is the EEPROM address width reported by dc_eeprom_width().
 */
static void
dc_read_srom(struct dc_softc *sc, int bits)
{
	int size;

	size = 2 << bits;
	sc->dc_srom = kmalloc(size, M_DEVBUF, M_INTWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

/*
 * Walk the 21143 SROM media leaves and build the media list.
 * MII blocks take precedence: SIA and SYM blocks are only decoded
 * when no MII block is present.
 */
static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int i, loff;
	char *ptr;
	int have_mii;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
dc_attach(device_t dev)
{
	int tmp = 0;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t revision;
	int error = 0, rid, mac_offset;
	uint8_t *mac;

	sc = device_get_softc(dev);
	callout_init(&sc->dc_stat_timer);

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Handle power management nonsense.
	 */
	dc_acpi(dev);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt */
	rid = 0;
	sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	revision = pci_get_revid(dev);

	/* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */
	if (sc->dc_info->dc_did != DC_DEVICEID_82C168 &&
	    sc->dc_info->dc_did != DC_DEVICEID_X3201)
		dc_eeprom_width(sc);

	/* Determine the chip type and set the per-chip quirk flags. */
	switch (sc->dc_info->dc_did) {
	case DC_DEVICEID_21143:
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_DM9009:
	case DC_DEVICEID_DM9100:
	case DC_DEVICEID_DM9102:
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;
		/* Increase the latency timer value. */
		command = pci_read_config(dev, DC_PCI_CFLT, 4);
		command &= 0xFFFF00FF;
		command |= 0x00008000;
		pci_write_config(dev, DC_PCI_CFLT, command, 4);
		break;
	case DC_DEVICEID_AL981:
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_AN985:
	case DC_DEVICEID_FE2500:
	case DC_DEVICEID_ADM9511:
	case DC_DEVICEID_ADM9513:
	case DC_DEVICEID_FA511:
	case DC_DEVICEID_EN2242:
	case DC_DEVICEID_3CSOHOB:
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_98713:
	case DC_DEVICEID_98713_CP:
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		break;
	case DC_DEVICEID_987x5:
	case DC_DEVICEID_EN1217:
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_98727:
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C115:
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C168:
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		/* Scratch buffer for the PNIC RX bug workaround. */
		sc->dc_pnic_rx_buf = kmalloc(DC_RXLEN * 5, M_DEVBUF, M_WAITOK);
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVICEID_AX88140A:
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_RS7112:
		sc->dc_type = DC_TYPE_CONEXANT;
		sc->dc_flags |= DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_X3201:
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= (DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN);
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 * The DC_TX_COALESCE flag is required.
		 */
		sc->dc_pmode = DC_PMODE_MII;
		break;
	default:
		device_printf(dev, "unknown device: %x\n", sc->dc_info->dc_did);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_read_config(dev,
		    DC_PCI_CFLT, 4) & 0xFF;

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc))
		dc_parse_21143_srom(sc);
	else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch (sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		/* These parts store an offset to the address. */
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN985:
		/* ADMtek parts latch the address into PAR0/PAR1. */
		*(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc, DC_AL_PAR0);
		*(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc, DC_AL_PAR1);
		break;
	case DC_TYPE_CONEXANT:
		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 6);
		break;
	case DC_TYPE_XIRCOM:
		/* The MAC comes from the CIS */
		mac = pci_get_ether(dev);
		if (!mac) {
			device_printf(dev, "No station address in CIS!\n");
			error = ENXIO;
			goto fail;
		}
		bcopy(mac, eaddr, ETHER_ADDR_LEN);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	/* The descriptor rings must be physically contiguous. */
	sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->dc_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_start = dc_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = dc_npoll;
#endif
	ifp->if_watchdog = dc_watchdog;
	ifp->if_init = dc_init;
	ifp->if_baudrate = 10000000;
	ifq_set_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	/*
	 * Setup General Purpose port mode and data so the tulip can talk
	 * to the MII. This needs to be done before mii_phy_probe so that
	 * we can actually see them.
	 */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	error = mii_phy_probe(dev, &sc->dc_miibus,
	    dc_ifmedia_upd, dc_ifmedia_sts);

	if (error && DC_IS_INTEL(sc)) {
		/* No PHY found: fall back to SIA/SYM with internal NWAY. */
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		mii_phy_probe(dev, &sc->dc_miibus,
		    dc_ifmedia_upd, dc_ifmedia_sts);
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033)
			sc->dc_flags |= DC_TULIP_LEDS;
		error = 0;
	}

	if (error) {
		device_printf(dev, "MII without any PHY!\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

#ifdef IFPOLL_ENABLE
	ifpoll_compat_setup(&sc->dc_npoll, NULL, NULL, device_get_unit(dev),
	    ifp->if_serializer);
#endif

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->dc_irq));

	error = bus_setup_intr(dev, sc->dc_irq, INTR_MPSAFE,
	    dc_intr, sc, &sc->dc_intrhand,
	    ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	return(0);

fail:
	/* dc_detach() only tears down what attach managed to set up. */
	dc_detach(dev);
	return(error);
}

/*
 * Detach the interface and release all resources. Also used as the
 * error-unwind path for a failed dc_attach().
 */
static int
dc_detach(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct dc_mediainfo *m;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		dc_stop(sc);
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->dc_miibus)
		device_delete_child(dev, sc->dc_miibus);
	bus_generic_detach(dev);

	if (sc->dc_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	if (sc->dc_res)
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	if (sc->dc_ldata)
		contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF);
	if (sc->dc_pnic_rx_buf != NULL)
		kfree(sc->dc_pnic_rx_buf, M_DEVBUF);

	/* Free the SROM-derived media info list. */
	while (sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		kfree(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}

	if (sc->dc_srom)
		kfree(sc->dc_srom, M_DEVBUF);

	return(0);
}

/*
2246 * Initialize the transmit descriptors. 2247 */ 2248 static int 2249 dc_list_tx_init(struct dc_softc *sc) 2250 { 2251 struct dc_chain_data *cd; 2252 struct dc_list_data *ld; 2253 int i; 2254 2255 cd = &sc->dc_cdata; 2256 ld = sc->dc_ldata; 2257 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2258 if (i == (DC_TX_LIST_CNT - 1)) { 2259 ld->dc_tx_list[i].dc_next = 2260 vtophys(&ld->dc_tx_list[0]); 2261 } else { 2262 ld->dc_tx_list[i].dc_next = 2263 vtophys(&ld->dc_tx_list[i + 1]); 2264 } 2265 cd->dc_tx_chain[i] = NULL; 2266 ld->dc_tx_list[i].dc_data = 0; 2267 ld->dc_tx_list[i].dc_ctl = 0; 2268 } 2269 2270 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2271 2272 return(0); 2273 } 2274 2275 2276 /* 2277 * Initialize the RX descriptors and allocate mbufs for them. Note that 2278 * we arrange the descriptors in a closed ring, so that the last descriptor 2279 * points back to the first. 2280 */ 2281 static int 2282 dc_list_rx_init(struct dc_softc *sc) 2283 { 2284 struct dc_chain_data *cd; 2285 struct dc_list_data *ld; 2286 int i; 2287 2288 cd = &sc->dc_cdata; 2289 ld = sc->dc_ldata; 2290 2291 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2292 if (dc_newbuf(sc, i, NULL) == ENOBUFS) 2293 return(ENOBUFS); 2294 if (i == (DC_RX_LIST_CNT - 1)) { 2295 ld->dc_rx_list[i].dc_next = 2296 vtophys(&ld->dc_rx_list[0]); 2297 } else { 2298 ld->dc_rx_list[i].dc_next = 2299 vtophys(&ld->dc_rx_list[i + 1]); 2300 } 2301 } 2302 2303 cd->dc_rx_prod = 0; 2304 2305 return(0); 2306 } 2307 2308 /* 2309 * Initialize an RX descriptor and attach an MBUF cluster. 
2310 */ 2311 static int 2312 dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m) 2313 { 2314 struct mbuf *m_new = NULL; 2315 struct dc_desc *c; 2316 2317 c = &sc->dc_ldata->dc_rx_list[i]; 2318 2319 if (m == NULL) { 2320 m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2321 if (m_new == NULL) 2322 return (ENOBUFS); 2323 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2324 } else { 2325 m_new = m; 2326 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2327 m_new->m_data = m_new->m_ext.ext_buf; 2328 } 2329 2330 m_adj(m_new, sizeof(u_int64_t)); 2331 2332 /* 2333 * If this is a PNIC chip, zero the buffer. This is part 2334 * of the workaround for the receive bug in the 82c168 and 2335 * 82c169 chips. 2336 */ 2337 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2338 bzero((char *)mtod(m_new, char *), m_new->m_len); 2339 2340 sc->dc_cdata.dc_rx_chain[i] = m_new; 2341 c->dc_data = vtophys(mtod(m_new, caddr_t)); 2342 c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; 2343 c->dc_status = DC_RXSTAT_OWN; 2344 2345 return(0); 2346 } 2347 2348 /* 2349 * Grrrrr. 2350 * The PNIC chip has a terrible bug in it that manifests itself during 2351 * periods of heavy activity. The exact mode of failure if difficult to 2352 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2353 * will happen on slow machines. The bug is that sometimes instead of 2354 * uploading one complete frame during reception, it uploads what looks 2355 * like the entire contents of its FIFO memory. The frame we want is at 2356 * the end of the whole mess, but we never know exactly how much data has 2357 * been uploaded, so salvaging the frame is hard. 2358 * 2359 * There is only one way to do it reliably, and it's disgusting. 2360 * Here's what we know: 2361 * 2362 * - We know there will always be somewhere between one and three extra 2363 * descriptors uploaded. 2364 * 2365 * - We know the desired received frame will always be at the end of the 2366 * total data upload. 
2367 * 2368 * - We know the size of the desired received frame because it will be 2369 * provided in the length field of the status word in the last descriptor. 2370 * 2371 * Here's what we do: 2372 * 2373 * - When we allocate buffers for the receive ring, we bzero() them. 2374 * This means that we know that the buffer contents should be all 2375 * zeros, except for data uploaded by the chip. 2376 * 2377 * - We also force the PNIC chip to upload frames that include the 2378 * ethernet CRC at the end. 2379 * 2380 * - We gather all of the bogus frame data into a single buffer. 2381 * 2382 * - We then position a pointer at the end of this buffer and scan 2383 * backwards until we encounter the first non-zero byte of data. 2384 * This is the end of the received frame. We know we will encounter 2385 * some data at the end of the frame because the CRC will always be 2386 * there, so even if the sender transmits a packet of all zeros, 2387 * we won't be fooled. 2388 * 2389 * - We know the size of the actual received frame, so we subtract 2390 * that value from the current pointer location. This brings us 2391 * to the start of the actual received packet. 2392 * 2393 * - We copy this into an mbuf and pass it on, along with the actual 2394 * frame length. 2395 * 2396 * The performance hit is tremendous, but it beats dropping frames all 2397 * the time. 2398 */ 2399 2400 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) 2401 static void 2402 dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) 2403 { 2404 struct dc_desc *cur_rx; 2405 struct dc_desc *c = NULL; 2406 struct mbuf *m = NULL; 2407 unsigned char *ptr; 2408 int i, total_len; 2409 u_int32_t rxstat = 0; 2410 2411 i = sc->dc_pnic_rx_bug_save; 2412 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2413 ptr = sc->dc_pnic_rx_buf; 2414 bzero(ptr, DC_RXLEN * 5); 2415 2416 /* Copy all the bytes from the bogus buffers. 
*/ 2417 while (1) { 2418 c = &sc->dc_ldata->dc_rx_list[i]; 2419 rxstat = c->dc_status; 2420 m = sc->dc_cdata.dc_rx_chain[i]; 2421 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2422 ptr += DC_RXLEN; 2423 /* If this is the last buffer, break out. */ 2424 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2425 break; 2426 dc_newbuf(sc, i, m); 2427 DC_INC(i, DC_RX_LIST_CNT); 2428 } 2429 2430 /* Find the length of the actual receive frame. */ 2431 total_len = DC_RXBYTES(rxstat); 2432 2433 /* Scan backwards until we hit a non-zero byte. */ 2434 while(*ptr == 0x00) 2435 ptr--; 2436 2437 /* Round off. */ 2438 if ((uintptr_t)(ptr) & 0x3) 2439 ptr -= 1; 2440 2441 /* Now find the start of the frame. */ 2442 ptr -= total_len; 2443 if (ptr < sc->dc_pnic_rx_buf) 2444 ptr = sc->dc_pnic_rx_buf; 2445 2446 /* 2447 * Now copy the salvaged frame to the last mbuf and fake up 2448 * the status word to make it look like a successful 2449 * frame reception. 2450 */ 2451 dc_newbuf(sc, i, m); 2452 bcopy(ptr, mtod(m, char *), total_len); 2453 cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; 2454 2455 return; 2456 } 2457 2458 /* 2459 * This routine searches the RX ring for dirty descriptors in the 2460 * event that the rxeof routine falls out of sync with the chip's 2461 * current descriptor pointer. This may happen sometimes as a result 2462 * of a "no RX buffer available" condition that happens when the chip 2463 * consumes all of the RX buffers before the driver has a chance to 2464 * process the RX ring. This routine may need to be called more than 2465 * once to bring the driver back in sync with the chip, however we 2466 * should still be getting RX DONE interrupts to drive the search 2467 * for new packets in the RX ring, so we should catch up eventually. 
2468 */ 2469 static int 2470 dc_rx_resync(struct dc_softc *sc) 2471 { 2472 int i, pos; 2473 struct dc_desc *cur_rx; 2474 2475 pos = sc->dc_cdata.dc_rx_prod; 2476 2477 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2478 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2479 if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) 2480 break; 2481 DC_INC(pos, DC_RX_LIST_CNT); 2482 } 2483 2484 /* If the ring really is empty, then just return. */ 2485 if (i == DC_RX_LIST_CNT) 2486 return(0); 2487 2488 /* We've fallen behing the chip: catch it. */ 2489 sc->dc_cdata.dc_rx_prod = pos; 2490 2491 return(EAGAIN); 2492 } 2493 2494 /* 2495 * A frame has been uploaded: pass the resulting mbuf chain up to 2496 * the higher level protocols. 2497 */ 2498 static void 2499 dc_rxeof(struct dc_softc *sc) 2500 { 2501 struct mbuf *m; 2502 struct ifnet *ifp; 2503 struct dc_desc *cur_rx; 2504 int i, total_len = 0; 2505 u_int32_t rxstat; 2506 2507 ifp = &sc->arpcom.ac_if; 2508 i = sc->dc_cdata.dc_rx_prod; 2509 2510 while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) { 2511 2512 #ifdef IFPOLL_ENABLE 2513 if (ifp->if_flags & IFF_NPOLLING) { 2514 if (sc->rxcycles <= 0) 2515 break; 2516 sc->rxcycles--; 2517 } 2518 #endif /* IFPOLL_ENABLE */ 2519 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2520 rxstat = cur_rx->dc_status; 2521 m = sc->dc_cdata.dc_rx_chain[i]; 2522 total_len = DC_RXBYTES(rxstat); 2523 2524 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2525 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2526 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2527 sc->dc_pnic_rx_bug_save = i; 2528 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2529 DC_INC(i, DC_RX_LIST_CNT); 2530 continue; 2531 } 2532 dc_pnic_rx_bug_war(sc, i); 2533 rxstat = cur_rx->dc_status; 2534 total_len = DC_RXBYTES(rxstat); 2535 } 2536 } 2537 2538 sc->dc_cdata.dc_rx_chain[i] = NULL; 2539 2540 /* 2541 * If an error occurs, update stats, clear the 2542 * status word and leave the mbuf cluster in place: 2543 * it should simply get re-used next time this descriptor 2544 * comes 
up in the ring. However, don't report long 2545 * frames as errors since they could be vlans 2546 */ 2547 if ((rxstat & DC_RXSTAT_RXERR)){ 2548 if (!(rxstat & DC_RXSTAT_GIANT) || 2549 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2550 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2551 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2552 IFNET_STAT_INC(ifp, ierrors, 1); 2553 if (rxstat & DC_RXSTAT_COLLSEEN) 2554 IFNET_STAT_INC(ifp, collisions, 1); 2555 dc_newbuf(sc, i, m); 2556 if (rxstat & DC_RXSTAT_CRCERR) { 2557 DC_INC(i, DC_RX_LIST_CNT); 2558 continue; 2559 } else { 2560 dc_init(sc); 2561 return; 2562 } 2563 } 2564 } 2565 2566 /* No errors; receive the packet. */ 2567 total_len -= ETHER_CRC_LEN; 2568 2569 #ifdef __x86_64__ 2570 /* 2571 * On the x86 we do not have alignment problems, so try to 2572 * allocate a new buffer for the receive ring, and pass up 2573 * the one where the packet is already, saving the expensive 2574 * copy done in m_devget(). 2575 * 2576 * If we are on an architecture with alignment problems, or 2577 * if the allocation fails, then use m_devget and leave the 2578 * existing buffer in the receive ring. 2579 */ 2580 if (dc_quick && dc_newbuf(sc, i, NULL) == 0) { 2581 m->m_pkthdr.rcvif = ifp; 2582 m->m_pkthdr.len = m->m_len = total_len; 2583 DC_INC(i, DC_RX_LIST_CNT); 2584 } else 2585 #endif 2586 { 2587 struct mbuf *m0; 2588 2589 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 2590 total_len + ETHER_ALIGN, 0, ifp); 2591 dc_newbuf(sc, i, m); 2592 DC_INC(i, DC_RX_LIST_CNT); 2593 if (m0 == NULL) { 2594 IFNET_STAT_INC(ifp, ierrors, 1); 2595 continue; 2596 } 2597 m_adj(m0, ETHER_ALIGN); 2598 m = m0; 2599 } 2600 2601 IFNET_STAT_INC(ifp, ipackets, 1); 2602 ifp->if_input(ifp, m, NULL, -1); 2603 } 2604 2605 sc->dc_cdata.dc_rx_prod = i; 2606 } 2607 2608 /* 2609 * A frame was downloaded to the chip. It's safe for us to clean up 2610 * the list buffers. 
2611 */ 2612 2613 static void 2614 dc_txeof(struct dc_softc *sc) 2615 { 2616 struct dc_desc *cur_tx = NULL; 2617 struct ifnet *ifp; 2618 int idx; 2619 2620 ifp = &sc->arpcom.ac_if; 2621 2622 /* 2623 * Go through our tx list and free mbufs for those 2624 * frames that have been transmitted. 2625 */ 2626 idx = sc->dc_cdata.dc_tx_cons; 2627 while(idx != sc->dc_cdata.dc_tx_prod) { 2628 u_int32_t txstat; 2629 2630 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2631 txstat = cur_tx->dc_status; 2632 2633 if (txstat & DC_TXSTAT_OWN) 2634 break; 2635 2636 if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) || 2637 cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2638 if (cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2639 /* 2640 * Yes, the PNIC is so brain damaged 2641 * that it will sometimes generate a TX 2642 * underrun error while DMAing the RX 2643 * filter setup frame. If we detect this, 2644 * we have to send the setup frame again, 2645 * or else the filter won't be programmed 2646 * correctly. 2647 */ 2648 if (DC_IS_PNIC(sc)) { 2649 if (txstat & DC_TXSTAT_ERRSUM) 2650 dc_setfilt(sc); 2651 } 2652 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2653 } 2654 sc->dc_cdata.dc_tx_cnt--; 2655 DC_INC(idx, DC_TX_LIST_CNT); 2656 continue; 2657 } 2658 2659 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2660 /* 2661 * XXX: Why does my Xircom taunt me so? 2662 * For some reason Conexant chips like 2663 * setting the CARRLOST flag even when 2664 * the carrier is there. In CURRENT we 2665 * have the same problem for Xircom 2666 * cards ! 
2667 */ 2668 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2669 sc->dc_pmode == DC_PMODE_MII && 2670 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2671 DC_TXSTAT_NOCARRIER))) 2672 txstat &= ~DC_TXSTAT_ERRSUM; 2673 } else { 2674 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2675 sc->dc_pmode == DC_PMODE_MII && 2676 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2677 DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) 2678 txstat &= ~DC_TXSTAT_ERRSUM; 2679 } 2680 2681 if (txstat & DC_TXSTAT_ERRSUM) { 2682 IFNET_STAT_INC(ifp, oerrors, 1); 2683 if (txstat & DC_TXSTAT_EXCESSCOLL) 2684 IFNET_STAT_INC(ifp, collisions, 1); 2685 if (txstat & DC_TXSTAT_LATECOLL) 2686 IFNET_STAT_INC(ifp, collisions, 1); 2687 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2688 dc_init(sc); 2689 return; 2690 } 2691 } 2692 2693 IFNET_STAT_INC(ifp, collisions, 2694 (txstat & DC_TXSTAT_COLLCNT) >> 3); 2695 2696 IFNET_STAT_INC(ifp, opackets, 1); 2697 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2698 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2699 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2700 } 2701 2702 sc->dc_cdata.dc_tx_cnt--; 2703 DC_INC(idx, DC_TX_LIST_CNT); 2704 } 2705 2706 if (idx != sc->dc_cdata.dc_tx_cons) { 2707 /* some buffers have been freed */ 2708 sc->dc_cdata.dc_tx_cons = idx; 2709 ifq_clr_oactive(&ifp->if_snd); 2710 } 2711 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2712 2713 return; 2714 } 2715 2716 static void 2717 dc_tick(void *xsc) 2718 { 2719 struct dc_softc *sc = xsc; 2720 struct ifnet *ifp = &sc->arpcom.ac_if; 2721 struct mii_data *mii; 2722 u_int32_t r; 2723 2724 lwkt_serialize_enter(ifp->if_serializer); 2725 2726 mii = device_get_softc(sc->dc_miibus); 2727 2728 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2729 if (sc->dc_flags & DC_21143_NWAY) { 2730 r = CSR_READ_4(sc, DC_10BTSTAT); 2731 if (IFM_SUBTYPE(mii->mii_media_active) == 2732 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2733 sc->dc_link = 0; 2734 mii_mediachg(mii); 2735 } 2736 if (IFM_SUBTYPE(mii->mii_media_active) == 2737 IFM_10_T && (r & DC_TSTAT_LS10)) { 2738 sc->dc_link = 0; 2739 mii_mediachg(mii); 2740 } 2741 if (sc->dc_link == 0) 2742 mii_tick(mii); 2743 } else { 2744 r = CSR_READ_4(sc, DC_ISR); 2745 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2746 sc->dc_cdata.dc_tx_cnt == 0) { 2747 mii_tick(mii); 2748 if (!(mii->mii_media_status & IFM_ACTIVE)) 2749 sc->dc_link = 0; 2750 } 2751 } 2752 } else { 2753 mii_tick(mii); 2754 } 2755 2756 /* 2757 * When the init routine completes, we expect to be able to send 2758 * packets right away, and in fact the network code will send a 2759 * gratuitous ARP the moment the init routine marks the interface 2760 * as running. However, even though the MAC may have been initialized, 2761 * there may be a delay of a few seconds before the PHY completes 2762 * autonegotiation and the link is brought up. Any transmissions 2763 * made during that delay will be lost. Dealing with this is tricky: 2764 * we can't just pause in the init routine while waiting for the 2765 * PHY to come ready since that would bring the whole system to 2766 * a screeching halt for several seconds. 2767 * 2768 * What we do here is prevent the TX start routine from sending 2769 * any packets until a link has been established. 
After the 2770 * interface has been initialized, the tick routine will poll 2771 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2772 * that time, packets will stay in the send queue, and once the 2773 * link comes up, they will be flushed out to the wire. 2774 */ 2775 if (!sc->dc_link) { 2776 mii_pollstat(mii); 2777 if (mii->mii_media_status & IFM_ACTIVE && 2778 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2779 sc->dc_link++; 2780 if (!ifq_is_empty(&ifp->if_snd)) 2781 if_devstart(ifp); 2782 } 2783 } 2784 2785 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2786 callout_reset(&sc->dc_stat_timer, hz / 10, dc_tick, sc); 2787 else 2788 callout_reset(&sc->dc_stat_timer, hz, dc_tick, sc); 2789 2790 lwkt_serialize_exit(ifp->if_serializer); 2791 } 2792 2793 /* 2794 * A transmit underrun has occurred. Back off the transmit threshold, 2795 * or switch to store and forward mode if we have to. 2796 */ 2797 static void 2798 dc_tx_underrun(struct dc_softc *sc) 2799 { 2800 u_int32_t isr; 2801 int i; 2802 2803 if (DC_IS_DAVICOM(sc)) 2804 dc_init(sc); 2805 2806 if (DC_IS_INTEL(sc)) { 2807 /* 2808 * The real 21143 requires that the transmitter be idle 2809 * in order to change the transmit threshold or store 2810 * and forward state. 
2811 */ 2812 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2813 2814 for (i = 0; i < DC_TIMEOUT; i++) { 2815 isr = CSR_READ_4(sc, DC_ISR); 2816 if (isr & DC_ISR_TX_IDLE) 2817 break; 2818 DELAY(10); 2819 } 2820 if (i == DC_TIMEOUT) { 2821 if_printf(&sc->arpcom.ac_if, 2822 "failed to force tx to idle state\n"); 2823 dc_init(sc); 2824 } 2825 } 2826 2827 if_printf(&sc->arpcom.ac_if, "TX underrun -- "); 2828 sc->dc_txthresh += DC_TXTHRESH_INC; 2829 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2830 kprintf("using store and forward mode\n"); 2831 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2832 } else { 2833 kprintf("increasing TX threshold\n"); 2834 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2835 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2836 } 2837 2838 if (DC_IS_INTEL(sc)) 2839 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2840 2841 return; 2842 } 2843 2844 #ifdef IFPOLL_ENABLE 2845 2846 static void 2847 dc_npoll_compat(struct ifnet *ifp, void *arg __unused, int count) 2848 { 2849 struct dc_softc *sc = ifp->if_softc; 2850 2851 ASSERT_SERIALIZED(ifp->if_serializer); 2852 2853 sc->rxcycles = count; 2854 dc_rxeof(sc); 2855 dc_txeof(sc); 2856 if (!ifq_is_empty(&ifp->if_snd)) 2857 if_devstart(ifp); 2858 2859 if (sc->dc_npoll.ifpc_stcount-- == 0) { 2860 uint32_t status; 2861 2862 sc->dc_npoll.ifpc_stcount = sc->dc_npoll.ifpc_stfrac; 2863 2864 status = CSR_READ_4(sc, DC_ISR); 2865 status &= (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF| 2866 DC_ISR_TX_NOBUF|DC_ISR_TX_IDLE|DC_ISR_TX_UNDERRUN| 2867 DC_ISR_BUS_ERR); 2868 if (!status) 2869 return; 2870 /* ack what we have */ 2871 CSR_WRITE_4(sc, DC_ISR, status); 2872 2873 if (status & (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF) ) { 2874 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 2875 IFNET_STAT_INC(ifp, ierrors, 2876 (r & 0xffff) + ((r >> 17) & 0x7ff)); 2877 2878 if (dc_rx_resync(sc)) 2879 dc_rxeof(sc); 2880 } 2881 /* restart transmit unit if necessary */ 2882 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 2883 CSR_WRITE_4(sc, 
DC_TXSTART, 0xFFFFFFFF); 2884 2885 if (status & DC_ISR_TX_UNDERRUN) 2886 dc_tx_underrun(sc); 2887 2888 if (status & DC_ISR_BUS_ERR) { 2889 if_printf(ifp, "dc_poll: bus error\n"); 2890 dc_reset(sc); 2891 dc_init(sc); 2892 } 2893 } 2894 } 2895 2896 static void 2897 dc_npoll(struct ifnet *ifp, struct ifpoll_info *info) 2898 { 2899 struct dc_softc *sc = ifp->if_softc; 2900 2901 ASSERT_SERIALIZED(ifp->if_serializer); 2902 2903 if (info != NULL) { 2904 int cpuid = sc->dc_npoll.ifpc_cpuid; 2905 2906 info->ifpi_rx[cpuid].poll_func = dc_npoll_compat; 2907 info->ifpi_rx[cpuid].arg = NULL; 2908 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 2909 2910 if (ifp->if_flags & IFF_RUNNING) { 2911 /* Disable interrupts */ 2912 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2913 sc->dc_npoll.ifpc_stcount = 0; 2914 } 2915 ifq_set_cpuid(&ifp->if_snd, cpuid); 2916 } else { 2917 if (ifp->if_flags & IFF_RUNNING) { 2918 /* Re-enable interrupts. */ 2919 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2920 } 2921 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->dc_irq)); 2922 } 2923 } 2924 2925 #endif /* IFPOLL_ENABLE */ 2926 2927 static void 2928 dc_intr(void *arg) 2929 { 2930 struct dc_softc *sc; 2931 struct ifnet *ifp; 2932 u_int32_t status; 2933 2934 sc = arg; 2935 2936 if (sc->suspended) { 2937 return; 2938 } 2939 2940 ifp = &sc->arpcom.ac_if; 2941 2942 if ( (CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 2943 return ; 2944 2945 /* Suppress unwanted interrupts */ 2946 if ((ifp->if_flags & IFF_RUNNING) == 0) { 2947 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 2948 dc_stop(sc); 2949 return; 2950 } 2951 2952 /* Disable interrupts. 
*/ 2953 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2954 2955 while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && 2956 status != 0xFFFFFFFF) { 2957 2958 CSR_WRITE_4(sc, DC_ISR, status); 2959 2960 if (status & DC_ISR_RX_OK) { 2961 u_long curpkts, ncurpkts; 2962 2963 IFNET_STAT_GET(ifp, ipackets, curpkts); 2964 dc_rxeof(sc); 2965 IFNET_STAT_GET(ifp, ipackets, ncurpkts); 2966 2967 if (curpkts == ncurpkts) { 2968 while(dc_rx_resync(sc)) 2969 dc_rxeof(sc); 2970 } 2971 } 2972 2973 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 2974 dc_txeof(sc); 2975 2976 if (status & DC_ISR_TX_IDLE) { 2977 dc_txeof(sc); 2978 if (sc->dc_cdata.dc_tx_cnt) { 2979 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2980 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2981 } 2982 } 2983 2984 if (status & DC_ISR_TX_UNDERRUN) 2985 dc_tx_underrun(sc); 2986 2987 if ((status & DC_ISR_RX_WATDOGTIMEO) 2988 || (status & DC_ISR_RX_NOBUF)) { 2989 u_long curpkts, ncurpkts; 2990 2991 IFNET_STAT_GET(ifp, ipackets, curpkts); 2992 dc_rxeof(sc); 2993 IFNET_STAT_GET(ifp, ipackets, ncurpkts); 2994 2995 if (curpkts == ncurpkts) { 2996 while(dc_rx_resync(sc)) 2997 dc_rxeof(sc); 2998 } 2999 } 3000 3001 if (status & DC_ISR_BUS_ERR) { 3002 dc_reset(sc); 3003 dc_init(sc); 3004 } 3005 } 3006 3007 /* Re-enable interrupts. */ 3008 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3009 3010 if (!ifq_is_empty(&ifp->if_snd)) 3011 if_devstart(ifp); 3012 } 3013 3014 /* 3015 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 3016 * pointers to the fragment pointers. 3017 */ 3018 static int 3019 dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 3020 { 3021 struct dc_desc *f = NULL; 3022 struct mbuf *m; 3023 int frag, cur, cnt = 0; 3024 3025 /* 3026 * Start packing the mbufs in this chain into 3027 * the fragment pointers. Stop when we run out 3028 * of fragments or hit the end of the mbuf chain. 
3029 */ 3030 m = m_head; 3031 cur = frag = *txidx; 3032 3033 for (m = m_head; m != NULL; m = m->m_next) { 3034 if (m->m_len != 0) { 3035 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 3036 if (*txidx != sc->dc_cdata.dc_tx_prod && 3037 frag == (DC_TX_LIST_CNT - 1)) 3038 return(ENOBUFS); 3039 } 3040 if ((DC_TX_LIST_CNT - 3041 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) 3042 return(ENOBUFS); 3043 3044 f = &sc->dc_ldata->dc_tx_list[frag]; 3045 f->dc_ctl = DC_TXCTL_TLINK | m->m_len; 3046 if (cnt == 0) { 3047 f->dc_status = 0; 3048 f->dc_ctl |= DC_TXCTL_FIRSTFRAG; 3049 } else 3050 f->dc_status = DC_TXSTAT_OWN; 3051 f->dc_data = vtophys(mtod(m, vm_offset_t)); 3052 cur = frag; 3053 DC_INC(frag, DC_TX_LIST_CNT); 3054 cnt++; 3055 } 3056 } 3057 3058 if (m != NULL) 3059 return(ENOBUFS); 3060 3061 sc->dc_cdata.dc_tx_cnt += cnt; 3062 sc->dc_cdata.dc_tx_chain[cur] = m_head; 3063 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; 3064 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3065 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; 3066 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3067 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3068 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3069 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3070 sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; 3071 *txidx = frag; 3072 3073 return(0); 3074 } 3075 3076 /* 3077 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3078 * to the mbuf data regions directly in the transmit lists. We also save a 3079 * copy of the pointers since the transmit list fragment pointers are 3080 * physical addresses. 
3081 */ 3082 3083 static void 3084 dc_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3085 { 3086 struct dc_softc *sc; 3087 struct mbuf *m_head, *m_defragged; 3088 int idx, need_trans; 3089 3090 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 3091 sc = ifp->if_softc; 3092 3093 if (!sc->dc_link) { 3094 ifq_purge(&ifp->if_snd); 3095 return; 3096 } 3097 3098 if (ifq_is_oactive(&ifp->if_snd)) 3099 return; 3100 3101 idx = sc->dc_cdata.dc_tx_prod; 3102 3103 need_trans = 0; 3104 while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { 3105 m_defragged = NULL; 3106 m_head = ifq_dequeue(&ifp->if_snd); 3107 if (m_head == NULL) 3108 break; 3109 3110 if ((sc->dc_flags & DC_TX_COALESCE) && 3111 (m_head->m_next != NULL || (sc->dc_flags & DC_TX_ALIGN))) { 3112 /* 3113 * Check first if coalescing allows us to queue 3114 * the packet. We don't want to loose it if 3115 * the TX queue is full. 3116 */ 3117 if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && 3118 idx != sc->dc_cdata.dc_tx_prod && 3119 idx == (DC_TX_LIST_CNT - 1)) { 3120 ifq_set_oactive(&ifp->if_snd); 3121 ifq_prepend(&ifp->if_snd, m_head); 3122 break; 3123 } 3124 if ((DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt) < 5) { 3125 ifq_set_oactive(&ifp->if_snd); 3126 ifq_prepend(&ifp->if_snd, m_head); 3127 break; 3128 } 3129 3130 /* only coalesce if have >1 mbufs */ 3131 m_defragged = m_defrag(m_head, M_NOWAIT); 3132 if (m_defragged == NULL) { 3133 ifq_set_oactive(&ifp->if_snd); 3134 ifq_prepend(&ifp->if_snd, m_head); 3135 break; 3136 } 3137 m_head = m_defragged; 3138 } 3139 3140 if (dc_encap(sc, m_head, &idx)) { 3141 if (m_defragged) { 3142 /* 3143 * Throw away the original packet if the 3144 * defragged packet could not be encapsulated, 3145 * as well as the defragged packet. 3146 */ 3147 m_freem(m_head); 3148 } else { 3149 ifq_prepend(&ifp->if_snd, m_head); 3150 } 3151 ifq_set_oactive(&ifp->if_snd); 3152 break; 3153 } 3154 3155 need_trans = 1; 3156 3157 /* 3158 * If there's a BPF listener, bounce a copy of this frame 3159 * to him. 
3160 */ 3161 BPF_MTAP(ifp, m_head); 3162 3163 if (sc->dc_flags & DC_TX_ONE) { 3164 ifq_set_oactive(&ifp->if_snd); 3165 break; 3166 } 3167 } 3168 3169 if (!need_trans) 3170 return; 3171 3172 /* Transmit */ 3173 sc->dc_cdata.dc_tx_prod = idx; 3174 if (!(sc->dc_flags & DC_TX_POLL)) 3175 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3176 3177 /* 3178 * Set a timeout in case the chip goes out to lunch. 3179 */ 3180 ifp->if_timer = 5; 3181 } 3182 3183 static void 3184 dc_init(void *xsc) 3185 { 3186 struct dc_softc *sc = xsc; 3187 struct ifnet *ifp = &sc->arpcom.ac_if; 3188 struct mii_data *mii; 3189 3190 mii = device_get_softc(sc->dc_miibus); 3191 3192 /* 3193 * Cancel pending I/O and free all RX/TX buffers. 3194 */ 3195 dc_stop(sc); 3196 dc_reset(sc); 3197 3198 /* 3199 * Set cache alignment and burst length. 3200 */ 3201 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3202 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3203 else 3204 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 3205 /* 3206 * Evenly share the bus between receive and transmit process. 
3207 */ 3208 if (DC_IS_INTEL(sc)) 3209 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3210 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3211 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3212 } else { 3213 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3214 } 3215 if (sc->dc_flags & DC_TX_POLL) 3216 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3217 switch(sc->dc_cachesize) { 3218 case 32: 3219 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3220 break; 3221 case 16: 3222 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3223 break; 3224 case 8: 3225 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3226 break; 3227 case 0: 3228 default: 3229 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3230 break; 3231 } 3232 3233 if (sc->dc_flags & DC_TX_STORENFWD) 3234 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3235 else { 3236 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3237 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3238 } else { 3239 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3240 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3241 } 3242 } 3243 3244 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3245 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3246 3247 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3248 /* 3249 * The app notes for the 98713 and 98715A say that 3250 * in order to have the chips operate properly, a magic 3251 * number must be written to CSR16. Macronix does not 3252 * document the meaning of these bits so there's no way 3253 * to know exactly what they do. The 98713 has a magic 3254 * number all its own; the rest all use a different one. 3255 */ 3256 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3257 if (sc->dc_type == DC_TYPE_98713) 3258 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3259 else 3260 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3261 } 3262 3263 if (DC_IS_XIRCOM(sc)) { 3264 /* 3265 * Setup General Purpose Port mode and data so the tulip 3266 * can talk to the MII. 
3267 */ 3268 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 3269 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3270 DELAY(10); 3271 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 3272 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3273 DELAY(10); 3274 } 3275 3276 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3277 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3278 3279 /* Init circular RX list. */ 3280 if (dc_list_rx_init(sc) == ENOBUFS) { 3281 if_printf(ifp, "initialization failed: no " 3282 "memory for rx buffers\n"); 3283 dc_stop(sc); 3284 return; 3285 } 3286 3287 /* 3288 * Init tx descriptors. 3289 */ 3290 dc_list_tx_init(sc); 3291 3292 /* 3293 * Load the address of the RX list. 3294 */ 3295 CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); 3296 CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); 3297 3298 /* 3299 * Enable interrupts. 3300 */ 3301 #ifdef IFPOLL_ENABLE 3302 /* 3303 * ... but only if we are not polling, and make sure they are off in 3304 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3305 * after a reset. 3306 */ 3307 if (ifp->if_flags & IFF_NPOLLING) { 3308 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3309 sc->dc_npoll.ifpc_stcount = 0; 3310 } else 3311 #endif 3312 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3313 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3314 3315 /* Enable transmitter. */ 3316 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3317 3318 /* 3319 * If this is an Intel 21143 and we're not using the 3320 * MII port, program the LED control pins so we get 3321 * link and activity indications. 3322 */ 3323 if (sc->dc_flags & DC_TULIP_LEDS) { 3324 CSR_WRITE_4(sc, DC_WATCHDOG, 3325 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 3326 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3327 } 3328 3329 /* 3330 * Set IFF_RUNNING here to keep the assertion in dc_setfilt() 3331 * working. 3332 */ 3333 ifp->if_flags |= IFF_RUNNING; 3334 ifq_clr_oactive(&ifp->if_snd); 3335 3336 /* 3337 * Load the RX/multicast filter. 
We do this sort of late 3338 * because the filter programming scheme on the 21143 and 3339 * some clones requires DMAing a setup frame via the TX 3340 * engine, and we need the transmitter enabled for that. 3341 */ 3342 dc_setfilt(sc); 3343 3344 /* Enable receiver. */ 3345 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3346 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3347 3348 mii_mediachg(mii); 3349 dc_setcfg(sc, sc->dc_if_media); 3350 3351 /* Don't start the ticker if this is a homePNA link. */ 3352 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3353 sc->dc_link = 1; 3354 else { 3355 if (sc->dc_flags & DC_21143_NWAY) 3356 callout_reset(&sc->dc_stat_timer, hz/10, dc_tick, sc); 3357 else 3358 callout_reset(&sc->dc_stat_timer, hz, dc_tick, sc); 3359 } 3360 } 3361 3362 /* 3363 * Set media options. 3364 */ 3365 static int 3366 dc_ifmedia_upd(struct ifnet *ifp) 3367 { 3368 struct dc_softc *sc; 3369 struct mii_data *mii; 3370 struct ifmedia *ifm; 3371 3372 sc = ifp->if_softc; 3373 mii = device_get_softc(sc->dc_miibus); 3374 mii_mediachg(mii); 3375 ifm = &mii->mii_media; 3376 3377 if (DC_IS_DAVICOM(sc) && 3378 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3379 dc_setcfg(sc, ifm->ifm_media); 3380 else 3381 sc->dc_link = 0; 3382 3383 return(0); 3384 } 3385 3386 /* 3387 * Report current media status. 
 */
static void
dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_pollstat(mii);
	ifm = &mii->mii_media;
	/*
	 * On Davicom chips with homePNA media selected, report the
	 * configured media word directly with a zero status.
	 * NOTE(review): presumably the MII layer cannot report the
	 * homePNA link itself — confirm against the Davicom PHY docs.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
			ifmr->ifm_active = ifm->ifm_media;
			ifmr->ifm_status = 0;
			return;
		}
	}
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Interface ioctl handler.  Handles interface flag changes, multicast
 * list updates and media requests itself; everything else is passed
 * to ether_ioctl().  The credential argument is unused here.
 */
static int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only reprogram the RX filter when PROMISC or
			 * ALLMULTI actually changed since the last call.
			 */
			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
				(IFF_PROMISC | IFF_ALLMULTI);
			if (ifp->if_flags & IFF_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				/* Bringing the interface up: reset the
				 * TX threshold and (re)initialize. */
				sc->dc_txthresh = 0;
				dc_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		/* Remember the flags so the next call can diff them. */
		sc->dc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			dc_setfilt(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * Transmit watchdog.  Fires when a queued transmission has not
 * completed in time: count an output error, do a full stop/reset/init
 * cycle, and restart transmission if packets are still queued.
 */
static void
dc_watchdog(struct ifnet *ifp)
{
	struct dc_softc *sc;

	sc = ifp->if_softc;

	IFNET_STAT_INC(ifp, oerrors, 1);
	if_printf(ifp, "watchdog timeout\n");

	dc_stop(sc);
	dc_reset(sc);
	dc_init(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
dc_stop(struct dc_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	callout_stop(&sc->dc_stat_timer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Disable RX/TX, mask all interrupts and clear the descriptor
	 * list base addresses.  NOTE(review): this ordering is kept
	 * as-is deliberately; it quiesces DMA before the rings are
	 * torn down below.
	 */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i] != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i]);
			sc->dc_cdata.dc_rx_chain[i] = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_rx_list,
		sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 *
	 * A descriptor that holds a setup frame, or that is not the
	 * last fragment of a packet, does not own the mbuf pointer
	 * stored in its chain slot — only the LASTFRAG descriptor
	 * does — so for those we just clear the pointer without
	 * freeing (freeing would double-free the mbuf chain).
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i] != NULL) {
			if ((sc->dc_ldata->dc_tx_list[i].dc_ctl &
				DC_TXCTL_SETUP) ||
			    !(sc->dc_ldata->dc_tx_list[i].dc_ctl &
				DC_TXCTL_LASTFRAG)) {
				sc->dc_cdata.dc_tx_chain[i] = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i]);
			sc->dc_cdata.dc_tx_chain[i] = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_tx_list,
		sizeof(sc->dc_ldata->dc_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
dc_shutdown(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;
	/* Take the interface serializer around the stop, as the
	 * regular ioctl/interrupt paths do. */
	lwkt_serialize_enter(ifp->if_serializer);

	dc_stop(sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
dc_suspend(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	lwkt_serialize_enter(ifp->if_serializer);

	dc_stop(sc);
	/* Save the five BAR registers plus expansion-ROM address,
	 * interrupt line, cache line size and latency timer. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
dc_resume(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	lwkt_serialize_enter(ifp->if_serializer);
	dc_acpi(dev);

	/* Restore the PCI config state saved at suspend time.
	 * (better way to do this?) */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, DC_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		dc_init(sc);

	sc->suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Compute the receive filter table index for a multicast address on
 * Xircom chips, derived from the little-endian CRC32 of the 6-byte
 * station address.  NOTE(review): the bit-shuffling below is assumed
 * to match the X3201's hash-table layout — confirm against Xircom
 * documentation before changing.
 */
static uint32_t
dc_mchash_xircom(struct dc_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	if ((crc & 0x180) == 0x180)
		return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4));
	else
		return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + (12 << 4));
}