1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 28 /* 29 * dnet -- DEC 21x4x 30 * 31 * Currently supports: 32 * 21040, 21041, 21140, 21142, 21143 33 * SROM versions 1, 3, 3.03, 4 34 * TP, AUI, BNC, 100BASETX, 100BASET4 35 * 36 * XXX NEEDSWORK 37 * All media SHOULD work, FX is untested 38 * 39 * Depends on the Generic LAN Driver utility functions in /kernel/misc/mac 40 */ 41 42 #define BUG_4010796 /* See 4007871, 4010796 */ 43 44 #include <sys/types.h> 45 #include <sys/errno.h> 46 #include <sys/param.h> 47 #include <sys/stropts.h> 48 #include <sys/stream.h> 49 #include <sys/kmem.h> 50 #include <sys/conf.h> 51 #include <sys/devops.h> 52 #include <sys/ksynch.h> 53 #include <sys/stat.h> 54 #include <sys/modctl.h> 55 #include <sys/debug.h> 56 #include <sys/dlpi.h> 57 #include <sys/ethernet.h> 58 #include <sys/vlan.h> 59 #include <sys/mac.h> 60 #include <sys/mac_ether.h> 61 #include <sys/mac_provider.h> 62 #include <sys/pci.h> 63 #include <sys/ddi.h> 64 #include <sys/sunddi.h> 65 #include <sys/strsun.h> 66 67 #include "dnet_mii.h" 68 #include "dnet.h" 69 70 /* 
 * Declarations and Module Linkage
 */

#define	IDENT	"DNET 21x4x"

/*
 * Compile-time debug switches (normally left disabled):
 * #define DNET_NOISY
 * #define SROMDEBUG
 * #define SROMDUMPSTRUCTURES
 */

#ifdef DNETDEBUG
#ifdef DNET_NOISY
int dnetdebug = -1;
#else
int dnetdebug = 0;
#endif
#endif

/* used for message allocated using desballoc() */
struct free_ptr {
	struct free_rtn free_rtn;
	caddr_t buf;
};

/* node of the driver's receive-buffer free list (protected by dnet_rbuf_lock) */
struct rbuf_list {
	struct rbuf_list	*rbuf_next;	/* next in the list */
	caddr_t			rbuf_vaddr;	/* virtual addr of the buf */
	uint32_t		rbuf_paddr;	/* physical addr of the buf */
	uint32_t		rbuf_endpaddr;	/* physical addr at the end */
	ddi_dma_handle_t	rbuf_dmahdl;	/* dma handle */
	ddi_acc_handle_t	rbuf_acchdl;	/* handle for DDI functions */
};

/* Required system entry points */
static int dnet_probe(dev_info_t *);
static int dnet_attach(dev_info_t *, ddi_attach_cmd_t);
static int dnet_detach(dev_info_t *, ddi_detach_cmd_t);
static int dnet_quiesce(dev_info_t *);

/* Required driver entry points for GLDv3 */
static int dnet_m_start(void *);
static void dnet_m_stop(void *);
static int dnet_m_getstat(void *, uint_t, uint64_t *);
static int dnet_m_setpromisc(void *, boolean_t);
static int dnet_m_multicst(void *, boolean_t, const uint8_t *);
static int dnet_m_unicst(void *, const uint8_t *);
static mblk_t *dnet_m_tx(void *, mblk_t *);

static uint_t dnet_intr(caddr_t);

/* Internal functions used by the above entry points */
static void write_gpr(struct dnetinstance *dnetp, uint32_t val);
static void dnet_reset_board(struct dnetinstance *);
static void dnet_init_board(struct dnetinstance *);
static void dnet_chip_init(struct dnetinstance *);
static uint32_t hashindex(const uint8_t *);
static int dnet_start(struct dnetinstance *);
static int dnet_set_addr(struct dnetinstance *);

static boolean_t dnet_send(struct dnetinstance *, mblk_t *);

static void dnet_getp(struct dnetinstance *);
static void update_rx_stats(struct dnetinstance *, int);
static void update_tx_stats(struct dnetinstance *, int);

/* Media Selection Setup Routines */
static void set_gpr(struct dnetinstance *);
static void set_opr(struct dnetinstance *);
static void set_sia(struct dnetinstance *);

/* Buffer Management Routines */
static int dnet_alloc_bufs(struct dnetinstance *);
static void dnet_free_bufs(struct dnetinstance *);
static void dnet_init_txrx_bufs(struct dnetinstance *);
static int alloc_descriptor(struct dnetinstance *);
static void dnet_reclaim_Tx_desc(struct dnetinstance *);
static int dnet_rbuf_init(dev_info_t *, int);
static int dnet_rbuf_destroy();
static struct rbuf_list *dnet_rbuf_alloc(dev_info_t *, int);
static void dnet_rbuf_free(caddr_t);
static void dnet_freemsg_buf(struct free_ptr *);

static void setup_block(struct dnetinstance *);

/* SROM read functions */
static int dnet_read_srom(dev_info_t *, int, ddi_acc_handle_t, caddr_t,
    uchar_t *, int);
static void dnet_read21040addr(dev_info_t *, ddi_acc_handle_t, caddr_t,
    uchar_t *, int *);
static void dnet_read21140srom(ddi_acc_handle_t, caddr_t, uchar_t *, int);
static int get_alternative_srom_image(dev_info_t *, uchar_t *, int);
static void dnet_print_srom(SROM_FORMAT *sr);
static void dnet_dump_leaf(LEAF_FORMAT *leaf);
static void dnet_dump_block(media_block_t *block);
#ifdef BUG_4010796
static void set_alternative_srom_image(dev_info_t *, uchar_t *, int);
static int dnet_hack(dev_info_t *);
#endif

static int dnet_hack_interrupts(struct dnetinstance *, int);
static int dnet_detach_hacked_interrupt(dev_info_t *devinfo);
static void enable_interrupts(struct dnetinstance *);

/* SROM parsing functions */
static void dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr,
    uchar_t *vi);
static void parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
    uchar_t *vi);
static uchar_t *parse_media_block(struct dnetinstance *dnetp,
    media_block_t *block, uchar_t *vi);
static int check_srom_valid(uchar_t *);
static void dnet_dumpbin(char *msg, uchar_t *, int size, int len);
static void setup_legacy_blocks();

/* Active Media Determination Routines */
static void find_active_media(struct dnetinstance *);
static int send_test_packet(struct dnetinstance *);
static int dnet_link_sense(struct dnetinstance *);

/* PHY MII Routines */
static ushort_t dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num);
static void dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num,
    int reg_dat);
static void write_mii(struct dnetinstance *, uint32_t, int);
static void mii_tristate(struct dnetinstance *);
static void do_phy(struct dnetinstance *);
static void dnet_mii_link_cb(dev_info_t *, int, enum mii_phy_state);
static void set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf);

#ifdef DNETDEBUG
uint32_t dnet_usecelapsed(struct dnetinstance *dnetp);
void dnet_timestamp(struct dnetinstance *, char *);
void dnet_usectimeout(struct dnetinstance *, uint32_t, int, timercb_t);
#endif

/* Printable names for the media codes, indexed by MEDIA_* */
static char *media_str[] = {
	"10BaseT",
	"10Base2",
	"10Base5",
	"100BaseTX",
	"10BaseT FD",
	"100BaseTX FD",
	"100BaseT4",
	"100BaseFX",
	"100BaseFX FD",
	"MII"
};

/* default SROM info for cards with no SROMs */
static LEAF_FORMAT leaf_default_100;
static LEAF_FORMAT leaf_asante;
static LEAF_FORMAT leaf_phylegacy;
static LEAF_FORMAT leaf_cogent_100;
static LEAF_FORMAT leaf_21041;
static LEAF_FORMAT leaf_21040;

/* rx buffer size (rounded up to 4) */
int rx_buf_size = (ETHERMAX + ETHERFCSL + VLAN_TAGSZ + 3) & ~3;

int max_rx_desc_21040 = MAX_RX_DESC_21040;
int max_rx_desc_21140 = MAX_RX_DESC_21140;
int max_tx_desc = MAX_TX_DESC;
int dnet_xmit_threshold = MAX_TX_DESC >> 2;	/* XXX need tuning? */

static kmutex_t dnet_rbuf_lock;		/* mutex to protect rbuf_list data */

/* used for buffers allocated by ddi_dma_mem_alloc() */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x7FFFFFFF,		/* dma_attr_count_max */
	4,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/* used for buffers allocated for rbuf, allow 2 cookies */
static ddi_dma_attr_t dma_attr_rb = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x7FFFFFFF,		/* dma_attr_count_max */
	4,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	2,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/* used for buffers which are NOT from ddi_dma_mem_alloc() - xmit side */
static ddi_dma_attr_t dma_attr_tx = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x7FFFFFFF,		/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	0x7FFF,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};

uchar_t dnet_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* Standard Module linkage initialization for a Streams driver */
extern struct mod_ops mod_driverops;

DDI_DEFINE_STREAM_OPS(dnet_devops, nulldev, dnet_probe, dnet_attach,
    dnet_detach, nodev, NULL, D_MP, NULL, dnet_quiesce);

static struct modldrv dnet_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	IDENT,			/* short description */
	&dnet_devops		/* driver specific ops */
};

static struct modlinkage dnet_modlinkage = {
	MODREV_1,		/* ml_rev */
	{ &dnet_modldrv, NULL }	/* ml_linkage */
};

static mac_callbacks_t dnet_m_callbacks = {
	0,			/* mc_callbacks */
	dnet_m_getstat,		/* mc_getstat */
	dnet_m_start,		/* mc_start */
	dnet_m_stop,		/* mc_stop */
	dnet_m_setpromisc,	/* mc_setpromisc */
	dnet_m_multicst,	/* mc_multicst */
	dnet_m_unicst,		/* mc_unicst */
	dnet_m_tx,		/* mc_tx */
	NULL,
	NULL,			/* mc_ioctl */
	NULL,			/* mc_getcapab */
	NULL,			/* mc_open */
	NULL			/* mc_close */
};

/*
 * Passed to the hacked interrupt for multiport Cogent and ZNYX cards with
 * dodgy interrupt routing
 */
#define	MAX_INST 8	/* Maximum instances on a multiport adapter.
 */
struct hackintr_inf
{
	struct dnetinstance *dnetps[MAX_INST]; /* dnetps for each port */
	dev_info_t *devinfo;		/* Devinfo of the primary device */
	kmutex_t lock;
	/* Ensures the interrupt doesn't get called while detaching */
};
static char hackintr_propname[] = "InterruptData";
static char macoffset_propname[] = "MAC_offset";
static char speed_propname[] = "speed";
static char ofloprob_propname[] = "dmaworkaround";
static char duplex_propname[] = "full-duplex";	/* Must agree with MII */
static char printsrom_propname[] = "print-srom";

static uint_t dnet_hack_intr(struct hackintr_inf *);

/*
 * _init(9E) -- install the driver: set up global state (rbuf lock,
 * fake SROM leaves for legacy cards), register with the MAC layer,
 * and load the module.  Undoes everything on mod_install() failure.
 */
int
_init(void)
{
	int i;

	/* Configure fake sroms for legacy cards */
	mutex_init(&dnet_rbuf_lock, NULL, MUTEX_DRIVER, NULL);
	setup_legacy_blocks();

	mac_init_ops(&dnet_devops, "dnet");

	if ((i = mod_install(&dnet_modlinkage)) != 0) {
		mac_fini_ops(&dnet_devops);
		mutex_destroy(&dnet_rbuf_lock);
	}
	return (i);
}

/*
 * _fini(9E) -- unload the driver.  Receive buffers may still be loaned
 * upstream, so spin (with a delay) until dnet_rbuf_destroy() reports
 * that every buffer has been returned before tearing down global state.
 */
int
_fini(void)
{
	int i;

	if ((i = mod_remove(&dnet_modlinkage)) == 0) {
		mac_fini_ops(&dnet_devops);

		/* loop until all the receive buffers are freed */
		while (dnet_rbuf_destroy() != 0) {
			delay(drv_usectohz(100000));
#ifdef DNETDEBUG
			if (dnetdebug & DNETDDI)
				cmn_err(CE_WARN, "dnet _fini delay");
#endif
		}
		mutex_destroy(&dnet_rbuf_lock);
	}
	return (i);
}

/* _info(9E) -- report module information */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&dnet_modlinkage, modinfop));
}

/*
 * probe(9E) -- Determine if a device is present
 */
static int
dnet_probe(dev_info_t *devinfo)
{
	ddi_acc_handle_t handle;
	uint16_t vendorid;
	uint16_t deviceid;

	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
		return (DDI_PROBE_FAILURE);

	vendorid = pci_config_get16(handle, PCI_CONF_VENID);

	if (vendorid != DEC_VENDOR_ID) {
		pci_config_teardown(&handle);
		return (DDI_PROBE_FAILURE);
	}

	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
	switch (deviceid) {
	case DEVICE_ID_21040:
	case DEVICE_ID_21041:
	case DEVICE_ID_21140:
	case DEVICE_ID_21143:	/* And 142 */
		break;
	default:
		pci_config_teardown(&handle);
		return (DDI_PROBE_FAILURE);
	}

	pci_config_teardown(&handle);
#ifndef BUG_4010796
	return (DDI_PROBE_SUCCESS);
#else
	return (dnet_hack(devinfo));
#endif
}

#ifdef BUG_4010796
/*
 * If we have a device, but we cannot presently access its SROM data,
 * then we return DDI_PROBE_PARTIAL and hope that sometime later we
 * will be able to get at the SROM data. This can only happen if we
 * are a secondary port with no SROM, and the bootstrap failed to set
 * our DNET_SROM property, and our primary sibling has not yet probed.
 */
static int
dnet_hack(dev_info_t *devinfo)
{
	uchar_t vendor_info[SROM_SIZE];
	uint32_t csr;
	uint16_t deviceid;
	ddi_acc_handle_t handle;
	uint32_t retval;
	int secondary;
	ddi_acc_handle_t io_handle;
	caddr_t io_reg;

#define	DNET_PCI_RNUMBER	1

	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
		return (DDI_PROBE_FAILURE);

	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);

	/*
	 * Turn on Master Enable and IO Enable bits.
	 */
	csr = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));

	pci_config_teardown(&handle);

	/* Now map I/O register */
	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER,
	    &io_reg, 0, 0, &accattr, &io_handle) != DDI_SUCCESS) {
		return (DDI_PROBE_FAILURE);
	}

	/*
	 * Reset the chip
	 */
	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), SW_RESET);
	drv_usecwait(3);
	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), 0);
	drv_usecwait(8);

	secondary = dnet_read_srom(devinfo, deviceid, io_handle,
	    io_reg, vendor_info, sizeof (vendor_info));

	switch (secondary) {
	case -1:
		/* We can't access our SROM data! */
		retval = DDI_PROBE_PARTIAL;
		break;
	case 0:
		retval = DDI_PROBE_SUCCESS;
		break;
	default:
		/* Secondary port with SROM data available elsewhere */
		retval = DDI_PROBE_SUCCESS;
	}

	ddi_regs_map_free(&io_handle);
	return (retval);
}
#endif /* BUG_4010796 */

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dnet_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	uint16_t revid;
	struct dnetinstance *dnetp;	/* Our private device info */
	mac_register_t *macp;
	uchar_t vendor_info[SROM_SIZE];
	uint32_t csr;
	uint16_t deviceid;
	ddi_acc_handle_t handle;
	int secondary;

#define	DNET_PCI_RNUMBER	1

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/* Get the driver private (dnetinstance) structure */
		dnetp = ddi_get_driver_private(devinfo);

		mutex_enter(&dnetp->intrlock);
		mutex_enter(&dnetp->txlock);
		dnet_reset_board(dnetp);
		dnet_init_board(dnetp);
		dnetp->suspended = B_FALSE;

		if (dnetp->running) {
			dnetp->need_tx_update = B_FALSE;
			mutex_exit(&dnetp->txlock);
			(void) dnet_start(dnetp);
			mutex_exit(&dnetp->intrlock);
			mac_tx_update(dnetp->mac_handle);
		} else {
			mutex_exit(&dnetp->txlock);
			mutex_exit(&dnetp->intrlock);
		}
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
	switch (deviceid) {
	case DEVICE_ID_21040:
	case DEVICE_ID_21041:
	case DEVICE_ID_21140:
	case DEVICE_ID_21143:	/* And 142 */
		break;
	default:
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	/*
	 * Turn on Master Enable and IO Enable bits.
	 */
	csr = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));

	/* Make sure the device is not asleep */
	csr = pci_config_get32(handle, PCI_DNET_CONF_CFDD);
	pci_config_put32(handle, PCI_DNET_CONF_CFDD,
	    csr & ~(CFDD_SLEEP|CFDD_SNOOZE));

	revid = pci_config_get8(handle, PCI_CONF_REVID);
	pci_config_teardown(&handle);

	dnetp = kmem_zalloc(sizeof (struct dnetinstance), KM_SLEEP);
	ddi_set_driver_private(devinfo, dnetp);

	/* Now map I/O register */
	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER, &dnetp->io_reg,
	    0, 0, &accattr, &dnetp->io_handle) != DDI_SUCCESS) {
		kmem_free(dnetp, sizeof (struct dnetinstance));
		return (DDI_FAILURE);
	}

	dnetp->devinfo = devinfo;
	dnetp->board_type = deviceid;

	/*
	 * Get the iblock cookie with which to initialize the mutexes.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &dnetp->icookie)
	    != DDI_SUCCESS)
		goto fail;

	/*
	 * Initialize mutex's for this device.
	 * Do this before registering the interrupt handler to avoid
	 * condition where interrupt handler can try using uninitialized
	 * mutex.
	 * Lock ordering rules: always lock intrlock first before
	 * txlock if both are required.
	 */
	mutex_init(&dnetp->txlock, NULL, MUTEX_DRIVER, dnetp->icookie);
	mutex_init(&dnetp->intrlock, NULL, MUTEX_DRIVER, dnetp->icookie);

	/*
	 * Get the BNC/TP indicator from the conf file for 21040
	 */
	dnetp->bnc_indicator =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "bncaui", -1);

	/*
	 * For 21140 check the data rate set in the conf file. Default is
	 * 100Mb/s.  Disallow connections at settings that would conflict
	 * with what's in the conf file
	 */
	dnetp->speed =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    speed_propname, 0);
	dnetp->full_duplex =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    duplex_propname, -1);

	if (dnetp->speed == 100) {
		dnetp->disallowed_media |= (1UL<<MEDIA_TP) | (1UL<<MEDIA_TP_FD);
	} else if (dnetp->speed == 10) {
		dnetp->disallowed_media |=
		    (1UL<<MEDIA_SYM_SCR) | (1UL<<MEDIA_SYM_SCR_FD);
	}

	if (dnetp->full_duplex == 1) {
		dnetp->disallowed_media |=
		    (1UL<<MEDIA_TP) | (1UL<<MEDIA_SYM_SCR);
	} else if (dnetp->full_duplex == 0) {
		dnetp->disallowed_media |=
		    (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_SYM_SCR_FD);
	}

	if (dnetp->bnc_indicator == 0)		/* Disable BNC and AUI media */
		dnetp->disallowed_media |= (1UL<<MEDIA_BNC) | (1UL<<MEDIA_AUI);
	else if (dnetp->bnc_indicator == 1)	/* Force BNC only */
		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_BNC);
	else if (dnetp->bnc_indicator == 2)	/* Force AUI only */
		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_AUI);

	dnet_reset_board(dnetp);

	secondary = dnet_read_srom(devinfo, dnetp->board_type, dnetp->io_handle,
	    dnetp->io_reg, vendor_info, sizeof (vendor_info));

	if (secondary == -1) /* ASSERT (vendor_info not big enough) */
		goto fail1;

	dnet_parse_srom(dnetp, &dnetp->sr, vendor_info);

	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    printsrom_propname, 0))
		dnet_print_srom(&dnetp->sr);

	dnetp->sr.netaddr[ETHERADDRL-1] += secondary;	/* unique ether addr */

	BCOPY((caddr_t)dnetp->sr.netaddr,
	    (caddr_t)dnetp->vendor_addr, ETHERADDRL);

	BCOPY((caddr_t)dnetp->sr.netaddr,
	    (caddr_t)dnetp->curr_macaddr, ETHERADDRL);

	/*
	 * determine whether to implement workaround from DEC
	 * for DMA overrun errata.
	 */
	dnetp->overrun_workaround =
	    ((dnetp->board_type == DEVICE_ID_21140 && revid >= 0x20) ||
	    (dnetp->board_type == DEVICE_ID_21143 && revid <= 0x30)) ? 1 : 0;

	/* the conf file setting, if present, overrides the revid heuristic */
	dnetp->overrun_workaround =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    ofloprob_propname, dnetp->overrun_workaround);

	/*
	 * Add the interrupt handler if dnet_hack_interrupts() returns 0.
	 * Otherwise dnet_hack_interrupts() itself adds the handler.
	 */
	if (!dnet_hack_interrupts(dnetp, secondary)) {
		(void) ddi_add_intr(devinfo, 0, NULL,
		    NULL, dnet_intr, (caddr_t)dnetp);
	}

	dnetp->max_tx_desc = max_tx_desc;
	dnetp->max_rx_desc = max_rx_desc_21040;
	if (dnetp->board_type != DEVICE_ID_21040 &&
	    dnetp->board_type != DEVICE_ID_21041 &&
	    dnetp->speed != 10)
		dnetp->max_rx_desc = max_rx_desc_21140;

	/* Allocate the TX and RX descriptors/buffers. */
	if (dnet_alloc_bufs(dnetp) == FAILURE) {
		cmn_err(CE_WARN, "DNET: Not enough DMA memory for buffers.");
		goto fail2;
	}

	/*
	 * Register ourselves with the GLDv3 interface
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto fail2;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dnetp;
	macp->m_dip = devinfo;
	macp->m_src_addr = dnetp->curr_macaddr;
	macp->m_callbacks = &dnet_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &dnetp->mac_handle) == 0) {
		mac_free(macp);

		mutex_enter(&dnetp->intrlock);

		dnetp->phyaddr = -1;
		if (dnetp->board_type == DEVICE_ID_21140 ||
		    dnetp->board_type == DEVICE_ID_21143)
			do_phy(dnetp);	/* Initialize the PHY, if any */
		find_active_media(dnetp);

		/* if the chosen media is non-MII, stop the port monitor */
		if (dnetp->selected_media_block->media_code != MEDIA_MII &&
		    dnetp->mii != NULL) {
			mii_destroy(dnetp->mii);
			dnetp->mii = NULL;
			dnetp->phyaddr = -1;
		}

#ifdef DNETDEBUG
		if (dnetdebug & DNETSENSE)
			cmn_err(CE_NOTE, "dnet: link configured : %s",
			    media_str[dnetp->selected_media_block->media_code]);
#endif
		bzero(dnetp->setup_buf_vaddr, SETUPBUF_SIZE);

		dnet_reset_board(dnetp);
		dnet_init_board(dnetp);

		mutex_exit(&dnetp->intrlock);

		(void) dnet_m_unicst(dnetp, dnetp->curr_macaddr);
		(void) dnet_m_multicst(dnetp, B_TRUE, dnet_broadcastaddr);

		return (DDI_SUCCESS);
	}

	mac_free(macp);
fail2:
	/* XXX function return value ignored */
	/*
	 * dnet_detach_hacked_interrupt() will remove
	 * interrupt for the non-hacked case also.
	 */
	(void) dnet_detach_hacked_interrupt(devinfo);
	dnet_free_bufs(dnetp);
fail1:
	mutex_destroy(&dnetp->txlock);
	mutex_destroy(&dnetp->intrlock);
fail:
	ddi_regs_map_free(&dnetp->io_handle);
	kmem_free(dnetp, sizeof (struct dnetinstance));
	return (DDI_FAILURE);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
dnet_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int32_t rc;
	struct dnetinstance *dnetp;	/* Our private device info */
	int32_t proplen;

	/* Get the driver private (dnetinstance) structure */
	dnetp = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		/*
		 * NB: dnetp->suspended can only be modified (marked true)
		 * if both intrlock and txlock are held. This keeps both
		 * tx and rx code paths excluded.
		 */
		mutex_enter(&dnetp->intrlock);
		mutex_enter(&dnetp->txlock);
		dnetp->suspended = B_TRUE;
		dnet_reset_board(dnetp);
		mutex_exit(&dnetp->txlock);
		mutex_exit(&dnetp->intrlock);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Unregister ourselves from the GLDv3 interface
	 */
	if (mac_unregister(dnetp->mac_handle) != 0)
		return (DDI_FAILURE);

	/* stop the board if it is running */
	dnet_reset_board(dnetp);

	if ((rc = dnet_detach_hacked_interrupt(devinfo)) != DDI_SUCCESS)
		return (rc);

	if (dnetp->mii != NULL)
		mii_destroy(dnetp->mii);

	/* Free leaf information */
	set_leaf(&dnetp->sr, NULL);

	ddi_regs_map_free(&dnetp->io_handle);
	dnet_free_bufs(dnetp);
	mutex_destroy(&dnetp->txlock);
	mutex_destroy(&dnetp->intrlock);
	kmem_free(dnetp, sizeof (struct dnetinstance));

#ifdef BUG_4010796
	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, 0,
	    "DNET_HACK", &proplen) != DDI_PROP_SUCCESS)
		return (DDI_SUCCESS);

	/*
	 * We must remove the properties we added, because if we leave
	 * them in the devinfo nodes and the driver is unloaded, when
	 * the driver is reloaded the info will still be there, causing
	 * nodes which had returned PROBE_PARTIAL the first time to
	 * instead return PROBE_SUCCESS, in turn causing the nodes to be
	 * attached in a different order, causing their PPA numbers to
	 * be different the second time around, which is undesirable.
	 */
	(void) ddi_prop_remove(DDI_DEV_T_NONE, devinfo, "DNET_HACK");
	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
	    "DNET_SROM");
	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
	    "DNET_DEVNUM");
#endif

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) -- called with interrupts disabled and no other threads
 * running, so no locking is performed here.
 */
int
dnet_quiesce(dev_info_t *dip)
{
	struct dnetinstance *dnetp = ddi_get_driver_private(dip);

	/*
	 * Reset chip (disables interrupts).
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
	ddi_put32(dnetp->io_handle,
	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);

	return (DDI_SUCCESS);
}

/* Stop tx/rx, mask interrupts, and software-reset the chip (CSR0). */
static void
dnet_reset_board(struct dnetinstance *dnetp)
{
	uint32_t val;

	/*
	 * before initializing the dnet should be in STOP state
	 */
	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
	    val & ~(START_TRANSMIT | START_RECEIVE));

	/*
	 * Reset the chip
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
	ddi_put32(dnetp->io_handle,
	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
	drv_usecwait(5);
}

/*
 * dnet_init_board() -- initialize the specified network board short of
 * actually starting the board.  Call after dnet_reset_board().
 * called with intrlock held.
 */
static void
dnet_init_board(struct dnetinstance *dnetp)
{
	set_opr(dnetp);
	set_gpr(dnetp);
	set_sia(dnetp);
	dnet_chip_init(dnetp);
}

/* dnet_chip_init() - called with intrlock held */
static void
dnet_chip_init(struct dnetinstance *dnetp)
{
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, BUS_MODE_REG),
	    CACHE_ALIGN | BURST_SIZE);		/* CSR0 */

	/*
	 * Initialize the TX and RX descriptors/buffers
	 */
	dnet_init_txrx_bufs(dnetp);

	/*
	 * Set the base address of the Rx descriptor list in CSR3
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, RX_BASE_ADDR_REG),
	    dnetp->rx_desc_paddr);

	/*
	 * Set the base address of the Tx descriptor list in CSR4
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_BASE_ADDR_REG),
	    dnetp->tx_desc_paddr);

	dnetp->tx_current_desc = dnetp->rx_current_desc = 0;
	dnetp->transmitted_desc = 0;
	dnetp->free_desc = dnetp->max_tx_desc;
	enable_interrupts(dnetp);
}
936 937 /* 938 * dnet_start() -- start the board receiving and allow transmits. 939 * Called with intrlock held. 940 */ 941 static int 942 dnet_start(struct dnetinstance *dnetp) 943 { 944 uint32_t val; 945 946 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 947 /* 948 * start the board and enable receiving 949 */ 950 val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG)); 951 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), 952 val | START_TRANSMIT); 953 (void) dnet_set_addr(dnetp); 954 val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG)); 955 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), 956 val | START_RECEIVE); 957 enable_interrupts(dnetp); 958 return (0); 959 } 960 961 static int 962 dnet_m_start(void *arg) 963 { 964 struct dnetinstance *dnetp = arg; 965 966 mutex_enter(&dnetp->intrlock); 967 dnetp->running = B_TRUE; 968 /* 969 * start the board and enable receiving 970 */ 971 if (!dnetp->suspended) 972 (void) dnet_start(dnetp); 973 mutex_exit(&dnetp->intrlock); 974 return (0); 975 } 976 977 static void 978 dnet_m_stop(void *arg) 979 { 980 struct dnetinstance *dnetp = arg; 981 uint32_t val; 982 983 /* 984 * stop the board and disable transmit/receive 985 */ 986 mutex_enter(&dnetp->intrlock); 987 if (!dnetp->suspended) { 988 val = ddi_get32(dnetp->io_handle, 989 REG32(dnetp->io_reg, OPN_MODE_REG)); 990 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), 991 val & ~(START_TRANSMIT | START_RECEIVE)); 992 } 993 mac_link_update(dnetp->mac_handle, LINK_STATE_UNKNOWN); 994 dnetp->running = B_FALSE; 995 mutex_exit(&dnetp->intrlock); 996 } 997 998 /* 999 * dnet_set_addr() -- set the physical network address on the board 1000 * Called with intrlock held. 
 */
static int
dnet_set_addr(struct dnetinstance *dnetp)
{
	struct tx_desc_type *desc;
	int current_desc;
	uint32_t val;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));

	/* nothing to do unless the transmitter is running */
	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
	if (!(val & START_TRANSMIT))
		return (0);

	current_desc = dnetp->tx_current_desc;
	desc = &dnetp->tx_desc[current_desc];

	mutex_enter(&dnetp->txlock);
	dnetp->need_saddr = 0;
	mutex_exit(&dnetp->txlock);

	if ((alloc_descriptor(dnetp)) == FAILURE) {
		/* no free tx descriptor: flag a retry from the tx path */
		mutex_enter(&dnetp->txlock);
		dnetp->need_saddr = 1;
		mutex_exit(&dnetp->txlock);
#ifdef DNETDEBUG
		if (dnetdebug & DNETTRACE)
			cmn_err(CE_WARN, "DNET saddr:alloc descriptor failure");
#endif
		return (0);
	}

	/* describe the setup buffer as a one-fragment "setup packet" */
	desc->buffer1 = dnetp->setup_buf_paddr;
	desc->buffer2 = 0;
	desc->desc1.buffer_size1 = SETUPBUF_SIZE;
	desc->desc1.buffer_size2 = 0;
	desc->desc1.setup_packet = 1;
	desc->desc1.first_desc = 0;
	desc->desc1.last_desc = 0;
	desc->desc1.filter_type0 = 1;
	desc->desc1.filter_type1 = 1;
	desc->desc1.int_on_comp = 1;

	/* hand the descriptor to the chip and poke it to scan the tx list */
	desc->desc0.own = 1;
	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
	    TX_POLL_DEMAND);
	return (0);
}

/*
 * dnet_m_unicst() -- GLDv3 mc_unicst(9E) entry point.
 * Records the new station address and sets the corresponding bits
 * (plus the broadcast bit) in the 512-bit imperfect hash filter,
 * then reloads the setup frame. Always returns 0.
 */
static int
dnet_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct dnetinstance *dnetp = arg;
	uint32_t index;
	uint32_t *hashp;

	mutex_enter(&dnetp->intrlock);

	bcopy(macaddr, dnetp->curr_macaddr, ETHERADDRL);

	/*
	 * As we are using Imperfect filtering, the broadcast address has to
	 * be set explicitly in the 512 bit hash table. Hence the index into
	 * the hash table is calculated and the bit set to enable reception
	 * of broadcast packets.
	 *
	 * We also use HASH_ONLY mode, without using the perfect filter for
	 * our station address, because there appears to be a bug in the
	 * 21140 where it fails to receive the specified perfect filter
	 * address.
	 *
	 * Since dlsdmult comes through here, it doesn't matter that the count
	 * is wrong for the two bits that correspond to the cases below. The
	 * worst that could happen is that we'd leave on a bit for an old
	 * macaddr, in the case where the macaddr gets changed, which is rare.
	 * Since filtering is imperfect, it is OK if that happens.
	 */
	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
	index = hashindex((uint8_t *)dnet_broadcastaddr);
	hashp[ index / 16 ] |= 1 << (index % 16);

	index = hashindex((uint8_t *)dnetp->curr_macaddr);
	hashp[ index / 16 ] |= 1 << (index % 16);

	if (!dnetp->suspended)
		(void) dnet_set_addr(dnetp);
	mutex_exit(&dnetp->intrlock);
	return (0);
}

/*
 * dnet_m_multicst() -- GLDv3 mc_multicst(9E) entry point.
 * Reference-counts multicast addresses per hash bucket; the hash bit
 * is set on the first add and cleared on the last remove, after which
 * the setup frame is reloaded.
 * NOTE(review): multicast_cnt[] is decremented before checking for
 * zero; an unbalanced remove would wrap the counter — confirm callers
 * always balance add/remove.
 */
static int
dnet_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
{
	struct dnetinstance *dnetp = arg;
	uint32_t index;
	uint32_t *hashp;
	uint32_t retval;

	mutex_enter(&dnetp->intrlock);
	index = hashindex(macaddr);
	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
	if (add) {
		if (dnetp->multicast_cnt[index]++) {
			mutex_exit(&dnetp->intrlock);
			return (0);
		}
		hashp[ index / 16 ] |= 1 << (index % 16);
	} else {
		if (--dnetp->multicast_cnt[index]) {
			mutex_exit(&dnetp->intrlock);
			return (0);
		}
		hashp[ index / 16 ] &= ~ (1 << (index % 16));
	}
	if (!dnetp->suspended)
		retval = dnet_set_addr(dnetp);
	else
		retval = 0;
	mutex_exit(&dnetp->intrlock);
	return (retval);
}

/*
 * A hashing function used for setting the
 * node address or a multicast address
 *
 * CRC-style hash over the 6-byte Ethernet address; 9 bits of the CRC
 * (bits 23..31, reversed into bits 8..0) form the index into the
 * 512-bit hash table in the setup frame.
 */
static uint32_t
hashindex(const uint8_t *address)
{
	uint32_t crc = (uint32_t)HASH_CRC;
	uint32_t const POLY = HASH_POLY;
	uint32_t msb;
	int32_t byteslength;
	uint8_t currentbyte;
	uint32_t index;
	int32_t bit;
	int32_t shift;

	/* feed each address byte LSB-first through the CRC register */
	for (byteslength = 0; byteslength < ETHERADDRL; byteslength++) {
		currentbyte = address[byteslength];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1)) {
				crc ^= POLY;
				crc |= 0x00000001;
			}
			currentbyte >>= 1;
		}
	}

	/* bit-reverse CRC bits 23..31 into a 9-bit table index */
	for (index = 0, bit = 23, shift = 8; shift >= 0; bit++, shift--) {
		index |= (((crc >> bit) & 1) << shift);
	}
	return (index);
}

/*
 * dnet_m_setpromisc() -- GLDv3 mc_setpromisc(9E) entry point.
 * Toggles PROM_MODE in CSR6 when the requested state differs from the
 * cached one. Always returns 0.
 */
static int
dnet_m_setpromisc(void *arg, boolean_t on)
{
	struct dnetinstance *dnetp = arg;
	uint32_t val;

	mutex_enter(&dnetp->intrlock);
	if (dnetp->promisc == on) {
		mutex_exit(&dnetp->intrlock);
		return (0);
	}
	dnetp->promisc = on;

	if (!dnetp->suspended) {
		val = ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, OPN_MODE_REG));
		if (on)
			ddi_put32(dnetp->io_handle,
			    REG32(dnetp->io_reg, OPN_MODE_REG),
			    val | PROM_MODE);
		else
			ddi_put32(dnetp->io_handle,
			    REG32(dnetp->io_reg, OPN_MODE_REG),
			    val & (~PROM_MODE));
	}
	mutex_exit(&dnetp->intrlock);
	return (0);
}

/*
 * dnet_m_getstat() -- GLDv3 mc_getstat(9E) entry point.
 * Reports one driver statistic in *val; returns ENOTSUP for
 * statistics this driver does not maintain.
 */
static int
dnet_m_getstat(void *arg, uint_t stat, uint64_t *val)
{
	struct dnetinstance *dnetp = arg;

	switch (stat) {
	case MAC_STAT_IFSPEED:
		if (!dnetp->running) {
			*val = 0;
		} else {
			/* prefer the MII-reported speed when the PHY is up */
			*val = (dnetp->mii_up ?
			    dnetp->mii_speed : dnetp->speed) * 1000000;
		}
		break;

	case MAC_STAT_NORCVBUF:
		*val = dnetp->stat_norcvbuf;
		break;

	case MAC_STAT_IERRORS:
		*val = dnetp->stat_errrcv;
		break;

	case MAC_STAT_OERRORS:
		*val = dnetp->stat_errxmt;
		break;

	case MAC_STAT_COLLISIONS:
		*val = dnetp->stat_collisions;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = dnetp->stat_defer;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = dnetp->stat_nocarrier;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = dnetp->stat_short;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		if (!dnetp->running) {
			*val = LINK_DUPLEX_UNKNOWN;

		} else if (dnetp->mii_up) {
			*val = dnetp->mii_duplex ?
			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		} else {
			*val = dnetp->full_duplex ?
			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		}
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = dnetp->stat_xmtlatecoll;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = dnetp->stat_excoll;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = dnetp->stat_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = dnetp->stat_underflow;
		break;

	default:
		return (ENOTSUP);
	}

	return (0);
}

/* circular increment/decrement over the transmit descriptor ring */
#define	NextTXIndex(index) (((index)+1) % dnetp->max_tx_desc)
#define	PrevTXIndex(index) (((index)-1) < 0 ? dnetp->max_tx_desc - 1: (index)-1)

/*
 * dnet_m_tx() -- GLDv3 mc_tx(9E) entry point.
 * Queues each message in the b_next chain to the hardware via
 * dnet_send(); returns the untransmitted remainder of the chain (for
 * MAC-layer retry) or NULL when everything was accepted or dropped.
 */
static mblk_t *
dnet_m_tx(void *arg, mblk_t *mp)
{
	struct dnetinstance *dnetp = arg;

	mutex_enter(&dnetp->txlock);

	/* if suspended, drop the packet on the floor, we missed it */
	if (dnetp->suspended) {
		mutex_exit(&dnetp->txlock);
		freemsg(mp);
		return (NULL);
	}

	if (dnetp->need_saddr) {
		/* XXX function return value ignored */
		/* lock order is intrlock before txlock, hence the dance */
		mutex_exit(&dnetp->txlock);
		mutex_enter(&dnetp->intrlock);
		(void) dnet_set_addr(dnetp);
		mutex_exit(&dnetp->intrlock);
		mutex_enter(&dnetp->txlock);
	}

	while (mp != NULL) {
		if (!dnet_send(dnetp, mp)) {
			/* out of descriptors: hand the rest back to MAC */
			mutex_exit(&dnetp->txlock);
			return (mp);
		}
		mp = mp->b_next;
	}

	mutex_exit(&dnetp->txlock);

	/*
	 * Enable xmit interrupt in case we are running out of xmit descriptors
	 * or there are more packets on the queue waiting to be transmitted.
	 */
	mutex_enter(&dnetp->intrlock);

	enable_interrupts(dnetp);

	/*
	 * Kick the transmitter
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_POLL_REG),
	    TX_POLL_DEMAND);

	mutex_exit(&dnetp->intrlock);

	return (NULL);
}

/*
 * dnet_send() -- map one message onto transmit descriptors.
 * Called with txlock held. Builds a scatter/gather list from the
 * message's data blocks (two DMA cookies per descriptor), then sets
 * first/last/own bits working backwards so the chip sees a complete
 * frame. Returns B_FALSE to request a retry (no descriptors/resources)
 * or B_TRUE when the message was consumed (queued or dropped).
 */
static boolean_t
dnet_send(struct dnetinstance *dnetp, mblk_t *mp)
{
	struct tx_desc_type *ring = dnetp->tx_desc;
	int mblen, totlen;
	int index, end_index, start_index;
	int avail;
	int error;
	int bufn;
	int retval;
	mblk_t *bp;

	ASSERT(MUTEX_HELD(&dnetp->txlock));

	/* reclaim any xmit descriptors completed */
	dnet_reclaim_Tx_desc(dnetp);

	/*
	 * Use the data buffers from the message and construct the
	 * scatter/gather list by calling ddi_dma_addr_bind_handle().
	 */
	error = 0;
	totlen = 0;
	bp = mp;
	bufn = 0;
	index = start_index = dnetp->tx_current_desc;
	avail = dnetp->free_desc;
	while (bp != NULL) {
		uint_t ncookies;
		ddi_dma_cookie_t dma_cookie;

		mblen = MBLKL(bp);

		if (!mblen) {	/* skip zero-length message blocks */
			bp = bp->b_cont;
			continue;
		}

		retval = ddi_dma_addr_bind_handle(dnetp->dma_handle_tx, NULL,
		    (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0,
		    &dma_cookie, &ncookies);

		switch (retval) {
		case DDI_DMA_MAPPED:
			break;		/* everything's fine */

		case DDI_DMA_NORESOURCES:
			error = 1;	/* allow retry by gld */
			break;

		case DDI_DMA_NOMAPPING:
		case DDI_DMA_INUSE:
		case DDI_DMA_TOOBIG:
		default:
			error = 2;	/* error, no retry */
			break;
		}

		/*
		 * we can use two cookies per descriptor (i.e buffer1 and
		 * buffer2) so we need at least (ncookies+1)/2 descriptors.
		 */
		if (((ncookies + 1) >> 1) > dnetp->free_desc) {
			(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
			error = 1;
			break;
		}

		/* setup the descriptors for this data buffer */
		while (ncookies) {
			end_index = index;
			if (bufn % 2) {
				/* odd cookie: fill buffer2 of current desc */
				ring[index].buffer2 =
				    (uint32_t)dma_cookie.dmac_address;
				ring[index].desc1.buffer_size2 =
				    dma_cookie.dmac_size;
				index = NextTXIndex(index); /* goto next desc */
			} else {
				/* initialize the descriptor */
				ASSERT(ring[index].desc0.own == 0);
				*(uint32_t *)&ring[index].desc0 = 0;
				/* preserve only the end-of-ring flag */
				*(uint32_t *)&ring[index].desc1 &=
				    DNET_END_OF_RING;
				ring[index].buffer1 =
				    (uint32_t)dma_cookie.dmac_address;
				ring[index].desc1.buffer_size1 =
				    dma_cookie.dmac_size;
				ring[index].buffer2 = (uint32_t)(0);
				dnetp->free_desc--;
				ASSERT(dnetp->free_desc >= 0);
			}
			totlen += dma_cookie.dmac_size;
			bufn++;
			if (--ncookies)
				ddi_dma_nextcookie(dnetp->dma_handle_tx,
				    &dma_cookie);
		}
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
		bp = bp->b_cont;
	}

	if (error == 1) {
		/* transient failure: restore the ring and ask for a retry */
		dnetp->stat_defer++;
		dnetp->free_desc = avail;
		dnetp->need_tx_update = B_TRUE;
		return (B_FALSE);
	} else if (error) {
		dnetp->free_desc = avail;
		freemsg(mp);
		return (B_TRUE);	/* Drop packet, don't retry */
	}

	if (totlen > ETHERMAX + VLAN_TAGSZ) {
		cmn_err(CE_WARN, "DNET: tried to send large %d packet", totlen);
		dnetp->free_desc = avail;
		freemsg(mp);
		return (B_TRUE);	/* Don't repeat this attempt */
	}

	/*
	 * Remember the message buffer pointer to do freemsg() at xmit
	 * interrupt time.
	 */
	dnetp->tx_msgbufp[end_index] = mp;

	/*
	 * Now set the first/last buffer and own bits
	 * Since the 21040 looks for these bits set in the
	 * first buffer, work backwards in multiple buffers.
	 */
	ring[end_index].desc1.last_desc = 1;
	ring[end_index].desc1.int_on_comp = 1;
	for (index = end_index; index != start_index;
	    index = PrevTXIndex(index))
		ring[index].desc0.own = 1;
	ring[start_index].desc1.first_desc = 1;
	ring[start_index].desc0.own = 1;

	dnetp->tx_current_desc = NextTXIndex(end_index);

	/*
	 * Safety check: make sure end-of-ring is set in last desc.
	 */
	ASSERT(ring[dnetp->max_tx_desc-1].desc1.end_of_ring != 0);

	return (B_TRUE);
}

/*
 * dnet_intr() -- interrupt from board to inform us that a receive or
 * transmit has completed.
 */
static uint_t
dnet_intr(caddr_t arg)
{
	struct dnetinstance *dnetp = (struct dnetinstance *)arg;
	uint32_t int_status;

	mutex_enter(&dnetp->intrlock);

	if (dnetp->suspended) {
		mutex_exit(&dnetp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	int_status = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg,
	    STATUS_REG));

	/*
	 * If interrupt was not from this board
	 */
	if (!(int_status & (NORMAL_INTR_SUMM | ABNORMAL_INTR_SUMM))) {
		mutex_exit(&dnetp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	dnetp->stat_intr++;

	if (int_status & GPTIMER_INTR) {
		/* ack by writing the status bit back */
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, STATUS_REG), GPTIMER_INTR);
		if (dnetp->timer.cb)
			dnetp->timer.cb(dnetp);
		else
			cmn_err(CE_WARN, "dnet: unhandled timer interrupt");
	}

	if (int_status & TX_INTR) {
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, STATUS_REG), TX_INTR);
		mutex_enter(&dnetp->txlock);
		if (dnetp->need_tx_update) {
			/* drop both locks: mac_tx_update() may re-enter tx */
			mutex_exit(&dnetp->txlock);
			mutex_exit(&dnetp->intrlock);
			mac_tx_update(dnetp->mac_handle);
			mutex_enter(&dnetp->intrlock);
			mutex_enter(&dnetp->txlock);
			dnetp->need_tx_update = B_FALSE;
		}
		/* reclaim any xmit descriptors that are completed */
		dnet_reclaim_Tx_desc(dnetp);
		mutex_exit(&dnetp->txlock);
	}

	/*
	 * Check if receive interrupt bit is set
	 */
	if (int_status & (RX_INTR | RX_UNAVAIL_INTR)) {
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, STATUS_REG),
		    int_status & (RX_INTR | RX_UNAVAIL_INTR));
		dnet_getp(dnetp);
	}

	if (int_status & ABNORMAL_INTR_SUMM) {
		/*
		 * Check for system error
		 */
		if (int_status & SYS_ERR) {
			if ((int_status & SYS_ERR_BITS) == MASTER_ABORT)
				cmn_err(CE_WARN, "DNET: Bus Master Abort");
			if ((int_status & SYS_ERR_BITS) == TARGET_ABORT)
				cmn_err(CE_WARN, "DNET: Bus Target Abort");
			if ((int_status & SYS_ERR_BITS) == PARITY_ERROR)
				cmn_err(CE_WARN, "DNET: Parity error");
		}

		/*
		 * If the jabber has timed out then reset the chip
		 */
		if (int_status & TX_JABBER_TIMEOUT)
			cmn_err(CE_WARN, "DNET: Jabber timeout.");

		/*
		 * If an underflow has occurred, reset the chip
		 */
		if (int_status & TX_UNDERFLOW)
			cmn_err(CE_WARN, "DNET: Tx Underflow.");

#ifdef DNETDEBUG
		if (dnetdebug & DNETINT)
			cmn_err(CE_NOTE, "Trying to reset...");
#endif
		/* any abnormal condition: full reset/reinit/restart */
		dnet_reset_board(dnetp);
		dnet_init_board(dnetp);
		/* XXX function return value ignored */
		(void) dnet_start(dnetp);
	}

	/*
	 * Enable the interrupts. Enable xmit interrupt in case we are
	 * running out of free descriptors or if there are packets
	 * in the queue waiting to be transmitted.
	 */
	enable_interrupts(dnetp);
	mutex_exit(&dnetp->intrlock);
	return (DDI_INTR_CLAIMED);	/* Indicate it was our interrupt */
}

/*
 * dnet_getp() -- drain received packets from the descriptor ring and
 * pass them upstream via mac_rx(). Called from dnet_intr() with
 * intrlock held; the lock is dropped and re-acquired around mac_rx().
 */
static void
dnet_getp(struct dnetinstance *dnetp)
{
	int packet_length, index;
	mblk_t *mp;
	caddr_t virtual_address;
	struct rx_desc_type *desc = dnetp->rx_desc;
	int marker = dnetp->rx_current_desc;
	int misses;

	if (!dnetp->overrun_workaround) {
		/*
		 * If the workaround is not in place, we must still update
		 * the missed frame statistic from the on-chip counter.
		 */
		misses = ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, MISSED_FRAME_REG));
		dnetp->stat_missed += (misses & MISSED_FRAME_MASK);
	}

	/* While host owns the current descriptor */
	while (!(desc[dnetp->rx_current_desc].desc0.own)) {
		struct free_ptr *frp;
		caddr_t newbuf;
		struct rbuf_list *rp;

		index = dnetp->rx_current_desc;
		ASSERT(desc[index].desc0.first_desc != 0);

		/*
		 * DMA overrun errata from DEC: avoid possible bus hangs
		 * and data corruption
		 */
		if (dnetp->overrun_workaround &&
		    marker == dnetp->rx_current_desc) {
			int opn;
			do {
				marker = (marker+1) % dnetp->max_rx_desc;
			} while (!(dnetp->rx_desc[marker].desc0.own) &&
			    marker != index);

			misses = ddi_get32(dnetp->io_handle,
			    REG32(dnetp->io_reg, MISSED_FRAME_REG));
			dnetp->stat_missed +=
			    (misses & MISSED_FRAME_MASK);
			if (misses & OVERFLOW_COUNTER_MASK) {
				/*
				 * Overflow(s) have occurred : stop receiver,
				 * and wait until in stopped state
				 */
				opn = ddi_get32(dnetp->io_handle,
				    REG32(dnetp->io_reg, OPN_MODE_REG));
				ddi_put32(dnetp->io_handle,
				    REG32(dnetp->io_reg, OPN_MODE_REG),
				    opn & ~(START_RECEIVE));

				do {
					drv_usecwait(10);
				} while ((ddi_get32(dnetp->io_handle,
				    REG32(dnetp->io_reg, STATUS_REG)) &
				    RECEIVE_PROCESS_STATE) != 0);
#ifdef DNETDEBUG
				if (dnetdebug & DNETRECV)
					cmn_err(CE_CONT, "^*");
#endif
				/* Discard probably corrupt frames */
				while (!(dnetp->rx_desc[index].desc0.own)) {
					dnetp->rx_desc[index].desc0.own = 1;
					index = (index+1) % dnetp->max_rx_desc;
					dnetp->stat_missed++;
				}

				/* restart the receiver */
				opn = ddi_get32(dnetp->io_handle,
				    REG32(dnetp->io_reg, OPN_MODE_REG));
				ddi_put32(dnetp->io_handle,
				    REG32(dnetp->io_reg, OPN_MODE_REG),
				    opn | START_RECEIVE);
				marker = dnetp->rx_current_desc = index;
				continue;
			}
			/*
			 * At this point, we know that all packets before
			 * "marker" were received before a dma overrun occurred
			 */
		}

		/*
		 * If we get an oversized packet it could span multiple
		 * descriptors. If this happens an error bit should be set.
		 */
		while (desc[index].desc0.last_desc == 0) {
			index = (index + 1) % dnetp->max_rx_desc;
			if (desc[index].desc0.own)
				return;	/* not done receiving large packet */
		}
		/* give the intermediate descriptors back to the chip */
		while (dnetp->rx_current_desc != index) {
			desc[dnetp->rx_current_desc].desc0.own = 1;
			dnetp->rx_current_desc =
			    (dnetp->rx_current_desc + 1) % dnetp->max_rx_desc;
#ifdef DNETDEBUG
			if (dnetdebug & DNETRECV)
				cmn_err(CE_WARN, "dnet: received large packet");
#endif
		}

		packet_length = desc[index].desc0.frame_len;

		/*
		 * Remove CRC from received data. This is an artefact of the
		 * 21x4x chip and should not be passed higher up the network
		 * stack.
		 */
		packet_length -= ETHERFCSL;

		/* get the virtual address of the packet received */
		virtual_address =
		    dnetp->rx_buf_vaddr[index];

		/*
		 * If no packet errors then do:
		 * 1. Allocate a new receive buffer so that we can
		 *    use the current buffer as streams buffer to
		 *    avoid bcopy.
		 * 2. If we got a new receive buffer then allocate
		 *    an mblk using desballoc().
		 * 3. Otherwise use the mblk from allocb() and do
		 *    the bcopy.
		 */
		frp = NULL;
		rp = NULL;
		newbuf = NULL;
		mp = NULL;
		if (!desc[index].desc0.err_summary ||
		    (desc[index].desc0.frame2long &&
		    packet_length < rx_buf_size)) {
			ASSERT(packet_length < rx_buf_size);
			/*
			 * Allocate another receive buffer for this descriptor.
			 * If we fail to allocate then we do the normal bcopy.
			 */
			rp = dnet_rbuf_alloc(dnetp->devinfo, 0);
			if (rp != NULL) {
				newbuf = rp->rbuf_vaddr;
				frp = kmem_zalloc(sizeof (*frp), KM_NOSLEEP);
				if (frp != NULL) {
					frp->free_rtn.free_func =
					    dnet_freemsg_buf;
					frp->free_rtn.free_arg = (char *)frp;
					frp->buf = virtual_address;
					mp = desballoc(
					    (uchar_t *)virtual_address,
					    packet_length, 0, &frp->free_rtn);
					if (mp == NULL) {
						kmem_free(frp, sizeof (*frp));
						dnet_rbuf_free((caddr_t)newbuf);
						frp = NULL;
						newbuf = NULL;
					}
				}
			}
			if (mp == NULL) {
				if (newbuf != NULL)
					dnet_rbuf_free((caddr_t)newbuf);
				mp = allocb(packet_length, 0);
			}
		}

		if ((desc[index].desc0.err_summary &&
		    packet_length >= rx_buf_size) || mp == NULL) {

			/* Update gld statistics */
			if (desc[index].desc0.err_summary)
				update_rx_stats(dnetp, index);
			else
				dnetp->stat_norcvbuf++;

			/*
			 * Reset ownership of the descriptor.
			 */
			desc[index].desc0.own = 1;
			dnetp->rx_current_desc =
			    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;

			/* Demand receive polling by the chip */
			ddi_put32(dnetp->io_handle,
			    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);

			continue;
		}

		if (newbuf != NULL) {
			uint32_t end_paddr;
			/* attach the new buffer to the rx descriptor */
			dnetp->rx_buf_vaddr[index] = newbuf;
			dnetp->rx_buf_paddr[index] = rp->rbuf_paddr;
			desc[index].buffer1 = rp->rbuf_paddr;
			desc[index].desc1.buffer_size1 = rx_buf_size;
			desc[index].desc1.buffer_size2 = 0;
			end_paddr = rp->rbuf_endpaddr;
			if ((desc[index].buffer1 & ~dnetp->pgmask) !=
			    (end_paddr & ~dnetp->pgmask)) {
				/* discontiguous */
				desc[index].buffer2 = end_paddr&~dnetp->pgmask;
				desc[index].desc1.buffer_size2 =
				    (end_paddr & dnetp->pgmask) + 1;
				desc[index].desc1.buffer_size1 =
				    rx_buf_size-desc[index].desc1.buffer_size2;
			}
		} else {
			/* couldn't allocate another buffer; copy the data */
			BCOPY((caddr_t)virtual_address, (caddr_t)mp->b_wptr,
			    packet_length);
		}

		mp->b_wptr += packet_length;

		desc[dnetp->rx_current_desc].desc0.own = 1;

		/*
		 * Increment receive desc index. This is for the scan of
		 * next packet
		 */
		dnetp->rx_current_desc =
		    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;

		/* Demand polling by chip */
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);

		/* send the packet upstream */
		mutex_exit(&dnetp->intrlock);
		mac_rx(dnetp->mac_handle, NULL, mp);
		mutex_enter(&dnetp->intrlock);
	}
}
/*
 * Function to update receive statistics
 */
static void
update_rx_stats(struct dnetinstance *dnetp, int index)
{
	struct rx_desc_type *descp = &(dnetp->rx_desc[index]);

	/*
	 * Update gld statistics
	 */
	dnetp->stat_errrcv++;

	if (descp->desc0.overflow) {
		/* FIFO Overrun */
		dnetp->stat_overflow++;
	}

	if (descp->desc0.collision) {
		/*EMPTY*/
		/* Late Colllision on receive */
		/* no appropriate counter */
	}

	if (descp->desc0.crc) {
		/* CRC Error */
		dnetp->stat_crc++;
	}

	if (descp->desc0.runt_frame) {
		/* Runt Error */
		dnetp->stat_short++;
	}

	if (descp->desc0.desc_err) {
		/*EMPTY*/
		/* Not enough receive descriptors */
		/* This condition is accounted in dnet_intr() */
	}

	if (descp->desc0.frame2long) {
		dnetp->stat_frame++;
	}
}

/*
 * Function to update transmit statistics
 */
static void
update_tx_stats(struct dnetinstance *dnetp, int index)
{
	struct tx_desc_type *descp = &(dnetp->tx_desc[index]);
	int fd;
	media_block_t *block = dnetp->selected_media_block;


	/* Update gld statistics */
	dnetp->stat_errxmt++;

	/* If we're in full-duplex don't count collisions or carrier loss. */
	if (dnetp->mii_up) {
		fd = dnetp->mii_duplex;
	} else {
		/* Rely on media code */
		fd = block->media_code == MEDIA_TP_FD ||
		    block->media_code == MEDIA_SYM_SCR_FD;
	}

	if (descp->desc0.collision_count && !fd) {
		dnetp->stat_collisions += descp->desc0.collision_count;
	}

	if (descp->desc0.late_collision && !fd) {
		dnetp->stat_xmtlatecoll++;
	}

	if (descp->desc0.excess_collision && !fd) {
		dnetp->stat_excoll++;
	}

	if (descp->desc0.underflow) {
		dnetp->stat_underflow++;
	}

#if 0
	if (descp->desc0.tx_jabber_to) {
		/* no appropriate counter */
	}
#endif

	if (descp->desc0.carrier_loss && !fd) {
		dnetp->stat_nocarrier++;
	}

	if (descp->desc0.no_carrier && !fd) {
		dnetp->stat_nocarrier++;
	}
}

/*
 * ========== Media Selection Setup Routines ==========
 */


/*
 * write_gpr() -- write a value to the general-purpose port register.
 * On the 21143 the GP bits live in the upper half of CSR15, so the
 * write goes through a cached copy (gprsia) to preserve the SIA half;
 * other boards write CSR12 (GP_REG) directly.
 * NOTE(review): this is guarded by DEBUG, but dnetdebug is declared
 * only under DNETDEBUG (see top of file) — confirm the build defines
 * both together.
 */
static void
write_gpr(struct dnetinstance *dnetp, uint32_t val)
{
#ifdef DEBUG
	if (dnetdebug & DNETREGCFG)
		cmn_err(CE_NOTE, "GPR: %x", val);
#endif
	switch (dnetp->board_type) {
	case DEVICE_ID_21143:
		/* Set the correct bit for a control write */
		if (val & GPR_CONTROL_WRITE)
			val |= CWE_21143, val &= ~GPR_CONTROL_WRITE;
		/* Write to upper half of CSR15 */
		dnetp->gprsia = (dnetp->gprsia & 0xffff) | (val << 16);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_GENERAL_REG), dnetp->gprsia);
		break;
	default:
		/* Set the correct bit for a control write */
		if (val & GPR_CONTROL_WRITE)
			val |= CWE_21140, val &= ~GPR_CONTROL_WRITE;
		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_REG), val);
		break;
	}
}

/*
 * read_gpr() -- read the general-purpose port register (upper half of
 * CSR15 on the 21143, CSR12 otherwise).
 */
static uint32_t
read_gpr(struct dnetinstance *dnetp)
{
	switch (dnetp->board_type) {
	case DEVICE_ID_21143:
		/* Read upper half of CSR15 */
		return (ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_GENERAL_REG)) >> 16);
	default:
		return (ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, GP_REG)));
	}
}

/*
 * set_gpr() -- program the GP port for the selected media block.
 * A "gpr-sequence" property overrides the SROM-derived sequences.
 */
static void
set_gpr(struct dnetinstance *dnetp)
{
	uint32_t *sequence;
	int len;
	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
	media_block_t *block = dnetp->selected_media_block;
	int i;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dnetp->devinfo,
	    DDI_PROP_DONTPASS, "gpr-sequence", (caddr_t)&sequence,
	    &len) == DDI_PROP_SUCCESS) {
		for (i = 0; i < len / sizeof (uint32_t); i++)
			write_gpr(dnetp, sequence[i]);
		kmem_free(sequence, len);
	} else {
		/*
		 * Write the reset sequence if this is the first time this
		 * block has been selected.
		 */
		if (block->rstseqlen) {
			for (i = 0; i < block->rstseqlen; i++)
				write_gpr(dnetp, block->rstseq[i]);
			/*
			 * XXX Legacy blocks do not have reset sequences, so the
			 * static blocks will never be modified by this
			 */
			block->rstseqlen = 0;
		}
		if (leaf->gpr)
			write_gpr(dnetp, leaf->gpr | GPR_CONTROL_WRITE);

		/* write GPR sequence each time */
		for (i = 0; i < block->gprseqlen; i++)
			write_gpr(dnetp, block->gprseq[i]);
	}

	/* This has possibly caused a PHY to reset. Let MII know */
	if (dnetp->phyaddr != -1)
		/* XXX function return value ignored */
		(void) mii_sync(dnetp->mii, dnetp->phyaddr);
	drv_usecwait(5);
}

/* set_opr() - must be called with intrlock held */

static void
set_opr(struct dnetinstance *dnetp)
{
	uint32_t fd, mb1, sf;

	int opnmode_len;
	uint32_t val;
	media_block_t *block = dnetp->selected_media_block;

	ASSERT(block);

	/* Check for custom "opnmode_reg" property */
	opnmode_len = sizeof (val);
	if (ddi_prop_op(DDI_DEV_T_ANY, dnetp->devinfo,
	    PROP_LEN_AND_VAL_BUF, DDI_PROP_DONTPASS, "opnmode_reg",
	    (caddr_t)&val, &opnmode_len) != DDI_PROP_SUCCESS)
		opnmode_len = 0;

	/* Some bits exist only on 21140 and greater */
	if (dnetp->board_type != DEVICE_ID_21040 &&
	    dnetp->board_type != DEVICE_ID_21041) {
		mb1 = OPN_REG_MB1;
		sf = STORE_AND_FORWARD;
	} else {
		mb1 = sf = 0;
		mb1 = OPN_REG_MB1;	/* Needed for 21040? */
	}

	if (opnmode_len) {
		/* the property overrides the computed CSR6 value entirely */
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
		dnet_reset_board(dnetp);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
		return;
	}

	/*
	 * Set each bit in CSR6 that we want
	 */

	/* Always want these bits set */
	val = HASH_FILTERING | HASH_ONLY | TX_THRESHOLD_160 | mb1 | sf;

	/* Promiscuous mode */
	val |= dnetp->promisc ? PROM_MODE : 0;

	/* Scrambler for SYM style media */
	val |= ((block->command & CMD_SCR) && !dnetp->disable_scrambler) ?
	    SCRAMBLER_MODE : 0;

	/* Full duplex */
	if (dnetp->mii_up) {
		fd = dnetp->mii_duplex;
	} else {
		/* Rely on media code */
		fd = block->media_code == MEDIA_TP_FD ||
		    block->media_code == MEDIA_SYM_SCR_FD;
	}

	/* Port select (and therefore, heartbeat disable) */
	val |= block->command & CMD_PS ? (PORT_SELECT | HEARTBEAT_DISABLE) : 0;

	/* PCS function */
	val |= (block->command) & CMD_PCS ? PCS_FUNCTION : 0;
	val |= fd ? FULL_DUPLEX : 0;

#ifdef DNETDEBUG
	if (dnetdebug & DNETREGCFG)
		cmn_err(CE_NOTE, "OPN: %x", val);
#endif
	/* write, reset, and re-write so the new mode takes effect */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
	dnet_reset_board(dnetp);
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
}

/*
 * set_sia() -- program the SIA registers (CSR13/14/15) for the
 * selected media block. Called with intrlock held.
 */
static void
set_sia(struct dnetinstance *dnetp)
{
	media_block_t *block = dnetp->selected_media_block;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	if (block->type == 2) {
		int sia_delay;
#ifdef DNETDEBUG
		if (dnetdebug & DNETREGCFG)
			cmn_err(CE_NOTE,
			    "SIA: CSR13: %x, CSR14: %x, CSR15: %x",
			    block->un.sia.csr13,
			    block->un.sia.csr14,
			    block->un.sia.csr15);
#endif
		sia_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
		    DDI_PROP_DONTPASS, "sia-delay", 10000);

		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);

		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, SIA_TXRX_REG),
		    block->un.sia.csr14);

		/*
		 * For '143, we need to write through a copy of the register
		 * to keep the GP half intact
		 */
		dnetp->gprsia = (dnetp->gprsia&0xffff0000)|block->un.sia.csr15;
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_GENERAL_REG),
		    dnetp->gprsia);

		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_CONNECT_REG),
		    block->un.sia.csr13);

		drv_usecwait(sia_delay);

	} else if (dnetp->board_type != DEVICE_ID_21140) {
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, SIA_TXRX_REG), 0);
	}
}

/*
 * This function (re)allocates the receive and transmit buffers and
 * descriptors.
 * It can be called more than once per instance, though
 * currently it is only called from attach. It should only be called
 * while the device is reset.
 *
 * Returns SUCCESS or FAILURE; on failure, already-allocated resources
 * are left in place for dnet_free_bufs() to reclaim.
 */
static int
dnet_alloc_bufs(struct dnetinstance *dnetp)
{
	int i;
	size_t len;
	int page_size;
	int realloc = 0;
	int nrecv_desc_old = 0;
	ddi_dma_cookie_t cookie;
	uint_t ncookies;

	/*
	 * check if we are trying to reallocate with different xmit/recv
	 * descriptor ring sizes.
	 */
	if ((dnetp->tx_desc != NULL) &&
	    (dnetp->nxmit_desc != dnetp->max_tx_desc))
		realloc = 1;

	if ((dnetp->rx_desc != NULL) &&
	    (dnetp->nrecv_desc != dnetp->max_rx_desc))
		realloc = 1;

	/* free up the old buffers if we are reallocating them */
	if (realloc) {
		nrecv_desc_old = dnetp->nrecv_desc;
		dnet_free_bufs(dnetp); /* free the old buffers */
	}

	/*
	 * Lazily allocate the four DMA handles (general, tx data,
	 * tx descriptors, setup buffer); dma_attr/dma_attr_tx/accattr
	 * are file-scope attribute structures defined elsewhere.
	 */
	if (dnetp->dma_handle == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle) != DDI_SUCCESS)
			return (FAILURE);

	if (dnetp->dma_handle_tx == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr_tx,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_tx) != DDI_SUCCESS)
			return (FAILURE);

	if (dnetp->dma_handle_txdesc == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_txdesc) != DDI_SUCCESS)
			return (FAILURE);

	if (dnetp->dma_handle_setbuf == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_setbuf) != DDI_SUCCESS)
			return (FAILURE);

	page_size = ddi_ptob(dnetp->devinfo, 1);

	dnetp->pgmask = page_size - 1;

	/* allocate setup buffer if necessary */
	if (dnetp->setup_buf_vaddr == NULL) {
		if (ddi_dma_mem_alloc(dnetp->dma_handle_setbuf,
		    SETUPBUF_SIZE, &accattr, DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT, 0, (caddr_t *)&dnetp->setup_buf_vaddr,
		    &len, &dnetp->setup_buf_acchdl) != DDI_SUCCESS)
			return (FAILURE);

		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_setbuf,
		    NULL, dnetp->setup_buf_vaddr, SETUPBUF_SIZE,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
			return (FAILURE);

		dnetp->setup_buf_paddr = cookie.dmac_address;
		bzero(dnetp->setup_buf_vaddr, len);
	}

	/* allocate xmit descriptor array of size dnetp->max_tx_desc */
	if (dnetp->tx_desc == NULL) {
		if (ddi_dma_mem_alloc(dnetp->dma_handle_txdesc,
		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&dnetp->tx_desc, &len,
		    &dnetp->tx_desc_acchdl) != DDI_SUCCESS)
			return (FAILURE);

		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_txdesc,
		    NULL, (caddr_t)dnetp->tx_desc,
		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
			return (FAILURE);
		dnetp->tx_desc_paddr = cookie.dmac_address;
		bzero(dnetp->tx_desc, len);
		dnetp->nxmit_desc = dnetp->max_tx_desc;

		/* per-descriptor mblk pointers, freed at tx-complete time */
		dnetp->tx_msgbufp =
		    kmem_zalloc(dnetp->max_tx_desc * sizeof (mblk_t **),
		    KM_SLEEP);
	}

	/* allocate receive descriptor array of size dnetp->max_rx_desc */
	if (dnetp->rx_desc == NULL) {
		int ndesc;

		if (ddi_dma_mem_alloc(dnetp->dma_handle,
		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&dnetp->rx_desc, &len,
		    &dnetp->rx_desc_acchdl) != DDI_SUCCESS)
			return (FAILURE);

		if (ddi_dma_addr_bind_handle(dnetp->dma_handle,
		    NULL, (caddr_t)dnetp->rx_desc,
		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
			return (FAILURE);

		dnetp->rx_desc_paddr = cookie.dmac_address;
		bzero(dnetp->rx_desc, len);
		dnetp->nrecv_desc = dnetp->max_rx_desc;

		dnetp->rx_buf_vaddr =
		    kmem_zalloc(dnetp->max_rx_desc * sizeof (caddr_t),
		    KM_SLEEP);
		dnetp->rx_buf_paddr =
		    kmem_zalloc(dnetp->max_rx_desc * sizeof (uint32_t),
		    KM_SLEEP);
		/*
		 * Allocate or add to the pool of receive buffers. The pool
		 * is shared among all instances of dnet.
		 *
		 * XXX NEEDSWORK
		 *
		 * We arbitrarily allocate twice as many receive buffers as
		 * receive descriptors because we use the buffers for streams
		 * messages to pass the packets up the stream. We should
		 * instead have initialized constants reflecting
		 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
		 * probably have a total maximum for the free pool, so that we
		 * don't get out of hand when someone puts in an 8-port board.
		 * The maximum for the entire pool should be the total number
		 * of descriptors for all attached instances together, plus the
		 * total maximum for the free pool. This maximum would only be
		 * reached after some number of instances allocate buffers:
		 * each instance would add (max_rx_buf-max_rx_desc) to the free
		 * pool.
		 */
		ndesc = dnetp->max_rx_desc - nrecv_desc_old;
		if ((ndesc > 0) &&
		    (dnet_rbuf_init(dnetp->devinfo, ndesc * 2) != 0))
			return (FAILURE);

		for (i = 0; i < dnetp->max_rx_desc; i++) {
			struct rbuf_list *rp;

			rp = dnet_rbuf_alloc(dnetp->devinfo, 1);
			if (rp == NULL)
				return (FAILURE);
			dnetp->rx_buf_vaddr[i] = rp->rbuf_vaddr;
			dnetp->rx_buf_paddr[i] = rp->rbuf_paddr;
		}
	}

	return (SUCCESS);
}
/*
 * free descriptors/buffers allocated for this device instance. This routine
 * should only be called while the device is reset.
 */
static void
dnet_free_bufs(struct dnetinstance *dnetp)
{
	int i;
	/* free up any xmit descriptors/buffers */
	if (dnetp->tx_desc != NULL) {
		/*
		 * NOTE(review): the descriptor memory is freed here while
		 * dma_handle_txdesc is still bound; the unbind only happens
		 * further down. ddi_dma_unbind_handle(9F) is normally called
		 * before ddi_dma_mem_free(9F) -- confirm the ordering.
		 */
		ddi_dma_mem_free(&dnetp->tx_desc_acchdl);
		dnetp->tx_desc = NULL;
		/* we use streams buffers for DMA in xmit process */
		if (dnetp->tx_msgbufp != NULL) {
			/* free up any streams message buffers unclaimed */
			for (i = 0; i < dnetp->nxmit_desc; i++) {
				if (dnetp->tx_msgbufp[i] != NULL) {
					freemsg(dnetp->tx_msgbufp[i]);
				}
			}
			kmem_free(dnetp->tx_msgbufp,
			    dnetp->nxmit_desc * sizeof (mblk_t **));
			dnetp->tx_msgbufp = NULL;
		}
		dnetp->nxmit_desc = 0;
	}

	/* free up any receive descriptors/buffers */
	if (dnetp->rx_desc != NULL) {
		ddi_dma_mem_free(&dnetp->rx_desc_acchdl);
		dnetp->rx_desc = NULL;
		if (dnetp->rx_buf_vaddr != NULL) {
			/* return any attached rbufs to the shared pool */
			for (i = 0; i < dnetp->nrecv_desc; i++) {
				if (dnetp->rx_buf_vaddr[i])
					dnet_rbuf_free(
					    (caddr_t)dnetp->rx_buf_vaddr[i]);
			}
			kmem_free(dnetp->rx_buf_vaddr,
			    dnetp->nrecv_desc * sizeof (caddr_t));
			kmem_free(dnetp->rx_buf_paddr,
			    dnetp->nrecv_desc * sizeof (uint32_t));
			dnetp->rx_buf_vaddr = NULL;
			dnetp->rx_buf_paddr = NULL;
		}
		dnetp->nrecv_desc = 0;
	}

	if (dnetp->setup_buf_vaddr != NULL) {
		ddi_dma_mem_free(&dnetp->setup_buf_acchdl);
		dnetp->setup_buf_vaddr = NULL;
	}

	/* unbind and release the DMA handles themselves */
	if (dnetp->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle);
		ddi_dma_free_handle(&dnetp->dma_handle);
		dnetp->dma_handle = NULL;
	}

	if (dnetp->dma_handle_tx != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
		ddi_dma_free_handle(&dnetp->dma_handle_tx);
		dnetp->dma_handle_tx = NULL;
	}

	if (dnetp->dma_handle_txdesc != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_txdesc);
		ddi_dma_free_handle(&dnetp->dma_handle_txdesc);
		dnetp->dma_handle_txdesc = NULL;
	}

	if (dnetp->dma_handle_setbuf != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_setbuf);
		ddi_dma_free_handle(&dnetp->dma_handle_setbuf);
		dnetp->dma_handle_setbuf = NULL;
	}

}

/*
 * Initialize transmit and receive descriptors.
 */
static void
dnet_init_txrx_bufs(struct dnetinstance *dnetp)
{
	int i;

	/*
	 * Initialize all the Tx descriptors
	 */
	for (i = 0; i < dnetp->nxmit_desc; i++) {
		/*
		 * We may be resetting the device due to errors,
		 * so free up any streams message buffer unclaimed.
		 */
		if (dnetp->tx_msgbufp[i] != NULL) {
			freemsg(dnetp->tx_msgbufp[i]);
			dnetp->tx_msgbufp[i] = NULL;
		}
		*(uint32_t *)&dnetp->tx_desc[i].desc0 = 0;
		*(uint32_t *)&dnetp->tx_desc[i].desc1 = 0;
		dnetp->tx_desc[i].buffer1 = 0;
		dnetp->tx_desc[i].buffer2 = 0;
	}
	/* NOTE(review): assumes nxmit_desc >= 1 (i == nxmit_desc here) */
	dnetp->tx_desc[i - 1].desc1.end_of_ring = 1;

	/*
	 * Initialize the Rx descriptors
	 */
	for (i = 0; i < dnetp->nrecv_desc; i++) {
		uint32_t end_paddr;
		*(uint32_t *)&dnetp->rx_desc[i].desc0 = 0;
		*(uint32_t *)&dnetp->rx_desc[i].desc1 = 0;
		/* hand the descriptor to the chip */
		dnetp->rx_desc[i].desc0.own = 1;
		dnetp->rx_desc[i].desc1.buffer_size1 = rx_buf_size;
		dnetp->rx_desc[i].buffer1 = dnetp->rx_buf_paddr[i];
		dnetp->rx_desc[i].buffer2 = 0;
		end_paddr = dnetp->rx_buf_paddr[i]+rx_buf_size-1;

		/*
		 * If the buffer crosses a page boundary, split it over
		 * the descriptor's two buffer pointers.
		 */
		if ((dnetp->rx_desc[i].buffer1 & ~dnetp->pgmask) !=
		    (end_paddr & ~dnetp->pgmask)) {
			/* discontiguous */
			dnetp->rx_desc[i].buffer2 = end_paddr&~dnetp->pgmask;
			dnetp->rx_desc[i].desc1.buffer_size2 =
			    (end_paddr & dnetp->pgmask) + 1;
			dnetp->rx_desc[i].desc1.buffer_size1 =
			    rx_buf_size-dnetp->rx_desc[i].desc1.buffer_size2;
		}
	}
	/* NOTE(review): assumes nrecv_desc >= 1 (i == nrecv_desc here) */
	dnetp->rx_desc[i - 1].desc1.end_of_ring = 1;
}

/*
 * alloc_descriptor() -- claim the next free transmit descriptor.
 * Called with intrlock held; takes and drops txlock internally.
 * Returns SUCCESS (advancing tx_current_desc) or FAILURE if the
 * ring is full.
 */
static int
alloc_descriptor(struct dnetinstance *dnetp)
{
	int index;
	struct tx_desc_type *ring = dnetp->tx_desc;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
alloctop:
	mutex_enter(&dnetp->txlock);
	index = dnetp->tx_current_desc;

	/* harvest any completed descriptors first */
	dnet_reclaim_Tx_desc(dnetp);

	/* we do have free descriptors, right? */
	if (dnetp->free_desc <= 0) {
#ifdef DNETDEBUG
		if (dnetdebug & DNETRECV)
			cmn_err(CE_NOTE, "dnet: Ring buffer is full");
#endif
		mutex_exit(&dnetp->txlock);
		return (FAILURE);
	}

	/* sanity, make sure the next descriptor is free for use (should be) */
	if (ring[index].desc0.own) {
#ifdef DNETDEBUG
		if (dnetdebug & DNETRECV)
			cmn_err(CE_WARN,
			    "dnet: next descriptor is not free for use");
#endif
		mutex_exit(&dnetp->txlock);
		return (FAILURE);
	}
	/* a pending address change must be pushed to the chip first */
	if (dnetp->need_saddr) {
		mutex_exit(&dnetp->txlock);
		/* XXX function return value ignored */
		if (!dnetp->suspended)
			(void) dnet_set_addr(dnetp);
		goto alloctop;
	}

	*(uint32_t *)&ring[index].desc0 = 0;	/* init descs */
	*(uint32_t *)&ring[index].desc1 &= DNET_END_OF_RING;

	/* hardware will own this descriptor when poll activated */
	dnetp->free_desc--;

	/* point to next free descriptor to be used */
	dnetp->tx_current_desc = NextTXIndex(index);

#ifdef DNET_NOISY
	cmn_err(CE_WARN, "sfree 0x%x, transmitted 0x%x, tx_current 0x%x",
	    dnetp->free_desc, dnetp->transmitted_desc, dnetp->tx_current_desc);
#endif
	mutex_exit(&dnetp->txlock);
	return (SUCCESS);
}

/*
 * dnet_reclaim_Tx_desc() - called with txlock held.
2490 */ 2491 static void 2492 dnet_reclaim_Tx_desc(struct dnetinstance *dnetp) 2493 { 2494 struct tx_desc_type *desc = dnetp->tx_desc; 2495 int index; 2496 2497 ASSERT(MUTEX_HELD(&dnetp->txlock)); 2498 2499 index = dnetp->transmitted_desc; 2500 while (((dnetp->free_desc == 0) || (index != dnetp->tx_current_desc)) && 2501 !(desc[index].desc0.own)) { 2502 /* 2503 * Check for Tx Error that gets set 2504 * in the last desc. 2505 */ 2506 if (desc[index].desc1.setup_packet == 0 && 2507 desc[index].desc1.last_desc && 2508 desc[index].desc0.err_summary) 2509 update_tx_stats(dnetp, index); 2510 2511 /* 2512 * If we have used the streams message buffer for this 2513 * descriptor then free up the message now. 2514 */ 2515 if (dnetp->tx_msgbufp[index] != NULL) { 2516 freemsg(dnetp->tx_msgbufp[index]); 2517 dnetp->tx_msgbufp[index] = NULL; 2518 } 2519 dnetp->free_desc++; 2520 index = (index+1) % dnetp->max_tx_desc; 2521 } 2522 2523 dnetp->transmitted_desc = index; 2524 } 2525 2526 /* 2527 * Receive buffer allocation/freeing routines. 2528 * 2529 * There is a common pool of receive buffers shared by all dnet instances. 2530 * 2531 * XXX NEEDSWORK 2532 * 2533 * We arbitrarily allocate twice as many receive buffers as 2534 * receive descriptors because we use the buffers for streams 2535 * messages to pass the packets up the stream. We should 2536 * instead have initialized constants reflecting 2537 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also 2538 * probably have a total maximum for the free pool, so that we 2539 * don't get out of hand when someone puts in an 8-port board. 2540 * The maximum for the entire pool should be the total number 2541 * of descriptors for all attached instances together, plus the 2542 * total maximum for the free pool. This maximum would only be 2543 * reached after some number of instances allocate buffers: 2544 * each instance would add (max_rx_buf-max_rx_desc) to the free 2545 * pool. 
2546 */ 2547 2548 static struct rbuf_list *rbuf_usedlist_head; 2549 static struct rbuf_list *rbuf_freelist_head; 2550 static struct rbuf_list *rbuf_usedlist_end; /* last buffer allocated */ 2551 2552 static int rbuf_freebufs; /* no. of free buffers in the pool */ 2553 static int rbuf_pool_size; /* total no. of buffers in the pool */ 2554 2555 /* initialize/add 'nbufs' buffers to the rbuf pool */ 2556 /* ARGSUSED */ 2557 static int 2558 dnet_rbuf_init(dev_info_t *dip, int nbufs) 2559 { 2560 int i; 2561 struct rbuf_list *rp; 2562 ddi_dma_cookie_t cookie; 2563 uint_t ncookies; 2564 size_t len; 2565 2566 mutex_enter(&dnet_rbuf_lock); 2567 2568 /* allocate buffers and add them to the pool */ 2569 for (i = 0; i < nbufs; i++) { 2570 /* allocate rbuf_list element */ 2571 rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP); 2572 if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP, 2573 0, &rp->rbuf_dmahdl) != DDI_SUCCESS) 2574 goto fail_kfree; 2575 2576 /* allocate dma memory for the buffer */ 2577 if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr, 2578 DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, 2579 &rp->rbuf_vaddr, &len, 2580 &rp->rbuf_acchdl) != DDI_SUCCESS) 2581 goto fail_freehdl; 2582 2583 if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL, 2584 rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING, 2585 DDI_DMA_SLEEP, NULL, &cookie, 2586 &ncookies) != DDI_DMA_MAPPED) 2587 goto fail_free; 2588 2589 if (ncookies > 2) 2590 goto fail_unbind; 2591 if (ncookies == 1) { 2592 rp->rbuf_endpaddr = 2593 cookie.dmac_address + rx_buf_size - 1; 2594 } else { 2595 ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie); 2596 rp->rbuf_endpaddr = 2597 cookie.dmac_address + cookie.dmac_size - 1; 2598 } 2599 rp->rbuf_paddr = cookie.dmac_address; 2600 2601 rp->rbuf_next = rbuf_freelist_head; 2602 rbuf_freelist_head = rp; 2603 rbuf_pool_size++; 2604 rbuf_freebufs++; 2605 } 2606 2607 mutex_exit(&dnet_rbuf_lock); 2608 return (0); 2609 fail_unbind: 2610 (void) 
ddi_dma_unbind_handle(rp->rbuf_dmahdl); 2611 fail_free: 2612 ddi_dma_mem_free(&rp->rbuf_acchdl); 2613 fail_freehdl: 2614 ddi_dma_free_handle(&rp->rbuf_dmahdl); 2615 fail_kfree: 2616 kmem_free(rp, sizeof (struct rbuf_list)); 2617 2618 mutex_exit(&dnet_rbuf_lock); 2619 return (-1); 2620 } 2621 2622 /* 2623 * Try to free up all the rbufs in the pool. Returns 0 if it frees up all 2624 * buffers. The buffers in the used list are considered busy so these 2625 * buffers are not freed. 2626 */ 2627 static int 2628 dnet_rbuf_destroy() 2629 { 2630 struct rbuf_list *rp, *next; 2631 2632 mutex_enter(&dnet_rbuf_lock); 2633 2634 for (rp = rbuf_freelist_head; rp; rp = next) { 2635 next = rp->rbuf_next; 2636 ddi_dma_mem_free(&rp->rbuf_acchdl); 2637 (void) ddi_dma_unbind_handle(rp->rbuf_dmahdl); 2638 kmem_free(rp, sizeof (struct rbuf_list)); 2639 rbuf_pool_size--; 2640 rbuf_freebufs--; 2641 } 2642 rbuf_freelist_head = NULL; 2643 2644 if (rbuf_pool_size) { /* pool is still not empty */ 2645 mutex_exit(&dnet_rbuf_lock); 2646 return (-1); 2647 } 2648 mutex_exit(&dnet_rbuf_lock); 2649 return (0); 2650 } 2651 static struct rbuf_list * 2652 dnet_rbuf_alloc(dev_info_t *dip, int cansleep) 2653 { 2654 struct rbuf_list *rp; 2655 size_t len; 2656 ddi_dma_cookie_t cookie; 2657 uint_t ncookies; 2658 2659 mutex_enter(&dnet_rbuf_lock); 2660 2661 if (rbuf_freelist_head == NULL) { 2662 2663 if (!cansleep) { 2664 mutex_exit(&dnet_rbuf_lock); 2665 return (NULL); 2666 } 2667 2668 /* allocate rbuf_list element */ 2669 rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP); 2670 if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP, 2671 0, &rp->rbuf_dmahdl) != DDI_SUCCESS) 2672 goto fail_kfree; 2673 2674 /* allocate dma memory for the buffer */ 2675 if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr, 2676 DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, 2677 &rp->rbuf_vaddr, &len, 2678 &rp->rbuf_acchdl) != DDI_SUCCESS) 2679 goto fail_freehdl; 2680 2681 if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, 
NULL, 2682 rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING, 2683 DDI_DMA_SLEEP, NULL, &cookie, 2684 &ncookies) != DDI_DMA_MAPPED) 2685 goto fail_free; 2686 2687 if (ncookies > 2) 2688 goto fail_unbind; 2689 if (ncookies == 1) { 2690 rp->rbuf_endpaddr = 2691 cookie.dmac_address + rx_buf_size - 1; 2692 } else { 2693 ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie); 2694 rp->rbuf_endpaddr = 2695 cookie.dmac_address + cookie.dmac_size - 1; 2696 } 2697 rp->rbuf_paddr = cookie.dmac_address; 2698 2699 rbuf_freelist_head = rp; 2700 rbuf_pool_size++; 2701 rbuf_freebufs++; 2702 } 2703 2704 /* take the buffer from the head of the free list */ 2705 rp = rbuf_freelist_head; 2706 rbuf_freelist_head = rbuf_freelist_head->rbuf_next; 2707 2708 /* update the used list; put the entry at the end */ 2709 if (rbuf_usedlist_head == NULL) 2710 rbuf_usedlist_head = rp; 2711 else 2712 rbuf_usedlist_end->rbuf_next = rp; 2713 rp->rbuf_next = NULL; 2714 rbuf_usedlist_end = rp; 2715 rbuf_freebufs--; 2716 2717 mutex_exit(&dnet_rbuf_lock); 2718 2719 return (rp); 2720 fail_unbind: 2721 (void) ddi_dma_unbind_handle(rp->rbuf_dmahdl); 2722 fail_free: 2723 ddi_dma_mem_free(&rp->rbuf_acchdl); 2724 fail_freehdl: 2725 ddi_dma_free_handle(&rp->rbuf_dmahdl); 2726 fail_kfree: 2727 kmem_free(rp, sizeof (struct rbuf_list)); 2728 mutex_exit(&dnet_rbuf_lock); 2729 return (NULL); 2730 } 2731 2732 static void 2733 dnet_rbuf_free(caddr_t vaddr) 2734 { 2735 struct rbuf_list *rp, *prev; 2736 2737 ASSERT(vaddr != NULL); 2738 ASSERT(rbuf_usedlist_head != NULL); 2739 2740 mutex_enter(&dnet_rbuf_lock); 2741 2742 /* find the entry in the used list */ 2743 for (prev = rp = rbuf_usedlist_head; rp; rp = rp->rbuf_next) { 2744 if (rp->rbuf_vaddr == vaddr) 2745 break; 2746 prev = rp; 2747 } 2748 2749 if (rp == NULL) { 2750 cmn_err(CE_WARN, "DNET: rbuf_free: bad addr 0x%p", 2751 (void *)vaddr); 2752 mutex_exit(&dnet_rbuf_lock); 2753 return; 2754 } 2755 2756 /* update the used list and put the buffer back in the free list */ 
2757 if (rbuf_usedlist_head != rp) { 2758 prev->rbuf_next = rp->rbuf_next; 2759 if (rbuf_usedlist_end == rp) 2760 rbuf_usedlist_end = prev; 2761 } else { 2762 rbuf_usedlist_head = rp->rbuf_next; 2763 if (rbuf_usedlist_end == rp) 2764 rbuf_usedlist_end = NULL; 2765 } 2766 rp->rbuf_next = rbuf_freelist_head; 2767 rbuf_freelist_head = rp; 2768 rbuf_freebufs++; 2769 2770 mutex_exit(&dnet_rbuf_lock); 2771 } 2772 2773 /* 2774 * Free the receive buffer used in a stream's message block allocated 2775 * thru desballoc(). 2776 */ 2777 static void 2778 dnet_freemsg_buf(struct free_ptr *frp) 2779 { 2780 dnet_rbuf_free((caddr_t)frp->buf); /* buffer goes back to the pool */ 2781 kmem_free(frp, sizeof (*frp)); /* free up the free_rtn structure */ 2782 } 2783 2784 /* 2785 * ========== SROM Read Routines ========== 2786 */ 2787 2788 /* 2789 * The following code gets the SROM information, either by reading it 2790 * from the device or, failing that, by reading a property. 2791 */ 2792 static int 2793 dnet_read_srom(dev_info_t *devinfo, int board_type, ddi_acc_handle_t io_handle, 2794 caddr_t io_reg, uchar_t *vi, int maxlen) 2795 { 2796 int all_ones, zerocheck, i; 2797 2798 /* 2799 * Load SROM into vendor_info 2800 */ 2801 if (board_type == DEVICE_ID_21040) 2802 dnet_read21040addr(devinfo, io_handle, io_reg, vi, &maxlen); 2803 else 2804 /* 21041/21140 serial rom */ 2805 dnet_read21140srom(io_handle, io_reg, vi, maxlen); 2806 /* 2807 * If the dumpsrom property is present in the conf file, print 2808 * the contents of the SROM to the console 2809 */ 2810 if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS, 2811 "dumpsrom", 0)) 2812 dnet_dumpbin("SROM", vi, 1, maxlen); 2813 2814 for (zerocheck = i = 0, all_ones = 0xff; i < maxlen; i++) { 2815 zerocheck |= vi[i]; 2816 all_ones &= vi[i]; 2817 } 2818 if (zerocheck == 0 || all_ones == 0xff) { 2819 return (get_alternative_srom_image(devinfo, vi, maxlen)); 2820 } else { 2821 #ifdef BUG_4010796 2822 set_alternative_srom_image(devinfo, 
vi, maxlen); 2823 #endif 2824 return (0); /* Primary */ 2825 } 2826 } 2827 2828 /* 2829 * The function reads the ethernet address of the 21040 adapter 2830 */ 2831 static void 2832 dnet_read21040addr(dev_info_t *dip, ddi_acc_handle_t io_handle, caddr_t io_reg, 2833 uchar_t *addr, int *len) 2834 { 2835 uint32_t val; 2836 int i; 2837 2838 /* No point reading more than the ethernet address */ 2839 *len = ddi_getprop(DDI_DEV_T_ANY, dip, 2840 DDI_PROP_DONTPASS, macoffset_propname, 0) + ETHERADDRL; 2841 2842 /* Reset ROM pointer */ 2843 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 0); 2844 for (i = 0; i < *len; i++) { 2845 do { 2846 val = ddi_get32(io_handle, 2847 REG32(io_reg, ETHER_ROM_REG)); 2848 } while (val & 0x80000000); 2849 addr[i] = val & 0xFF; 2850 } 2851 } 2852 2853 #define drv_nsecwait(x) drv_usecwait(((x)+999)/1000) /* XXX */ 2854 2855 /* 2856 * The function reads the SROM of the 21140 adapter 2857 */ 2858 static void 2859 dnet_read21140srom(ddi_acc_handle_t io_handle, caddr_t io_reg, uchar_t *addr, 2860 int maxlen) 2861 { 2862 uint32_t i, j; 2863 uint32_t dout; 2864 uint16_t word; 2865 uint8_t rom_addr; 2866 uint8_t bit; 2867 2868 2869 rom_addr = 0; 2870 for (i = 0; i < maxlen; i += 2) { 2871 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2872 READ_OP | SEL_ROM); 2873 drv_nsecwait(30); 2874 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2875 READ_OP | SEL_ROM | SEL_CHIP); 2876 drv_nsecwait(50); 2877 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2878 READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK); 2879 drv_nsecwait(250); 2880 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2881 READ_OP | SEL_ROM | SEL_CHIP); 2882 drv_nsecwait(100); 2883 2884 /* command */ 2885 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2886 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN); 2887 drv_nsecwait(150); 2888 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2889 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK); 2890 drv_nsecwait(250); 2891 ddi_put32(io_handle, 
REG32(io_reg, ETHER_ROM_REG), 2892 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN); 2893 drv_nsecwait(250); 2894 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2895 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK); 2896 drv_nsecwait(250); 2897 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2898 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN); 2899 drv_nsecwait(100); 2900 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2901 READ_OP | SEL_ROM | SEL_CHIP); 2902 drv_nsecwait(150); 2903 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2904 READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK); 2905 drv_nsecwait(250); 2906 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2907 READ_OP | SEL_ROM | SEL_CHIP); 2908 drv_nsecwait(100); 2909 2910 /* Address */ 2911 for (j = HIGH_ADDRESS_BIT; j >= 1; j >>= 1) { 2912 bit = (rom_addr & j) ? DATA_IN : 0; 2913 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2914 READ_OP | SEL_ROM | SEL_CHIP | bit); 2915 drv_nsecwait(150); 2916 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2917 READ_OP | SEL_ROM | SEL_CHIP | bit | SEL_CLK); 2918 drv_nsecwait(250); 2919 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2920 READ_OP | SEL_ROM | SEL_CHIP | bit); 2921 drv_nsecwait(100); 2922 } 2923 drv_nsecwait(150); 2924 2925 /* Data */ 2926 word = 0; 2927 for (j = 0x8000; j >= 1; j >>= 1) { 2928 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2929 READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK); 2930 drv_nsecwait(100); 2931 dout = ddi_get32(io_handle, 2932 REG32(io_reg, ETHER_ROM_REG)); 2933 drv_nsecwait(150); 2934 if (dout & DATA_OUT) 2935 word |= j; 2936 ddi_put32(io_handle, 2937 REG32(io_reg, ETHER_ROM_REG), 2938 READ_OP | SEL_ROM | SEL_CHIP); 2939 drv_nsecwait(250); 2940 } 2941 addr[i] = (word & 0x0000FF); 2942 addr[i + 1] = (word >> 8); 2943 rom_addr++; 2944 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2945 READ_OP | SEL_ROM); 2946 drv_nsecwait(100); 2947 } 2948 } 2949 2950 2951 /* 2952 * XXX NEEDSWORK 2953 * 2954 * Some lame multiport cards have only 
 * one SROM, which can be accessed
 * only from the "first" 21x4x chip, whichever that one is. If we can't
 * get at our SROM, we look for its contents in a property instead, which
 * we rely on the bootstrap to have properly set.
 * #ifdef BUG_4010796
 * We also have a hack to try to set it ourselves, when the "first" port
 * attaches, if it has not already been properly set. However, this method
 * is not reliable, since it makes the unwarrented assumption that the
 * "first" port will attach first.
 * #endif
 */

static int
get_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
{
	int l = len;

	/*
	 * Look for the DNET_SROM property first on this node, then on the
	 * parent.  Note the embedded "(len = l)" assignment: the first
	 * (failed) lookup clobbers len, so it is restored before the
	 * second lookup.  "(len = l)" is always nonzero here since len > 0.
	 */
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "DNET_SROM", (caddr_t)vi, &len) != DDI_PROP_SUCCESS &&
	    (len = l) && ddi_getlongprop_buf(DDI_DEV_T_ANY,
	    ddi_get_parent(devinfo), DDI_PROP_DONTPASS, "DNET_SROM",
	    (caddr_t)vi, &len) != DDI_PROP_SUCCESS)
		return (-1); /* Can't find it! */

	/*
	 * The return value from this routine specifies which port number
	 * we are. The primary port is denoted port 0. On a QUAD card we
	 * should return 1, 2, and 3 from this routine. The return value
	 * is used to modify the ethernet address from the SROM data.
	 */

#ifdef BUG_4010796
	{
	/*
	 * For the present, we remember the device number of our primary
	 * sibling and hope we and our other siblings are consecutively
	 * numbered up from there. In the future perhaps the bootstrap
	 * will pass us the necessary information telling us which physical
	 * port we really are.
	 */
	pci_regspec_t *assignp;
	int assign_len;
	int devnum;
	int primary_devnum;

	primary_devnum = ddi_getprop(DDI_DEV_T_ANY, devinfo, 0,
	    "DNET_DEVNUM", -1);
	if (primary_devnum == -1)
		return (1);	/* XXX NEEDSWORK -- We have no better idea */

	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&assignp,
	    &assign_len)) != DDI_PROP_SUCCESS)
		return (1);	/* XXX NEEDSWORK -- We have no better idea */

	/* our PCI device number minus the primary's gives the port number */
	devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
	kmem_free(assignp, assign_len);
	return (devnum - primary_devnum);
	}
#else
	return (1);	/* XXX NEEDSWORK -- We have no better idea */
#endif
}


#ifdef BUG_4010796
/*
 * Stash a copy of the SROM contents (and our PCI device number) in
 * properties on the parent node so sibling ports without SROM access
 * can find them.  No-op if the property has already been set.
 */
static void
set_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
{
	int proplen;
	pci_regspec_t *assignp;
	int assign_len;
	int devnum;

	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "DNET_SROM", &proplen) == DDI_PROP_SUCCESS ||
	    ddi_getproplen(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
	    DDI_PROP_DONTPASS, "DNET_SROM", &proplen) == DDI_PROP_SUCCESS)
		return;	/* Already done! */

	/* function return value ignored */
	(void) ddi_prop_update_byte_array(DDI_DEV_T_NONE,
	    ddi_get_parent(devinfo), "DNET_SROM", (uchar_t *)vi, len);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devinfo,
	    "DNET_HACK", "hack");

	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&assignp,
	    &assign_len)) == DDI_PROP_SUCCESS) {
		devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
		kmem_free(assignp, assign_len);
		/* function return value ignored */
		(void) ddi_prop_update_int(DDI_DEV_T_NONE,
		    ddi_get_parent(devinfo), "DNET_DEVNUM", devnum);
	}
}
#endif

/*
 * ========== SROM Parsing Routines ==========
 */

/*
 * Sanity-check an SROM image: adapter count in range, and either the
 * version 1/3 must-be-zero bytes are zero or (version 4) the CRC-8
 * over the ID block matches.  Returns nonzero if the image looks valid.
 */
static int
check_srom_valid(uchar_t *vi)
{
	int word, bit;
	uint8_t crc;
	uint16_t *wvi;	/* word16 pointer to vendor info */
	uint16_t bitval;

	/* verify that the number of controllers on the card is within range */
	if (vi[SROM_ADAPTER_CNT] < 1 || vi[SROM_ADAPTER_CNT] > MAX_ADAPTERS)
		return (0);

	/*
	 * version 1 and 3 of this card did not check the id block CRC value
	 * and this can't be changed without retesting every supported card
	 *
	 * however version 4 of the SROM can have this test applied
	 * without fear of breaking something that used to work.
	 * the CRC algorithm is taken from the Intel document
	 *	"21x4 Serial ROM Format"
	 *	version 4.09
	 *	3-Mar-1999
	 */

	switch (vi[SROM_VERSION]) {
	case 1:
	    /* fallthru */
	case 3:
		return (vi[SROM_MBZ] == 0 &&	/* must be zero */
		    vi[SROM_MBZ2] == 0 &&	/* must be zero */
		    vi[SROM_MBZ3] == 0);	/* must be zero */

	case 4:
		/*
		 * Bitwise CRC-8 (poly x^8+x^2+x+1, i.e. 0x07) over the
		 * first 135 bits; the stored CRC byte is vi[16].
		 */
		wvi = (uint16_t *)vi;
		crc = 0xff;
		for (word = 0; word < 9; word++)
			for (bit = 15; bit >= 0; bit--) {
				if (word == 8 && bit == 7)
					return (crc == vi[16]);
				bitval =
				    ((wvi[word] >> bit) & 1) ^ ((crc >> 7) & 1);
				crc <<= 1;
				if (bitval == 1) {
					crc ^= 7;
				}
			}
		/* FALLTHROUGH */

	default:
		return (0);
	}
}

/*
 * ========== Active Media Determination Routines ==========
 */

/* This routine is also called for V3 Compact and extended type 0 SROMs */
static int
is_fdmedia(int media)
{
	if (media == MEDIA_TP_FD || media == MEDIA_SYM_SCR_FD)
		return (1);
	else
		return (0);
}

/*
 * "Linkset" is used to merge media that use the same link test check. So,
 * if the TP link is added to the linkset, so is the TP Full duplex link.
 * Used to avoid checking the same link status twice.
 */
static void
linkset_add(uint32_t *set, int media)
{
	if (media == MEDIA_TP_FD || media == MEDIA_TP)
		*set |= (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_TP);
	else if (media == MEDIA_SYM_SCR_FD || media == MEDIA_SYM_SCR)
		*set |= (1UL<<MEDIA_SYM_SCR_FD) | (1UL<<MEDIA_SYM_SCR);
	else *set |= 1UL<<media;
}

/* test whether 'media' is a member of 'linkset' */
static int
linkset_isset(uint32_t linkset, int media)
{
	return (((1UL<<media) & linkset) ? 1:0);
}

/*
 * The following code detects which Media is connected for 21041/21140
 * Expect to change this code to support new 21140 variants.
3146 * find_active_media() - called with intrlock held. 3147 */ 3148 static void 3149 find_active_media(struct dnetinstance *dnetp) 3150 { 3151 int i; 3152 media_block_t *block; 3153 media_block_t *best_allowed = NULL; 3154 media_block_t *hd_found = NULL; 3155 media_block_t *fd_found = NULL; 3156 LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf]; 3157 uint32_t checked = 0, links_up = 0; 3158 3159 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3160 3161 dnetp->selected_media_block = leaf->default_block; 3162 3163 if (dnetp->phyaddr != -1) { 3164 dnetp->selected_media_block = leaf->mii_block; 3165 setup_block(dnetp); 3166 3167 if (ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3168 DDI_PROP_DONTPASS, "portmon", 1)) { 3169 /* XXX return value ignored */ 3170 (void) mii_start_portmon(dnetp->mii, dnet_mii_link_cb, 3171 &dnetp->intrlock); 3172 /* 3173 * If the port monitor detects the link is already 3174 * up, there is no point going through the rest of the 3175 * link sense 3176 */ 3177 if (dnetp->mii_up) { 3178 return; 3179 } 3180 } 3181 } 3182 3183 /* 3184 * Media is searched for in order of Precedence. This DEC SROM spec 3185 * tells us that the first media entry in the SROM is the lowest 3186 * precedence and should be checked last. This is why we go to the last 3187 * Media block and work back to the beginning. 3188 * 3189 * However, some older SROMs (Cogent EM110's etc.) have this the wrong 3190 * way around. As a result, following the SROM spec would result in a 3191 * 10 link being chosen over a 100 link if both media are available. 3192 * So we continue trying the media until we have at least tried the 3193 * DEFAULT media. 
3194 */ 3195 3196 /* Search for an active medium, and select it */ 3197 for (block = leaf->block + leaf->block_count - 1; 3198 block >= leaf->block; block--) { 3199 int media = block->media_code; 3200 3201 /* User settings disallow selection of this block */ 3202 if (dnetp->disallowed_media & (1UL<<media)) 3203 continue; 3204 3205 /* We may not be able to pick the default */ 3206 if (best_allowed == NULL || block == leaf->default_block) 3207 best_allowed = block; 3208 #ifdef DEBUG 3209 if (dnetdebug & DNETSENSE) 3210 cmn_err(CE_NOTE, "Testing %s medium (block type %d)", 3211 media_str[media], block->type); 3212 #endif 3213 3214 dnetp->selected_media_block = block; 3215 switch (block->type) { 3216 3217 case 2: /* SIA Media block: Best we can do is send a packet */ 3218 setup_block(dnetp); 3219 if (send_test_packet(dnetp)) { 3220 if (!is_fdmedia(media)) 3221 return; 3222 if (!fd_found) 3223 fd_found = block; 3224 } 3225 break; 3226 3227 /* SYM/SCR or TP block: Use the link-sense bits */ 3228 case 0: 3229 if (!linkset_isset(checked, media)) { 3230 linkset_add(&checked, media); 3231 if (((media == MEDIA_BNC || 3232 media == MEDIA_AUI) && 3233 send_test_packet(dnetp)) || 3234 dnet_link_sense(dnetp)) 3235 linkset_add(&links_up, media); 3236 } 3237 3238 if (linkset_isset(links_up, media)) { 3239 /* 3240 * Half Duplex is *always* the favoured media. 3241 * Full Duplex can be set and forced via the 3242 * conf file. 3243 */ 3244 if (!is_fdmedia(media) && 3245 dnetp->selected_media_block == 3246 leaf->default_block) { 3247 /* 3248 * Cogent cards have the media in 3249 * opposite order to the spec., 3250 * this code forces the media test to 3251 * keep going until the default media 3252 * is tested. 3253 * 3254 * In Cogent case, 10, 10FD, 100FD, 100 3255 * 100 is the default but 10 could have 3256 * been detected and would have been 3257 * chosen but now we force it through to 3258 * 100. 
3259 */ 3260 setup_block(dnetp); 3261 return; 3262 } else if (!is_fdmedia(media)) { 3263 /* 3264 * This allows all the others to work 3265 * properly by remembering the media 3266 * that works and not defaulting to 3267 * a FD link. 3268 */ 3269 if (hd_found == NULL) 3270 hd_found = block; 3271 } else if (fd_found == NULL) { 3272 /* 3273 * No media have already been found 3274 * so far, this is FD, it works so 3275 * remember it and if no others are 3276 * detected, use it. 3277 */ 3278 fd_found = block; 3279 } 3280 } 3281 break; 3282 3283 /* 3284 * MII block: May take up to a second or so to settle if 3285 * setup causes a PHY reset 3286 */ 3287 case 1: case 3: 3288 setup_block(dnetp); 3289 for (i = 0; ; i++) { 3290 if (mii_linkup(dnetp->mii, dnetp->phyaddr)) { 3291 /* XXX function return value ignored */ 3292 (void) mii_getspeed(dnetp->mii, 3293 dnetp->phyaddr, 3294 &dnetp->mii_speed, 3295 &dnetp->mii_duplex); 3296 dnetp->mii_up = 1; 3297 leaf->mii_block = block; 3298 return; 3299 } 3300 if (i == 10) 3301 break; 3302 delay(drv_usectohz(150000)); 3303 } 3304 dnetp->mii_up = 0; 3305 break; 3306 } 3307 } /* for loop */ 3308 if (hd_found) { 3309 dnetp->selected_media_block = hd_found; 3310 } else if (fd_found) { 3311 dnetp->selected_media_block = fd_found; 3312 } else { 3313 if (best_allowed == NULL) 3314 best_allowed = leaf->default_block; 3315 dnetp->selected_media_block = best_allowed; 3316 cmn_err(CE_WARN, "!dnet: Default media selected\n"); 3317 } 3318 setup_block(dnetp); 3319 } 3320 3321 /* 3322 * Do anything neccessary to select the selected_media_block. 3323 * setup_block() - called with intrlock held. 
 */
static void
setup_block(struct dnetinstance *dnetp)
{
	/* Full reset/reinit/restart cycle applies the selected media block */
	dnet_reset_board(dnetp);
	dnet_init_board(dnetp);
	/* XXX function return value ignored */
	(void) dnet_start(dnetp);
}

/*
 * dnet_link_sense() - called with intrlock held.
 * Sample the link-sense GPR bit for the currently selected media block.
 * Returns 1 if the link was sampled up 8 times in a row, 0 otherwise
 * (including when the block's command word says sensing is unsupported).
 */
static int
dnet_link_sense(struct dnetinstance *dnetp)
{
	/*
	 * This routine makes use of the command word from the srom config.
	 * Details of the auto-sensing information contained in this can
	 * be found in the "Digital Semiconductor 21X4 Serial ROM Format v3.03"
	 * spec. Section 4.3.2.1, and 4.5.2.1.3
	 */
	media_block_t *block = dnetp->selected_media_block;
	uint32_t link, status, mask, polarity;
	int settletime, stabletime, waittime, upsamples;
	int delay_100, delay_10;


	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/* Don't autosense if the medium does not support it */
	if (block->command & (1 << 15)) {
		/* This should be the default block */
		if (block->command & (1UL<<14))
			dnetp->sr.leaf[dnetp->leaf].default_block = block;
		return (0);
	}

	/* Settle times (ms) are tunable via driver properties */
	delay_100 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
	    DDI_PROP_DONTPASS, "autosense-delay-100", 2000);

	delay_10 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
	    DDI_PROP_DONTPASS, "autosense-delay-10", 400);

	/*
	 * Scrambler may need to be disabled for link sensing
	 * to work
	 */
	dnetp->disable_scrambler = 1;
	setup_block(dnetp);
	dnetp->disable_scrambler = 0;

	if (block->media_code == MEDIA_TP || block->media_code == MEDIA_TP_FD)
		settletime = delay_10;
	else
		settletime = delay_100;
	stabletime = settletime / 4;

	/* Which GPR bit carries link sense, and its active polarity */
	mask = 1 << ((block->command & CMD_MEDIABIT_MASK) >> 1);
	polarity = block->command & CMD_POL ? 0xffffffff : 0;

	/* Sample every stabletime/8 ms; require 8 consecutive "up" samples */
	for (waittime = 0, upsamples = 0;
	    waittime <= settletime + stabletime && upsamples < 8;
	    waittime += stabletime/8) {
		delay(drv_usectohz(stabletime*1000 / 8));
		status = read_gpr(dnetp);
		link = (status^polarity) & mask;
		if (link)
			upsamples++;
		else
			upsamples = 0;
	}
#ifdef DNETDEBUG
	if (dnetdebug & DNETSENSE)
		cmn_err(CE_NOTE, "%s upsamples:%d stat:%x polarity:%x "
		    "mask:%x link:%x",
		    upsamples == 8 ? "UP":"DOWN",
		    upsamples, status, polarity, mask, link);
#endif
	if (upsamples == 8)
		return (1);
	return (0);
}

/*
 * Transmit one self-addressed test frame on the selected medium and report
 * whether it was sent without error.  Returns non-zero on success, 0 on
 * failure/timeout.  Called with intrlock held.
 */
static int
send_test_packet(struct dnetinstance *dnetp)
{
	int packet_delay;
	struct tx_desc_type *desc;
	int bufindex;
	int media_code = dnetp->selected_media_block->media_code;
	uint32_t del;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/*
	 * For a successful test packet, the card must have settled into
	 * its current setting.  Almost all cards we've tested manage to
	 * do this with all media within 50ms.  However, the SMC 8432
	 * requires 300ms to settle into BNC mode.  We now only do this
	 * from attach, and we do sleeping delay() instead of drv_usecwait()
	 * so we hope this .2 second delay won't cause too much suffering.
	 * ALSO: with an autonegotiating hub, an additional 1 second delay is
	 * required.
	 * This is done if the media type is TP.
	 */
	if (media_code == MEDIA_TP || media_code == MEDIA_TP_FD) {
		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
		    DDI_PROP_DONTPASS, "test_packet_delay_tp", 1300000);
	} else {
		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
		    DDI_PROP_DONTPASS, "test_packet_delay", 300000);
	}
	delay(drv_usectohz(packet_delay));

	desc = dnetp->tx_desc;

	bufindex = dnetp->tx_current_desc;
	if (alloc_descriptor(dnetp) == FAILURE) {
		cmn_err(CE_WARN, "DNET: send_test_packet: alloc_descriptor"
		    "failed");
		return (0);
	}

	/*
	 * use setup buffer as the buffer for the test packet
	 * instead of allocating one.
	 */

	ASSERT(dnetp->setup_buf_vaddr != NULL);
	/* Put something decent in dest address so we don't annoy other cards */
	BCOPY((caddr_t)dnetp->curr_macaddr,
	    (caddr_t)dnetp->setup_buf_vaddr, ETHERADDRL);
	BCOPY((caddr_t)dnetp->curr_macaddr,
	    (caddr_t)dnetp->setup_buf_vaddr+ETHERADDRL, ETHERADDRL);

	/* Single-fragment frame: first == last, interrupt on completion */
	desc[bufindex].buffer1 = dnetp->setup_buf_paddr;
	desc[bufindex].desc1.buffer_size1 = SETUPBUF_SIZE;
	desc[bufindex].buffer2 = (uint32_t)(0);
	desc[bufindex].desc1.first_desc = 1;
	desc[bufindex].desc1.last_desc = 1;
	desc[bufindex].desc1.int_on_comp = 1;
	desc[bufindex].desc0.own = 1;

	/* Kick the transmit engine to pick up the descriptor */
	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
	    TX_POLL_DEMAND);

	/*
	 * Give enough time for the chip to transmit the packet
	 */
#if 1
	del = 1000;
	while (desc[bufindex].desc0.own && --del)
		drv_usecwait(10);	/* quickly wait up to 10ms */
	if (desc[bufindex].desc0.own)
		delay(drv_usectohz(200000));	/* nicely wait a longer time */
#else
	del = 0x10000;
	while (desc[bufindex].desc0.own && --del)
		drv_usecwait(10);
#endif

#ifdef DNETDEBUG
	if (dnetdebug & DNETSENSE)
		cmn_err(CE_NOTE, "desc0 bits = %u, %u, %u, %u, %u, %u",
		    desc[bufindex].desc0.own,
		    desc[bufindex].desc0.err_summary,
		    desc[bufindex].desc0.carrier_loss,
		    desc[bufindex].desc0.no_carrier,
		    desc[bufindex].desc0.late_collision,
		    desc[bufindex].desc0.link_fail);
#endif
	if (desc[bufindex].desc0.own) /* it shouldn't take this long, error */
		return (0);

	/* Success iff the chip reported no error summary for the frame */
	return (!desc[bufindex].desc0.err_summary);
}

/* enable_interrupts - called with intrlock held */
static void
enable_interrupts(struct dnetinstance *dnetp)
{
	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/* Don't enable interrupts if they have been forced off */
	if (dnetp->interrupts_disabled)
		return;
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG),
	    ABNORMAL_INTR_MASK | NORMAL_INTR_MASK | SYSTEM_ERROR_MASK |
	    (dnetp->timer.cb ? GPTIMER_INTR : 0) |
	    RX_INTERRUPT_MASK |
	    TX_INTERRUPT_MASK | TX_JABBER_MASK | TX_UNDERFLOW_MASK);
}

/*
 * Some older multiport cards are non-PCI compliant in their interrupt routing.
 * Second and subsequent devices are incorrectly configured by the BIOS
 * (either in their ILINE configuration or the MP Configuration Table for PC+MP
 * systems).
 * The hack stops registering the interrupt routine for the FIRST
 * device on the adapter, and registers its own.  It builds up a table
 * of dnetp structures for each device, and the new interrupt routine
 * calls dnet_intr for each of them.
 * Known cards that suffer from this problem are:
 *	All Cogent multiport cards;
 *	Znyx 314;
 *	Znyx 315.
 *
 * XXX NEEDSWORK -- see comments above get_alternative_srom_image().  This
 * hack relies on the fact that the offending cards will have only one SROM.
 * It uses this fact to identify devices that are on the same multiport
 * adapter, as opposed to multiple devices from the same vendor (as
 * indicated by "secondary")
 */

/*
 * Decide whether this device needs the shared-interrupt workaround and, for
 * the primary device, install the shared handler.
 * Returns -1 when our own handler was registered (caller must NOT register
 * another interrupt), 0 otherwise (normal interrupt registration proceeds).
 */
static int
dnet_hack_interrupts(struct dnetinstance *dnetp, int secondary)
{
	int i;
	struct hackintr_inf *hackintr_inf;
	dev_info_t *devinfo = dnetp->devinfo;
	uint32_t oui = 0;	/* Organizationally Unique ID */

	/* Administratively disabled via driver property */
	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "no_INTA_workaround", 0) != 0)
		return (0);

	for (i = 0; i < 3; i++)
		oui = (oui << 8) | dnetp->vendor_addr[i];

	/* Check whether or not we need to implement the hack */

	switch (oui) {
	case ZNYX_ETHER:
		/* Znyx multiport 21040 cards <<==>> ZX314 or ZX315 */
		if (dnetp->board_type != DEVICE_ID_21040)
			return (0);
		break;

	case COGENT_ETHER:
		/* All known Cogent multiport cards */
		break;

	case ADAPTEC_ETHER:
		/* Adaptec multiport cards */
		break;

	default:
		/* Other cards work correctly */
		return (0);
	}

	/* card is (probably) non-PCI compliant in its interrupt routing */


	if (!secondary) {

		/*
		 * If we have already registered a hacked interrupt, and
		 * this is also a 'primary' adapter, then this is NOT part of
		 * a multiport card, but a second card on the same PCI bus.
		 * BUGID: 4057747
		 */
		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
		    DDI_PROP_DONTPASS, hackintr_propname, 0) != 0)
			return (0);
		/* ... Primary not part of a multiport device */

#ifdef DNETDEBUG
		if (dnetdebug & DNETTRACE)
			cmn_err(CE_NOTE, "dnet: Implementing hardware "
			    "interrupt flaw workaround");
#endif
		dnetp->hackintr_inf = hackintr_inf =
		    kmem_zalloc(sizeof (struct hackintr_inf), KM_SLEEP);
		/* NOTE(review): KM_SLEEP cannot return NULL; defensive only */
		if (hackintr_inf == NULL)
			goto fail;

		hackintr_inf->dnetps[0] = dnetp;
		hackintr_inf->devinfo = devinfo;

		/*
		 * Add a property to allow successive attaches to find the
		 * table
		 */

		if (ddi_prop_update_byte_array(DDI_DEV_T_NONE,
		    ddi_get_parent(devinfo), hackintr_propname,
		    (uchar_t *)&dnetp->hackintr_inf,
		    sizeof (void *)) != DDI_PROP_SUCCESS)
			goto fail;


		/* Register our hacked interrupt routine */
		if (ddi_add_intr(devinfo, 0, &dnetp->icookie, NULL,
		    (uint_t (*)(char *))dnet_hack_intr,
		    (caddr_t)hackintr_inf) != DDI_SUCCESS) {
			/* XXX function return value ignored */
			(void) ddi_prop_remove(DDI_DEV_T_NONE,
			    ddi_get_parent(devinfo),
			    hackintr_propname);
			goto fail;
		}

		/*
		 * Mutex required to ensure interrupt routine has completed
		 * when detaching devices
		 */
		mutex_init(&hackintr_inf->lock, NULL, MUTEX_DRIVER,
		    dnetp->icookie);

		/* Stop GLD registering an interrupt */
		return (-1);
	} else {

		/* Add the dnetp for this secondary device to the table */

		hackintr_inf = (struct hackintr_inf *)(uintptr_t)
		    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
		    DDI_PROP_DONTPASS, hackintr_propname, 0);

		if (hackintr_inf == NULL)
			goto fail;

		/* Find an empty slot */
		for (i = 0; i < MAX_INST; i++)
			if (hackintr_inf->dnetps[i] == NULL)
				break;

		/* More than 8 ports on adapter ?! */
		if (i == MAX_INST)
			goto fail;

		hackintr_inf->dnetps[i] = dnetp;

		/*
		 * Allow GLD to register a handler for this
		 * device.  If the card is actually broken, as we suspect, this
		 * handler will never get called.  However, by registering the
		 * interrupt handler, we can cope gracefully with new multiport
		 * Cogent cards that decide to fix the hardware problem
		 */
		return (0);
	}

fail:
	cmn_err(CE_WARN, "dnet: Could not work around hardware interrupt"
	    " routing problem");
	return (0);
}

/*
 * Call dnet_intr for all adapters on a multiport card
 */
static uint_t
dnet_hack_intr(struct hackintr_inf *hackintr_inf)
{
	int i;
	int claimed = DDI_INTR_UNCLAIMED;

	/* Stop detaches while processing interrupts */
	mutex_enter(&hackintr_inf->lock);

	/* Claim the interrupt if any device in the table claims it */
	for (i = 0; i < MAX_INST; i++) {
		if (hackintr_inf->dnetps[i] &&
		    dnet_intr((caddr_t)hackintr_inf->dnetps[i]) ==
		    DDI_INTR_CLAIMED) {
			claimed = DDI_INTR_CLAIMED;
		}
	}
	mutex_exit(&hackintr_inf->lock);
	return (claimed);
}

/*
 * This removes the detaching device from the table processed by the hacked
 * interrupt routine.  Because the interrupts from all devices come in to the
 * same interrupt handler, ALL devices must stop interrupting once the
 * primary device detaches.  This isn't a problem at present, because all
 * instances of a device are detached when the driver is unloaded.
 */
static int
dnet_detach_hacked_interrupt(dev_info_t *devinfo)
{
	int i;
	struct hackintr_inf *hackintr_inf;
	struct dnetinstance *altdnetp, *dnetp =
	    ddi_get_driver_private(devinfo);

	hackintr_inf = (struct hackintr_inf *)(uintptr_t)
	    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
	    DDI_PROP_DONTPASS, hackintr_propname, 0);

	/*
	 * No hackintr_inf implies hack was not required or the primary has
	 * detached, and our interrupts are already disabled
	 */
	if (!hackintr_inf) {
		/* remove the interrupt for the non-hacked case */
		ddi_remove_intr(devinfo, 0, dnetp->icookie);
		return (DDI_SUCCESS);
	}

	/* Remove this device from the handled table */
	mutex_enter(&hackintr_inf->lock);
	for (i = 0; i < MAX_INST; i++) {
		if (hackintr_inf->dnetps[i] == dnetp) {
			hackintr_inf->dnetps[i] = NULL;
			break;
		}
	}

	mutex_exit(&hackintr_inf->lock);

	/* Not the primary card, we are done */
	if (devinfo != hackintr_inf->devinfo)
		return (DDI_SUCCESS);

	/*
	 * This is the primary card.  All remaining adapters on this device
	 * must have their interrupts disabled before we remove the handler
	 */
	for (i = 0; i < MAX_INST; i++) {
		if ((altdnetp = hackintr_inf->dnetps[i]) != NULL) {
			altdnetp->interrupts_disabled = 1;
			ddi_put32(altdnetp->io_handle,
			    REG32(altdnetp->io_reg, INT_MASK_REG), 0);
		}
	}

	/* It should now be safe to remove the interrupt handler */

	ddi_remove_intr(devinfo, 0, dnetp->icookie);
	mutex_destroy(&hackintr_inf->lock);
	/* XXX function return value ignored */
	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
	    hackintr_propname);
	kmem_free(hackintr_inf, sizeof (struct hackintr_inf));
	return (DDI_SUCCESS);
}

/*
 * do_phy() - called with intrlock held.
 * Locate the MII media block in the SROM leaf (or fall back to the default
 * block), then probe for a PHY, trying address 0 last.
 */
static void
do_phy(struct dnetinstance *dnetp)
{
	dev_info_t *dip;
	LEAF_FORMAT *leaf = dnetp->sr.leaf + dnetp->leaf;
	media_block_t *block;
	int phy;

	dip = dnetp->devinfo;

	/*
	 * Find and configure the PHY media block.  If NO PHY blocks are
	 * found on the SROM, but a PHY device is present, we assume the card
	 * is a legacy device, and that there is ONLY a PHY interface on the
	 * card (ie, no BNC or AUI, and 10BaseT is implemented by the PHY
	 */

	/* Scan backwards for an MII block (types 1 and 3) */
	for (block = leaf->block + leaf->block_count -1;
	    block >= leaf->block; block --) {
		if (block->type == 3 || block->type == 1) {
			leaf->mii_block = block;
			break;
		}
	}

	/*
	 * If no MII block, select default, and hope this configuration will
	 * allow the phy to be read/written if it is present
	 */
	dnetp->selected_media_block = leaf->mii_block ?
	    leaf->mii_block : leaf->default_block;

	setup_block(dnetp);
	/* XXX function return value ignored */
	(void) mii_create(dip, dnet_mii_write, dnet_mii_read, &dnetp->mii);

	/*
	 * We try PHY 0 LAST because it is less likely to be connected
	 */
	for (phy = 1; phy < 33; phy++)
		if (mii_probe_phy(dnetp->mii, phy % 32) == MII_SUCCESS &&
		    mii_init_phy(dnetp->mii, phy % 32) == MII_SUCCESS) {
#ifdef DNETDEBUG
			if (dnetdebug & DNETSENSE)
				cmn_err(CE_NOTE, "dnet: "
				    "PHY at address %d", phy % 32);
#endif
			dnetp->phyaddr = phy % 32;
			if (!leaf->mii_block) {
				/* Legacy card, change the leaf node */
				set_leaf(&dnetp->sr, &leaf_phylegacy);
			}
			return;
		}
#ifdef DNETDEBUG
	if (dnetdebug & DNETSENSE)
		cmn_err(CE_NOTE, "dnet: No PHY found");
#endif
}

/*
 * Read one 16-bit MII register by bit-banging the management frame through
 * the ETHER_ROM_REG MDIO bits.  Returns the register value, or (ushort_t)-1
 * if the PHY did not drive the turnaround bit low.  Called with intrlock held.
 */
static ushort_t
dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num)
{
	struct dnetinstance *dnetp;

	uint32_t command_word;
	uint32_t tmp;
	uint32_t data = 0;
	int i;
	int bits_in_ushort = ((sizeof (ushort_t))*8);
	int turned_around = 0;

	dnetp = ddi_get_driver_private(dip);

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/* Write Preamble */
	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);

	/* Prepare command word */
	command_word = (uint32_t)phy_addr << MII_PHY_ADDR_ALIGN;
	command_word |= (uint32_t)reg_num << MII_REG_ADDR_ALIGN;
	command_word |= MII_READ_FRAME;

	write_mii(dnetp, command_word, bits_in_ushort-2);

	mii_tristate(dnetp);

	/* Check that the PHY generated a zero bit the 2nd clock */
	tmp = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG));

	turned_around = (tmp & MII_DATA_IN) ? 0 : 1;

	/* read data WORD */
	for (i = 0; i < bits_in_ushort; i++) {
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ);
		drv_usecwait(MII_DELAY);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ | MII_CLOCK);
		drv_usecwait(MII_DELAY);
		tmp = ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG));
		drv_usecwait(MII_DELAY);
		/* MSB first: shift in one bit per clock */
		data = (data << 1) | (tmp >> MII_DATA_IN_POSITION) & 0x0001;
	}

	mii_tristate(dnetp);
	return (turned_around ? data: -1);
}

/*
 * Write one 16-bit MII register by bit-banging the management frame.
 * Called with intrlock held.
 */
static void
dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num, int reg_dat)
{
	struct dnetinstance *dnetp;
	uint32_t command_word;
	int bits_in_ushort = ((sizeof (ushort_t))*8);

	dnetp = ddi_get_driver_private(dip);

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);

	/* Prepare command word */
	command_word = ((uint32_t)phy_addr << MII_PHY_ADDR_ALIGN);
	command_word |= ((uint32_t)reg_num << MII_REG_ADDR_ALIGN);
	command_word |= (MII_WRITE_FRAME | (uint32_t)reg_dat);

	/* Write frame carries the data word too, hence the doubled length */
	write_mii(dnetp, command_word, 2*bits_in_ushort);
	mii_tristate(dnetp);
}

/*
 * Write data size bits from mii_data to the MII control lines.
3892 */ 3893 static void 3894 write_mii(struct dnetinstance *dnetp, uint32_t mii_data, int data_size) 3895 { 3896 int i; 3897 uint32_t dbit; 3898 3899 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3900 for (i = data_size; i > 0; i--) { 3901 dbit = ((mii_data >> 3902 (31 - MII_WRITE_DATA_POSITION)) & MII_WRITE_DATA); 3903 ddi_put32(dnetp->io_handle, 3904 REG32(dnetp->io_reg, ETHER_ROM_REG), 3905 MII_WRITE | dbit); 3906 drv_usecwait(MII_DELAY); 3907 ddi_put32(dnetp->io_handle, 3908 REG32(dnetp->io_reg, ETHER_ROM_REG), 3909 MII_WRITE | MII_CLOCK | dbit); 3910 drv_usecwait(MII_DELAY); 3911 mii_data <<= 1; 3912 } 3913 } 3914 3915 /* 3916 * Put the MDIO port in tri-state for the turn around bits 3917 * in MII read and at end of MII management sequence. 3918 */ 3919 static void 3920 mii_tristate(struct dnetinstance *dnetp) 3921 { 3922 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3923 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG), 3924 MII_WRITE_TS); 3925 drv_usecwait(MII_DELAY); 3926 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG), 3927 MII_WRITE_TS | MII_CLOCK); 3928 drv_usecwait(MII_DELAY); 3929 } 3930 3931 3932 static void 3933 set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf) 3934 { 3935 if (sr->leaf && !sr->leaf->is_static) 3936 kmem_free(sr->leaf, sr->adapters * sizeof (LEAF_FORMAT)); 3937 sr->leaf = leaf; 3938 } 3939 3940 /* 3941 * Callback from MII module. Makes sure that the CSR registers are 3942 * configured properly if the PHY changes mode. 
3943 */ 3944 /* ARGSUSED */ 3945 /* dnet_mii_link_cb - called with intrlock held */ 3946 static void 3947 dnet_mii_link_cb(dev_info_t *dip, int phy, enum mii_phy_state state) 3948 { 3949 struct dnetinstance *dnetp = ddi_get_driver_private(dip); 3950 LEAF_FORMAT *leaf; 3951 3952 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3953 3954 leaf = dnetp->sr.leaf + dnetp->leaf; 3955 if (state == phy_state_linkup) { 3956 dnetp->mii_up = 1; 3957 3958 (void) mii_getspeed(dnetp->mii, dnetp->phyaddr, 3959 &dnetp->mii_speed, &dnetp->mii_duplex); 3960 3961 dnetp->selected_media_block = leaf->mii_block; 3962 setup_block(dnetp); 3963 } else { 3964 /* NEEDSWORK: Probably can call find_active_media here */ 3965 dnetp->mii_up = 0; 3966 3967 if (leaf->default_block->media_code == MEDIA_MII) 3968 dnetp->selected_media_block = leaf->default_block; 3969 setup_block(dnetp); 3970 } 3971 3972 if (dnetp->running) { 3973 mac_link_update(dnetp->mac_handle, 3974 (dnetp->mii_up ? LINK_STATE_UP : LINK_STATE_DOWN)); 3975 } 3976 } 3977 3978 /* 3979 * SROM parsing routines. 3980 * Refer to the Digital 3.03 SROM spec while reading this! (references refer 3981 * to this document) 3982 * Where possible ALL vendor specific changes should be localised here. 
The 3983 * SROM data should be capable of describing any programmatic irregularities 3984 * of DNET cards (via SIA or GP registers, in particular), so vendor specific 3985 * code elsewhere should not be required 3986 */ 3987 static void 3988 dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr, uchar_t *vi) 3989 { 3990 uint32_t ether_mfg = 0; 3991 int i; 3992 uchar_t *p; 3993 3994 if (!ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3995 DDI_PROP_DONTPASS, "no_sromconfig", 0)) 3996 dnetp->sr.init_from_srom = check_srom_valid(vi); 3997 3998 if (dnetp->sr.init_from_srom && dnetp->board_type != DEVICE_ID_21040) { 3999 /* Section 2/3: General SROM Format/ ID Block */ 4000 p = vi+18; 4001 sr->version = *p++; 4002 sr->adapters = *p++; 4003 4004 sr->leaf = 4005 kmem_zalloc(sr->adapters * sizeof (LEAF_FORMAT), KM_SLEEP); 4006 for (i = 0; i < 6; i++) 4007 sr->netaddr[i] = *p++; 4008 4009 for (i = 0; i < sr->adapters; i++) { 4010 uchar_t devno = *p++; 4011 uint16_t offset = *p++; 4012 offset |= *p++ << 8; 4013 sr->leaf[i].device_number = devno; 4014 parse_controller_leaf(dnetp, sr->leaf+i, vi+offset); 4015 } 4016 /* 4017 * 'Orrible hack for cogent cards. The 6911A board seems to 4018 * have an incorrect SROM. (From the OEMDEMO program 4019 * supplied by cogent, it seems that the ROM matches a setup 4020 * or a board with a QSI or ICS PHY. 4021 */ 4022 for (i = 0; i < 3; i++) 4023 ether_mfg = (ether_mfg << 8) | sr->netaddr[i]; 4024 4025 if (ether_mfg == ADAPTEC_ETHER) { 4026 static uint16_t cogent_gprseq[] = {0x821, 0}; 4027 switch (vi[COGENT_SROM_ID]) { 4028 case COGENT_ANA6911A_C: 4029 case COGENT_ANA6911AC_C: 4030 #ifdef DNETDEBUG 4031 if (dnetdebug & DNETTRACE) 4032 cmn_err(CE_WARN, 4033 "Suspected bad GPR sequence." 
4034 " Making a guess (821,0)"); 4035 #endif 4036 4037 /* XXX function return value ignored */ 4038 (void) ddi_prop_update_byte_array( 4039 DDI_DEV_T_NONE, dnetp->devinfo, 4040 "gpr-sequence", (uchar_t *)cogent_gprseq, 4041 sizeof (cogent_gprseq)); 4042 break; 4043 } 4044 } 4045 } else { 4046 /* 4047 * Adhoc SROM, check for some cards which need special handling 4048 * Assume vendor info contains ether address in first six bytes 4049 */ 4050 4051 uchar_t *mac = vi + ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 4052 DDI_PROP_DONTPASS, macoffset_propname, 0); 4053 4054 for (i = 0; i < 6; i++) 4055 sr->netaddr[i] = mac[i]; 4056 4057 if (dnetp->board_type == DEVICE_ID_21140) { 4058 for (i = 0; i < 3; i++) 4059 ether_mfg = (ether_mfg << 8) | mac[i]; 4060 4061 switch (ether_mfg) { 4062 case ASANTE_ETHER: 4063 dnetp->vendor_21140 = ASANTE_TYPE; 4064 dnetp->vendor_revision = 0; 4065 set_leaf(sr, &leaf_asante); 4066 sr->adapters = 1; 4067 break; 4068 4069 case COGENT_ETHER: 4070 case ADAPTEC_ETHER: 4071 dnetp->vendor_21140 = COGENT_EM_TYPE; 4072 dnetp->vendor_revision = 4073 vi[VENDOR_REVISION_OFFSET]; 4074 set_leaf(sr, &leaf_cogent_100); 4075 sr->adapters = 1; 4076 break; 4077 4078 default: 4079 dnetp->vendor_21140 = DEFAULT_TYPE; 4080 dnetp->vendor_revision = 0; 4081 set_leaf(sr, &leaf_default_100); 4082 sr->adapters = 1; 4083 break; 4084 } 4085 } else if (dnetp->board_type == DEVICE_ID_21041) { 4086 set_leaf(sr, &leaf_21041); 4087 } else if (dnetp->board_type == DEVICE_ID_21040) { 4088 set_leaf(sr, &leaf_21040); 4089 } 4090 } 4091 } 4092 4093 /* Section 4.2, 4.3, 4.4, 4.5 */ 4094 static void 4095 parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf, 4096 uchar_t *vi) 4097 { 4098 int i; 4099 4100 leaf->selected_contype = *vi++; 4101 leaf->selected_contype |= *vi++ << 8; 4102 4103 if (dnetp->board_type == DEVICE_ID_21140) /* Sect. 
4.3 */ 4104 leaf->gpr = *vi++; 4105 4106 leaf->block_count = *vi++; 4107 4108 if (leaf->block_count > MAX_MEDIA) { 4109 cmn_err(CE_WARN, "dnet: Too many media in SROM!"); 4110 leaf->block_count = 1; 4111 } 4112 for (i = 0; i <= leaf->block_count; i++) { 4113 vi = parse_media_block(dnetp, leaf->block + i, vi); 4114 if (leaf->block[i].command & CMD_DEFAULT_MEDIUM) 4115 leaf->default_block = leaf->block+i; 4116 } 4117 /* No explicit default block: use last in the ROM */ 4118 if (leaf->default_block == NULL) 4119 leaf->default_block = leaf->block + leaf->block_count -1; 4120 4121 } 4122 4123 static uchar_t * 4124 parse_media_block(struct dnetinstance *dnetp, media_block_t *block, uchar_t *vi) 4125 { 4126 int i; 4127 4128 /* 4129 * There are three kinds of media block we need to worry about: 4130 * The 21041 blocks. 4131 * 21140 blocks from a version 1 SROM 4132 * 2114[023] block from a version 3 SROM 4133 */ 4134 4135 if (dnetp->board_type == DEVICE_ID_21041) { 4136 /* Section 4.2 */ 4137 block->media_code = *vi & 0x3f; 4138 block->type = 2; 4139 if (*vi++ & 0x40) { 4140 block->un.sia.csr13 = *vi++; 4141 block->un.sia.csr13 |= *vi++ << 8; 4142 block->un.sia.csr14 = *vi++; 4143 block->un.sia.csr14 |= *vi++ << 8; 4144 block->un.sia.csr15 = *vi++; 4145 block->un.sia.csr15 |= *vi++ << 8; 4146 } else { 4147 /* No media data (csrs 13,14,15). 
Insert defaults */ 4148 switch (block->media_code) { 4149 case MEDIA_TP: 4150 block->un.sia.csr13 = 0xef01; 4151 block->un.sia.csr14 = 0x7f3f; 4152 block->un.sia.csr15 = 0x0008; 4153 break; 4154 case MEDIA_TP_FD: 4155 block->un.sia.csr13 = 0xef01; 4156 block->un.sia.csr14 = 0x7f3d; 4157 block->un.sia.csr15 = 0x0008; 4158 break; 4159 case MEDIA_BNC: 4160 block->un.sia.csr13 = 0xef09; 4161 block->un.sia.csr14 = 0x0705; 4162 block->un.sia.csr15 = 0x0006; 4163 break; 4164 case MEDIA_AUI: 4165 block->un.sia.csr13 = 0xef09; 4166 block->un.sia.csr14 = 0x0705; 4167 block->un.sia.csr15 = 0x000e; 4168 break; 4169 } 4170 } 4171 } else if (*vi & 0x80) { /* Extended format: Section 4.3.2.2 */ 4172 int blocklen = *vi++ & 0x7f; 4173 block->type = *vi++; 4174 switch (block->type) { 4175 case 0: /* "non-MII": Section 4.3.2.2.1 */ 4176 block->media_code = (*vi++) & 0x3f; 4177 block->gprseqlen = 1; 4178 block->gprseq[0] = *vi++; 4179 block->command = *vi++; 4180 block->command |= *vi++ << 8; 4181 break; 4182 4183 case 1: /* MII/PHY: Section 4.3.2.2.2 */ 4184 block->command = CMD_PS; 4185 block->media_code = MEDIA_MII; 4186 /* This is whats needed in CSR6 */ 4187 4188 block->un.mii.phy_num = *vi++; 4189 block->gprseqlen = *vi++; 4190 4191 for (i = 0; i < block->gprseqlen; i++) 4192 block->gprseq[i] = *vi++; 4193 block->rstseqlen = *vi++; 4194 for (i = 0; i < block->rstseqlen; i++) 4195 block->rstseq[i] = *vi++; 4196 4197 block->un.mii.mediacaps = *vi++; 4198 block->un.mii.mediacaps |= *vi++ << 8; 4199 block->un.mii.nwayadvert = *vi++; 4200 block->un.mii.nwayadvert |= *vi++ << 8; 4201 block->un.mii.fdxmask = *vi++; 4202 block->un.mii.fdxmask |= *vi++ << 8; 4203 block->un.mii.ttmmask = *vi++; 4204 block->un.mii.ttmmask |= *vi++ << 8; 4205 break; 4206 4207 case 2: /* SIA Media: Section 4.4.2.1.1 */ 4208 block->media_code = *vi & 0x3f; 4209 if (*vi++ & 0x40) { 4210 block->un.sia.csr13 = *vi++; 4211 block->un.sia.csr13 |= *vi++ << 8; 4212 block->un.sia.csr14 = *vi++; 4213 
block->un.sia.csr14 |= *vi++ << 8; 4214 block->un.sia.csr15 = *vi++; 4215 block->un.sia.csr15 |= *vi++ << 8; 4216 } else { 4217 /* 4218 * SIA values not provided by SROM; provide 4219 * defaults. See appendix D of 2114[23] manuals. 4220 */ 4221 switch (block->media_code) { 4222 case MEDIA_BNC: 4223 block->un.sia.csr13 = 0x0009; 4224 block->un.sia.csr14 = 0x0705; 4225 block->un.sia.csr15 = 0x0000; 4226 break; 4227 case MEDIA_AUI: 4228 block->un.sia.csr13 = 0x0009; 4229 block->un.sia.csr14 = 0x0705; 4230 block->un.sia.csr15 = 0x0008; 4231 break; 4232 case MEDIA_TP: 4233 block->un.sia.csr13 = 0x0001; 4234 block->un.sia.csr14 = 0x7f3f; 4235 block->un.sia.csr15 = 0x0000; 4236 break; 4237 case MEDIA_TP_FD: 4238 block->un.sia.csr13 = 0x0001; 4239 block->un.sia.csr14 = 0x7f3d; 4240 block->un.sia.csr15 = 0x0000; 4241 break; 4242 default: 4243 block->un.sia.csr13 = 0x0000; 4244 block->un.sia.csr14 = 0x0000; 4245 block->un.sia.csr15 = 0x0000; 4246 } 4247 } 4248 4249 /* Treat GP control/data as a GPR sequence */ 4250 block->gprseqlen = 2; 4251 block->gprseq[0] = *vi++; 4252 block->gprseq[0] |= *vi++ << 8; 4253 block->gprseq[0] |= GPR_CONTROL_WRITE; 4254 block->gprseq[1] = *vi++; 4255 block->gprseq[1] |= *vi++ << 8; 4256 break; 4257 4258 case 3: /* MII/PHY : Section 4.4.2.1.2 */ 4259 block->command = CMD_PS; 4260 block->media_code = MEDIA_MII; 4261 block->un.mii.phy_num = *vi++; 4262 4263 block->gprseqlen = *vi++; 4264 for (i = 0; i < block->gprseqlen; i++) { 4265 block->gprseq[i] = *vi++; 4266 block->gprseq[i] |= *vi++ << 8; 4267 } 4268 4269 block->rstseqlen = *vi++; 4270 for (i = 0; i < block->rstseqlen; i++) { 4271 block->rstseq[i] = *vi++; 4272 block->rstseq[i] |= *vi++ << 8; 4273 } 4274 block->un.mii.mediacaps = *vi++; 4275 block->un.mii.mediacaps |= *vi++ << 8; 4276 block->un.mii.nwayadvert = *vi++; 4277 block->un.mii.nwayadvert |= *vi++ << 8; 4278 block->un.mii.fdxmask = *vi++; 4279 block->un.mii.fdxmask |= *vi++ << 8; 4280 block->un.mii.ttmmask = *vi++; 4281 
block->un.mii.ttmmask |= *vi++ << 8; 4282 block->un.mii.miiintr |= *vi++; 4283 break; 4284 4285 case 4: /* SYM Media: 4.5.2.1.3 */ 4286 block->media_code = *vi++ & 0x3f; 4287 /* Treat GP control and data as a GPR sequence */ 4288 block->gprseqlen = 2; 4289 block->gprseq[0] = *vi++; 4290 block->gprseq[0] |= *vi++ << 8; 4291 block->gprseq[0] |= GPR_CONTROL_WRITE; 4292 block->gprseq[1] = *vi++; 4293 block->gprseq[1] |= *vi++ << 8; 4294 block->command = *vi++; 4295 block->command |= *vi++ << 8; 4296 break; 4297 4298 case 5: /* GPR reset sequence: Section 4.5.2.1.4 */ 4299 block->rstseqlen = *vi++; 4300 for (i = 0; i < block->rstseqlen; i++) 4301 block->rstseq[i] = *vi++; 4302 break; 4303 4304 default: /* Unknown media block. Skip it. */ 4305 cmn_err(CE_WARN, "dnet: Unsupported SROM block."); 4306 vi += blocklen; 4307 break; 4308 } 4309 } else { /* Compact format (or V1 SROM): Section 4.3.2.1 */ 4310 block->type = 0; 4311 block->media_code = *vi++ & 0x3f; 4312 block->gprseqlen = 1; 4313 block->gprseq[0] = *vi++; 4314 block->command = *vi++; 4315 block->command |= (*vi++) << 8; 4316 } 4317 return (vi); 4318 } 4319 4320 4321 /* 4322 * An alternative to doing this would be to store the legacy ROMs in binary 4323 * format in the conf file, and in read_srom, pick out the data. This would 4324 * then allow the parser to continue on as normal. This makes it a little 4325 * easier to read. 
 */
/*
 * Build the static "fake SROM" leaf tables used for legacy cards whose real
 * SROM is absent or unusable.  Each table mirrors the layout a structured
 * SROM leaf would produce for that card family.
 */
static void
setup_legacy_blocks()
{
	LEAF_FORMAT *leaf;
	media_block_t *block;

	/* Default FAKE SROM */
	leaf = &leaf_default_100;
	leaf->is_static = 1;
	leaf->default_block = &leaf->block[3];
	leaf->block_count = 4; /* 100 cards are highly unlikely to have BNC */
	block = leaf->block;
	block->media_code = MEDIA_TP_FD;
	block->type = 0;
	block->command = 0x8e;	/* PCS, PS off, media sense: bit7, pol=1 */
	block++;
	block->media_code = MEDIA_TP;
	block->type = 0;
	block->command = 0x8e;	/* PCS, PS off, media sense: bit7, pol=1 */
	block++;
	block->media_code = MEDIA_SYM_SCR_FD;
	block->type = 0;
	block->command = 0x6d;	/* PCS, PS, SCR on, media sense: bit6, pol=0 */
	block++;
	block->media_code = MEDIA_SYM_SCR;
	block->type = 0;
	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */

	/* COGENT FAKE SROM */
	leaf = &leaf_cogent_100;
	leaf->is_static = 1;
	leaf->default_block = &leaf->block[4];
	leaf->block_count = 5; /* 100TX, 100TX-FD, 10T 10T-FD, BNC */
	block = leaf->block;	/* BNC */
	block->media_code = MEDIA_BNC;
	block->type = 0;
	block->command = 0x8000; /* No media sense, PCS, SCR, PS all off */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 1;

	block++;
	block->media_code = MEDIA_TP_FD;
	block->type = 0;
	block->command = 0x8e;	/* PCS, PS off, media sense: bit7, pol=1 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 0x26;

	block++;		/* 10BaseT */
	block->media_code = MEDIA_TP;
	block->type = 0;
	block->command = 0x8e;	/* PCS, PS off, media sense: bit7, pol=1 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 0x3e;

	block++;		/* 100BaseTX-FD */
	block->media_code = MEDIA_SYM_SCR_FD;
	block->type = 0;
	block->command = 0x6d;	/* PCS, PS, SCR on, media sense: bit6, pol=0 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 1;

	block++;		/* 100BaseTX */
	block->media_code = MEDIA_SYM_SCR;
	block->type = 0;
	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 1;

	/* Generic legacy card with a PHY. */
	leaf = &leaf_phylegacy;
	leaf->block_count = 1;
	leaf->mii_block = leaf->block;
	leaf->default_block = &leaf->block[0];
	leaf->is_static = 1;
	block = leaf->block;
	block->media_code = MEDIA_MII;
	block->type = 1;	/* MII Block type 1 */
	block->command = 1;	/* Port select */
	block->gprseqlen = 0;
	block->rstseqlen = 0;

	/* ASANTE FAKE SROM */
	leaf = &leaf_asante;
	leaf->is_static = 1;
	leaf->default_block = &leaf->block[0];
	leaf->block_count = 1;
	block = leaf->block;
	block->media_code = MEDIA_MII;
	block->type = 1;	/* MII Block type 1 */
	block->command = 1;	/* Port select */
	block->gprseqlen = 3;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x180;
	block->gprseq[1] = 0x80;
	block->gprseq[2] = 0x0;

	/* LEGACY 21041 card FAKE SROM */
	leaf = &leaf_21041;
	leaf->is_static = 1;
	leaf->block_count = 4;	/* SIA Blocks for TP, TPfd, BNC, AUI */
	leaf->default_block = &leaf->block[3];

	block = leaf->block;
	block->media_code = MEDIA_AUI;
	block->type = 2;
	block->un.sia.csr13 = 0xef09;
	block->un.sia.csr14 = 0x0705;
	block->un.sia.csr15 = 0x000e;

	block++;
	block->media_code = MEDIA_TP_FD;
	block->type = 2;
	block->un.sia.csr13 = 0xef01;
	block->un.sia.csr14 = 0x7f3d;
	block->un.sia.csr15 = 0x0008;

block++; 4454 block->media_code = MEDIA_BNC; 4455 block->type = 2; 4456 block->un.sia.csr13 = 0xef09; 4457 block->un.sia.csr14 = 0x0705; 4458 block->un.sia.csr15 = 0x0006; 4459 4460 block++; 4461 block->media_code = MEDIA_TP; 4462 block->type = 2; 4463 block->un.sia.csr13 = 0xef01; 4464 block->un.sia.csr14 = 0x7f3f; 4465 block->un.sia.csr15 = 0x0008; 4466 4467 /* LEGACY 21040 card FAKE SROM */ 4468 leaf = &leaf_21040; 4469 leaf->is_static = 1; 4470 leaf->block_count = 4; /* SIA Blocks for TP, TPfd, BNC, AUI */ 4471 block = leaf->block; 4472 block->media_code = MEDIA_AUI; 4473 block->type = 2; 4474 block->un.sia.csr13 = 0x8f09; 4475 block->un.sia.csr14 = 0x0705; 4476 block->un.sia.csr15 = 0x000e; 4477 block++; 4478 block->media_code = MEDIA_TP_FD; 4479 block->type = 2; 4480 block->un.sia.csr13 = 0x0f01; 4481 block->un.sia.csr14 = 0x7f3d; 4482 block->un.sia.csr15 = 0x0008; 4483 block++; 4484 block->media_code = MEDIA_BNC; 4485 block->type = 2; 4486 block->un.sia.csr13 = 0xef09; 4487 block->un.sia.csr14 = 0x0705; 4488 block->un.sia.csr15 = 0x0006; 4489 block++; 4490 block->media_code = MEDIA_TP; 4491 block->type = 2; 4492 block->un.sia.csr13 = 0x8f01; 4493 block->un.sia.csr14 = 0x7f3f; 4494 block->un.sia.csr15 = 0x0008; 4495 } 4496 4497 static void 4498 dnet_print_srom(SROM_FORMAT *sr) 4499 { 4500 int i; 4501 uchar_t *a = sr->netaddr; 4502 cmn_err(CE_NOTE, "SROM Dump: %d. 
ver %d, Num adapters %d," 4503 "Addr:%x:%x:%x:%x:%x:%x", 4504 sr->init_from_srom, sr->version, sr->adapters, 4505 a[0], a[1], a[2], a[3], a[4], a[5]); 4506 4507 for (i = 0; i < sr->adapters; i++) 4508 dnet_dump_leaf(sr->leaf+i); 4509 } 4510 4511 static void 4512 dnet_dump_leaf(LEAF_FORMAT *leaf) 4513 { 4514 int i; 4515 cmn_err(CE_NOTE, "Leaf: Device %d, block_count %d, gpr: %x", 4516 leaf->device_number, leaf->block_count, leaf->gpr); 4517 for (i = 0; i < leaf->block_count; i++) 4518 dnet_dump_block(leaf->block+i); 4519 } 4520 4521 static void 4522 dnet_dump_block(media_block_t *block) 4523 { 4524 cmn_err(CE_NOTE, "Block(%p): type %x, media %s, command: %x ", 4525 (void *)block, 4526 block->type, media_str[block->media_code], block->command); 4527 dnet_dumpbin("\tGPR Seq", (uchar_t *)block->gprseq, 2, 4528 block->gprseqlen *2); 4529 dnet_dumpbin("\tGPR Reset", (uchar_t *)block->rstseq, 2, 4530 block->rstseqlen *2); 4531 switch (block->type) { 4532 case 1: case 3: 4533 cmn_err(CE_NOTE, "\tMII Info: phy %d, nway %x, fdx" 4534 "%x, ttm %x, mediacap %x", 4535 block->un.mii.phy_num, block->un.mii.nwayadvert, 4536 block->un.mii.fdxmask, block->un.mii.ttmmask, 4537 block->un.mii.mediacaps); 4538 break; 4539 case 2: 4540 cmn_err(CE_NOTE, "\tSIA Regs: CSR13:%x, CSR14:%x, CSR15:%x", 4541 block->un.sia.csr13, block->un.sia.csr14, 4542 block->un.sia.csr15); 4543 break; 4544 } 4545 } 4546 4547 4548 /* Utility to print out binary info dumps. 
Handy for SROMs, etc */ 4549 4550 static int 4551 hexcode(unsigned val) 4552 { 4553 if (val <= 9) 4554 return (val +'0'); 4555 if (val <= 15) 4556 return (val + 'a' - 10); 4557 return (-1); 4558 } 4559 4560 static void 4561 dnet_dumpbin(char *msg, unsigned char *data, int size, int len) 4562 { 4563 char hex[128], *p = hex; 4564 char ascii[128], *q = ascii; 4565 int i, j; 4566 4567 if (!len) 4568 return; 4569 4570 for (i = 0; i < len; i += size) { 4571 for (j = size - 1; j >= 0; j--) { /* PORTABILITY: byte order */ 4572 *p++ = hexcode(data[i+j] >> 4); 4573 *p++ = hexcode(data[i+j] & 0xf); 4574 *q++ = (data[i+j] < 32 || data[i+j] > 127) ? 4575 '.' : data[i]; 4576 } 4577 *p++ = ' '; 4578 if (q-ascii >= 8) { 4579 *p = *q = 0; 4580 cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii); 4581 p = hex; 4582 q = ascii; 4583 } 4584 } 4585 if (p != hex) { 4586 while ((p - hex) < 8*3) 4587 *p++ = ' '; 4588 *p = *q = 0; 4589 cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii); 4590 } 4591 } 4592 4593 #ifdef DNETDEBUG 4594 void 4595 dnet_usectimeout(struct dnetinstance *dnetp, uint32_t usecs, int contin, 4596 timercb_t cback) 4597 { 4598 mutex_enter(&dnetp->intrlock); 4599 dnetp->timer.start_ticks = (usecs * 100) / 8192; 4600 dnetp->timer.cb = cback; 4601 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG), 4602 dnetp->timer.start_ticks | (contin ? 
GPTIMER_CONT : 0)); 4603 if (dnetp->timer.cb) 4604 enable_interrupts(dnetp); 4605 mutex_exit(&dnetp->intrlock); 4606 } 4607 4608 uint32_t 4609 dnet_usecelapsed(struct dnetinstance *dnetp) 4610 { 4611 uint32_t ticks = dnetp->timer.start_ticks - 4612 (ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG)) & 4613 0xffff); 4614 return ((ticks * 8192) / 100); 4615 } 4616 4617 /* ARGSUSED */ 4618 void 4619 dnet_timestamp(struct dnetinstance *dnetp, char *buf) 4620 { 4621 uint32_t elapsed = dnet_usecelapsed(dnetp); 4622 char loc[32], *p = loc; 4623 int firstdigit = 1; 4624 uint32_t divisor; 4625 4626 while (*p++ = *buf++) 4627 ; 4628 p--; 4629 4630 for (divisor = 1000000000; divisor /= 10; ) { 4631 int digit = (elapsed / divisor); 4632 elapsed -= digit * divisor; 4633 if (!firstdigit || digit) { 4634 *p++ = digit + '0'; 4635 firstdigit = 0; 4636 } 4637 4638 } 4639 4640 /* Actual zero, output it */ 4641 if (firstdigit) 4642 *p++ = '0'; 4643 4644 *p++ = '-'; 4645 *p++ = '>'; 4646 *p++ = 0; 4647 4648 printf(loc); 4649 dnet_usectimeout(dnetp, 1000000, 0, 0); 4650 } 4651 4652 #endif 4653