/* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */
/* $FreeBSD: src/sys/dev/txp/if_txp.c,v 1.4.2.4 2001/12/14 19:50:43 jlemon Exp $ */
/* $DragonFly: src/sys/dev/netif/txp/if_txp.c,v 1.50 2008/08/17 04:32:35 sephe Exp $ */

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>
#include <sys/interrupt.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <sys/in_cksum.h>

#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define TXP_USEIOSPACE
#define __STRICT_ALIGNMENT

#include "if_txpreg.h"
#include "3c990img.h"

/*
 * Various supported device vendors/types and their names.
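 * The list is terminated by an all-zero sentinel entry.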
 */
static struct txp_type txp_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95,
	  "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97,
	  "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B,
	  "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95,
	  "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97,
	  "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR,
	  "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
	{ 0, 0, NULL }
};

static int txp_probe		(device_t);
static int txp_attach		(device_t);
static int txp_detach		(device_t);
static void txp_intr		(void *);
static void txp_tick		(void *);
static int txp_shutdown		(device_t);
static int txp_ioctl		(struct ifnet *, u_long, caddr_t, struct ucred *);
static void txp_start		(struct ifnet *);
static void txp_stop		(struct txp_softc *);
static void txp_init		(void *);
static void txp_watchdog	(struct ifnet *);

static void txp_release_resources (device_t);
static int txp_chip_init (struct txp_softc *);
static int txp_reset_adapter (struct txp_softc *);
static int txp_download_fw (struct txp_softc *);
static int txp_download_fw_wait (struct txp_softc *);
static int txp_download_fw_section (struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings (struct txp_softc *);
static int txp_rxring_fill (struct txp_softc *);
static void txp_rxring_empty (struct txp_softc *);
static void txp_set_filter (struct txp_softc *);

static int txp_cmd_desc_numfree (struct txp_softc *);
static int txp_command (struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
static int txp_command2 (struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int);
static int txp_response (struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup (struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static void txp_capabilities (struct txp_softc *);

static void txp_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd (struct ifnet *);
#ifdef TXP_DEBUG
static void txp_show_descriptor (void *);
#endif
static void txp_tx_reclaim (struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim (struct txp_softc *);
static void txp_rx_reclaim (struct txp_softc *, struct txp_rx_ring *);

#ifdef TXP_USEIOSPACE
#define TXP_RES			SYS_RES_IOPORT
#define TXP_RID			TXP_PCI_LOIO
#else
#define TXP_RES			SYS_RES_MEMORY
#define TXP_RID			TXP_PCI_LOMEM
#endif

static device_method_t txp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		txp_probe),
	DEVMETHOD(device_attach,	txp_attach),
	DEVMETHOD(device_detach,	txp_detach),
	DEVMETHOD(device_shutdown,	txp_shutdown),
	{ 0, 0 }
};

static driver_t txp_driver = {
	"txp",
	txp_methods,
	sizeof(struct txp_softc)
};

static devclass_t txp_devclass;

DECLARE_DUMMY_MODULE(if_txp);
DRIVER_MODULE(if_txp, pci, txp_driver, txp_devclass, 0, 0);

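/*
 * Match the PCI vendor/device ID pair against the txp_devs table and
 * set the device description on a hit.
 */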
static int
txp_probe(device_t dev)
{
	struct txp_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (t = txp_devs; t->txp_name != NULL; ++t) {
		if ((vid == t->txp_vid) && (did == t->txp_did)) {
			device_set_desc(dev, t->txp_name);
			return(0);
		}
	}

	return(ENXIO);
}

static int
txp_attach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	uint16_t p1;
	uint32_t p2;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error = 0, rid;

	sc = device_get_softc(dev);
	callout_init(&sc->txp_stat_timer);

	ifp = &sc->sc_arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	pci_enable_busmaster(dev);

	rid = TXP_RID;
	sc->sc_res = bus_alloc_resource_any(dev, TXP_RES, &rid, RF_ACTIVE);

	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		return(ENXIO);
	}

	sc->sc_bt = rman_get_bustag(sc->sc_res);
	sc->sc_bh = rman_get_bushandle(sc->sc_res);

	/* Allocate interrupt */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if (txp_chip_init(sc)) {
		error = ENXIO;
		goto fail;
	}

	sc->sc_fwbuf = contigmalloc(32768, M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
	error = txp_download_fw(sc);
	contigfree(sc->sc_fwbuf, 32768, M_DEVBUF);
	sc->sc_fwbuf = NULL;

	if (error)
		goto fail;

	sc->sc_ldata = contigmalloc(sizeof(struct txp_ldata), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (txp_alloc_rings(sc)) {
		error = ENXIO;
		goto fail;
	}

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1)) {
		error = ENXIO;
		goto fail;
	}

	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1)) {
		error = ENXIO;
		goto fail;
	}

	txp_set_filter(sc);

	enaddr[0] = ((uint8_t *)&p1)[1];
	enaddr[1] = ((uint8_t *)&p1)[0];
	enaddr[2] = ((uint8_t *)&p2)[3];
	enaddr[3] = ((uint8_t *)&p2)[2];
	enaddr[4] = ((uint8_t *)&p2)[1];
	enaddr[5] = ((uint8_t *)&p2)[0];

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_init = txp_init;
	ifp->if_baudrate = 100000000;
	ifq_set_maxlen(&ifp->if_snd, TX_ENTRIES);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_hwassist = 0;
	txp_capabilities(sc);

	ether_ifattach(ifp, enaddr, NULL);

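	/*
	 * Hook the interrupt handler; it runs under the ifnet
	 * serializer since ifp->if_serializer is passed here.
	 */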
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
	    txp_intr, sc, &sc->sc_intrhand,
	    ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return(0);

fail:
	txp_release_resources(dev);
	return(error);
}

static int
txp_detach(device_t dev)
{
	struct txp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	lwkt_serialize_enter(ifp->if_serializer);

	txp_stop(sc);
	txp_shutdown(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);

	lwkt_serialize_exit(ifp->if_serializer);

	ifmedia_removeall(&sc->sc_ifmedia);
	ether_ifdetach(ifp);

	for (i = 0; i < RXBUF_ENTRIES; i++)
		kfree(sc->sc_rxbufs[i].rb_sd, M_DEVBUF);

	txp_release_resources(dev);

	return(0);
}

static void
txp_release_resources(device_t dev)
{
	struct txp_softc *sc;

	sc = device_get_softc(dev);

	if (sc->sc_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	if (sc->sc_res != NULL)
		bus_release_resource(dev, TXP_RES, TXP_RID, sc->sc_res);

	if (sc->sc_ldata != NULL)
		contigfree(sc->sc_ldata, sizeof(struct txp_ldata), M_DEVBUF);

	return;
}

static int
txp_chip_init(struct txp_softc *sc)
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}

static int
txp_reset_adapter(struct txp_softc *sc)
{
	u_int32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		if_printf(&sc->sc_arpcom.ac_if, "reset hung\n");
		return (-1);
	}

	return (0);
}

static int
txp_download_fw(struct txp_softc *sc)
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

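	/*
	 * Unmask the A2H_0 doorbell for the duration of the download so
	 * the boot handshake below can be observed; the original IER/IMR
	 * values are restored once the image is running.
	 */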
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		if_printf(&sc->sc_arpcom.ac_if,
		    "not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		if_printf(&sc->sc_arpcom.ac_if, "fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		if_printf(&sc->sc_arpcom.ac_if, "fw wait failed, initial\n");
		return (-1);
	}

	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < fileheader->nsections; sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + secthead->nbytes +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		if_printf(&sc->sc_arpcom.ac_if, "not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}

static int
txp_download_fw_wait(struct txp_softc *sc)
{
	u_int32_t i, r;

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		if_printf(&sc->sc_arpcom.ac_if, "fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		if_printf(&sc->sc_arpcom.ac_if, "fw not waiting for segment\n");
		return (-1);
	}
	return (0);
}

static int
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum)
{
	vm_offset_t dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		if_printf(&sc->sc_arpcom.ac_if, "fw invalid section address, "
		    "section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += sect->nbytes;
	if (rseg >= sizeof(tc990image)) {
		if_printf(&sc->sc_arpcom.ac_if, "fw truncated section %d\n",
		    sectnum);
		return (-1);
	}

	bcopy(((u_int8_t *)sect) + sizeof(*sect), sc->sc_fwbuf, sect->nbytes);
	dma = vtophys(sc->sc_fwbuf);

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = sect->nbytes;
	m.m_data = sc->sc_fwbuf;
	m.m_flags = 0;
	csum = in_cksum(&m, sect->nbytes);
	if (csum != sect->cksum) {
		if_printf(&sc->sc_arpcom.ac_if, "fw section %d, bad "
		    "cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

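	/*
	 * Hand the section to the boot firmware: length, checksum, load
	 * address and the physical address of the staging buffer, then
	 * signal that the segment is available.
	 */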
	WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
	WRITE_REG(sc, TXP_H2A_2, sect->cksum);
	WRITE_REG(sc, TXP_H2A_3, sect->addr);
	WRITE_REG(sc, TXP_H2A_4, 0);
	WRITE_REG(sc, TXP_H2A_5, dma & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		if_printf(&sc->sc_arpcom.ac_if, "fw wait failed, "
		    "section %d\n", sectnum);
		err = -1;
	}

bail:
	return (err);
}

static void
txp_intr(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txhir);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txlor);

		isr = READ_REG(sc, TXP_ISR);
	}

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	if_devstart(&sc->sc_arpcom.ac_if);
}

static void
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd = NULL;
	u_int32_t roff, woff;

	roff = *r->r_roff;
	woff = *r->r_woff;
	rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc));

	while (roff != woff) {

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			if_printf(ifp, "error 0x%x\n", rxd->rx_stat);
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer */
		sd = rxd->rx_sd;

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		m->m_pkthdr.len = m->m_len = rxd->rx_len;

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're forced to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, MB_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, MB_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			m_adj(mnew, 2);
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
			m_freem(m);
			m = mnew;
		}
#endif

		if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
			m->m_pkthdr.csum_flags |=
			    CSUM_IP_CHECKED|CSUM_IP_VALID;

		if ((rxd->rx_stat & RX_STAT_TCPCKSUMGOOD) ||
		    (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
			    CSUM_FRAG_NOT_CHECKED;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (rxd->rx_stat & RX_STAT_VLAN) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag = htons(rxd->rx_vlan >> 16);
		}
		ifp->if_input(ifp, m);

next:

		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			roff = 0;
			rxd = r->r_desc;
		} else
			rxd++;
		woff = *r->r_woff;
	}

	*r->r_roff = woff;

	return;
}

static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	i = sc->sc_rxbufprod;
	rbd = sc->sc_rxbufs + i;

	while (1) {
		sd = rbd->rb_sd;
		if (sd->sd_mbuf != NULL)
			break;

		MGETHDR(sd->sd_mbuf, MB_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, MB_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;

		rbd->rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t))
		    & 0xffffffff;
		rbd->rb_paddrhi = 0;

		hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}

	sc->sc_rxbufprod = i;

	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	kfree(sd, M_DEVBUF);
}

/*
 * Reclaim mbufs and entries from a transmit ring.
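 * Called from txp_intr() whenever the adapter has advanced the ring's
 * read offset past our consumer index.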
 */
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				ifp->if_opackets++;
			}
		}
		ifp->if_flags &= ~IFF_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;
}

static int
txp_shutdown(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = &sc->sc_arpcom.ac_if;
	lwkt_serialize_enter(ifp->if_serializer);

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);

	lwkt_serialize_exit(ifp->if_serializer);
	return(0);
}

static int
txp_alloc_rings(struct txp_softc *sc)
{
	struct txp_boot_record *boot;
	struct txp_ldata *ld;
	u_int32_t r;
	int i;

	ld = sc->sc_ldata;
	boot = &ld->txp_boot;

	/* boot record */
	sc->sc_boot = boot;

	/* host variables */
	bzero(&ld->txp_hostvar, sizeof(struct txp_hostvar));
	boot->br_hostvar_lo = vtophys(&ld->txp_hostvar);
	boot->br_hostvar_hi = 0;
	sc->sc_hostvar = (struct txp_hostvar *)&ld->txp_hostvar;

	/* hi priority tx ring */
	boot->br_txhipri_lo = vtophys(&ld->txp_txhiring);
	boot->br_txhipri_hi = 0;
	boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = (struct txp_tx_desc *)&ld->txp_txhiring;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;

	/* lo priority tx ring */
	boot->br_txlopri_lo = vtophys(&ld->txp_txloring);
	boot->br_txlopri_hi = 0;
	boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = (struct txp_tx_desc *)&ld->txp_txloring;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* high priority rx ring */
	boot->br_rxhipri_lo = vtophys(&ld->txp_rxhiring);
	boot->br_rxhipri_hi = 0;
	boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
	sc->sc_rxhir.r_desc = (struct txp_rx_desc *)&ld->txp_rxhiring;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;

	/* low priority rx ring */
	boot->br_rxlopri_lo = vtophys(&ld->txp_rxloring);
	boot->br_rxlopri_hi = 0;
	boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
	sc->sc_rxlor.r_desc = (struct txp_rx_desc *)&ld->txp_rxloring;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;

	/* command ring */
	bzero(&ld->txp_cmdring, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
	boot->br_cmd_lo = vtophys(&ld->txp_cmdring);
	boot->br_cmd_hi = 0;
	boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.base = (struct txp_cmd_desc *)&ld->txp_cmdring;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* response ring */
	bzero(&ld->txp_rspring, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
	boot->br_resp_lo = vtophys(&ld->txp_rspring);
	boot->br_resp_hi = 0;
	boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.base = (struct txp_rsp_desc *)&ld->txp_rspring;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* receive buffer ring */
	boot->br_rxbuf_lo = vtophys(&ld->txp_rxbufs);
	boot->br_rxbuf_hi = 0;
	boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc);
	sc->sc_rxbufs = (struct txp_rxbuf_desc *)&ld->txp_rxbufs;

	for (i = 0; i < RXBUF_ENTRIES; i++) {
		struct txp_swdesc *sd;
		if (sc->sc_rxbufs[i].rb_sd != NULL)
			continue;
		sc->sc_rxbufs[i].rb_sd = kmalloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_WAITOK);
		sd = sc->sc_rxbufs[i].rb_sd;
		sd->sd_mbuf = NULL;
	}
	sc->sc_rxbufprod = 0;

	/* zero dma */
	bzero(&ld->txp_zero, sizeof(u_int32_t));
	boot->br_zero_lo = vtophys(&ld->txp_zero);
	boot->br_zero_hi = 0;

	/* See if it's waiting for boot, and try to boot it */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}

	if (r != STAT_WAITING_FOR_BOOT) {
		if_printf(&sc->sc_arpcom.ac_if, "not waiting for boot\n");
		return(ENXIO);
	}

	WRITE_REG(sc, TXP_H2A_2, 0);
	WRITE_REG(sc, TXP_H2A_1, vtophys(sc->sc_boot));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

	/* See if it booted */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_RUNNING)
			break;
		DELAY(50);
	}
	if (r != STAT_RUNNING) {
		if_printf(&sc->sc_arpcom.ac_if, "fw not running\n");
		return(ENXIO);
	}

	/* Clear TX and CMD ring write registers */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);

	return (0);
}

static int
txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
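		 * txp_set_filter() recomputes the multicast hash and
		 * rewrites the receive filter via firmware commands.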
		 */
		txp_set_filter(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return(error);
}

static int
txp_rxring_fill(struct txp_softc *sc)
{
	int i;
	struct ifnet *ifp;
	struct txp_swdesc *sd;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = sc->sc_rxbufs[i].rb_sd;
		MGETHDR(sd->sd_mbuf, MB_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			return(ENOBUFS);

		MCLGET(sd->sd_mbuf, MB_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
			m_freem(sd->sd_mbuf);
			return(ENOBUFS);
		}
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;

		sc->sc_rxbufs[i].rb_paddrlo =
		    vtophys(mtod(sd->sd_mbuf, vm_offset_t));
		sc->sc_rxbufs[i].rb_paddrhi = 0;
	}

	sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) *
	    sizeof(struct txp_rxbuf_desc);

	return(0);
}

static void
txp_rxring_empty(struct txp_softc *sc)
{
	int i;
	struct txp_swdesc *sd;

	if (sc->sc_rxbufs == NULL)
		return;

	for (i = 0; i < RXBUF_ENTRIES; i++) {
		if (&sc->sc_rxbufs[i] == NULL)
			continue;
		sd = sc->sc_rxbufs[i].rb_sd;
		if (sd == NULL)
			continue;
		if (sd->sd_mbuf != NULL) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}

	return;
}

static void
txp_init(void *xsc)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	u_int16_t p1;
	u_int32_t p2;

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	txp_stop(sc);

	txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1);

	/* Set station address. */
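	/*
	 * The MAC address rides in the two command parameters p1/p2;
	 * the byte order below mirrors the layout returned by
	 * TXP_CMD_STATION_ADDRESS_READ in txp_attach().
	 */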
	((u_int8_t *)&p1)[1] = sc->sc_arpcom.ac_enaddr[0];
	((u_int8_t *)&p1)[0] = sc->sc_arpcom.ac_enaddr[1];
	((u_int8_t *)&p2)[3] = sc->sc_arpcom.ac_enaddr[2];
	((u_int8_t *)&p2)[2] = sc->sc_arpcom.ac_enaddr[3];
	((u_int8_t *)&p2)[1] = sc->sc_arpcom.ac_enaddr[4];
	((u_int8_t *)&p2)[0] = sc->sc_arpcom.ac_enaddr[5];
	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
	    NULL, NULL, NULL, 1);

	txp_set_filter(sc);

	txp_rxring_fill(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	callout_reset(&sc->txp_stat_timer, hz, txp_tick, sc);
}

static void
txp_tick(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	lwkt_serialize_enter(ifp->if_serializer);
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		kfree(rsp, M_DEVBUF);

	callout_reset(&sc->txp_stat_timer, hz, txp_tick, sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
txp_start(struct ifnet *ifp)
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *m0, *m_defragged;
	struct txp_swdesc *sd;
	u_int32_t firstprod, firstcnt, prod, cnt;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

	while (1) {
		int frag;

		firstprod = prod;
		firstcnt = cnt;

		if ((TX_ENTRIES - cnt) < 4)
			goto oactive;

		m_defragged = NULL;
		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;
again:
		frag = 1;	/* Extra desc */
		for (m0 = m; m0 != NULL; m0 = m0->m_next)
			++frag;
		if ((cnt + frag) >= (TX_ENTRIES - 4)) {
			if (m_defragged != NULL) {
				/*
				 * Even after defragmentation, there
				 * are still too many fragments, so
				 * drop this packet.
				 */
				m_freem(m);
				goto oactive;
			}

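			/*
			 * Too many fragments for the ring: try to
			 * collapse the chain into fewer mbufs, then
			 * recount.
			 */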
			m_defragged = m_defrag(m, MB_DONTWAIT);
			if (m_defragged == NULL) {
				m_freem(m);
				continue;
			}
			m = m_defragged;

			/* Recount # of fragments */
			goto again;
		}

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		txd = r->r_desc + prod;
		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = 0;
		txd->tx_pflags = 0;

		if (++prod == TX_ENTRIES)
			prod = 0;

		++cnt;
		KASSERT(cnt < (TX_ENTRIES - 4), ("too many frag\n"));

		if (m->m_flags & M_VLANTAG) {
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(m->m_pkthdr.ether_vlantag) <<
			     TX_PFLAGS_VLANTAG_S);
		}

		if (m->m_pkthdr.csum_flags & CSUM_IP)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;

#if 0
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (m0 = m; m0 != NULL; m0 = m0->m_next) {
			if (m0->m_len == 0)
				continue;

			++cnt;
			KASSERT(cnt < (TX_ENTRIES - 4), ("too many frag\n"));

			txd->tx_numdesc++;

			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = m0->m_len;
			fxd->frag_addrlo = vtophys(mtod(m0, vm_offset_t));
			fxd->frag_addrhi = 0;
			fxd->frag_rsvd2 = 0;

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;
		}

		ifp->if_timer = 5;

		ETHER_BPF_MTAP(ifp, m);
		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	ifp->if_flags |= IFF_OACTIVE;
	r->r_prod = firstprod;
	r->r_cnt = firstcnt;
	return;
}

/*
 * Handle simple commands sent to the typhoon
 */
static int
txp_command(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2,
    u_int32_t in3, u_int16_t *out1, u_int32_t *out2, u_int32_t *out3,
    int wait)
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = rsp->rsp_par1;
	if (out2 != NULL)
		*out2 = rsp->rsp_par2;
	if (out3 != NULL)
		*out3 = rsp->rsp_par3;
	kfree(rsp, M_DEVBUF);
	return (0);
}

static int
txp_command2(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2,
    u_int32_t in3, struct txp_ext_desc *in_extp, u_int8_t in_extn,
    struct txp_rsp_desc **rspp, int wait)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	u_int32_t idx, i;
	u_int16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		if_printf(&sc->sc_arpcom.ac_if, "no free cmd descriptors\n");
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	cmd->cmd_seq = seq = sc->sc_seq++;
	cmd->cmd_id = id;
	cmd->cmd_par1 = in1;
	cmd->cmd_par2 = in2;
	cmd->cmd_par3 = in3;
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

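	/*
	 * Advance past the command descriptor, append any extension
	 * descriptors, and wrap at the end of the command ring.
	 */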
	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;

	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);

	if (!wait)
		return (0);

	for (i = 0; i < 10000; i++) {
		idx = hv->hv_resp_read_idx;
		if (idx != hv->hv_resp_write_idx) {
			*rspp = NULL;
			if (txp_response(sc, idx, id, seq, rspp))
				return (-1);
			if (*rspp != NULL)
				break;
		}
		DELAY(50);
	}
	if (i == 10000 || (*rspp) == NULL) {
		if_printf(&sc->sc_arpcom.ac_if, "0x%x command failed\n", id);
		return (-1);
	}

	return (0);
}

static int
txp_response(struct txp_softc *sc, u_int32_t ridx, u_int16_t id, u_int16_t seq,
    struct txp_rsp_desc **rspp)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != hv->hv_resp_write_idx) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == rsp->rsp_id && rsp->rsp_seq == seq) {
			*rspp = (struct txp_rsp_desc *)kmalloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_INTWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			if_printf(&sc->sc_arpcom.ac_if, "response error!\n");
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = hv->hv_resp_read_idx;
			continue;
		}

		switch (rsp->rsp_id) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			if_printf(&sc->sc_arpcom.ac_if, "hello\n");
			break;
		default:
			if_printf(&sc->sc_arpcom.ac_if, "unknown id(0x%x)\n",
			    rsp->rsp_id);
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = hv->hv_resp_read_idx;
		hv->hv_resp_read_idx = ridx;
	}

	return (0);
}

static void
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
    struct txp_rsp_desc *dst)
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = hv->hv_resp_read_idx;

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx;
	}

	hv->hv_resp_read_idx = ridx;
}

static int
txp_cmd_desc_numfree(struct txp_softc *sc)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_boot_record *br = sc->sc_boot;
	u_int32_t widx, ridx, nfree;

	widx = sc->sc_cmdring.lastwrite;
	ridx = hv->hv_cmd_read_idx;

	if (widx == ridx) {
		/* Ring is completely free */
		nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = br->br_cmd_siz -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}

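/*
 * Mark the interface down: cancel the statistics callout, tell the
 * firmware to disable TX and RX, and free the posted receive buffers.
 */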
static void
txp_stop(struct txp_softc *sc)
{
	struct ifnet *ifp;

	ifp = &sc->sc_arpcom.ac_if;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	callout_stop(&sc->txp_stat_timer);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	txp_rxring_empty(sc);

	return;
}

static void
txp_watchdog(struct ifnet *ifp)
{
	return;
}

static int
txp_ifmedia_upd(struct ifnet *ifp)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t new_xcvr;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else
		return (EINVAL);

	/* nothing to do */
	if (sc->sc_xcvr == new_xcvr)
		return (0);

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, 0);
	sc->sc_xcvr = new_xcvr;

	return (0);
}

static void
txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

#ifdef TXP_DEBUG
static void
txp_show_descriptor(void *d)
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		kprintf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
		    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		kprintf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq,
		    rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3);
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		kprintf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		kprintf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		kprintf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
		    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
		break;
	}
}
#endif

static void
txp_set_filter(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t filter;
	struct ifmultiaddr *ifma;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI) {
		filter |= TXP_RXFILT_ALLMULTI;
	} else {
		uint32_t hashbit, hash[2];
		int mcnt = 0;

		hash[0] = hash[1] = 0;

		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mcnt++;
			hashbit = (uint16_t)(ether_crc32_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN) & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}

static void
txp_capabilities(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;
	ifp->if_capabilities = 0;

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist |= CSUM_IP;
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
#if 0
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
#endif
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_HWCSUM;
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
#if 0
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
#endif
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		kfree(rsp, M_DEVBUF);

	return;
}