/*	$OpenBSD: if_xge.c,v 1.80 2020/12/12 11:48:53 jan Exp $	*/
/*	$NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $	*/

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Neterion Xframe Ten Gigabit Ethernet controller.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <sys/lock.h>

#include <dev/pci/if_xgereg.h>

/* Xframe chipset revisions */
#define XGE_TYPE_XENA		1	/* Xframe */
#define XGE_TYPE_HERC		2	/* Xframe-II */

#define XGE_PCISIZE_XENA	26
#define XGE_PCISIZE_HERC	64

/*
 * Some tunable constants, tune with care!
 */
#define RX_MODE		RX_MODE_1 /* Receive mode (buffer usage, see below) */
#define NRXDESCS	1016	  /* # of receive descriptors (requested) */
#define NTXDESCS	2048	  /* Number of transmit descriptors */
#define NTXFRAGS	100	  /* Max fragments per packet */

/*
 * Receive buffer modes; 1, 3 or 5 buffers.
 */
#define RX_MODE_1 1
#define RX_MODE_3 3
#define RX_MODE_5 5

/*
 * Use clever macros to avoid a bunch of #ifdef's.
 */
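/*
 * For example, with RX_MODE == RX_MODE_1 the CONCAT3 expansions below
 * yield NDESC_1BUFMODE and rxd1_4k (names presumably supplied by
 * if_xgereg.h), so one definition serves all three buffer modes.
 */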
#define XCONCAT3(x,y,z) x ## y ## z
#define CONCAT3(x,y,z) XCONCAT3(x,y,z)
#define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
#define rxd_4k CONCAT3(rxd,RX_MODE,_4k)
/* XXX */
#if 0
#define rxdesc ___CONCAT(rxd,RX_MODE)
#endif
#define rxdesc rxd1

#define NEXTTX(x)	(((x)+1) % NTXDESCS)
#define NRXFRAGS	RX_MODE /* hardware imposed frags */
#define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
#define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
#define RXMAPSZ		(NRXPAGES*PAGE_SIZE)

/*
 * Magic to fix a bug when the MAC address cannot be read correctly.
 * This came from the Linux driver.
 */
static const uint64_t xge_fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
};

/*
 * Constants to be programmed into Hercules's registers, to configure
 * the XGXS transceiver.
 */
static const uint64_t xge_herc_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,

	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,

	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,

	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
};

static const uint64_t xge_xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,

	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,

	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
};
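
/*
 * Both tables are opaque transceiver settings, written verbatim to the
 * DTX_CONTROL register by xge_setup_xgxs_herc() and xge_setup_xgxs_xena()
 * below.
 */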

struct xge_softc {
	struct device sc_dev;
	struct arpcom sc_arpcom;
	struct ifmedia xena_media;

	void *sc_ih;

	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_st;
	bus_space_handle_t sc_sh;
	bus_space_tag_t sc_txt;
	bus_space_handle_t sc_txh;

	pcireg_t sc_pciregs[16];

	int xge_type;			/* chip type */
	int xge_if_flags;

	/* Transmit structures */
	struct txd *sc_txd[NTXDESCS];	/* transmit frags array */
	bus_addr_t sc_txdp[NTXDESCS];	/* dva of transmit frags */
	bus_dmamap_t sc_txm[NTXDESCS];	/* transmit frags map */
	struct mbuf *sc_txb[NTXDESCS];	/* transmit mbuf pointer */
	int sc_nexttx, sc_lasttx;
	bus_dmamap_t sc_txmap;		/* transmit descriptor map */

	/* Receive data */
	bus_dmamap_t sc_rxmap;		/* receive descriptor map */
	struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* receive desc pages */
	bus_dmamap_t sc_rxm[NRXREAL];	/* receive buffer map */
	struct mbuf *sc_rxb[NRXREAL];	/* mbufs on rx descriptors */
	int sc_nextrx;			/* next descriptor to check */
};

#ifdef XGE_DEBUG
#define DPRINTF(x)	do { if (xgedebug) printf x ; } while (0)
#define DPRINTFN(n,x)	do { if (xgedebug >= (n)) printf x ; } while (0)
int xgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

int xge_match(struct device *, void *, void *);
void xge_attach(struct device *, struct device *, void *);
int xge_alloc_txmem(struct xge_softc *);
int xge_alloc_rxmem(struct xge_softc *);
void xge_start(struct ifnet *);
void xge_stop(struct ifnet *, int);
int xge_add_rxbuf(struct xge_softc *, int);
void xge_setmulti(struct xge_softc *);
void xge_setpromisc(struct xge_softc *);
int xge_setup_xgxs_xena(struct xge_softc *);
int xge_setup_xgxs_herc(struct xge_softc *);
int xge_ioctl(struct ifnet *, u_long, caddr_t);
int xge_init(struct ifnet *);
void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
int xge_xgmii_mediachange(struct ifnet *);
void xge_enable(struct xge_softc *);
int xge_intr(void *);

/*
 * Helpers to address registers.
 */
#define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
#define PIF_RCSR(csr)		pif_rcsr(sc, csr)
#define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
#define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)

static inline void
pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
#else
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
#else
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
#endif
#endif
}

static inline uint64_t
pif_rcsr(struct xge_softc *sc, bus_size_t csr)
{
	uint64_t val;
#if defined(__LP64__)
	val = bus_space_read_raw_8(sc->sc_st, sc->sc_sh, csr);
#else
	uint64_t val2;

	val = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr);
	val2 = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr+4);
#if BYTE_ORDER == LITTLE_ENDIAN
	val |= (val2 << 32);
#else
	val = (val << 32 | val2);
#endif
#endif
	return (val);
}

static inline void
txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_txt, sc->sc_txh, csr, val);
#else
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, lval);
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
#else
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, hval);
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, lval);
#endif
#endif
}

static inline void
pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
#if defined(__LP64__)
	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

	bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
#else
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
#else
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
#endif

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
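	/*
	 * The key rewrite above re-arms the keyed access for the second
	 * 32-bit half; the chip apparently consumes the key on every
	 * partial write.
	 */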
#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
#else
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
#endif
#endif
}

struct cfattach xge_ca = {
	sizeof(struct xge_softc), xge_match, xge_attach
};

struct cfdriver xge_cd = {
	NULL, "xge", DV_IFNET
};

#define XNAME sc->sc_dev.dv_xname

#define XGE_RXSYNC(desc, what) \
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
	(desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
	(desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
#define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
	r4_rxd[desc%NDESC_BUFMODE]

/*
 * Non-tunable constants.
 */
#define XGE_MAX_FRAMELEN	9622
#define XGE_MAX_MTU		(XGE_MAX_FRAMELEN - ETHER_HDR_LEN - \
				 ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)

const struct pci_matchid xge_devices[] = {
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME_2 }
};

int
xge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
	    nitems(xge_devices)));
}

void
xge_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct xge_softc *sc;
	struct ifnet *ifp;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint64_t val;
	int i;

	sc = (struct xge_softc *)self;

	sc->sc_dmat = pa->pa_dmat;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETERION_XFRAME)
		sc->xge_type = XGE_TYPE_XENA;
	else
		sc->xge_type = XGE_TYPE_HERC;

	/* Get BAR0 address */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
		printf(": unable to map PIF BAR registers\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
	    &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
		printf(": unable to map TXP BAR registers\n");
		return;
	}

	if (sc->xge_type == XGE_TYPE_XENA) {
		/* Save PCI config space */
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
	}

#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#endif
	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		printf(": failed configuring endian (read), %llx != %llx!\n",
		    (unsigned long long)val, SWAPPER_MAGIC);
	}

	PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
	if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
		printf(": failed configuring endian (write), %llx != %llx!\n",
		    (unsigned long long)val, SWAPPER_MAGIC);
	}

	/*
	 * Fix for all "FFs" MAC address problems observed on
	 * Alpha platforms. Not needed for Herc.
	 */
	if (sc->xge_type == XGE_TYPE_XENA) {
		/*
		 * The MAC addr may be all FF's, which is not good.
		 * Resolve it by writing some magics to GPIO_CONTROL and
		 * force a chip reset to read in the serial eeprom again.
		 */
		for (i = 0; i < nitems(xge_fix_mac); i++) {
			PIF_WCSR(GPIO_CONTROL, xge_fix_mac[i]);
			PIF_RCSR(GPIO_CONTROL);
		}

		/*
		 * Reset the chip and restore the PCI registers.
		 */
		PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
		DELAY(500000);
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);

		/*
		 * Restore the byte order registers.
		 */
#if BYTE_ORDER == LITTLE_ENDIAN
		val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
		val &= ~(TxF_R_SE|RxF_W_SE);
		PIF_WCSR(SWAPPER_CTRL, val);
		PIF_WCSR(SWAPPER_CTRL, val);
#endif

		if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
			printf(": failed configuring endian2 (read), %llx != %llx!\n",
			    (unsigned long long)val, SWAPPER_MAGIC);
			return;
		}

		PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
		if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
			printf(": failed configuring endian2 (write), %llx != %llx!\n",
			    (unsigned long long)val, SWAPPER_MAGIC);
			return;
		}
	}
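
	/*
	 * The bare step numbers in the comments below ("29, ...",
	 * "12, ..." and so on) appear to refer to the numbered
	 * initialization steps of the Xframe programming manual.
	 */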

	/*
	 * XGXS initialization.
	 */

	/*
	 * For Herc, bring EOI out of reset before XGXS.
	 */
	if (sc->xge_type == XGE_TYPE_HERC) {
		val = PIF_RCSR(SW_RESET);
		val &= 0xffff00ffffffffffULL;
		PIF_WCSR(SW_RESET,val);
		delay(1000*1000);	/* wait for 1 sec */
	}

	/* 29, Bring adapter out of reset */
	val = PIF_RCSR(SW_RESET);
	val &= 0xffffff00ffffffffULL;
	PIF_WCSR(SW_RESET, val);
	DELAY(500000);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for Xframe-II.
	 */
	if (sc->xge_type == XGE_TYPE_HERC){
		for (i = 0; i < 50; i++) {
			val = PIF_RCSR(ADAPTER_STATUS);
			if (!(val & RIC_RUNNING))
				break;
			delay(20*1000);
		}

		if (i == 50) {
			printf(": not safe to access registers\n");
			return;
		}
	}

	/* 30, configure XGXS transceiver */
	if (sc->xge_type == XGE_TYPE_XENA)
		xge_setup_xgxs_xena(sc);
	else if(sc->xge_type == XGE_TYPE_HERC)
		xge_setup_xgxs_herc(sc);

	/* 33, program MAC address (not needed here) */
	/* Get ethernet address */
	PIF_WCSR(RMAC_ADDR_CMD_MEM,
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));

	/*
	 * Get memory for transmit descriptor lists.
	 */
	if (xge_alloc_txmem(sc)) {
		printf(": failed allocating txmem.\n");
		return;
	}

	/* 9 and 10 - set FIFO number/prio */
	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
	PIF_WCSR(TX_FIFO_P1, 0ULL);
	PIF_WCSR(TX_FIFO_P2, 0ULL);
	PIF_WCSR(TX_FIFO_P3, 0ULL);

	/* 11, XXX set round-robin prio? */

	/* 12, enable transmit FIFO */
	val = PIF_RCSR(TX_FIFO_P0);
	val |= TX_FIFO_ENABLE;
	PIF_WCSR(TX_FIFO_P0, val);

	/* 13, disable some error checks */
	PIF_WCSR(TX_PA_CFG,
	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);

	/* Create transmit DMA maps */
	for (i = 0; i < NTXDESCS; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NTXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txm[i])) {
			printf(": cannot create TX DMA maps\n");
			return;
		}
	}

	sc->sc_lasttx = NTXDESCS-1;

	/*
	 * RxDMA initialization.
	 * Only use one out of 8 possible receive queues.
	 */
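	/*
	 * The descriptors live in 4KB pages chained through their
	 * r4_next fields (set up in xge_alloc_rxmem()), so the chip
	 * only ever needs the ring's start address.
	 */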
	/* allocate rx descriptor memory */
	if (xge_alloc_rxmem(sc)) {
		printf(": failed allocating rxmem\n");
		return;
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < NRXREAL; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NRXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxm[i])) {
			printf(": cannot create RX DMA maps\n");
			return;
		}
	}

	/* allocate mbufs to receive descriptors */
	for (i = 0; i < NRXREAL; i++)
		if (xge_add_rxbuf(sc, i))
			panic("out of mbufs too early");

	/* 14, setup receive ring priority */
	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL);	/* only use one ring */

	/* 15, setup receive ring round-robin calendar */
	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL);	/* only use one ring */
	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);

	/* 16, write receive ring start address */
	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
	/* PRC_RXD0_[1-7] are not used */

	/* 17, Setup alarm registers */
	PIF_WCSR(PRC_ALARM_ACTION, 0ULL);	/* Default everything to retry */

	/* 18, init receive ring controller */
#if RX_MODE == RX_MODE_1
	val = RING_MODE_1;
#elif RX_MODE == RX_MODE_3
	val = RING_MODE_3;
#else /* RX_MODE == RX_MODE_5 */
	val = RING_MODE_5;
#endif
	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
	/* leave 1-7 disabled */
	/* XXXX snoop configuration? */

	/* 19, set chip memory assigned to the queue */
	if (sc->xge_type == XGE_TYPE_XENA) {
		/* all 64M to queue 0 */
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));
	} else {
		/* all 32M to queue 0 */
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 32));
	}

	/* 20, setup RLDRAM parameters */
	/* do not touch it for now */

	/* 21, setup pause frame thresholds */
	/* do not touch the defaults */
	/* XXX - must 0xff be written as stated in the manual? */

	/* 22, configure RED */
	/* we do not want to drop packets, so ignore */

	/* 23, initiate RLDRAM */
	val = PIF_RCSR(MC_RLDRAM_MRS);
	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
	PIF_WCSR(MC_RLDRAM_MRS, val);
	DELAY(1000);

	/*
	 * Setup interrupt policies.
	 */
	/* 40, Transmit interrupts */
	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
	PIF_WCSR(TTI_DATA2_MEM,
	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
		;

	/* 41, Receive interrupts */
	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
	PIF_WCSR(RTI_DATA2_MEM,
	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
		;

	/*
	 * Setup media stuff.
	 */
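	/*
	 * There is no selectable PHY; a single fixed 10G media type is
	 * advertised and the mediachange callback is a no-op.
	 */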
	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
	    xge_ifmedia_status);
	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_SR);

	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xge_ioctl;
	ifp->if_start = xge_start;
	ifp->if_hardmtu = XGE_MAX_MTU;
	ifq_set_maxlen(&ifp->if_snd, NTXDESCS - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/*
	 * Setup interrupt vector before initializing.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	if ((sc->sc_ih =
	    pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
		printf(": unable to establish interrupt at %s\n",
		    intrstr ? intrstr : "<unknown>");
		return;
	}
	printf(": %s, address %s\n", intrstr, ether_sprintf(enaddr));
}

void
xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t reg;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;

	reg = PIF_RCSR(ADAPTER_STATUS);
	if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;
}

int
xge_xgmii_mediachange(struct ifnet *ifp)
{
	return (0);
}

void
xge_enable(struct xge_softc *sc)
{
	uint64_t val;

	/* 2, enable adapter */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* 3, light the card enable led */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= LED_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);
#ifdef XGE_DEBUG
	printf("%s: link up\n", XNAME);
#endif
}

int
xge_init(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;
	int s;

	s = splnet();

	/*
	 * Cancel any pending I/O
	 */
	xge_stop(ifp, 0);

	/* 31+32, setup MAC config */
	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM);

	DELAY(1000);

	/* 54, ensure that the adapter is 'quiescent' */
	val = PIF_RCSR(ADAPTER_STATUS);
	if ((val & QUIESCENT) != QUIESCENT) {
#if 0
		char buf[200];
#endif
		printf("%s: adapter not quiescent, aborting\n", XNAME);
		val = (val & QUIESCENT) ^ QUIESCENT;
#if 0
		bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
#endif
		splx(s);
		return (1);
	}

	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
		/* disable VLAN tag stripping */
		val = PIF_RCSR(RX_PA_CFG);
		val &= ~STRIP_VLAN_TAG;
		PIF_WCSR(RX_PA_CFG, val);
	}

	/* set MRU */
	PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(XGE_MAX_FRAMELEN));

	/* 56, enable the transmit laser */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= EOI_TX_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);

	xge_enable(sc);

	/*
	 * Enable all interrupts
	 */
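	/* A zero written to a mask register unmasks every source in it. */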
	PIF_WCSR(TX_TRAFFIC_MASK, 0);
	PIF_WCSR(RX_TRAFFIC_MASK, 0);
	PIF_WCSR(TXPIC_INT_MASK, 0);
	PIF_WCSR(RXPIC_INT_MASK, 0);

	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
	PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
	PIF_WCSR(GENERAL_INT_MASK, 0);

	xge_setpromisc(sc);

	xge_setmulti(sc);

	/* Done... */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	return (0);
}

void
xge_stop(struct ifnet *ifp, int disable)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	val = PIF_RCSR(ADAPTER_CONTROL);
	val &= ~ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
		;
}

int
xge_intr(void *pv)
{
	struct xge_softc *sc = pv;
	struct txd *txd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	bus_dmamap_t dmp;
	uint64_t val;
	int i, lasttx, plen;

	val = PIF_RCSR(GENERAL_INT_STATUS);
	if (val == 0)
		return (0); /* no interrupt here */

	PIF_WCSR(GENERAL_INT_STATUS, val);

	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
		/* Wait for quiescence */
#ifdef XGE_DEBUG
		printf("%s: link down\n", XNAME);
#endif
		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
			;
		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);

		val = PIF_RCSR(ADAPTER_STATUS);
		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
			xge_enable(sc); /* Only if link restored */
	}

	if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
		PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
	/*
	 * Collect sent packets.
	 */
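	/*
	 * Walk the ring from lasttx+1 towards nexttx and stop at the
	 * first descriptor the NIC still owns (TXD_CTL1_OWN set).
	 */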
	lasttx = sc->sc_lasttx;
	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
		txd = sc->sc_txd[i];
		dmp = sc->sc_txm[i];

		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
		    dmp->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (txd->txd_control1 & TXD_CTL1_OWN) {
			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, dmp);
		m_freem(sc->sc_txb[i]);
		sc->sc_lasttx = i;
	}

	if (sc->sc_lasttx != lasttx)
		ifq_clr_oactive(&ifp->if_snd);

	/* Try to get more packets on the wire */
	xge_start(ifp);

	/* clear interrupt bits */
	if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
		PIF_WCSR(RX_TRAFFIC_INT, val);

	for (;;) {
		struct rxdesc *rxd;
		struct mbuf *m;

		XGE_RXSYNC(sc->sc_nextrx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxd = XGE_RXD(sc->sc_nextrx);
		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
			break;
		}

		/* got a packet */
		m = sc->sc_rxb[sc->sc_nextrx];
#if RX_MODE == RX_MODE_1
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
#elif RX_MODE == RX_MODE_3
#error Fix rxmodes in xge_intr
#elif RX_MODE == RX_MODE_5
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_len =
		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
		plen += m->m_next->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
#endif
		m->m_pkthdr.len = plen;

		val = rxd->rxd_control1;

		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
			/* Failed, recycle this mbuf */
#if RX_MODE == RX_MODE_1
			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
			rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
#endif
			XGE_RXSYNC(sc->sc_nextrx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			ifp->if_ierrors++;
			break;
		}

		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_IPv4)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP)
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP)
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_VLAN) {
			m->m_pkthdr.ether_vtag =
			    RXD_CTL2_VLANTAG(rxd->rxd_control2);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		if (++sc->sc_nextrx == NRXREAL)
			sc->sc_nextrx = 0;
	}

	if_input(ifp, &ml);

	return (1);
}

int
xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xge_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->xge_if_flags) &
			     IFF_PROMISC) {
				xge_setpromisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					xge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xge_stop(ifp, 1);
		}
		sc->xge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
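
/*
 * Program the RMAC address CAM.  Slot 0 holds the station address;
 * multicast groups fill the following slots and the rest are padded
 * with the broadcast address.  A range or too many groups falls back
 * to ALLMULTI.
 */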
void
xge_setmulti(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1; /* first slot used for card unicast address */
	uint64_t val;

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}
	/* set the remaining entries to the broadcast address */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Just receive everything with the multicast bit set */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}

void
xge_setpromisc(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t val;

	val = PIF_RCSR(MAC_CFG);

	if (ifp->if_flags & IFF_PROMISC)
		val |= RMAC_PROM_EN;
	else
		val &= ~RMAC_PROM_EN;

	PIF_WCSR(MAC_CFG, val);
}

void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL; /* XXX - gcc */
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, i;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	par = lcr = 0;
	for (;;) {
		if (sc->sc_nexttx == sc->sc_lasttx) {
			ifq_set_oactive(&ifp->if_snd);
			break;	/* No more space */
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;	/* out of packets */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];

		switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		case 0:
			break;
		case EFBIG:
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
			    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
				break;
		default:
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
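		/*
		 * One descriptor was filled per fragment above; flag
		 * the first one OWN plus first-in-list and, further
		 * down, the last one last-in-list so the NIC sees a
		 * complete gather list.
		 */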
		ntxd = txd - sc->sc_txd[nexttx] - 1;
		txd = sc->sc_txd[nexttx];
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txd->txd_control2 |= TXD_CTL2_VLANE;
			txd->txd_control2 |=
			    TXD_CTL2_VLANT(m->m_pkthdr.ether_vtag);
		}
#endif

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CUDP;

		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		par = sc->sc_txdp[nexttx];
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}

/*
 * Allocate DMA memory for transmit descriptor fragments.
 * Only one map is used for all descriptors.
 */
int
xge_alloc_txmem(struct xge_softc *sc)
{
	struct txd *txp;
	bus_dma_segment_t seg;
	bus_addr_t txdp;
	caddr_t kva;
	int i, rseg, state;

#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_txmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup transmit array pointers */
	txp = (struct txd *)kva;
	txdp = seg.ds_addr;
	for (i = 0; i < NTXDESCS; i++) {
		sc->sc_txd[i] = txp;
		sc->sc_txdp[i] = txdp;
		txp += NTXFRAGS;
		txdp += (NTXFRAGS * sizeof(struct txd));
	}

	return (0);

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}

/*
 * Allocate DMA memory for the receive descriptors; only one map is
 * used for all of them.  Link the receive descriptor pages together.
 */
int
xge_alloc_rxmem(struct xge_softc *sc)
{
	struct rxd_4k *rxpp;
	bus_dma_segment_t seg;
	caddr_t kva;
	int i, rseg, state;

	/* sanity check */
	if (sizeof(struct rxd_4k) != XGE_PAGE) {
		printf("bad compiler struct alignment, %d != %d\n",
		    (int)sizeof(struct rxd_4k), XGE_PAGE);
		return (EINVAL);
	}

	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup receive page link pointers */
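	/*
	 * The last page wraps back to the first, so the NIC can chase
	 * the ring indefinitely from the single start address written
	 * to PRC_RXD0_0 at attach time.
	 */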
	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
		sc->sc_rxd_4k[i] = rxpp;
		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
	}
	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;

	return (0);

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}


/*
 * Add a new mbuf chain to descriptor id.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/*
	 * Allocate mbufs.
	 * Currently five mbufs and two clusters are used,
	 * the hardware will put (ethernet, ip, tcp/udp) headers in
	 * their own buffer and the clusters are only used for data.
	 */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return (ENOBUFS);
	MCLGETL(m[0], M_DONTWAIT, XGE_MAX_FRAMELEN + ETHER_ALIGN);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return (ENOBUFS);
	}
	m[0]->m_len = m[0]->m_pkthdr.len = XGE_MAX_FRAMELEN + ETHER_ALIGN;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Out of something */
		for (i = 0; i < 5; i++)
			m_free(m[i]);
		return (ENOBUFS);
	}
	/* Link'em together */
	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];
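
	/*
	 * The buffer was allocated ETHER_ALIGN bytes too large above;
	 * trimming them off the front leaves the IP header 32-bit
	 * aligned after the 14-byte ethernet header.
	 */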
	m_adj(m[0], ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return (error);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * This magic comes from the FreeBSD driver.
 */
int
xge_setup_xgxs_xena(struct xge_softc *sc)
{
	int i;

	for (i = 0; i < nitems(xge_xena_dtx_cfg); i++) {
		PIF_WCSR(DTX_CONTROL, xge_xena_dtx_cfg[i]);
		DELAY(100);
	}

	return (0);
}

int
xge_setup_xgxs_herc(struct xge_softc *sc)
{
	int i;

	for (i = 0; i < nitems(xge_herc_dtx_cfg); i++) {
		PIF_WCSR(DTX_CONTROL, xge_herc_dtx_cfg[i]);
		DELAY(100);
	}

	return (0);
}