/*	$OpenBSD: if_cad.c,v 1.13 2023/08/15 08:27:30 miod Exp $	*/

/*
 * Copyright (c) 2021-2022 Visa Hankala
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Cadence 10/100/Gigabit Ethernet device.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/mutex.h>
#include <sys/kstat.h>
#include <sys/rwlock.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>

#define GEM_NETCTL			0x0000
#define GEM_NETCTL_DPRAM		(1 << 18)
#define GEM_NETCTL_STARTTX		(1 << 9)
#define GEM_NETCTL_STATCLR		(1 << 5)
#define GEM_NETCTL_MDEN			(1 << 4)
#define GEM_NETCTL_TXEN			(1 << 3)
#define GEM_NETCTL_RXEN			(1 << 2)
#define GEM_NETCFG			0x0004
#define GEM_NETCFG_SGMIIEN		(1 << 27)
#define GEM_NETCFG_RXCSUMEN		(1 << 24)
#define GEM_NETCFG_MDCCLKDIV_MASK	(0x7 << 18)
#define GEM_NETCFG_MDCCLKDIV_SHIFT	18
#define GEM_NETCFG_FCSREM		(1 << 17)
#define GEM_NETCFG_RXOFFS_MASK		(0x3 << 14)
#define GEM_NETCFG_RXOFFS_SHIFT		14
#define GEM_NETCFG_PCSSEL		(1 << 11)
#define GEM_NETCFG_1000			(1 << 10)
#define GEM_NETCFG_1536RXEN		(1 << 8)
#define GEM_NETCFG_UCASTHASHEN		(1 << 7)
#define GEM_NETCFG_MCASTHASHEN		(1 << 6)
#define GEM_NETCFG_BCASTDI		(1 << 5)
#define GEM_NETCFG_COPYALL		(1 << 4)
#define GEM_NETCFG_FDEN			(1 << 1)
#define GEM_NETCFG_100			(1 << 0)
#define GEM_NETSR			0x0008
#define GEM_NETSR_PHY_MGMT_IDLE		(1 << 2)
#define GEM_DMACR			0x0010
#define GEM_DMACR_DMA64			(1 << 30)
#define GEM_DMACR_AHBDISC		(1 << 24)
#define GEM_DMACR_RXBUF_MASK		(0xff << 16)
#define GEM_DMACR_RXBUF_SHIFT		16
#define GEM_DMACR_TXCSUMEN		(1 << 11)
#define GEM_DMACR_TXSIZE		(1 << 10)
#define GEM_DMACR_RXSIZE_MASK		(0x3 << 8)
#define GEM_DMACR_RXSIZE_8K		(0x3 << 8)
#define GEM_DMACR_ES_PDATA		(1 << 7)
#define GEM_DMACR_ES_DESCR		(1 << 6)
#define GEM_DMACR_BLEN_MASK		(0x1f << 0)
#define GEM_DMACR_BLEN_16		(0x10 << 0)
#define GEM_TXSR			0x0014
#define GEM_TXSR_TXGO			(1 << 3)
#define GEM_RXQBASE			0x0018
#define GEM_TXQBASE			0x001c
#define GEM_RXSR			0x0020
#define GEM_RXSR_RXOVR			(1 << 2)
#define GEM_ISR				0x0024
#define GEM_IER				0x0028
#define GEM_IDR				0x002c
#define GEM_IXR_HRESP			(1 << 11)
#define GEM_IXR_RXOVR			(1 << 10)
#define GEM_IXR_TXDONE			(1 << 7)
#define GEM_IXR_TXURUN			(1 << 6)
#define GEM_IXR_RETRY			(1 << 5)
#define GEM_IXR_TXUSED			(1 << 3)
#define GEM_IXR_RXUSED			(1 << 2)
#define GEM_IXR_RXDONE			(1 << 1)
#define GEM_PHYMNTNC			0x0034
#define GEM_PHYMNTNC_CLAUSE_22		(1 << 30)
#define GEM_PHYMNTNC_OP_READ		(0x2 << 28)
#define GEM_PHYMNTNC_OP_WRITE		(0x1 << 28)
#define GEM_PHYMNTNC_ADDR_MASK		(0x1f << 23)
#define GEM_PHYMNTNC_ADDR_SHIFT		23
#define GEM_PHYMNTNC_REG_MASK		(0x1f << 18)
#define GEM_PHYMNTNC_REG_SHIFT		18
#define GEM_PHYMNTNC_MUST_10		(0x2 << 16)
#define GEM_PHYMNTNC_DATA_MASK		0xffff
#define GEM_HASHL			0x0080
#define GEM_HASHH			0x0084
#define GEM_LADDRL(i)			(0x0088 + (i) * 8)
#define GEM_LADDRH(i)			(0x008c + (i) * 8)
#define GEM_LADDRNUM			4
#define GEM_MID				0x00fc
#define GEM_MID_VERSION_MASK		(0xfff << 16)
#define GEM_MID_VERSION_SHIFT		16
#define GEM_OCTTXL			0x0100
#define GEM_OCTTXH			0x0104
#define GEM_TXCNT			0x0108
#define GEM_TXBCCNT			0x010c
#define GEM_TXMCCNT			0x0110
#define GEM_TXPAUSECNT			0x0114
#define GEM_TX64CNT			0x0118
#define GEM_TX65CNT			0x011c
#define GEM_TX128CNT			0x0120
#define GEM_TX256CNT			0x0124
#define GEM_TX512CNT			0x0128
#define GEM_TX1024CNT			0x012c
#define GEM_TXURUNCNT			0x0134
#define GEM_SNGLCOLLCNT			0x0138
#define GEM_MULTICOLLCNT		0x013c
#define GEM_EXCESSCOLLCNT		0x0140
#define GEM_LATECOLLCNT			0x0144
#define GEM_TXDEFERCNT			0x0148
#define GEM_TXCSENSECNT			0x014c
#define GEM_OCTRXL			0x0150
#define GEM_OCTRXH			0x0154
#define GEM_RXCNT			0x0158
#define GEM_RXBROADCNT			0x015c
#define GEM_RXMULTICNT			0x0160
#define GEM_RXPAUSECNT			0x0164
#define GEM_RX64CNT			0x0168
#define GEM_RX65CNT			0x016c
#define GEM_RX128CNT			0x0170
#define GEM_RX256CNT			0x0174
#define GEM_RX512CNT			0x0178
#define GEM_RX1024CNT			0x017c
#define GEM_RXUNDRCNT			0x0184
#define GEM_RXOVRCNT			0x0188
#define GEM_RXJABCNT			0x018c
#define GEM_RXFCSCNT			0x0190
#define GEM_RXLENGTHCNT			0x0194
#define GEM_RXSYMBCNT			0x0198
#define GEM_RXALIGNCNT			0x019c
#define GEM_RXRESERRCNT			0x01a0
#define GEM_RXORCNT			0x01a4
#define GEM_RXIPCCNT			0x01a8
#define GEM_RXTCPCCNT			0x01ac
#define GEM_RXUDPCCNT			0x01b0
#define GEM_CFG6			0x0294
#define GEM_CFG6_DMA64			(1 << 23)
#define GEM_CFG6_PRIQ_MASK(x)		((x) & 0xffff)
#define GEM_CFG8			0x029c
#define GEM_CFG8_NUM_TYPE1_SCR(x)	(((x) >> 24) & 0xff)
#define GEM_CFG8_NUM_TYPE2_SCR(x)	(((x) >> 16) & 0xff)
#define GEM_TXQ1BASE(i)			(0x0440 + (i) * 4)
#define GEM_TXQ1BASE_DISABLE		(1 << 0)
#define GEM_RXQ1BASE(i)			(0x0480 + (i) * 4)
#define GEM_RXQ1BASE_DISABLE		(1 << 0)
#define GEM_TXQBASEHI			0x04c8
#define GEM_RXQBASEHI			0x04d4
#define GEM_SCR_TYPE1(i)		(0x0500 + (i) * 4)
#define GEM_SCR_TYPE2(i)		(0x0540 + (i) * 4)
#define GEM_RXQ8BASE(i)			(0x05c0 + (i) * 4)
#define GEM_RXQ8BASE_DISABLE		(1 << 0)

#define GEM_MAX_PRIQ			16

#define GEM_CLK_TX			"tx_clk"

struct cad_buf {
	bus_dmamap_t		bf_map;
	struct mbuf		*bf_m;
};

struct cad_dmamem {
	bus_dmamap_t		cdm_map;
	bus_dma_segment_t	cdm_seg;
	size_t			cdm_size;
	caddr_t			cdm_kva;
};

struct cad_desc32 {
	uint32_t		d_addr;
	uint32_t		d_status;
};

struct cad_desc64 {
	uint32_t		d_addrlo;
	uint32_t		d_status;
	uint32_t		d_addrhi;
	uint32_t		d_unused;
};

#define GEM_RXD_ADDR_WRAP	(1 << 1)
#define GEM_RXD_ADDR_USED	(1 << 0)

#define GEM_RXD_BCAST		(1U << 31)
#define GEM_RXD_MCAST		(1 << 30)
#define GEM_RXD_UCAST		(1 << 29)
#define GEM_RXD_SPEC		(1 << 27)
#define GEM_RXD_SPEC_MASK	(0x3 << 25)
#define GEM_RXD_CSUM_MASK	(0x3 << 22)
#define GEM_RXD_CSUM_UDP_OK	(0x3 << 22)
#define GEM_RXD_CSUM_TCP_OK	(0x2 << 22)
#define GEM_RXD_CSUM_IP_OK	(0x1 << 22)
#define GEM_RXD_VLANTAG		(1 << 21)
#define GEM_RXD_PRIOTAG		(1 << 20)
#define GEM_RXD_CFI		(1 << 16)
#define GEM_RXD_EOF		(1 << 15)
#define GEM_RXD_SOF		(1 << 14)
#define GEM_RXD_BADFCS		(1 << 13)
#define GEM_RXD_LEN_MASK	0x1fff

#define GEM_TXD_USED		(1U << 31)
#define GEM_TXD_WRAP		(1 << 30)
#define GEM_TXD_RLIMIT		(1 << 29)
#define GEM_TXD_CORRUPT		(1 << 27)
#define GEM_TXD_LCOLL		(1 << 26)
#define GEM_TXD_CSUMERR_MASK	(0x7 << 20)
#define GEM_TXD_NOFCS		(1 << 16)
#define GEM_TXD_LAST		(1 << 15)
#define GEM_TXD_LEN_MASK	0x3fff

#define CAD_NRXDESC		256

#define CAD_NTXDESC		256
#define CAD_NTXSEGS		16

enum cad_phy_mode {
	CAD_PHY_MODE_GMII,
	CAD_PHY_MODE_RGMII,
	CAD_PHY_MODE_RGMII_ID,
	CAD_PHY_MODE_RGMII_RXID,
	CAD_PHY_MODE_RGMII_TXID,
	CAD_PHY_MODE_SGMII,
};

struct cad_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih;
	int			sc_node;
	int			sc_phy_loc;
	enum cad_phy_mode	sc_phy_mode;
	unsigned char		sc_rxhang_erratum;
	unsigned char		sc_rxdone;
	unsigned char		sc_dma64;
	size_t			sc_descsize;
	uint32_t		sc_qmask;
	uint8_t			sc_ntype1scr;
	uint8_t			sc_ntype2scr;

	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct timeout		sc_tick;

	struct cad_dmamem	*sc_txring;
	struct cad_buf		*sc_txbuf;
	caddr_t			sc_txdesc;
	unsigned int		sc_tx_prod;
	unsigned int		sc_tx_cons;

	struct if_rxring	sc_rx_ring;
	struct cad_dmamem	*sc_rxring;
	struct cad_buf		*sc_rxbuf;
	caddr_t			sc_rxdesc;
	unsigned int		sc_rx_prod;
	unsigned int		sc_rx_cons;
	uint32_t		sc_netctl;

	struct rwlock		sc_cfg_lock;
	struct task		sc_statchg_task;
	uint32_t		sc_tx_freq;

	struct mutex		sc_kstat_mtx;
	struct kstat		*sc_kstat;
};

#define HREAD4(sc, reg) \
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

int	cad_match(struct device *, void *, void *);
void	cad_attach(struct device *, struct device *, void *);

int	cad_ioctl(struct ifnet *, u_long, caddr_t);
void	cad_start(struct ifqueue *);
void	cad_watchdog(struct ifnet *);

void	cad_reset(struct cad_softc *);
int	cad_up(struct cad_softc *);
void	cad_down(struct cad_softc *);
void	cad_iff(struct cad_softc *);
int	cad_intr(void *);
void	cad_tick(void *);
void	cad_statchg_task(void *);

int	cad_media_change(struct ifnet *);
void	cad_media_status(struct ifnet *, struct ifmediareq *);
int	cad_mii_readreg(struct device *, int, int);
void	cad_mii_writereg(struct device *, int, int, int);
void	cad_mii_statchg(struct device *);

struct cad_dmamem *cad_dmamem_alloc(struct cad_softc *, bus_size_t,
	    bus_size_t);
void	cad_dmamem_free(struct cad_softc *, struct cad_dmamem *);
void	cad_rxfill(struct cad_softc *);
void	cad_rxeof(struct cad_softc *);
void	cad_txeof(struct cad_softc *);
unsigned int	cad_encap(struct cad_softc *, struct mbuf *);
struct mbuf	*cad_alloc_mbuf(struct cad_softc *, bus_dmamap_t);

#if NKSTAT > 0
void	cad_kstat_attach(struct cad_softc *);
int	cad_kstat_read(struct kstat *);
void	cad_kstat_tick(void *);
#endif

#ifdef DDB
struct cad_softc *cad_sc[4];
#endif

const struct cfattach cad_ca = {
	sizeof(struct cad_softc), cad_match, cad_attach
};

struct cfdriver cad_cd = {
	NULL, "cad", DV_IFNET
};

const struct {
	const char		*name;
	enum cad_phy_mode	mode;
} cad_phy_modes[] = {
	{ "gmii",	CAD_PHY_MODE_GMII },
	{ "rgmii",	CAD_PHY_MODE_RGMII },
	{ "rgmii-id",	CAD_PHY_MODE_RGMII_ID },
	{ "rgmii-rxid",	CAD_PHY_MODE_RGMII_RXID },
	{ "rgmii-txid",	CAD_PHY_MODE_RGMII_TXID },
	{ "sgmii",	CAD_PHY_MODE_SGMII },
};

int
cad_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "cdns,gem") ||
	    OF_is_compatible(faa->fa_node, "cdns,macb") ||
	    OF_is_compatible(faa->fa_node, "sifive,fu540-c000-gem") ||
	    OF_is_compatible(faa->fa_node, "sifive,fu740-c000-gem"));
}

void
cad_attach(struct device *parent, struct device *self, void *aux)
{
	char phy_mode[16];
	struct fdt_attach_args *faa = aux;
	struct cad_softc *sc = (struct cad_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t hi, lo;
	uint32_t rev, ver;
	uint32_t val;
	unsigned int i;
	int node, phy;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh) != 0) {
		printf(": can't map registers\n");
		return;
	}

	if (OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    sizeof(sc->sc_ac.ac_enaddr)) != sizeof(sc->sc_ac.ac_enaddr)) {
		for (i = 0; i < GEM_LADDRNUM; i++) {
			lo = HREAD4(sc, GEM_LADDRL(i));
			hi = HREAD4(sc, GEM_LADDRH(i));
			if (lo != 0 || hi != 0) {
				sc->sc_ac.ac_enaddr[0] = lo;
				sc->sc_ac.ac_enaddr[1] = lo >> 8;
				sc->sc_ac.ac_enaddr[2] = lo >> 16;
				sc->sc_ac.ac_enaddr[3] = lo >> 24;
				sc->sc_ac.ac_enaddr[4] = hi;
				sc->sc_ac.ac_enaddr[5] = hi >> 8;
				break;
			}
		}
		if (i == GEM_LADDRNUM)
			ether_fakeaddr(ifp);
	}

	phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node != 0)
		sc->sc_phy_loc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phy_loc = MII_PHY_ANY;

	sc->sc_phy_mode = CAD_PHY_MODE_RGMII;
	OF_getprop(faa->fa_node, "phy-mode", phy_mode, sizeof(phy_mode));
	for (i = 0; i < nitems(cad_phy_modes); i++) {
		if (strcmp(phy_mode, cad_phy_modes[i].name) == 0) {
			sc->sc_phy_mode = cad_phy_modes[i].mode;
			break;
		}
	}

	rev = HREAD4(sc, GEM_MID);
	ver = (rev & GEM_MID_VERSION_MASK) >> GEM_MID_VERSION_SHIFT;

	sc->sc_descsize = sizeof(struct cad_desc32);
	/* Queue 0 is always present. */
	sc->sc_qmask = 0x1;
	/*
	 * Registers CFG1 and CFG6-10 are not present
	 * on Zynq-7000 / GEM version 0x2.
	 */
	if (ver >= 0x7) {
		val = HREAD4(sc, GEM_CFG6);
		if (val & GEM_CFG6_DMA64) {
			sc->sc_descsize = sizeof(struct cad_desc64);
			sc->sc_dma64 = 1;
		}
		sc->sc_qmask |= GEM_CFG6_PRIQ_MASK(val);

		val = HREAD4(sc, GEM_CFG8);
		sc->sc_ntype1scr = GEM_CFG8_NUM_TYPE1_SCR(val);
		sc->sc_ntype2scr = GEM_CFG8_NUM_TYPE2_SCR(val);
	}

	if (OF_is_compatible(faa->fa_node, "cdns,zynq-gem"))
		sc->sc_rxhang_erratum = 1;

	rw_init(&sc->sc_cfg_lock, "cadcfg");
	timeout_set(&sc->sc_tick, cad_tick, sc);
	task_set(&sc->sc_statchg_task, cad_statchg_task, sc);

	rw_enter_write(&sc->sc_cfg_lock);
	cad_reset(sc);
	rw_exit_write(&sc->sc_cfg_lock);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    cad_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt\n");
		goto fail;
	}

	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags |= IFXF_MPSAFE;
	ifp->if_ioctl = cad_ioctl;
	ifp->if_qstart = cad_start;
	ifp->if_watchdog = cad_watchdog;
	ifp->if_hardmtu = ETHER_MAX_DIX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Enable transmit checksum offload only on reliable hardware.
	 * At least Zynq-7000 appears to generate bad UDP header checksum if
	 * the checksum field has not been initialized to zero and
	 * UDP payload size is less than three octets.
	 */
	if (0) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
		    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
		    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
	}

	printf(": rev 0x%x, address %s\n", rev,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cad_mii_readreg;
	sc->sc_mii.mii_writereg = cad_mii_writereg;
	sc->sc_mii.mii_statchg = cad_mii_statchg;
	ifmedia_init(&sc->sc_media, 0, cad_media_change, cad_media_status);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phy_loc,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);

	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		printf("%s: no PHY found\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp);

#if NKSTAT > 0
	cad_kstat_attach(sc);
#endif

#ifdef DDB
	if (sc->sc_dev.dv_unit < nitems(cad_sc))
		cad_sc[sc->sc_dev.dv_unit] = sc;
#endif

	return;

fail:
	if (sc->sc_ioh != 0)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}

int
cad_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cad_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, netlock_held = 1;
	int s;

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
	case SIOCGIFSFFPAGE:
		netlock_held = 0;
		break;
	}

	if (netlock_held)
		NET_UNLOCK();
	rw_enter_write(&sc->sc_cfg_lock);
	if (netlock_held)
		NET_LOCK();
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				error = cad_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				cad_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			cad_iff(sc);
		error = 0;
	}

	splx(s);
	rw_exit_write(&sc->sc_cfg_lock);

	return error;
}

void
cad_reset(struct cad_softc *sc)
{
	static const unsigned int mdcclk_divs[] = {
		8, 16, 32, 48, 64, 96, 128, 224
	};
	unsigned int freq, i;
	uint32_t div, netcfg;

	rw_assert_wrlock(&sc->sc_cfg_lock);

	HWRITE4(sc, GEM_NETCTL, 0);
	HWRITE4(sc, GEM_IDR, ~0U);
	HWRITE4(sc, GEM_RXSR, 0);
	HWRITE4(sc, GEM_TXSR, 0);
	if (sc->sc_dma64) {
		HWRITE4(sc, GEM_RXQBASEHI, 0);
		HWRITE4(sc, GEM_TXQBASEHI, 0);
	}
	HWRITE4(sc, GEM_RXQBASE, 0);
	HWRITE4(sc, GEM_TXQBASE, 0);

	for (i = 1; i < GEM_MAX_PRIQ; i++) {
		if (sc->sc_qmask & (1U << i)) {
			if (i < 8)
				HWRITE4(sc, GEM_RXQ1BASE(i - 1), 0);
			else
				HWRITE4(sc, GEM_RXQ8BASE(i - 8), 0);
			HWRITE4(sc, GEM_TXQ1BASE(i - 1), 0);
		}
	}

	/* Disable all screeners so that Rx goes through queue 0. */
	for (i = 0; i < sc->sc_ntype1scr; i++)
		HWRITE4(sc, GEM_SCR_TYPE1(i), 0);
	for (i = 0; i < sc->sc_ntype2scr; i++)
		HWRITE4(sc, GEM_SCR_TYPE2(i), 0);

	/* MDIO clock rate must not exceed 2.5 MHz. */
	freq = clock_get_frequency(sc->sc_node, "pclk");
	for (div = 0; div < nitems(mdcclk_divs) - 1; div++) {
		if (freq / mdcclk_divs[div] <= 2500000)
			break;
	}
	KASSERT(div < nitems(mdcclk_divs));

	netcfg = HREAD4(sc, GEM_NETCFG);
	netcfg &= ~GEM_NETCFG_MDCCLKDIV_MASK;
	netcfg |= div << GEM_NETCFG_MDCCLKDIV_SHIFT;
	HWRITE4(sc, GEM_NETCFG, netcfg);

	/* Enable MDIO bus. */
	sc->sc_netctl = GEM_NETCTL_MDEN;
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
}

int
cad_up(struct cad_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct cad_buf *rxb, *txb;
	struct cad_desc32 *desc32;
	struct cad_desc64 *desc64;
	uint64_t addr;
	int flags = BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW;
	unsigned int i, nrxd, ntxd;
	uint32_t val;

	rw_assert_wrlock(&sc->sc_cfg_lock);

	/* Release lock for memory allocation. */
	NET_UNLOCK();

	if (sc->sc_dma64)
		flags |= BUS_DMA_64BIT;

	ntxd = CAD_NTXDESC;
	nrxd = CAD_NRXDESC;

	/*
	 * Allocate a dummy descriptor for unused priority queues.
	 * This is necessary with GEM revisions that have no option
	 * to disable queues.
	 */
	if (sc->sc_qmask & ~1U) {
		ntxd++;
		nrxd++;
	}

	/*
	 * Set up Tx descriptor ring.
	 */

	sc->sc_txring = cad_dmamem_alloc(sc,
	    ntxd * sc->sc_descsize, sc->sc_descsize);
	sc->sc_txdesc = sc->sc_txring->cdm_kva;

	desc32 = (struct cad_desc32 *)sc->sc_txdesc;
	desc64 = (struct cad_desc64 *)sc->sc_txdesc;

	sc->sc_txbuf = malloc(sizeof(*sc->sc_txbuf) * CAD_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < CAD_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, CAD_NTXSEGS,
		    MCLBYTES, 0, flags, &txb->bf_map);
		txb->bf_m = NULL;

		if (sc->sc_dma64) {
			desc64[i].d_addrhi = 0;
			desc64[i].d_addrlo = 0;
			desc64[i].d_status = GEM_TXD_USED;
			if (i == CAD_NTXDESC - 1)
				desc64[i].d_status |= GEM_TXD_WRAP;
		} else {
			desc32[i].d_addr = 0;
			desc32[i].d_status = GEM_TXD_USED;
			if (i == CAD_NTXDESC - 1)
				desc32[i].d_status |= GEM_TXD_WRAP;
		}
	}

	/* The remaining descriptors are dummies. */
	for (; i < ntxd; i++) {
		if (sc->sc_dma64) {
			desc64[i].d_addrhi = 0;
			desc64[i].d_addrlo = 0;
			desc64[i].d_status = GEM_TXD_USED | GEM_TXD_WRAP;
		} else {
			desc32[i].d_addr = 0;
			desc32[i].d_status = GEM_TXD_USED | GEM_TXD_WRAP;
		}
	}

	sc->sc_tx_prod = 0;
	sc->sc_tx_cons = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
	    0, sc->sc_txring->cdm_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	addr = sc->sc_txring->cdm_map->dm_segs[0].ds_addr;
	if (sc->sc_dma64)
		HWRITE4(sc, GEM_TXQBASEHI, addr >> 32);
	HWRITE4(sc, GEM_TXQBASE, addr);

	/* Initialize unused queues. Disable them if possible. */
	addr += CAD_NTXDESC * sc->sc_descsize;
	for (i = 1; i < GEM_MAX_PRIQ; i++) {
		if (sc->sc_qmask & (1U << i)) {
			HWRITE4(sc, GEM_TXQ1BASE(i - 1),
			    addr | GEM_TXQ1BASE_DISABLE);
		}
	}

	/*
	 * Set up Rx descriptor ring.
	 */

	sc->sc_rxring = cad_dmamem_alloc(sc,
	    nrxd * sc->sc_descsize, sc->sc_descsize);
	sc->sc_rxdesc = sc->sc_rxring->cdm_kva;

	desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
	desc64 = (struct cad_desc64 *)sc->sc_rxdesc;

	sc->sc_rxbuf = malloc(sizeof(struct cad_buf) * CAD_NRXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < CAD_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, flags, &rxb->bf_map);
		rxb->bf_m = NULL;

		/* Mark all descriptors as used so that driver owns them. */
		if (sc->sc_dma64) {
			desc64[i].d_addrhi = 0;
			desc64[i].d_addrlo = GEM_RXD_ADDR_USED;
			if (i == CAD_NRXDESC - 1)
				desc64[i].d_addrlo |= GEM_RXD_ADDR_WRAP;
		} else {
			desc32[i].d_addr = GEM_RXD_ADDR_USED;
			if (i == CAD_NRXDESC - 1)
				desc32[i].d_addr |= GEM_RXD_ADDR_WRAP;
		}
	}

	/* The remaining descriptors are dummies. */
	for (; i < nrxd; i++) {
		if (sc->sc_dma64) {
			desc64[i].d_addrhi = 0;
			desc64[i].d_addrlo =
			    GEM_RXD_ADDR_USED | GEM_RXD_ADDR_WRAP;
		} else {
			desc32[i].d_addr =
			    GEM_RXD_ADDR_USED | GEM_RXD_ADDR_WRAP;
		}
	}

	if_rxr_init(&sc->sc_rx_ring, 2, CAD_NRXDESC);

	sc->sc_rx_prod = 0;
	sc->sc_rx_cons = 0;
	cad_rxfill(sc);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
	    0, sc->sc_rxring->cdm_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	addr = sc->sc_rxring->cdm_map->dm_segs[0].ds_addr;
	if (sc->sc_dma64)
		HWRITE4(sc, GEM_RXQBASEHI, addr >> 32);
	HWRITE4(sc, GEM_RXQBASE, addr);

	/* Initialize unused queues. Disable them if possible. */
	addr += sc->sc_descsize * CAD_NRXDESC;
	for (i = 1; i < GEM_MAX_PRIQ; i++) {
		if (sc->sc_qmask & (1U << i)) {
			if (i < 8) {
				HWRITE4(sc, GEM_RXQ1BASE(i - 1),
				    addr | GEM_RXQ1BASE_DISABLE);
			} else {
				HWRITE4(sc, GEM_RXQ8BASE(i - 8),
				    addr | GEM_RXQ8BASE_DISABLE);
			}
		}
	}

	NET_LOCK();

	/*
	 * Set MAC address filters.
	 */

	HWRITE4(sc, GEM_LADDRL(0), sc->sc_ac.ac_enaddr[0] |
	    ((uint32_t)sc->sc_ac.ac_enaddr[1] << 8) |
	    ((uint32_t)sc->sc_ac.ac_enaddr[2] << 16) |
	    ((uint32_t)sc->sc_ac.ac_enaddr[3] << 24));
	HWRITE4(sc, GEM_LADDRH(0), sc->sc_ac.ac_enaddr[4] |
	    ((uint32_t)sc->sc_ac.ac_enaddr[5] << 8));

	for (i = 1; i < GEM_LADDRNUM; i++) {
		HWRITE4(sc, GEM_LADDRL(i), 0);
		HWRITE4(sc, GEM_LADDRH(i), 0);
	}

	cad_iff(sc);

	clock_set_frequency(sc->sc_node, GEM_CLK_TX, 2500000);
	clock_enable(sc->sc_node, GEM_CLK_TX);
	delay(1000);

	val = HREAD4(sc, GEM_NETCFG);

	val |= GEM_NETCFG_FCSREM | GEM_NETCFG_RXCSUMEN | GEM_NETCFG_1000 |
	    GEM_NETCFG_100 | GEM_NETCFG_FDEN | GEM_NETCFG_1536RXEN;
	val &= ~GEM_NETCFG_RXOFFS_MASK;
	val |= ETHER_ALIGN << GEM_NETCFG_RXOFFS_SHIFT;
	val &= ~GEM_NETCFG_BCASTDI;

	if (sc->sc_phy_mode == CAD_PHY_MODE_SGMII)
		val |= GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL;
	else
		val &= ~(GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL);

	HWRITE4(sc, GEM_NETCFG, val);

	val = HREAD4(sc, GEM_DMACR);

	if (sc->sc_dma64)
		val |= GEM_DMACR_DMA64;
	else
		val &= ~GEM_DMACR_DMA64;
	/* Use CPU's native byte order with descriptor words. */
#if BYTE_ORDER == BIG_ENDIAN
	val |= GEM_DMACR_ES_DESCR;
#else
	val &= ~GEM_DMACR_ES_DESCR;
#endif
	val &= ~GEM_DMACR_ES_PDATA;
	val |= GEM_DMACR_AHBDISC | GEM_DMACR_TXSIZE;
	val &= ~GEM_DMACR_RXSIZE_MASK;
	val |= GEM_DMACR_RXSIZE_8K;
	val &= ~GEM_DMACR_RXBUF_MASK;
	val |= (MCLBYTES / 64) << GEM_DMACR_RXBUF_SHIFT;
	val &= ~GEM_DMACR_BLEN_MASK;
	val |= GEM_DMACR_BLEN_16;

	if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
		val |= GEM_DMACR_TXCSUMEN;

	HWRITE4(sc, GEM_DMACR, val);

	/* Clear statistics. */
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STATCLR);

	/* Enable Rx and Tx. */
	sc->sc_netctl |= GEM_NETCTL_RXEN | GEM_NETCTL_TXEN;
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);

	/* Enable interrupts. */
	HWRITE4(sc, GEM_IER, GEM_IXR_HRESP | GEM_IXR_RXOVR | GEM_IXR_RXDONE |
	    GEM_IXR_TXDONE);

	if (sc->sc_rxhang_erratum)
		HWRITE4(sc, GEM_IER, GEM_IXR_RXUSED);

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	return 0;
}

void
cad_down(struct cad_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct cad_buf *rxb, *txb;
	unsigned int i, timeout;

	rw_assert_wrlock(&sc->sc_cfg_lock);

	ifp->if_flags &= ~IFF_RUNNING;

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Avoid lock order issues with barriers. */
	NET_UNLOCK();

	timeout_del_barrier(&sc->sc_tick);

	/* Disable data transfer. */
	sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN);
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);

	/* Disable all interrupts. */
	HWRITE4(sc, GEM_IDR, ~0U);

	/* Wait for transmitter to become idle. */
	for (timeout = 1000; timeout > 0; timeout--) {
		if ((HREAD4(sc, GEM_TXSR) & GEM_TXSR_TXGO) == 0)
			break;
		delay(10);
	}
	if (timeout == 0)
		printf("%s: transmitter not idle\n", sc->sc_dev.dv_xname);

	mii_down(&sc->sc_mii);

	/* Wait for activity to cease. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	taskq_del_barrier(systq, &sc->sc_statchg_task);

	/* Disable the packet clock as it is not needed any longer. */
	clock_disable(sc->sc_node, GEM_CLK_TX);

	cad_reset(sc);

	/*
	 * Tear down the Tx descriptor ring.
	 */

	for (i = 0; i < CAD_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->bf_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0,
			    txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->bf_map);
			m_freem(txb->bf_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->bf_map);
	}
	free(sc->sc_txbuf, M_DEVBUF, sizeof(*sc->sc_txbuf) * CAD_NTXDESC);
	sc->sc_txbuf = NULL;

	cad_dmamem_free(sc, sc->sc_txring);
	sc->sc_txring = NULL;
	sc->sc_txdesc = NULL;

	/*
	 * Tear down the Rx descriptor ring.
	 */

	for (i = 0; i < CAD_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->bf_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, 0,
			    rxb->bf_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->bf_map);
			m_freem(rxb->bf_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->bf_map);
	}
	free(sc->sc_rxbuf, M_DEVBUF, sizeof(*sc->sc_rxbuf) * CAD_NRXDESC);
	sc->sc_rxbuf = NULL;

	cad_dmamem_free(sc, sc->sc_rxring);
	sc->sc_rxring = NULL;
	sc->sc_rxdesc = NULL;

	NET_LOCK();
}

uint8_t
cad_hash_mac(const uint8_t *eaddr)
{
	uint64_t val = 0;
	int i;
	uint8_t hash = 0;

	for (i = ETHER_ADDR_LEN - 1; i >= 0; i--)
		val = (val << 8) | eaddr[i];

	for (i = 0; i < 8; i++) {
		hash ^= val;
		val >>= 6;
	}

	return hash & 0x3f;
}

void
cad_iff(struct cad_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint64_t hash;
	uint32_t netcfg;

	rw_assert_wrlock(&sc->sc_cfg_lock);

	netcfg = HREAD4(sc, GEM_NETCFG);
	netcfg &= ~GEM_NETCFG_UCASTHASHEN;

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC) {
		netcfg |= GEM_NETCFG_COPYALL;
		netcfg &= ~GEM_NETCFG_MCASTHASHEN;
	} else {
		netcfg &= ~GEM_NETCFG_COPYALL;
		netcfg |= GEM_NETCFG_MCASTHASHEN;

		if (ac->ac_multirangecnt > 0)
			ifp->if_flags |= IFF_ALLMULTI;

		if (ifp->if_flags & IFF_ALLMULTI) {
			hash = ~0ULL;
		} else {
			hash = 0;
			ETHER_FIRST_MULTI(step, ac, enm);
			while (enm != NULL) {
				hash |= 1ULL << cad_hash_mac(enm->enm_addrlo);
				ETHER_NEXT_MULTI(step, enm);
			}
		}

		HWRITE4(sc, GEM_HASHL, hash);
		HWRITE4(sc, GEM_HASHH, hash >> 32);
	}

	HWRITE4(sc, GEM_NETCFG, netcfg);
}

void
cad_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct cad_softc *sc = ifp->if_softc;
	struct mbuf *m;
	unsigned int free, head, used;

	free = sc->sc_tx_cons;
	head = sc->sc_tx_prod;
	if (free <= head)
		free += CAD_NTXDESC;
	free -= head;

	for (;;) {
		if (free <= CAD_NTXSEGS) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = cad_encap(sc, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		ifp->if_timer = 5;

		KASSERT(free >= used);
		free -= used;
	}

	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX);
}

void
cad_watchdog(struct ifnet *ifp)
{
	struct cad_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		return;

	/* XXX */
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX);
}

unsigned int
cad_encap(struct cad_softc *sc, struct mbuf *m)
{
	bus_dmamap_t map;
	struct cad_buf *txb;
	struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_txdesc;
	struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_txdesc;
	unsigned int head, idx, nsegs;
	uint32_t status;
	int i;

	head = sc->sc_tx_prod;

	txb = &sc->sc_txbuf[head];
	map = txb->bf_map;

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) != 0)
			return 0;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) != 0)
			return 0;
		break;
	default:
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = map->dm_nsegs;
	KASSERT(nsegs > 0);

	txb->bf_m = m;

	/*
	 * Fill descriptors in reverse order so that all the descriptors
	 * are ready when the first descriptor's GEM_TXD_USED bit is cleared.
	 */
	for (i = nsegs - 1; i >= 0; i--) {
		idx = (head + i) % CAD_NTXDESC;

		status = map->dm_segs[i].ds_len & GEM_TXD_LEN_MASK;
		if (i == nsegs - 1)
			status |= GEM_TXD_LAST;
		if (idx == CAD_NTXDESC - 1)
			status |= GEM_TXD_WRAP;

		if (sc->sc_dma64) {
			uint64_t addr = map->dm_segs[i].ds_addr;

			desc64[idx].d_addrlo = addr;
			desc64[idx].d_addrhi = addr >> 32;
		} else {
			desc32[idx].d_addr = map->dm_segs[i].ds_addr;
		}

		/* Make d_addr visible before GEM_TXD_USED is cleared
		 * in d_status.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (sc->sc_dma64)
			desc64[idx].d_status = status;
		else
			desc32[idx].d_status = status;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	sc->sc_tx_prod = (head + nsegs) % CAD_NTXDESC;

	return nsegs;
}

int
cad_intr(void *arg)
{
	struct cad_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t isr;

	isr = HREAD4(sc, GEM_ISR);
	HWRITE4(sc, GEM_ISR, isr);

	if (isr & GEM_IXR_RXDONE)
		cad_rxeof(sc);
	if (isr & GEM_IXR_TXDONE)
		cad_txeof(sc);

	if (isr & GEM_IXR_RXOVR)
		ifp->if_ierrors++;

	if (sc->sc_rxhang_erratum && (isr & GEM_IXR_RXUSED)) {
		/*
		 * Try to flush a packet from the Rx SRAM to avoid triggering
		 * the Rx hang.
		 */
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_DPRAM);
		cad_rxfill(sc);
	}

	/* If there has been a DMA error, stop the interface to limit damage. */
	if (isr & GEM_IXR_HRESP) {
		sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN);
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
		HWRITE4(sc, GEM_IDR, ~0U);

		printf("%s: hresp error, interface stopped\n",
		    sc->sc_dev.dv_xname);
	}

	return 1;
}

void
cad_rxeof(struct cad_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	struct cad_buf *rxb;
	struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
	struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_rxdesc;
	size_t len;
	unsigned int idx;
	uint32_t addr, status;

	idx = sc->sc_rx_cons;

	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_dma64)
			addr = desc64[idx].d_addrlo;
		else
			addr = desc32[idx].d_addr;
		if ((addr & GEM_RXD_ADDR_USED) == 0)
			break;

		/* Prevent premature read of d_status. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_POSTREAD);

		if (sc->sc_dma64)
			status = desc64[idx].d_status;
		else
			status = desc32[idx].d_status;
		len = status & GEM_RXD_LEN_MASK;

		rxb = &sc->sc_rxbuf[idx];

		bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, ETHER_ALIGN, len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->bf_map);

		m = rxb->bf_m;
		rxb->bf_m = NULL;
		KASSERT(m != NULL);

		if_rxr_put(&sc->sc_rx_ring, 1);
		idx = (idx + 1) % CAD_NRXDESC;

		if ((status & (GEM_RXD_SOF | GEM_RXD_EOF)) !=
		    (GEM_RXD_SOF | GEM_RXD_EOF)) {
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}

		m_adj(m, ETHER_ALIGN);
		m->m_len = m->m_pkthdr.len = len;

		m->m_pkthdr.csum_flags = 0;
		switch (status & GEM_RXD_CSUM_MASK) {
		case GEM_RXD_CSUM_IP_OK:
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
			break;
		case GEM_RXD_CSUM_TCP_OK:
		case GEM_RXD_CSUM_UDP_OK:
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK |
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
			break;
		}

		ml_enqueue(&ml, m);

		sc->sc_rxdone = 1;
	}

	sc->sc_rx_cons = idx;

	cad_rxfill(sc);

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);
}

void
cad_rxfill(struct cad_softc *sc)
{
	struct cad_buf *rxb;
	struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
	struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_rxdesc;
	uint64_t addr;
	unsigned int idx;
	u_int slots;

	idx = sc->sc_rx_prod;

	for (slots = if_rxr_get(&sc->sc_rx_ring, CAD_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[idx];
		rxb->bf_m = cad_alloc_mbuf(sc, rxb->bf_map);
		if (rxb->bf_m == NULL)
			break;

		addr = rxb->bf_map->dm_segs[0].ds_addr;
		KASSERT((addr & (GEM_RXD_ADDR_WRAP | GEM_RXD_ADDR_USED)) == 0);
		if (idx == CAD_NRXDESC - 1)
			addr |= GEM_RXD_ADDR_WRAP;

		if (sc->sc_dma64) {
			desc64[idx].d_addrhi = addr >> 32;
			desc64[idx].d_status = 0;
		} else {
			desc32[idx].d_status = 0;
		}

		/* Make d_addrhi and d_status visible before clearing
		 * GEM_RXD_ADDR_USED in d_addr or d_addrlo.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (sc->sc_dma64)
			desc64[idx].d_addrlo = addr;
		else
			desc32[idx].d_addr = addr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		idx = (idx + 1) % CAD_NRXDESC;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	sc->sc_rx_prod = idx;
}

void
cad_txeof(struct cad_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct cad_buf *txb;
	struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_txdesc;
	struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_txdesc;
	unsigned int free = 0;
	unsigned int idx, nsegs;
	uint32_t status;

	idx = sc->sc_tx_cons;

	while (idx != sc->sc_tx_prod) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
		    idx * sc->sc_descsize, sc->sc_descsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_dma64)
			status = desc64[idx].d_status;
		else
			status = desc32[idx].d_status;
		if ((status & GEM_TXD_USED) == 0)
			break;

		if (status & (GEM_TXD_RLIMIT | GEM_TXD_CORRUPT |
		    GEM_TXD_LCOLL | GEM_TXD_CSUMERR_MASK))
			ifp->if_oerrors++;

		txb = &sc->sc_txbuf[idx];
		nsegs = txb->bf_map->dm_nsegs;
		KASSERT(nsegs > 0);

		bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0,
		    txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->bf_map);

		m_freem(txb->bf_m);
		txb->bf_m = NULL;

		for (;;) {
			idx = (idx + 1) % CAD_NTXDESC;

			nsegs--;
			if (nsegs == 0)
				break;

			/*
			 * The controller marks only the initial segment used.
			 * Mark the remaining segments used manually, so that
			 * the controller will not accidentally use them later.
			 *
			 * This could be done lazily on the Tx ring producer
			 * side by ensuring that the subsequent descriptor
			 * after the actual segments is marked used.
			 * However, this would make the ring trickier to debug.
			 */

			bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
			    idx * sc->sc_descsize, sc->sc_descsize,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			if (sc->sc_dma64)
				desc64[idx].d_status |= GEM_TXD_USED;
			else
				desc32[idx].d_status |= GEM_TXD_USED;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
			    idx * sc->sc_descsize, sc->sc_descsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}

		free++;
	}

	if (free == 0)
		return;

	sc->sc_tx_cons = idx;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

void
cad_tick(void *arg)
{
	struct cad_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	s = splnet();

	mii_tick(&sc->sc_mii);

	/*
	 * If there has been no Rx for a moment, Rx DMA might be stuck.
	 * Try to recover by restarting the receiver.
	 */
	if (sc->sc_rxhang_erratum && !sc->sc_rxdone) {
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl & ~GEM_NETCTL_RXEN);
		(void)HREAD4(sc, GEM_NETCTL);
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
	}
	sc->sc_rxdone = 0;

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

int
cad_media_change(struct ifnet *ifp)
{
	struct cad_softc *sc = ifp->if_softc;

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return 0;
}

void
cad_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct cad_softc *sc = ifp->if_softc;

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		imr->ifm_active = sc->sc_mii.mii_media_active;
		imr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

int
cad_mii_wait(struct cad_softc *sc)
{
	int timeout;

	for (timeout = 10000; timeout > 0; timeout--) {
		if (HREAD4(sc, GEM_NETSR) & GEM_NETSR_PHY_MGMT_IDLE)
			break;
		delay(10);
	}
	if (timeout == 0)
		return ETIMEDOUT;
	return 0;
}

void
cad_mii_oper(struct cad_softc *sc, int phy_no, int reg, uint32_t oper)
{
	oper |= (phy_no << GEM_PHYMNTNC_ADDR_SHIFT) & GEM_PHYMNTNC_ADDR_MASK;
	oper |= (reg << GEM_PHYMNTNC_REG_SHIFT) & GEM_PHYMNTNC_REG_MASK;
	oper |= GEM_PHYMNTNC_CLAUSE_22 | GEM_PHYMNTNC_MUST_10;

	if (cad_mii_wait(sc) != 0) {
		printf("%s: MII bus idle timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	HWRITE4(sc, GEM_PHYMNTNC, oper);

	if (cad_mii_wait(sc) != 0) {
		printf("%s: MII bus operation timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}

int
cad_mii_readreg(struct device *self, int phy_no, int reg)
{
	struct cad_softc *sc = (struct cad_softc *)self;
	int val;

	cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_READ);

	val = HREAD4(sc, GEM_PHYMNTNC) & GEM_PHYMNTNC_DATA_MASK;

	/* The MAC does not handle 1000baseT in half duplex mode. */
	if (reg == MII_EXTSR)
		val &= ~EXTSR_1000THDX;

	return val;
}

void
cad_mii_writereg(struct device *self, int phy_no, int reg, int val)
{
	struct cad_softc *sc = (struct cad_softc *)self;

	cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_WRITE |
	    (val & GEM_PHYMNTNC_DATA_MASK));
}

void
cad_mii_statchg(struct device *self)
{
	struct cad_softc *sc = (struct cad_softc *)self;
	uint32_t netcfg;

	netcfg = HREAD4(sc, GEM_NETCFG);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		netcfg |= GEM_NETCFG_FDEN;
	else
		netcfg &= ~GEM_NETCFG_FDEN;

	netcfg &= ~(GEM_NETCFG_100 | GEM_NETCFG_1000);
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	default:
		sc->sc_tx_freq = 2500000;
		break;
	case IFM_100_TX:
		netcfg |= GEM_NETCFG_100;
		sc->sc_tx_freq = 25000000;
		break;
	case IFM_1000_T:
		netcfg |= GEM_NETCFG_100 | GEM_NETCFG_1000;
		sc->sc_tx_freq = 125000000;
		break;
	}

	HWRITE4(sc, GEM_NETCFG, netcfg);

	/* Defer clock setting because it allocates memory with M_WAITOK. */
	task_add(systq, &sc->sc_statchg_task);
}

void
cad_statchg_task(void *arg)
{
	struct cad_softc *sc = arg;

	clock_set_frequency(sc->sc_node, GEM_CLK_TX, sc->sc_tx_freq);
}

struct cad_dmamem *
cad_dmamem_alloc(struct cad_softc *sc, bus_size_t size, bus_size_t align)
{
	struct cad_dmamem *cdm;
	bus_size_t boundary = 0;
	int flags = BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW;
	int nsegs;

	cdm = malloc(sizeof(*cdm), M_DEVBUF, M_WAITOK | M_ZERO);
	cdm->cdm_size = size;

	if (sc->sc_dma64) {
		/*
		 * The segment contains an actual ring and possibly
		 * a dummy ring for unused priority queues.
		 * The segment must not cross a 32-bit boundary so that
		 * the rings have the same base address bits 63:32.
		 */
		boundary = 1ULL << 32;
		flags |= BUS_DMA_64BIT;
	}

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, boundary,
	    flags, &cdm->cdm_map) != 0)
		goto cdmfree;
	if (bus_dmamem_alloc(sc->sc_dmat, size, align, boundary,
	    &cdm->cdm_seg, 1, &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &cdm->cdm_seg, nsegs, size,
	    &cdm->cdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, cdm->cdm_map, cdm->cdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;
	memset(cdm->cdm_kva, 0, size);
	return cdm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map);
cdmfree:
	free(cdm, M_DEVBUF, sizeof(*cdm));
	return NULL;
}

void
cad_dmamem_free(struct cad_softc *sc, struct cad_dmamem *cdm)
{
	bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, cdm->cdm_size);
	bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map);
	free(cdm, M_DEVBUF, sizeof(*cdm));
}

struct mbuf *
cad_alloc_mbuf(struct cad_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (m == NULL)
		return NULL;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

#if NKSTAT > 0
enum cad_stat {
	cad_stat_tx_toto,
	cad_stat_tx_totp,
	cad_stat_tx_bcast,
	cad_stat_tx_mcast,
	cad_stat_tx_pause,
	cad_stat_tx_h64,
	cad_stat_tx_h65,
	cad_stat_tx_h128,
	cad_stat_tx_h256,
	cad_stat_tx_h512,
	cad_stat_tx_h1024,
	cad_stat_tx_underrun,
	cad_stat_tx_scoll,
	cad_stat_tx_mcoll,
	cad_stat_tx_ecoll,
	cad_stat_tx_lcoll,
	cad_stat_tx_defer,
	cad_stat_tx_sense,
	cad_stat_rx_toto,
	cad_stat_rx_totp,
	cad_stat_rx_bcast,
	cad_stat_rx_mcast,
	cad_stat_rx_pause,
	cad_stat_rx_h64,
	cad_stat_rx_h65,
	cad_stat_rx_h128,
	cad_stat_rx_h256,
	cad_stat_rx_h512,
	cad_stat_rx_h1024,
	cad_stat_rx_undersz,
	cad_stat_rx_oversz,
	cad_stat_rx_jabber,
	cad_stat_rx_fcs,
	cad_stat_rx_symberr,
	cad_stat_rx_align,
	cad_stat_rx_reserr,
	cad_stat_rx_overrun,
	cad_stat_rx_ipcsum,
	cad_stat_rx_tcpcsum,
	cad_stat_rx_udpcsum,
	cad_stat_count
};

struct cad_counter {
	const char		*c_name;
	enum kstat_kv_unit	c_unit;
	uint32_t		c_reg;
};

const struct cad_counter cad_counters[cad_stat_count] = {
	[cad_stat_tx_toto] =
	    { "tx total",	KSTAT_KV_U_BYTES,	0 },
	[cad_stat_tx_totp] =
	    { "tx total",	KSTAT_KV_U_PACKETS,	GEM_TXCNT },
	[cad_stat_tx_bcast] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS,	GEM_TXBCCNT },
	[cad_stat_tx_mcast] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS,	GEM_TXMCCNT },
	[cad_stat_tx_pause] =
	    { "tx pause",	KSTAT_KV_U_PACKETS,	GEM_TXPAUSECNT },
	[cad_stat_tx_h64] =
	    { "tx 64B",		KSTAT_KV_U_PACKETS,	GEM_TX64CNT },
	[cad_stat_tx_h65] =
	    { "tx 65-127B",	KSTAT_KV_U_PACKETS,	GEM_TX65CNT },
	[cad_stat_tx_h128] =
	    { "tx 128-255B",	KSTAT_KV_U_PACKETS,	GEM_TX128CNT },
	[cad_stat_tx_h256] =
	    { "tx 256-511B",	KSTAT_KV_U_PACKETS,	GEM_TX256CNT },
	[cad_stat_tx_h512] =
	    { "tx 512-1023B",	KSTAT_KV_U_PACKETS,	GEM_TX512CNT },
	[cad_stat_tx_h1024] =
	    { "tx 1024-1518B",	KSTAT_KV_U_PACKETS,	GEM_TX1024CNT },
	[cad_stat_tx_underrun] =
	    { "tx underrun",	KSTAT_KV_U_PACKETS,	GEM_TXURUNCNT },
	[cad_stat_tx_scoll] =
	    { "tx scoll",	KSTAT_KV_U_PACKETS,	GEM_SNGLCOLLCNT },
	[cad_stat_tx_mcoll] =
	    { "tx mcoll",	KSTAT_KV_U_PACKETS,	GEM_MULTICOLLCNT },
	[cad_stat_tx_ecoll] =
	    { "tx excess coll",	KSTAT_KV_U_PACKETS,	GEM_EXCESSCOLLCNT },
	[cad_stat_tx_lcoll] =
	    { "tx late coll",	KSTAT_KV_U_PACKETS,	GEM_LATECOLLCNT },
	[cad_stat_tx_defer] =
	    { "tx defer",	KSTAT_KV_U_PACKETS,	GEM_TXDEFERCNT },
	[cad_stat_tx_sense] =
	    { "tx csense",	KSTAT_KV_U_PACKETS,	GEM_TXCSENSECNT },
	[cad_stat_rx_toto] =
	    { "rx total",	KSTAT_KV_U_BYTES,	0 },
	[cad_stat_rx_totp] =
	    { "rx total",	KSTAT_KV_U_PACKETS,	GEM_RXCNT },
	[cad_stat_rx_bcast] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS,	GEM_RXBROADCNT },
	[cad_stat_rx_mcast] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS,	GEM_RXMULTICNT },
	[cad_stat_rx_pause] =
	    { "rx pause",	KSTAT_KV_U_PACKETS,	GEM_RXPAUSECNT },
	[cad_stat_rx_h64] =
	    { "rx 64B",		KSTAT_KV_U_PACKETS,	GEM_RX64CNT },
	[cad_stat_rx_h65] =
	    { "rx 65-127B",	KSTAT_KV_U_PACKETS,	GEM_RX65CNT },
	[cad_stat_rx_h128] =
	    { "rx 128-255B",	KSTAT_KV_U_PACKETS,	GEM_RX128CNT },
	[cad_stat_rx_h256] =
	    { "rx 256-511B",	KSTAT_KV_U_PACKETS,	GEM_RX256CNT },
	[cad_stat_rx_h512] =
	    { "rx 512-1023B",	KSTAT_KV_U_PACKETS,	GEM_RX512CNT },
	[cad_stat_rx_h1024] =
	    { "rx 1024-1518B",	KSTAT_KV_U_PACKETS,	GEM_RX1024CNT },
	[cad_stat_rx_undersz] =
	    { "rx undersz",	KSTAT_KV_U_PACKETS,	GEM_RXUNDRCNT },
	[cad_stat_rx_oversz] =
	    { "rx oversz",	KSTAT_KV_U_PACKETS,	GEM_RXOVRCNT },
	[cad_stat_rx_jabber] =
	    { "rx jabber",	KSTAT_KV_U_PACKETS,	GEM_RXJABCNT },
	[cad_stat_rx_fcs] =
	    { "rx fcs",		KSTAT_KV_U_PACKETS,	GEM_RXFCSCNT },
	[cad_stat_rx_symberr] =
	    { "rx symberr",	KSTAT_KV_U_PACKETS,	GEM_RXSYMBCNT },
	[cad_stat_rx_align] =
	    { "rx align",	KSTAT_KV_U_PACKETS,	GEM_RXALIGNCNT },
	[cad_stat_rx_reserr] =
	    { "rx reserr",	KSTAT_KV_U_PACKETS,	GEM_RXRESERRCNT },
	[cad_stat_rx_overrun] =
	    { "rx overrun",	KSTAT_KV_U_PACKETS,	GEM_RXORCNT },
	[cad_stat_rx_ipcsum] =
	    { "rx ip csum",	KSTAT_KV_U_PACKETS,	GEM_RXIPCCNT },
	[cad_stat_rx_tcpcsum] =
	    { "rx tcp csum",	KSTAT_KV_U_PACKETS,	GEM_RXTCPCCNT },
	[cad_stat_rx_udpcsum] =
	    { "rx udp csum",	KSTAT_KV_U_PACKETS,	GEM_RXUDPCCNT },
};
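
/*
 * Export the hardware statistics counters as a kstat.
 * cad_kstat_read() accumulates the per-register counters into 64-bit
 * values; the Tx and Rx octet totals have no single counter register
 * and are instead read from the GEM_OCTTXL/H and GEM_OCTRXL/H pairs.
 */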
void
cad_kstat_attach(struct cad_softc *sc)
{
	const struct cad_counter *c;
	struct kstat *ks;
	struct kstat_kv *kvs;
	int i;

	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "cad-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(cad_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < nitems(cad_counters); i++) {
		c = &cad_counters[i];
		kstat_kv_unit_init(&kvs[i], c->c_name, KSTAT_KV_T_COUNTER64,
		    c->c_unit);
	}

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(cad_counters) * sizeof(*kvs);
	ks->ks_read = cad_kstat_read;

	sc->sc_kstat = ks;
	kstat_install(ks);
}

int
cad_kstat_read(struct kstat *ks)
{
	const struct cad_counter *c;
	struct kstat_kv *kvs = ks->ks_data;
	struct cad_softc *sc = ks->ks_softc;
	uint64_t v64;
	int i;

	v64 = HREAD4(sc, GEM_OCTTXL);
	v64 |= (uint64_t)HREAD4(sc, GEM_OCTTXH) << 32;
	kstat_kv_u64(&kvs[cad_stat_tx_toto]) += v64;

	v64 = HREAD4(sc, GEM_OCTRXL);
	v64 |= (uint64_t)HREAD4(sc, GEM_OCTRXH) << 32;
	kstat_kv_u64(&kvs[cad_stat_rx_toto]) += v64;

	for (i = 0; i < nitems(cad_counters); i++) {
		c = &cad_counters[i];
		if (c->c_reg == 0)
			continue;
		kstat_kv_u64(&kvs[i]) += HREAD4(sc, c->c_reg);
	}

	getnanouptime(&ks->ks_updated);

	return 0;
}

void
cad_kstat_tick(void *arg)
{
	struct cad_softc *sc = arg;

	if (mtx_enter_try(&sc->sc_kstat_mtx)) {
		cad_kstat_read(sc->sc_kstat);
		mtx_leave(&sc->sc_kstat_mtx);
	}
}
#endif /* NKSTAT > 0 */

#ifdef DDB
void
cad_dump(struct cad_softc *sc)
{
	struct cad_buf *rxb, *txb;
	struct cad_desc32 *desc32;
	struct cad_desc64 *desc64;
	int i;

	printf("isr 0x%x txsr 0x%x rxsr 0x%x\n", HREAD4(sc, GEM_ISR),
	    HREAD4(sc, GEM_TXSR), HREAD4(sc, GEM_RXSR));

	if (sc->sc_dma64) {
		printf("tx q 0x%08x%08x\n",
		    HREAD4(sc, GEM_TXQBASEHI),
		    HREAD4(sc, GEM_TXQBASE));
	} else {
		printf("tx q 0x%08x\n",
		    HREAD4(sc, GEM_TXQBASE));
	}
	desc32 = (struct cad_desc32 *)sc->sc_txdesc;
	desc64 = (struct cad_desc64 *)sc->sc_txdesc;
	if (sc->sc_txbuf != NULL) {
		for (i = 0; i < CAD_NTXDESC; i++) {
			txb = &sc->sc_txbuf[i];
			if (sc->sc_dma64) {
				printf(" %3i %p 0x%08x%08x 0x%08x %s%s "
				    "m %p\n", i,
				    &desc64[i],
				    desc64[i].d_addrhi, desc64[i].d_addrlo,
				    desc64[i].d_status,
				    sc->sc_tx_cons == i ? ">" : " ",
				    sc->sc_tx_prod == i ? "<" : " ",
				    txb->bf_m);
			} else {
				printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i,
				    &desc32[i],
				    desc32[i].d_addr,
				    desc32[i].d_status,
				    sc->sc_tx_cons == i ? ">" : " ",
				    sc->sc_tx_prod == i ? "<" : " ",
"<" : " ", 1976 txb->bf_m); 1977 } 1978 } 1979 } 1980 for (i = 1; i < GEM_MAX_PRIQ; i++) { 1981 if (sc->sc_qmask & (1U << i)) { 1982 printf("tx q%d 0x%08x\n", i, 1983 HREAD4(sc, GEM_TXQ1BASE(i - 1))); 1984 } 1985 } 1986 1987 if (sc->sc_dma64) { 1988 printf("rx q 0x%08x%08x\n", 1989 HREAD4(sc, GEM_RXQBASEHI), 1990 HREAD4(sc, GEM_RXQBASE)); 1991 } else { 1992 printf("rx q 0x%08x\n", 1993 HREAD4(sc, GEM_RXQBASE)); 1994 } 1995 desc32 = (struct cad_desc32 *)sc->sc_rxdesc; 1996 desc64 = (struct cad_desc64 *)sc->sc_rxdesc; 1997 if (sc->sc_rxbuf != NULL) { 1998 for (i = 0; i < CAD_NRXDESC; i++) { 1999 rxb = &sc->sc_rxbuf[i]; 2000 if (sc->sc_dma64) { 2001 printf(" %3i %p 0x%08x%08x 0x%08x %s%s " 2002 "m %p\n", i, 2003 &desc64[i], 2004 desc64[i].d_addrhi, desc64[i].d_addrlo, 2005 desc64[i].d_status, 2006 sc->sc_rx_cons == i ? ">" : " ", 2007 sc->sc_rx_prod == i ? "<" : " ", 2008 rxb->bf_m); 2009 } else { 2010 printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i, 2011 &desc32[i], 2012 desc32[i].d_addr, 2013 desc32[i].d_status, 2014 sc->sc_rx_cons == i ? ">" : " ", 2015 sc->sc_rx_prod == i ? "<" : " ", 2016 rxb->bf_m); 2017 } 2018 } 2019 } 2020 for (i = 1; i < GEM_MAX_PRIQ; i++) { 2021 if (sc->sc_qmask & (1U << i)) { 2022 printf("rx q%d 0x%08x\n", i, 2023 HREAD4(sc, (i < 8) ? GEM_RXQ1BASE(i - 1) 2024 : GEM_RXQ8BASE(i - 8))); 2025 } 2026 } 2027 } 2028 #endif 2029