1 /* $OpenBSD: if_msk.c,v 1.143 2023/11/10 15:51:20 bluhm Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $ 35 */ 36 37 /* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72 /* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. 
The XMAC provides all of the MAC
 * components and a PHY while the GEnesis controller provides a PCI
 * interface with DMA support. Each card may have between 512K and
 * 2MB of SRAM on board depending on the configuration.
 *
 * The SysKonnect GEnesis controller can have either one or two XMAC
 * chips connected to it, allowing single or dual port NIC configurations.
 * SysKonnect has the distinction of being the only vendor on the market
 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
 * XMAC registers. This driver takes advantage of these features to allow
 * both XMACs to operate as independent interfaces.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/timeout.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_skreg.h>
#include <dev/pci/if_mskvar.h>

/*
 * Field layout of the 64-bit status ring entries (sk_status_ring):
 * bit 63 is the hardware ownership bit and bits 56-62 carry the opcode
 * identifying the kind of completion event the entry reports.
 */
#define MSK_STATUS_OWN_SHIFT		63
#define MSK_STATUS_OWN_MASK		0x1
#define MSK_STATUS_OPCODE_SHIFT		56
#define MSK_STATUS_OPCODE_MASK		0x7f

#define MSK_STATUS_OWN(_d) \
	(((_d) >> MSK_STATUS_OWN_SHIFT) & MSK_STATUS_OWN_MASK)
#define MSK_STATUS_OPCODE(_d) \
	(((_d) >> MSK_STATUS_OPCODE_SHIFT) & MSK_STATUS_OPCODE_MASK)

/* Status ring event opcodes. */
#define MSK_STATUS_OPCODE_RXSTAT	0x60
#define MSK_STATUS_OPCODE_RXTIMESTAMP	0x61
#define MSK_STATUS_OPCODE_RXVLAN	0x62
#define MSK_STATUS_OPCODE_RXCKSUM	0x64
#define MSK_STATUS_OPCODE_RXCKSUMVLAN	\
	(MSK_STATUS_OPCODE_RXVLAN | MSK_STATUS_OPCODE_RXCKSUM)
#define MSK_STATUS_OPCODE_RXTIMEVLAN	\
	(MSK_STATUS_OPCODE_RXVLAN | MSK_STATUS_OPCODE_RXTIMESTAMP)
#define MSK_STATUS_OPCODE_RSS_HASH	0x65
#define MSK_STATUS_OPCODE_TXIDX		0x68
#define MSK_STATUS_OPCODE_MACSEC	0x6c
#define MSK_STATUS_OPCODE_PUTIDX	0x70

/* Sub-fields of an RXSTAT status entry: receive port, length, status. */
#define MSK_STATUS_RXSTAT_PORT_SHIFT	48
#define MSK_STATUS_RXSTAT_PORT_MASK	0x1
#define MSK_STATUS_RXSTAT_LEN_SHIFT	32
#define MSK_STATUS_RXSTAT_LEN_MASK	0xffff
#define MSK_STATUS_RXSTAT_STATUS_SHIFT	0
#define MSK_STATUS_RXSTAT_STATUS_MASK	0xffffffff

#define MSK_STATUS_RXSTAT_PORT(_d) \
	(((_d) >> MSK_STATUS_RXSTAT_PORT_SHIFT) & MSK_STATUS_RXSTAT_PORT_MASK)
#define MSK_STATUS_RXSTAT_LEN(_d) \
	(((_d) >> MSK_STATUS_RXSTAT_LEN_SHIFT) & MSK_STATUS_RXSTAT_LEN_MASK)
#define MSK_STATUS_RXSTAT_STATUS(_d) \
	(((_d) >> MSK_STATUS_RXSTAT_STATUS_SHIFT) & MSK_STATUS_RXSTAT_STATUS_MASK)

/* Sub-fields of a TXIDX status entry: tx consumer index for each port. */
#define MSK_STATUS_TXIDX_PORTA_SHIFT	0
#define MSK_STATUS_TXIDX_PORTA_MASK	0xfff
#define MSK_STATUS_TXIDX_PORTB_SHIFT	24
#define MSK_STATUS_TXIDX_PORTB_MASK	0xfff

#define MSK_STATUS_TXIDX_PORTA(_d) \
	(((_d) >> MSK_STATUS_TXIDX_PORTA_SHIFT) & MSK_STATUS_TXIDX_PORTA_MASK)
#define MSK_STATUS_TXIDX_PORTB(_d) \
	(((_d) >> MSK_STATUS_TXIDX_PORTB_SHIFT) & MSK_STATUS_TXIDX_PORTB_MASK)

/* Function prototypes. */
int mskc_probe(struct device *, void *, void *);
void mskc_attach(struct device *, struct device *self, void *aux);
int mskc_detach(struct device *, int);
int mskc_activate(struct device *, int);
void mskc_reset(struct sk_softc *);
int msk_probe(struct device *, void *, void *);
void msk_attach(struct device *, struct device *self, void *aux);
int msk_detach(struct device *, int);
int msk_activate(struct device *, int);
void msk_reset(struct sk_if_softc *);
int mskcprint(void *, const char *);
int msk_intr(void *);
void msk_intr_yukon(struct sk_if_softc *);
static inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
void msk_rxeof(struct sk_if_softc *, struct mbuf_list *, uint16_t, uint32_t);
void msk_txeof(struct sk_if_softc *, unsigned int);
static unsigned int msk_encap(struct sk_if_softc *, struct mbuf *, uint32_t);
void msk_start(struct ifnet *);
int msk_ioctl(struct ifnet *, u_long, caddr_t);
void msk_init(void *);
void msk_init_yukon(struct sk_if_softc *);
void msk_stop(struct sk_if_softc *, int);
void msk_watchdog(struct ifnet *);
int msk_ifmedia_upd(struct ifnet *);
void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int msk_newbuf(struct sk_if_softc *);
int msk_init_rx_ring(struct sk_if_softc *);
int msk_init_tx_ring(struct sk_if_softc *);
void msk_fill_rx_ring(struct sk_if_softc *);

int msk_miibus_readreg(struct device *, int, int);
void msk_miibus_writereg(struct device *, int, int, int);
void msk_miibus_statchg(struct device *);

void msk_iff(struct sk_if_softc *);
void msk_tick(void *);
void msk_fill_rx_tick(void *);

#ifdef MSK_DEBUG
#define DPRINTF(x)	if (mskdebug) printf x
#define DPRINTFN(n,x)	if (mskdebug >= (n)) printf x
int	mskdebug = 0;

void msk_dump_txdesc(struct msk_tx_desc *, int);
void msk_dump_mbuf(struct mbuf *);
void msk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#if NKSTAT > 0
/*
 * Description of one MAC statistics counter exported via kstat:
 * counter name, register offset, value width and unit.
 */
struct msk_mib {
	const char		*name;
	uint32_t		 reg;
	enum kstat_kv_type	 type;
	enum kstat_kv_unit	 unit;
};

#define C32	KSTAT_KV_T_COUNTER32
#define C64	KSTAT_KV_T_COUNTER64

#define PKTS	KSTAT_KV_U_PACKETS
#define BYTES	KSTAT_KV_U_BYTES
#define NONE	KSTAT_KV_U_NONE

static const struct msk_mib msk_mib[] = {
	{ "InUnicasts",		0x100,	C32,	PKTS },
	{ "InBroadcasts",	0x108,	C32,	PKTS },
	{ "InPause",		0x110,	C32,	PKTS },
	{ "InMulticasts",	0x118,	C32,	PKTS },
	{ "InFCSErr",		0x120,	C32,	PKTS },
	{ "InGoodOctets",	0x130,	C64,	BYTES },
	{ "InBadOctets",	0x140,	C64,	BYTES },
	{ "Undersize",		0x150,	C32,	PKTS },
	{ "Fragments",		0x158,	C32,	PKTS },
	{ "In64Octets",		0x160,	C32,	PKTS },
	{ "In127Octets",	0x168,	C32,	PKTS },
	{ "In255Octets",	0x170,	C32,	PKTS },
	{ "In511Octets",	0x178,	C32,	PKTS },
	{ "In1023Octets",	0x180,	C32,	PKTS },
	{ "In1518Octets",	0x188,	C32,	PKTS },
	{ "InMaxOctets",	0x190,	C32,	PKTS },
	{ "OverSize",		0x198,	C32,	PKTS },
	{ "Jabber",		0x1a8,	C32,	PKTS },
	{ "Overflow",		0x1b0,	C32,	PKTS },

	{ "OutUnicasts",	0x1c0,	C32,	PKTS },
	{ "OutBroadcasts",	0x1c8,	C32,	PKTS },
	{ "OutPause",		0x1d0,	C32,	PKTS },
	{ "OutMulticasts",	0x1d8,	C32,	PKTS },
	{ "OutOctets",		0x1e0,	C64,	BYTES },
	{ "Out64Octets",	0x1f0,	C32,	PKTS },
	{ "Out127Octets",	0x1f8,	C32,	PKTS },
	{ "Out255Octets",	0x200,	C32,	PKTS },
	{ "Out511Octets",	0x208,	C32,	PKTS },
	{ "Out1023Octets",	0x210,	C32,	PKTS },
	{ "Out1518Octets",	0x218,	C32,	PKTS },
	{ "OutMaxOctets",	0x220,	C32,	PKTS },
	{ "Collisions",		0x230,	C32,	NONE },
	{ "Late",		0x238,	C32,	NONE },
	{ "Excessive",		0x240,	C32,	PKTS },
	{ "Multiple",		0x248,	C32,	PKTS },
	{ "Single",		0x250,	C32,	PKTS },
	{ "Underflow",		0x258,	C32,	PKTS },
};

#undef C32
#undef C64

#undef PKTS
#undef BYTES
#undef NONE

/* Per-interface kstat state: the kstat itself plus a lock serializing reads. */
struct msk_kstat {
	struct rwlock		 lock;
	struct kstat		*ks;
};

static uint32_t msk_mib_read32(struct sk_if_softc *, uint32_t);
static uint64_t msk_mib_read64(struct sk_if_softc *, uint32_t);

void msk_kstat_attach(struct sk_if_softc *);
void msk_kstat_detach(struct sk_if_softc *);
int msk_kstat_read(struct kstat *ks);
#endif

/* supported device vendors */
const struct pci_matchid mskc_devices[] = {
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE550SX },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE550T_B1 },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560SX },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560T },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8021CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8021X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8022CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8022X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8035 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8038 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8039 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8040 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8040T },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8042 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8048 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8050 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8052 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8053 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8055 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8055_2 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8056 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8057 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8058 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8059 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8061CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8061X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8062CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8062X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8070 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8071 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8072 },
	{ 
PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8075 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8079 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C032 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C033 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C034 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C042 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9EXX },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9SXX }
};

/* Thin wrappers around the CSR_* register window accessors. */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}

/*
 * Read a PHY register through the Yukon SMI interface.  Polls up to
 * SK_TIMEOUT for the read-valid bit; returns 0 if the PHY never
 * becomes ready.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}

/*
 * Write a PHY register through the Yukon SMI interface.  Polls up to
 * SK_TIMEOUT for the busy bit to clear; logs a warning on timeout.
 */
void
msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}

/*
 * MII status-change callback: propagate the negotiated (or forced)
 * speed/duplex into the GMAC port control register.  When the media is
 * not autoselect (or on Yukon FE+, which always takes this path), the
 * speed, duplex and flow control are forced by hand.
 */
void
msk_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	int gpcr;

	gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
	/* Keep only the transmit/receive enable bits. */
	gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);

	if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO ||
	    sc_if->sk_softc->sk_type == SK_YUKON_FE_P) {
		/* Set speed. */
		gpcr |= YU_GPCR_SPEED_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
			break;
		case IFM_100_TX:
			gpcr |= YU_GPCR_SPEED;
			break;
		}

		/* Set duplex. */
		gpcr |= YU_GPCR_DPLX_DIS;
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			gpcr |= YU_GPCR_DUPLEX;

		/* Disable flow control. */
		gpcr |= YU_GPCR_FCTL_DIS;
		gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
	}

	SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);

	DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}

/*
 * Program the GMAC receive filter: station address matching plus a
 * 64-bit multicast hash (or catch-all when promiscuous / too many
 * multicast ranges).
 */
void
msk_iff(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	u_int16_t rcr;
	int h;

	rcr = SK_YU_READ_2(sc_if, YUKON_RCR);
	rcr &= ~(YU_RCR_MUFLEN | YU_RCR_UFLEN);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 */
	rcr |= YU_RCR_UFLEN;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rcr &= ~YU_RCR_UFLEN;
		else
			rcr |= YU_RCR_MUFLEN;
		/* Accept everything: set all hash bits. */
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rcr |= YU_RCR_MUFLEN;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* The 64-bit hash is split across four 16-bit registers. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, rcr);
}

/*
 * Initialize the receive descriptor ring: entry 0 becomes an ADDR64
 * (high-address) descriptor owned by the hardware, then the ring is
 * populated via msk_fill_rx_ring().
 */
int
msk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_rx_desc *r;

	memset(rd->sk_rx_ring, 0, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);

	r = &rd->sk_rx_ring[0];
	r->sk_addr = htole32(0);
	r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_ADDR64;

	sc_if->sk_cdata.sk_rx_prod = 1;
	sc_if->sk_cdata.sk_rx_cons = 0;
	sc_if->sk_cdata.sk_rx_hiaddr = 0;

	/*
	 * up to two ring entries per packet, so the effective ring size is
	 * halved
	 */
	if_rxr_init(&sc_if->sk_cdata.sk_rx_ring, 2, (MSK_RX_RING_CNT/2) - 1);

	msk_fill_rx_ring(sc_if);
	return (0);
}

/*
 * Initialize the transmit descriptor ring and create one DMA map per
 * slot.  Entry 0 becomes an ADDR64 descriptor owned by the hardware.
 * Returns ENOBUFS if a DMA map cannot be created.
 */
int
msk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_tx_desc *t;
	int i;

	memset(rd->sk_tx_ring, 0, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, sc_if->sk_pktlen,
		    SK_NTXSEG, sc_if->sk_pktlen, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &sc_if->sk_cdata.sk_tx_maps[i]))
			return (ENOBUFS);
	}

	t = &rd->sk_tx_ring[0];
	t->sk_addr = htole32(0);
	t->sk_opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_ADDR64;

	sc_if->sk_cdata.sk_tx_prod = 1;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_hiaddr = 0;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Allocate an mbuf cluster, DMA-map it, and attach it to the rx ring at
 * the current producer index.  When the buffer's high 32 address bits
 * differ from the last ones given to the chip, an extra ADDR64
 * descriptor is inserted first.  Returns 1 on success, 0 on failure
 * (allocation or mapping).
 */
static int
msk_newbuf(struct sk_if_softc *sc_if)
{
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_rx_desc *r;
	struct mbuf *m;
	bus_dmamap_t map;
	uint64_t addr;
	uint32_t prod, head;
	uint32_t hiaddr;
	unsigned int pktlen = sc_if->sk_pktlen + ETHER_ALIGN;

	m = MCLGETL(NULL, M_DONTWAIT, pktlen);
	if (m == NULL)
		return (0);
	m->m_len = m->m_pkthdr.len = pktlen;
	/* Keep the payload's IP header aligned. */
	m_adj(m, ETHER_ALIGN);

	prod = sc_if->sk_cdata.sk_rx_prod;
	map = sc_if->sk_cdata.sk_rx_maps[prod];

	if (bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (0);
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	head = prod;

	/* high 32 bits of address */
	addr = map->dm_segs[0].ds_addr;
	hiaddr = addr >> 32;
	if (sc_if->sk_cdata.sk_rx_hiaddr != hiaddr) {
		r = &rd->sk_rx_ring[prod];
		htolem32(&r->sk_addr, hiaddr);
		r->sk_len = htole16(0);
		r->sk_ctl = 0;
		r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_ADDR64;

		sc_if->sk_cdata.sk_rx_hiaddr = hiaddr;

		SK_INC(prod, MSK_RX_RING_CNT);
	}

	r = &rd->sk_rx_ring[prod];
	htolem32(&r->sk_addr, addr);
	htolem16(&r->sk_len, map->dm_segs[0].ds_len);
	r->sk_ctl = 0;
	r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_PACKET;

	/* The map used for the packet lives at the packet descriptor slot. */
	sc_if->sk_cdata.sk_rx_maps[head] = sc_if->sk_cdata.sk_rx_maps[prod];
	sc_if->sk_cdata.sk_rx_maps[prod] = map;

	sc_if->sk_cdata.sk_rx_mbuf[prod] = m;

	SK_INC(prod, MSK_RX_RING_CNT);
	sc_if->sk_cdata.sk_rx_prod = prod;

	return (1);
}

/*
 * Set media options.
 */
int
msk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_mediachg(&sc_if->sk_mii);
	return (0);
}

/*
 * Report current media status.
 */
void
msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_pollstat(&sc_if->sk_mii);
	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
}

/*
 * Interface ioctl handler.  Runs at splnet; an ENETRESET from any of
 * the sub-handlers is translated into a receive filter reload via
 * msk_iff() when the interface is running.
 */
int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			msk_init(sc_if);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				msk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if, 0);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, sc_if->sk_pktlen, &sc_if->sk_cdata.sk_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			msk_iff(sc_if);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
mskc_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices,
	    nitems(mskc_devices)));
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
void
mskc_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks, reg1;
	int reg;
	unsigned int i;

	DPRINTFN(2, ("mskc_reset\n"));

	/* Software and master reset, then release both. */
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);

	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	sk_win_write_1(sc, SK_TESTCTL1, 2);

	if (sc->sk_type == SK_YUKON_EC_U || sc->sk_type == SK_YUKON_EX ||
	    sc->sk_type >= SK_YUKON_FE_P) {
		/* enable all clocks. */
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG3), 0);
		reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4));
		reg1 &= (SK_Y2_REG4_FORCE_ASPM_REQUEST|
		    SK_Y2_REG4_ASPM_GPHY_LINK_DOWN|
		    SK_Y2_REG4_ASPM_INT_FIFO_EMPTY|
		    SK_Y2_REG4_ASPM_CLKRUN_REQUEST);
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4), reg1);

		reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5));
		reg1 &= SK_Y2_REG5_TIM_VMAIN_AV_MASK;
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5), reg1);
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_CFGREG1), 0);

		/*
		 * Disable status race, workaround for Yukon EC Ultra &
		 * Yukon EX.
		 */
		reg1 = sk_win_read_4(sc, SK_GPIO);
		reg1 |= SK_Y2_GPIO_STAT_RACE_DIS;
		sk_win_write_4(sc, SK_GPIO, reg1);
		sk_win_read_4(sc, SK_GPIO);
	}

	reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	else
		reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);

	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		sk_win_write_1(sc, SK_Y2_CLKGATE,
		    SK_Y2_CLKGATE_LINK1_GATE_DIS |
		    SK_Y2_CLKGATE_LINK2_GATE_DIS |
		    SK_Y2_CLKGATE_LINK1_CORE_DIS |
		    SK_Y2_CLKGATE_LINK2_CORE_DIS |
		    SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
	else
		sk_win_write_1(sc, SK_Y2_CLKGATE, 0);

	/* Reset both links (second port lives one register window up). */
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
	DELAY(1000);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_YUKON_EX || sc->sk_type == SK_YUKON_SUPR) {
		CSR_WRITE_2(sc, SK_GMAC_CTRL, SK_GMAC_BYP_MACSECRX |
		    SK_GMAC_BYP_MACSECTX | SK_GMAC_BYP_RETR_FIFO);
	}

	sk_win_write_1(sc, SK_TESTCTL1, 1);

	DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	for (reg = SK_TO0; reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);
	sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
	for (reg = SK_TO0; reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
	case SK_YUKON_EX:
	case SK_YUKON_SUPR:
	case SK_YUKON_ULTRA2:
	case SK_YUKON_OPTIMA:
	case SK_YUKON_PRM:
	case SK_YUKON_OPTIMA2:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_FE_P:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE_P;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}

	/* Reset status ring. */
	for (i = 0; i < MSK_STATUS_RING_CNT; i++)
		sc->sk_status_ring[i] = htole64(0);
	sc->sk_status_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	/* Point the status BMU at the DMA-mapped status ring. */
	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16);

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000));
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(20));
#else
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4));
#endif

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);
}

/*
 * Match the per-port attach arguments: accept only ports A/B on the
 * Yukon-2 family chip types this driver supports.
 */
int
msk_probe(struct device *parent, void *match, void *aux)
{
	struct skc_attach_args *sa = aux;

	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
		return (0);

	switch (sa->skc_type) {
	case SK_YUKON_XL:
	case SK_YUKON_EC_U:
	case SK_YUKON_EX:
	case SK_YUKON_EC:
	case SK_YUKON_FE:
	case SK_YUKON_FE_P:
	case SK_YUKON_SUPR:
	case SK_YUKON_ULTRA2:
	case SK_YUKON_OPTIMA:
	case SK_YUKON_PRM:
	case SK_YUKON_OPTIMA2:
		return (1);
	}

	return (0);
}

void
msk_reset(struct sk_if_softc *sc_if)
{
	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0,
SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR); 955 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 956 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 957 } 958 959 /* 960 * Each XMAC chip is attached as a separate logical IP interface. 961 * Single port cards will have only one logical interface of course. 962 */ 963 void 964 msk_attach(struct device *parent, struct device *self, void *aux) 965 { 966 struct sk_if_softc *sc_if = (struct sk_if_softc *)self; 967 struct sk_softc *sc = (struct sk_softc *)parent; 968 struct skc_attach_args *sa = aux; 969 struct ifnet *ifp; 970 caddr_t kva; 971 int i; 972 u_int32_t chunk; 973 int mii_flags; 974 int error; 975 976 sc_if->sk_port = sa->skc_port; 977 sc_if->sk_softc = sc; 978 sc->sk_if[sa->skc_port] = sc_if; 979 980 DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port)); 981 982 /* 983 * Get station address for this interface. Note that 984 * dual port cards actually come with three station 985 * addresses: one for each port, plus an extra. The 986 * extra one is used by the SysKonnect driver software 987 * as a 'virtual' station address for when both ports 988 * are operating in failover mode. Currently we don't 989 * use this extra address. 990 */ 991 for (i = 0; i < ETHER_ADDR_LEN; i++) 992 sc_if->arpcom.ac_enaddr[i] = 993 sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i); 994 995 printf(": address %s\n", 996 ether_sprintf(sc_if->arpcom.ac_enaddr)); 997 998 /* 999 * Set up RAM buffer addresses. The Yukon2 has a small amount 1000 * of SRAM on it, somewhere between 4K and 48K. We need to 1001 * divide this up between the transmitter and receiver. We 1002 * give the receiver 2/3 of the memory (rounded down), and the 1003 * transmitter whatever remains. 
1004 */ 1005 chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff; 1006 sc_if->sk_rx_ramstart = 0; 1007 sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1; 1008 chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk; 1009 sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1; 1010 sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1; 1011 1012 DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n" 1013 " tx_ramstart=%#x tx_ramend=%#x\n", 1014 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend, 1015 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend)); 1016 1017 /* Allocate the descriptor queues. */ 1018 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data), 1019 PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg, 1020 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 1021 printf(": can't alloc rx buffers\n"); 1022 goto fail; 1023 } 1024 if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg, 1025 sc_if->sk_ring_nseg, 1026 sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) { 1027 printf(": can't map dma buffers (%lu bytes)\n", 1028 (ulong)sizeof(struct msk_ring_data)); 1029 goto fail_1; 1030 } 1031 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1, 1032 sizeof(struct msk_ring_data), 0, 1033 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 1034 &sc_if->sk_ring_map)) { 1035 printf(": can't create dma map\n"); 1036 goto fail_2; 1037 } 1038 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva, 1039 sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) { 1040 printf(": can't load dma map\n"); 1041 goto fail_3; 1042 } 1043 sc_if->sk_rdata = (struct msk_ring_data *)kva; 1044 1045 if (sc->sk_type != SK_YUKON_FE && 1046 sc->sk_type != SK_YUKON_FE_P) 1047 sc_if->sk_pktlen = SK_JLEN; 1048 else 1049 sc_if->sk_pktlen = MCLBYTES; 1050 1051 for (i = 0; i < MSK_RX_RING_CNT; i++) { 1052 if ((error = bus_dmamap_create(sc->sc_dmatag, 1053 sc_if->sk_pktlen, 1, sc_if->sk_pktlen, 0, 1054 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 1055 
&sc_if->sk_cdata.sk_rx_maps[i])) != 0) { 1056 printf("\n%s: unable to create rx DMA map %d, " 1057 "error = %d\n", sc->sk_dev.dv_xname, i, error); 1058 goto fail_4; 1059 } 1060 } 1061 1062 ifp = &sc_if->arpcom.ac_if; 1063 ifp->if_softc = sc_if; 1064 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1065 ifp->if_ioctl = msk_ioctl; 1066 ifp->if_start = msk_start; 1067 ifp->if_watchdog = msk_watchdog; 1068 if (sc->sk_type != SK_YUKON_FE && 1069 sc->sk_type != SK_YUKON_FE_P) 1070 ifp->if_hardmtu = SK_JUMBO_MTU; 1071 ifq_init_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1072 bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1073 1074 ifp->if_capabilities = IFCAP_VLAN_MTU; 1075 1076 msk_reset(sc_if); 1077 1078 /* 1079 * Do miibus setup. 1080 */ 1081 msk_init_yukon(sc_if); 1082 1083 DPRINTFN(2, ("msk_attach: 1\n")); 1084 1085 sc_if->sk_mii.mii_ifp = ifp; 1086 sc_if->sk_mii.mii_readreg = msk_miibus_readreg; 1087 sc_if->sk_mii.mii_writereg = msk_miibus_writereg; 1088 sc_if->sk_mii.mii_statchg = msk_miibus_statchg; 1089 1090 ifmedia_init(&sc_if->sk_mii.mii_media, 0, 1091 msk_ifmedia_upd, msk_ifmedia_sts); 1092 mii_flags = MIIF_DOPAUSE; 1093 if (sc->sk_fibertype) 1094 mii_flags |= MIIF_HAVEFIBER; 1095 mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0, 1096 MII_OFFSET_ANY, mii_flags); 1097 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) { 1098 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname); 1099 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL, 1100 0, NULL); 1101 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL); 1102 } else 1103 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO); 1104 1105 timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if); 1106 timeout_set(&sc_if->sk_tick_rx, msk_fill_rx_tick, sc_if); 1107 1108 /* 1109 * Call MI attach routines. 
1110 */ 1111 if_attach(ifp); 1112 ether_ifattach(ifp); 1113 1114 #if NKSTAT > 0 1115 msk_kstat_attach(sc_if); 1116 #endif 1117 1118 DPRINTFN(2, ("msk_attach: end\n")); 1119 return; 1120 1121 fail_4: 1122 for (i = 0; i < MSK_RX_RING_CNT; i++) { 1123 if (sc_if->sk_cdata.sk_rx_maps[i] != NULL) 1124 bus_dmamap_destroy(sc->sc_dmatag, 1125 sc_if->sk_cdata.sk_rx_maps[i]); 1126 } 1127 1128 fail_3: 1129 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1130 fail_2: 1131 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data)); 1132 fail_1: 1133 bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg); 1134 fail: 1135 sc->sk_if[sa->skc_port] = NULL; 1136 } 1137 1138 int 1139 msk_detach(struct device *self, int flags) 1140 { 1141 struct sk_if_softc *sc_if = (struct sk_if_softc *)self; 1142 struct sk_softc *sc = sc_if->sk_softc; 1143 struct ifnet *ifp= &sc_if->arpcom.ac_if; 1144 1145 if (sc->sk_if[sc_if->sk_port] == NULL) 1146 return (0); 1147 1148 msk_stop(sc_if, 1); 1149 1150 #if NKSTAT > 0 1151 msk_kstat_detach(sc_if); 1152 #endif 1153 1154 /* Detach any PHYs we might have. */ 1155 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL) 1156 mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1157 1158 /* Delete any remaining media. 
*/ 1159 ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY); 1160 1161 ether_ifdetach(ifp); 1162 if_detach(ifp); 1163 1164 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata, 1165 sizeof(struct msk_ring_data)); 1166 bus_dmamem_free(sc->sc_dmatag, 1167 &sc_if->sk_ring_seg, sc_if->sk_ring_nseg); 1168 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1169 sc->sk_if[sc_if->sk_port] = NULL; 1170 1171 return (0); 1172 } 1173 1174 int 1175 msk_activate(struct device *self, int act) 1176 { 1177 struct sk_if_softc *sc_if = (void *)self; 1178 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1179 int rv = 0; 1180 1181 switch (act) { 1182 case DVACT_RESUME: 1183 msk_reset(sc_if); 1184 if (ifp->if_flags & IFF_RUNNING) 1185 msk_init(sc_if); 1186 break; 1187 default: 1188 rv = config_activate_children(self, act); 1189 break; 1190 } 1191 return (rv); 1192 } 1193 1194 int 1195 mskcprint(void *aux, const char *pnp) 1196 { 1197 struct skc_attach_args *sa = aux; 1198 1199 if (pnp) 1200 printf("msk port %c at %s", 1201 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp); 1202 else 1203 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B'); 1204 return (UNCONF); 1205 } 1206 1207 /* 1208 * Attach the interface. Allocate softc structures, do ifmedia 1209 * setup and ethernet/BPF attach. 1210 */ 1211 void 1212 mskc_attach(struct device *parent, struct device *self, void *aux) 1213 { 1214 struct sk_softc *sc = (struct sk_softc *)self; 1215 struct pci_attach_args *pa = aux; 1216 struct skc_attach_args skca; 1217 pci_chipset_tag_t pc = pa->pa_pc; 1218 pcireg_t memtype; 1219 pci_intr_handle_t ih; 1220 const char *intrstr = NULL; 1221 u_int8_t hw, pmd; 1222 char *revstr = NULL; 1223 caddr_t kva; 1224 1225 DPRINTFN(2, ("begin mskc_attach\n")); 1226 1227 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 1228 1229 /* 1230 * Map control/status registers. 
1231 */ 1232 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM); 1233 if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag, 1234 &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) { 1235 printf(": can't map mem space\n"); 1236 return; 1237 } 1238 1239 sc->sc_dmatag = pa->pa_dmat; 1240 1241 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1242 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4); 1243 1244 /* bail out here if chip is not recognized */ 1245 if (!(SK_IS_YUKON2(sc))) { 1246 printf(": unknown chip type: %d\n", sc->sk_type); 1247 goto fail_1; 1248 } 1249 DPRINTFN(2, ("mskc_attach: allocate interrupt\n")); 1250 1251 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MARVELL) { 1252 switch (PCI_PRODUCT(pa->pa_id)) { 1253 case PCI_PRODUCT_MARVELL_YUKON_8036: 1254 case PCI_PRODUCT_MARVELL_YUKON_8053: 1255 pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED; 1256 } 1257 } 1258 1259 /* Allocate interrupt */ 1260 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 1261 printf(": couldn't map interrupt\n"); 1262 goto fail_1; 1263 } 1264 1265 intrstr = pci_intr_string(pc, ih); 1266 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc, 1267 self->dv_xname); 1268 if (sc->sk_intrhand == NULL) { 1269 printf(": couldn't establish interrupt"); 1270 if (intrstr != NULL) 1271 printf(" at %s", intrstr); 1272 printf("\n"); 1273 goto fail_1; 1274 } 1275 sc->sk_pc = pc; 1276 1277 if (bus_dmamem_alloc(sc->sc_dmatag, 1278 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1279 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1280 0, &sc->sk_status_seg, 1, &sc->sk_status_nseg, 1281 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 1282 printf(": can't alloc status buffers\n"); 1283 goto fail_2; 1284 } 1285 1286 if (bus_dmamem_map(sc->sc_dmatag, 1287 &sc->sk_status_seg, sc->sk_status_nseg, 1288 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1289 &kva, BUS_DMA_NOWAIT)) { 1290 printf(": can't map dma buffers (%zu bytes)\n", 1291 MSK_STATUS_RING_CNT * sizeof(uint64_t)); 1292 goto fail_3; 1293 } 1294 if 
(bus_dmamap_create(sc->sc_dmatag, 1295 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1, 1296 MSK_STATUS_RING_CNT * sizeof(uint64_t), 0, 1297 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 1298 &sc->sk_status_map)) { 1299 printf(": can't create dma map\n"); 1300 goto fail_4; 1301 } 1302 if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva, 1303 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1304 NULL, BUS_DMA_NOWAIT)) { 1305 printf(": can't load dma map\n"); 1306 goto fail_5; 1307 } 1308 sc->sk_status_ring = (uint64_t *)kva; 1309 1310 /* Reset the adapter. */ 1311 mskc_reset(sc); 1312 1313 sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM0) * 4096; 1314 DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024)); 1315 1316 pmd = sk_win_read_1(sc, SK_PMDTYPE); 1317 if (pmd == 'L' || pmd == 'S' || pmd == 'P') 1318 sc->sk_fibertype = 1; 1319 1320 switch (sc->sk_type) { 1321 case SK_YUKON_XL: 1322 sc->sk_name = "Yukon-2 XL"; 1323 break; 1324 case SK_YUKON_EC_U: 1325 sc->sk_name = "Yukon-2 EC Ultra"; 1326 break; 1327 case SK_YUKON_EX: 1328 sc->sk_name = "Yukon-2 Extreme"; 1329 break; 1330 case SK_YUKON_EC: 1331 sc->sk_name = "Yukon-2 EC"; 1332 break; 1333 case SK_YUKON_FE: 1334 sc->sk_name = "Yukon-2 FE"; 1335 break; 1336 case SK_YUKON_FE_P: 1337 sc->sk_name = "Yukon-2 FE+"; 1338 break; 1339 case SK_YUKON_SUPR: 1340 sc->sk_name = "Yukon-2 Supreme"; 1341 break; 1342 case SK_YUKON_ULTRA2: 1343 sc->sk_name = "Yukon-2 Ultra 2"; 1344 break; 1345 case SK_YUKON_OPTIMA: 1346 sc->sk_name = "Yukon-2 Optima"; 1347 break; 1348 case SK_YUKON_PRM: 1349 sc->sk_name = "Yukon-2 Optima Prime"; 1350 break; 1351 case SK_YUKON_OPTIMA2: 1352 sc->sk_name = "Yukon-2 Optima 2"; 1353 break; 1354 default: 1355 sc->sk_name = "Yukon (Unknown)"; 1356 } 1357 1358 if (sc->sk_type == SK_YUKON_XL) { 1359 switch (sc->sk_rev) { 1360 case SK_YUKON_XL_REV_A0: 1361 revstr = "A0"; 1362 break; 1363 case SK_YUKON_XL_REV_A1: 1364 revstr = "A1"; 1365 break; 1366 case SK_YUKON_XL_REV_A2: 1367 revstr = "A2"; 1368 
break; 1369 case SK_YUKON_XL_REV_A3: 1370 revstr = "A3"; 1371 break; 1372 default: 1373 ; 1374 } 1375 } 1376 1377 if (sc->sk_type == SK_YUKON_EC) { 1378 switch (sc->sk_rev) { 1379 case SK_YUKON_EC_REV_A1: 1380 revstr = "A1"; 1381 break; 1382 case SK_YUKON_EC_REV_A2: 1383 revstr = "A2"; 1384 break; 1385 case SK_YUKON_EC_REV_A3: 1386 revstr = "A3"; 1387 break; 1388 default: 1389 ; 1390 } 1391 } 1392 1393 if (sc->sk_type == SK_YUKON_EC_U) { 1394 switch (sc->sk_rev) { 1395 case SK_YUKON_EC_U_REV_A0: 1396 revstr = "A0"; 1397 break; 1398 case SK_YUKON_EC_U_REV_A1: 1399 revstr = "A1"; 1400 break; 1401 case SK_YUKON_EC_U_REV_B0: 1402 revstr = "B0"; 1403 break; 1404 case SK_YUKON_EC_U_REV_B1: 1405 revstr = "B1"; 1406 break; 1407 default: 1408 ; 1409 } 1410 } 1411 1412 if (sc->sk_type == SK_YUKON_FE) { 1413 switch (sc->sk_rev) { 1414 case SK_YUKON_FE_REV_A1: 1415 revstr = "A1"; 1416 break; 1417 case SK_YUKON_FE_REV_A2: 1418 revstr = "A2"; 1419 break; 1420 default: 1421 ; 1422 } 1423 } 1424 1425 if (sc->sk_type == SK_YUKON_FE_P && sc->sk_rev == SK_YUKON_FE_P_REV_A0) 1426 revstr = "A0"; 1427 1428 if (sc->sk_type == SK_YUKON_EX) { 1429 switch (sc->sk_rev) { 1430 case SK_YUKON_EX_REV_A0: 1431 revstr = "A0"; 1432 break; 1433 case SK_YUKON_EX_REV_B0: 1434 revstr = "B0"; 1435 break; 1436 default: 1437 ; 1438 } 1439 } 1440 1441 if (sc->sk_type == SK_YUKON_SUPR) { 1442 switch (sc->sk_rev) { 1443 case SK_YUKON_SUPR_REV_A0: 1444 revstr = "A0"; 1445 break; 1446 case SK_YUKON_SUPR_REV_B0: 1447 revstr = "B0"; 1448 break; 1449 case SK_YUKON_SUPR_REV_B1: 1450 revstr = "B1"; 1451 break; 1452 default: 1453 ; 1454 } 1455 } 1456 1457 if (sc->sk_type == SK_YUKON_PRM) { 1458 switch (sc->sk_rev) { 1459 case SK_YUKON_PRM_REV_Z1: 1460 revstr = "Z1"; 1461 break; 1462 case SK_YUKON_PRM_REV_A0: 1463 revstr = "A0"; 1464 break; 1465 default: 1466 ; 1467 } 1468 } 1469 1470 /* Announce the product name. */ 1471 printf(", %s", sc->sk_name); 1472 if (revstr != NULL) 1473 printf(" rev. 
%s", revstr); 1474 printf(" (0x%x): %s\n", sc->sk_rev, intrstr); 1475 1476 sc->sk_macs = 1; 1477 1478 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1479 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1480 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1481 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1482 sc->sk_macs++; 1483 } 1484 1485 skca.skc_port = SK_PORT_A; 1486 skca.skc_type = sc->sk_type; 1487 skca.skc_rev = sc->sk_rev; 1488 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1489 1490 if (sc->sk_macs > 1) { 1491 skca.skc_port = SK_PORT_B; 1492 skca.skc_type = sc->sk_type; 1493 skca.skc_rev = sc->sk_rev; 1494 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1495 } 1496 1497 /* Turn on the 'driver is loaded' LED. */ 1498 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1499 1500 return; 1501 1502 fail_4: 1503 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1504 MSK_STATUS_RING_CNT * sizeof(uint64_t)); 1505 fail_3: 1506 bus_dmamem_free(sc->sc_dmatag, 1507 &sc->sk_status_seg, sc->sk_status_nseg); 1508 sc->sk_status_nseg = 0; 1509 fail_5: 1510 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1511 fail_2: 1512 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1513 sc->sk_intrhand = NULL; 1514 fail_1: 1515 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize); 1516 sc->sk_bsize = 0; 1517 } 1518 1519 int 1520 mskc_detach(struct device *self, int flags) 1521 { 1522 struct sk_softc *sc = (struct sk_softc *)self; 1523 int rv; 1524 1525 if (sc->sk_intrhand) 1526 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1527 1528 rv = config_detach_children(self, flags); 1529 if (rv != 0) 1530 return (rv); 1531 1532 if (sc->sk_status_nseg > 0) { 1533 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1534 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1535 MSK_STATUS_RING_CNT * sizeof(uint64_t)); 1536 bus_dmamem_free(sc->sc_dmatag, 1537 &sc->sk_status_seg, sc->sk_status_nseg); 1538 } 1539 1540 if (sc->sk_bsize > 0) 1541 bus_space_unmap(sc->sk_btag, 
	    sc->sk_bhandle, sc->sk_bsize);

	return(0);
}

/*
 * Power management hook for the controller: reset the chip on resume
 * before propagating the event to the msk children.
 */
int
mskc_activate(struct device *self, int act)
{
	struct sk_softc *sc = (void *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		mskc_reset(sc);
		rv = config_activate_children(self, act);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * Load an mbuf into the tx ring at slot "prod".  Returns the number of
 * ring entries consumed, or 0 if the mbuf could not be mapped (the
 * caller frees it).  An extra ADDR64 descriptor is emitted whenever a
 * segment's upper 32 address bits differ from the last value given to
 * the chip.
 */
static unsigned int
msk_encap(struct sk_if_softc *sc_if, struct mbuf *m, uint32_t prod)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_tx_desc *t;
	bus_dmamap_t map;
	uint64_t addr;
	uint32_t hiaddr;
	uint32_t next, last;
	uint8_t opcode;
	unsigned int entries = 0;
	int i;

	map = sc_if->sk_cdata.sk_tx_maps[prod];

	switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		/* Compact the chain into fewer segments and retry once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* First data descriptor is PACKET; the rest are BUFFER. */
	opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_PACKET;
	next = prod;
	for (i = 0; i < map->dm_nsegs; i++) {
		/* high 32 bits of address */
		addr = map->dm_segs[i].ds_addr;
		hiaddr = addr >> 32;
		if (sc_if->sk_cdata.sk_tx_hiaddr != hiaddr) {
			t = &rd->sk_tx_ring[next];
			htolem32(&t->sk_addr, hiaddr);
			t->sk_opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_ADDR64;

			sc_if->sk_cdata.sk_tx_hiaddr = hiaddr;

			SK_INC(next, MSK_TX_RING_CNT);
			entries++;
		}

		/* low 32 bits of address + length */
		t = &rd->sk_tx_ring[next];
		htolem32(&t->sk_addr, addr);
		htolem16(&t->sk_len, map->dm_segs[i].ds_len);
		t->sk_ctl = 0;
		t->sk_opcode = opcode;

		last = next;
		SK_INC(next, MSK_TX_RING_CNT);
		entries++;

		opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_BUFFER;
	}
	/* Mark the final data descriptor as end-of-frame. */
	t->sk_ctl = SK_Y2_TXCTL_LASTFRAG;

	/*
	 * Swap the map for slot "prod" with the one for "last" so the
	 * loaded map is found (and unloaded) when the chip reports the
	 * last descriptor done; the mbuf is stored at "last" too.
	 */
	sc_if->sk_cdata.sk_tx_maps[prod] = sc_if->sk_cdata.sk_tx_maps[last];
	sc_if->sk_cdata.sk_tx_maps[last] = map;
	sc_if->sk_cdata.sk_tx_mbuf[last] = m;

	return (entries);
}

/*
 * ifnet start routine: drain the send queue into the tx ring and kick
 * the prefetch unit's put index.
 */
void
msk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct mbuf *m = NULL;
	uint32_t prod, free, used;
	int post = 0;

	/* Compute how many ring slots are free (cons to prod, modulo). */
	prod = sc_if->sk_cdata.sk_tx_prod;
	free = sc_if->sk_cdata.sk_tx_cons;
	if (free <= prod)
		free += MSK_TX_RING_CNT;
	free -= prod;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Worst case per packet is SK_NTXSEG segments plus
		 * ADDR64 descriptors; stop with margin. */
		if (free <= SK_NTXSEG * 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		used = msk_encap(sc_if, m, prod);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		free -= used;
		prod += used;
		prod &= MSK_TX_RING_CNT - 1;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		post = 1;
	}

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE);

	if (post == 0)
		return;

	/* Transmit */
	sc_if->sk_cdata.sk_tx_prod = prod;
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, prod);

	/* Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = MSK_TX_TIMEOUT;
}

/*
 * Watchdog: if descriptors are still outstanding when the tx timer
 * expires, assume the chip wedged and reinitialize.
 */
void
msk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	if (sc_if->sk_cdata.sk_tx_prod != sc_if->sk_cdata.sk_tx_cons) {
		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

		ifp->if_oerrors++;

		/* XXX Resets both ports; we shouldn't do that. */
		mskc_reset(sc_if->sk_softc);
		msk_reset(sc_if);
		msk_init(sc_if);
	}
}

/*
 * Sanity-check a receive status word: no error bits set, RXOK set,
 * and the length in the status word matching the reported length.
 */
static inline int
msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
{
	if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
	    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
	    YU_RXSTAT_JABBER)) != 0 ||
	    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
	    YU_RXSTAT_BYTES(stat) != len)
		return (0);

	return (1);
}

/*
 * Handle one RXSTAT status-ring event: find the next rx slot that
 * holds an mbuf, unload it, validate it, and queue it on ml for input.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, struct mbuf_list *ml,
    uint16_t len, uint32_t rxstat)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf *m = NULL;
	int prod, cons, tail;
	bus_dmamap_t map;

	prod = sc_if->sk_cdata.sk_rx_prod;
	cons = sc_if->sk_cdata.sk_rx_cons;

	/* Skip slots with no mbuf (e.g. slots used by ADDR64
	 * descriptors). */
	while (cons != prod) {
		tail = cons;
		SK_INC(cons, MSK_RX_RING_CNT);

		m = sc_if->sk_cdata.sk_rx_mbuf[tail];
		if (m != NULL) {
			/* found it */
			break;
		}
	}
	sc_if->sk_cdata.sk_rx_cons = cons;

	if (m == NULL) {
		/* maybe if ADDR64 is consumed? */
		return;
	}

	sc_if->sk_cdata.sk_rx_mbuf[tail] = NULL;

	map = sc_if->sk_cdata.sk_rx_maps[tail];
	if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, 1);

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, map);

	if (len < SK_MIN_FRAMELEN || len > SK_JUMBO_FRAMELEN ||
	    msk_rxvalid(sc, rxstat, len) == 0) {
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}

	m->m_pkthdr.len = m->m_len = len;

	ml_enqueue(ml, m);
}

/*
 * Reclaim tx descriptors up to the chip-reported index "prod",
 * unloading DMA maps and freeing mbufs for completed frames.
 */
void
msk_txeof(struct sk_if_softc *sc_if, unsigned int prod)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_softc *sc = sc_if->sk_softc;
	uint32_t cons;
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->sk_cdata.sk_tx_cons;

	if (cons == prod)
		return;

	while (cons != prod) {
		/* Only the frame's last slot carries the mbuf; other
		 * slots are intermediate descriptors. */
		m = sc_if->sk_cdata.sk_tx_mbuf[cons];
		if (m != NULL) {
			sc_if->sk_cdata.sk_tx_mbuf[cons] = NULL;

			map = sc_if->sk_cdata.sk_tx_maps[cons];
			bus_dmamap_sync(sc->sc_dmatag, map, 0,
			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, map);

			m_freem(m);
		}

		SK_INC(cons, MSK_TX_RING_CNT);
	}
	/* Ring fully drained: disarm the watchdog. */
	if (cons == sc_if->sk_cdata.sk_tx_prod)
		ifp->if_timer = 0;

	sc_if->sk_cdata.sk_tx_cons = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

/*
 * Refill the rx ring with fresh mbufs, bounded by the rxr slot
 * accounting (at most half the ring per call).
 */
void
msk_fill_rx_ring(struct sk_if_softc *sc_if)
{
	u_int slots, used;

	slots = if_rxr_get(&sc_if->sk_cdata.sk_rx_ring, MSK_RX_RING_CNT/2);

	MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_POSTWRITE);	/* XXX */
	while (slots > 0) {
		used = msk_newbuf(sc_if);
		if (used == 0)
			break;

		slots -= used;
	}
	MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_PREWRITE);	/* XXX */

	/* Return unused slots; if the ring is completely empty, retry
	 * from a timeout since no rx interrupt will come. */
	if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, slots);
	if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0)
		timeout_add(&sc_if->sk_tick_rx, 1);
}

/*
 * Timeout handler: retry filling an rx ring that ran completely dry,
 * and kick the prefetch put index if anything was posted.
 */
void
msk_fill_rx_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	int s;

	s = splnet();
	if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0) {
		msk_fill_rx_ring(sc_if);
		SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if->sk_cdata.sk_rx_prod);
	}
	splx(s);
}

/*
 * Once-a-second tick: drive the MII state machine.
 */
void
msk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);
	timeout_add_sec(&sc_if->sk_tick_ch, 1);
}

/*
 * Per-port GMAC interrupt: acknowledge rx overrun / tx underrun
 * conditions in the MAC FIFOs.
 */
void
msk_intr_yukon(struct sk_if_softc *sc_if)
{
	u_int8_t status;

	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}

	DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
}

/*
 * Main interrupt handler: dispatch MAC interrupts, then walk the
 * shared status ring consuming RXSTAT/TXSTAT events for both ports,
 * and finally hand completed packets to the stack and refill rx.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct mbuf_list ml[2] = {
		MBUF_LIST_INITIALIZER(),
		MBUF_LIST_INITIALIZER(),
	};
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	int claimed = 0;
	u_int32_t status;
	uint64_t *ring = sc->sk_status_ring;
	uint64_t desc;

	/* 0xffffffff means the device is gone; 0 means not ours. */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0xffffffff)
		return (0);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status = CSR_READ_4(sc, SK_ISR);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
	    (ifp0->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if0);
	}

	if (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
	    (ifp1->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if1);
	}

	MSK_CDSTSYNC(sc, sc->sk_status_idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* Consume status descriptors while the chip owns bit says
	 * they are valid. */
	while (MSK_STATUS_OWN(desc = lemtoh64(&ring[sc->sk_status_idx]))) {
		unsigned int opcode, port;

		ring[sc->sk_status_idx] = htole64(0); /* clear ownership */

		opcode = MSK_STATUS_OPCODE(desc);
		switch (opcode) {
		case MSK_STATUS_OPCODE_RXSTAT:
			port = MSK_STATUS_RXSTAT_PORT(desc);
			msk_rxeof(sc->sk_if[port], &ml[port],
			    MSK_STATUS_RXSTAT_LEN(desc),
			    MSK_STATUS_RXSTAT_STATUS(desc));
			break;
		case SK_Y2_STOPC_TXSTAT:
			/* One TXSTAT carries indexes for both ports. */
			if (sc_if0) {
				msk_txeof(sc_if0,
				    MSK_STATUS_TXIDX_PORTA(desc));
			}
			if (sc_if1) {
				msk_txeof(sc_if1,
				    MSK_STATUS_TXIDX_PORTB(desc));
			}
			break;
		default:
			printf("opcode=0x%x\n", opcode);
			break;
		}

		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);
	}

	MSK_CDSTSYNC(sc, sc->sk_status_idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Hand received packets to the stack, then refill and kick
	 * the prefetch unit per port. */
	if (!ml_empty(&ml[0])) {
		if (ifiq_input(&ifp0->if_rcv, &ml[0]))
			if_rxr_livelocked(&sc_if0->sk_cdata.sk_rx_ring);
		msk_fill_rx_ring(sc_if0);
		SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if0->sk_cdata.sk_rx_prod);
	}
	if (!ml_empty(&ml[1])) {
		if (ifiq_input(&ifp1->if_rcv, &ml[1]))
			if_rxr_livelocked(&sc_if1->sk_cdata.sk_rx_ring);
		msk_fill_rx_ring(sc_if1);
		SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if1->sk_cdata.sk_rx_prod);
	}

	return (claimed);
}

/*
 * Program the GMAC for one port: MIB counters, receive/transmit
 * parameters, station address and MAC FIFOs.
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) |
	      YU_SMR_MFL_VLAN |
	      YU_SMR_IPG_DATA(0x1e);

	/* FE/FE+ can't accept jumbo frames. */
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		reg |= YU_SMR_MFL_JUMBO;

	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/*
 Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Second station address, read back from the chip's own copy. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Program promiscuous mode and multicast filters */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_iff(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
	    SK_RFCTL_FIFO_FLUSH_ON);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
void
msk_init(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	DPRINTFN(2, ("msk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	msk_stop(sc_if, 0);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	mii_mediachg(mii);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600);

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/* Init descriptors */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if, 0);
		splx(s);
		return;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if, 0);
		splx(s);
		return;
	}

	/* Initialize prefetch engine. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);

	/*
	 * tell the chip the tx ring is empty for now. the first
	 * msk_start will end up posting the ADDR64 tx descriptor
	 * that resets the high address.
	 */
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, 0);

	/* Configure interrupt handling */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}

void
msk_stop(struct sk_if_softc *sc_if, int softonly)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf *m;
	bus_dmamap_t map;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);
	timeout_del(&sc_if->sk_tick_rx);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	if (!softonly) {
		/* Turn off various components of this interface. */
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

		/* Disable interrupts */
		if (sc_if->sk_port == SK_PORT_A)
			sc->sk_intrmask &= ~SK_Y2_INTRS1;
		else
			sc->sk_intrmask &= ~SK_Y2_INTRS2;
		CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
	}

	/* Free RX and TX mbufs still in the queues.
*/ 2271 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2272 m = sc_if->sk_cdata.sk_rx_mbuf[i]; 2273 if (m == NULL) 2274 continue; 2275 2276 map = sc_if->sk_cdata.sk_rx_maps[i]; 2277 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 2278 BUS_DMASYNC_POSTREAD); 2279 bus_dmamap_unload(sc->sc_dmatag, map); 2280 2281 m_freem(m); 2282 2283 sc_if->sk_cdata.sk_rx_mbuf[i] = NULL; 2284 } 2285 2286 sc_if->sk_cdata.sk_rx_prod = 0; 2287 sc_if->sk_cdata.sk_rx_cons = 0; 2288 2289 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2290 m = sc_if->sk_cdata.sk_tx_mbuf[i]; 2291 if (m == NULL) 2292 continue; 2293 2294 map = sc_if->sk_cdata.sk_tx_maps[i]; 2295 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 2296 BUS_DMASYNC_POSTREAD); 2297 bus_dmamap_unload(sc->sc_dmatag, map); 2298 2299 m_freem(m); 2300 2301 sc_if->sk_cdata.sk_tx_mbuf[i] = NULL; 2302 } 2303 } 2304 2305 const struct cfattach mskc_ca = { 2306 sizeof(struct sk_softc), mskc_probe, mskc_attach, mskc_detach, 2307 mskc_activate 2308 }; 2309 2310 struct cfdriver mskc_cd = { 2311 NULL, "mskc", DV_DULL 2312 }; 2313 2314 const struct cfattach msk_ca = { 2315 sizeof(struct sk_if_softc), msk_probe, msk_attach, msk_detach, 2316 msk_activate 2317 }; 2318 2319 struct cfdriver msk_cd = { 2320 NULL, "msk", DV_IFNET 2321 }; 2322 2323 #if NKSTAT > 0 2324 static uint32_t 2325 msk_mib_read32(struct sk_if_softc *sc_if, uint32_t r) 2326 { 2327 uint16_t hi, lo, xx; 2328 2329 hi = SK_YU_READ_2(sc_if, r + 4); 2330 for (;;) { 2331 /* XXX barriers? 
*/ 2332 lo = SK_YU_READ_2(sc_if, r); 2333 xx = SK_YU_READ_2(sc_if, r + 4); 2334 2335 if (hi == xx) 2336 break; 2337 2338 hi = xx; 2339 } 2340 2341 return (((uint32_t)hi << 16) | (uint32_t) lo); 2342 } 2343 2344 static uint64_t 2345 msk_mib_read64(struct sk_if_softc *sc_if, uint32_t r) 2346 { 2347 uint32_t hi, lo, xx; 2348 2349 hi = msk_mib_read32(sc_if, r + 8); 2350 for (;;) { 2351 lo = msk_mib_read32(sc_if, r); 2352 xx = msk_mib_read32(sc_if, r + 8); 2353 2354 if (hi == xx) 2355 break; 2356 2357 hi = xx; 2358 } 2359 2360 return (((uint64_t)hi << 32) | (uint64_t)lo); 2361 } 2362 2363 void 2364 msk_kstat_attach(struct sk_if_softc *sc_if) 2365 { 2366 struct kstat *ks; 2367 struct kstat_kv *kvs; 2368 struct msk_kstat *mks; 2369 size_t i; 2370 2371 ks = kstat_create(sc_if->sk_dev.dv_xname, 0, "msk-mib", 0, 2372 KSTAT_T_KV, 0); 2373 if (ks == NULL) { 2374 /* oh well */ 2375 return; 2376 } 2377 2378 mks = malloc(sizeof(*mks), M_DEVBUF, M_WAITOK); 2379 rw_init(&mks->lock, "mskstat"); 2380 mks->ks = ks; 2381 2382 kvs = mallocarray(nitems(msk_mib), sizeof(*kvs), 2383 M_DEVBUF, M_WAITOK|M_ZERO); 2384 for (i = 0; i < nitems(msk_mib); i++) { 2385 const struct msk_mib *m = &msk_mib[i]; 2386 kstat_kv_unit_init(&kvs[i], m->name, m->type, m->unit); 2387 } 2388 2389 ks->ks_softc = sc_if; 2390 ks->ks_data = kvs; 2391 ks->ks_datalen = nitems(msk_mib) * sizeof(*kvs); 2392 ks->ks_read = msk_kstat_read; 2393 kstat_set_wlock(ks, &mks->lock); 2394 2395 kstat_install(ks); 2396 2397 sc_if->sk_kstat = mks; 2398 } 2399 2400 void 2401 msk_kstat_detach(struct sk_if_softc *sc_if) 2402 { 2403 struct msk_kstat *mks = sc_if->sk_kstat; 2404 struct kstat_kv *kvs; 2405 size_t kvslen; 2406 2407 if (mks == NULL) 2408 return; 2409 2410 sc_if->sk_kstat = NULL; 2411 2412 kvs = mks->ks->ks_data; 2413 kvslen = mks->ks->ks_datalen; 2414 2415 kstat_destroy(mks->ks); 2416 free(kvs, M_DEVBUF, kvslen); 2417 free(mks, M_DEVBUF, sizeof(*mks)); 2418 } 2419 2420 int 2421 msk_kstat_read(struct kstat *ks) 2422 { 2423 
struct sk_if_softc *sc_if = ks->ks_softc; 2424 struct kstat_kv *kvs = ks->ks_data; 2425 size_t i; 2426 2427 nanouptime(&ks->ks_updated); 2428 2429 for (i = 0; i < nitems(msk_mib); i++) { 2430 const struct msk_mib *m = &msk_mib[i]; 2431 2432 switch (m->type) { 2433 case KSTAT_KV_T_COUNTER32: 2434 kstat_kv_u32(&kvs[i]) = msk_mib_read32(sc_if, m->reg); 2435 break; 2436 case KSTAT_KV_T_COUNTER64: 2437 kstat_kv_u64(&kvs[i]) = msk_mib_read64(sc_if, m->reg); 2438 break; 2439 default: 2440 panic("unexpected msk_mib type"); 2441 /* NOTREACHED */ 2442 } 2443 } 2444 2445 return (0); 2446 } 2447 #endif /* NKSTAT */ 2448 2449 #ifdef MSK_DEBUG 2450 void 2451 msk_dump_txdesc(struct msk_tx_desc *le, int idx) 2452 { 2453 #define DESC_PRINT(X) \ 2454 if (X) \ 2455 printf("txdesc[%d]." #X "=%#x\n", \ 2456 idx, X); 2457 2458 DESC_PRINT(letoh32(le->sk_addr)); 2459 DESC_PRINT(letoh16(le->sk_len)); 2460 DESC_PRINT(le->sk_ctl); 2461 DESC_PRINT(le->sk_opcode); 2462 #undef DESC_PRINT 2463 } 2464 2465 void 2466 msk_dump_bytes(const char *data, int len) 2467 { 2468 int c, i, j; 2469 2470 for (i = 0; i < len; i += 16) { 2471 printf("%08x ", i); 2472 c = len - i; 2473 if (c > 16) c = 16; 2474 2475 for (j = 0; j < c; j++) { 2476 printf("%02x ", data[i + j] & 0xff); 2477 if ((j & 0xf) == 7 && j > 0) 2478 printf(" "); 2479 } 2480 2481 for (; j < 16; j++) 2482 printf(" "); 2483 printf(" "); 2484 2485 for (j = 0; j < c; j++) { 2486 int ch = data[i + j] & 0xff; 2487 printf("%c", ' ' <= ch && ch <= '~' ? ch : ' '); 2488 } 2489 2490 printf("\n"); 2491 2492 if (c < 16) 2493 break; 2494 } 2495 } 2496 2497 void 2498 msk_dump_mbuf(struct mbuf *m) 2499 { 2500 int count = m->m_pkthdr.len; 2501 2502 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len); 2503 2504 while (count > 0 && m) { 2505 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n", 2506 m, m->m_data, m->m_len); 2507 msk_dump_bytes(mtod(m, char *), m->m_len); 2508 2509 count -= m->m_len; 2510 m = m->m_next; 2511 } 2512 } 2513 #endif 2514