/******************************************************************************
 *
 * Name    : sky2.c
 * Project : Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version : $Revision: 1.23 $
 * Date    : $Date: 2005/12/22 09:04:11 $
 * Purpose : Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
/* $DragonFly: src/sys/dev/netif/msk/if_msk.c,v 1.1 2007/12/26 14:02:36 sephe Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Unknown",
	"Yukon EC",
	"Yukon FE"
};

static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static void	msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	msk_dmamap_mbuf_cb(void *, bus_dma_segment_t *, int,
				   bus_size_t, int);
static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int);
static struct mbuf *
		msk_defrag(struct mbuf *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void	msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_setmulti(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_setpromisc(struct msk_if_softc *);

#ifdef notyet
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
#endif

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc_if->msk_link = 1;
	} else
		sc_if->msk_link = 0;

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
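		/*
		 * Note: this assumes the mii(4) PHY driver sets IFM_FLAG0
		 * and IFM_FLAG1 in the active media word when Rx and Tx
		 * pause, respectively, were negotiated; the tests below
		 * rely on that convention.
		 */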
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	}
}

static void
msk_setmulti(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	mode |= GM_RXCR_UCF_ENA;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			mchash[0] = 0xffff;
			mchash[1] = 0xffff;
		}
	} else {
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		mode |= GM_RXCR_MCF_ENA;
	}

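	/*
	 * The 64-bit multicast hash filter is held in mchash[] as two
	 * 32-bit words; the GMAC exposes it as four 16-bit registers,
	 * so split each word in half below.
	 */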
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static void
msk_setpromisc(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREWRITE);

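	/*
	 * Writing the put index below hands the freshly initialized
	 * descriptors to the chip's prefetch unit (programmed in
	 * msk_set_prefetch()), which fetches list elements ahead of
	 * the receive BMU.
	 */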
	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	if (bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, msk_dmamap_mbuf_cb, &ctx,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(ctx.nseg == 1,
	    ("%s: %d segments returned!", __func__, ctx.nseg));

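	/*
	 * The replacement mbuf was loaded into the spare map above, so
	 * a failed load leaves the ring slot untouched.  Now swap the
	 * spare map with the slot's map and recycle the old map as the
	 * new spare.
	 */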
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control =
	    htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
#ifdef MSK_JUMBO
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
#else
		error = EOPNOTSUPP;
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & IFF_PROMISC) != 0) {
					msk_setpromisc(sc_if);
					msk_setmulti(sc_if);
				}
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_setmulti(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
#ifdef notyet
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
#endif

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;
	uint8_t val;

	/* Get adapter SRAM size. */
	val = CSR_READ_1(sc, B2_E_0);
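	/* B2_E_0 counts the SRAM in 4KB blocks; 0 means the maximum, 128KB. */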
	sc->msk_ramsize = (val == 0) ? 128 : val * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	/*
	 * Give the receiver 2/3 of the memory and round down to a
	 * multiple of 1024.  The Tx/Rx RAM buffer sizes of the Yukon II
	 * should be multiples of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			uint32_t our;

			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
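		/*
		 * Like the GPHY reset above, the GMAC reset below is
		 * pulsed by writing the SET bit followed by the CLR bit.
		 */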
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual-port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;
		uint8_t pcix;

		pcix = pci_get_pcixcap_ptr(sc->msk_dev);

		pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~0x70;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width) {
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
		}
	}

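	/*
	 * The status list is the ring through which the chip posts Rx/Tx
	 * completion events (status LEs); mskc_handle_events() consumes
	 * them.
	 */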
	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc->msk_if[port] = sc_if;

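	/*
	 * Each port owns its own bank of queue registers: Q_XAx/Q_XSx
	 * are the asynchronous/synchronous transmit queues and Q_Rx is
	 * the receive queue of port x.
	 */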
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled because
	 * the hardware has a serious bug in Rx checksum offload on all
	 * Yukon II family hardware.  There seems to be a workaround that
	 * makes it work sometimes, but the workaround also has to check
	 * OP code sequences to verify whether the OP code is correct,
	 * and sometimes the driver must compute the IP/TCP/UDP checksum
	 * itself to verify the checksum computed by the hardware.  If you
	 * have to compute the checksum in software to verify the
	 * hardware's checksum, why have the hardware compute the checksum
	 * at all?  I think there is no reason to spend time making Rx
	 * checksum offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
#if 0
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif

	return 0;
fail:
	msk_detach(dev);
	sc->msk_if[port] = NULL;
	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

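		/*
		 * Bringing the chip from D3 back to D0 may clear these
		 * registers, so save the BARs and the interrupt line
		 * here and restore them after the power state change.
		 */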
		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
	    &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
		    &sc->msk_res_rid, RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->msk_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

#if 0
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");
#endif

	sc->msk_process_limit = MSK_PROC_DEFAULT;

#if 0
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}
#endif

	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
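	/*
	 * PMD types 'L' (1000BASE-LX) and 'S' (1000BASE-SX) indicate
	 * fiber media; any other type is treated as copper.
	 */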
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_is_pcix(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
	    mskc_intr, sc, &sc->msk_intrhand, &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	return 0;
fail:
	mskc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct msk_softc *sc = sc_if->msk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		if (sc->msk_intrhand != NULL) {
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_A]);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_B]);

			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
			    sc->msk_intrhand);
			sc->msk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->msk_miibus != NULL)
		device_delete_child(dev, sc_if->msk_miibus);

	msk_txrx_dma_free(sc_if);
	return (0);
}

static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int *port, i;

#ifdef INVARIANTS
	if (device_is_attached(dev)) {
		KASSERT(sc->msk_intrhand == NULL,
		    ("intr is not torn down yet\n"));
	}
#endif

	for (i = 0; i < sc->msk_num_port; ++i) {
		if (sc->msk_devs[i] != NULL) {
			port = device_get_ivars(sc->msk_devs[i]);
			if (port != NULL) {
				kfree(port, M_DEVBUF);
				device_set_ivars(sc->msk_devs[i], NULL);
			}
			device_delete_child(dev, sc->msk_devs[i]);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	mskc_status_dma_free(sc);

	if (sc->msk_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
		    sc->msk_irq);
	}
	if (sc->msk_res != NULL) {
		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
		    sc->msk_res);
	}

	return (0);
}

static void
msk_dmamap_mbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg,
		   bus_size_t mapsz __unused, int error)
{
	struct msk_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	if (ctx->nseg < nseg) {
		ctx->nseg = 0;
		return;
	}

	ctx->nseg = nseg;
	for (i = 0; i < ctx->nseg; ++i)
		ctx->segs[i] = segs[i];
}

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	KKASSERT(nseg <= ctx->nseg);

	ctx->nseg = nseg;
	for (i = 0; i < ctx->nseg; ++i)
		ctx->segs[i] = segs[i];
}

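/*
 * Note that the two callbacks above report failure differently:
 * msk_dmamap_mbuf_cb() signals "more segments than the caller can
 * accept" by zeroing ctx->nseg, which callers must check, while
 * msk_dmamap_cb() simply asserts that the segment count fits.
 */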
/* Create status DMA region. */
static int
mskc_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	error = bus_dma_tag_create(
	    NULL,			/* XXX parent */
	    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_STAT_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_STAT_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->msk_stat_tag);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
	    (void **)&sc->msk_stat_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->msk_stat_map);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
		return (error);
	}

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
	    sc->msk_stat_ring, MSK_STAT_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
		    sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
		return (error);
	}
	sc->msk_stat_ring_paddr = seg.ds_addr;

	return (0);
}

static void
mskc_status_dma_free(struct msk_softc *sc)
{
	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
		    sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}

static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
	int error, i, j;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
	struct msk_jpool_entry *entry;
	uint8_t *ptr;
#endif

	/* Create parent DMA tag. */
	/*
	 * XXX
	 * It seems that the Yukon II supports full 64-bit DMA operations,
	 * but it needs two descriptors (list elements) per 64-bit DMA
	 * operation.  Since we don't know in advance whether a 32-bit or
	 * 64-bit DMA mapping will be used for each mbuf, we limit the DMA
	 * space to the 32-bit address range.  Otherwise we would have to
	 * check which DMA address is used and chain another descriptor
	 * for 64-bit operations, which also means the descriptor ring
	 * size becomes variable.  Limiting DMA addresses to the 32-bit
	 * address space greatly simplifies descriptor handling and may
	 * even increase performance a bit due to more efficient handling
	 * of descriptors.  Apart from complicating the checksum offload
	 * mechanisms, it seems like a really bad idea to use a separate
	 * descriptor per 64-bit DMA operation just to save a small amount
	 * of descriptor memory.  Anyway, I've never seen such an exotic
	 * scheme on other Ethernet interface hardware.
	 */
1871  */
1872 	error = bus_dma_tag_create(
1873 	    NULL,			/* parent */
1874 	    1, 0,			/* alignment, boundary */
1875 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1876 	    BUS_SPACE_MAXADDR,		/* highaddr */
1877 	    NULL, NULL,			/* filter, filterarg */
1878 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1879 	    0,				/* nsegments */
1880 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1881 	    0,				/* flags */
1882 	    &sc_if->msk_cdata.msk_parent_tag);
1883 	if (error) {
1884 		device_printf(sc_if->msk_if_dev,
1885 		    "failed to create parent DMA tag\n");
1886 		return error;
1887 	}
1888 
1889 	/* Create DMA resources for the Tx ring. */
1890 	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1891 	    &sc_if->msk_cdata.msk_tx_ring_tag,
1892 	    (void **)&sc_if->msk_rdata.msk_tx_ring,
1893 	    &sc_if->msk_rdata.msk_tx_ring_paddr,
1894 	    &sc_if->msk_cdata.msk_tx_ring_map);
1895 	if (error) {
1896 		device_printf(sc_if->msk_if_dev,
1897 		    "failed to create Tx ring DMA resources\n");
1898 		return error;
1899 	}
1900 
1901 	/* Create DMA resources for the Rx ring. */
1902 	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1903 	    &sc_if->msk_cdata.msk_rx_ring_tag,
1904 	    (void **)&sc_if->msk_rdata.msk_rx_ring,
1905 	    &sc_if->msk_rdata.msk_rx_ring_paddr,
1906 	    &sc_if->msk_cdata.msk_rx_ring_map);
1907 	if (error) {
1908 		device_printf(sc_if->msk_if_dev,
1909 		    "failed to create Rx ring DMA resources\n");
1910 		return error;
1911 	}
1912 
1913 	/* Create tag for Tx buffers. */
1914 	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1915 	    1, 0,			/* alignment, boundary */
1916 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1917 	    BUS_SPACE_MAXADDR,		/* highaddr */
1918 	    NULL, NULL,			/* filter, filterarg */
1919 	    MSK_TSO_MAXSIZE,		/* maxsize */
1920 	    MSK_MAXTXSEGS,		/* nsegments */
1921 	    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
1922 	    0,				/* flags */
1923 	    &sc_if->msk_cdata.msk_tx_tag);
1924 	if (error) {
1925 		device_printf(sc_if->msk_if_dev,
1926 		    "failed to create Tx DMA tag\n");
1927 		return error;
1928 	}
1929 
1930 	/* Create DMA maps for Tx buffers. */
1931 	for (i = 0; i < MSK_TX_RING_CNT; i++) {
1932 		struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
1933 
1934 		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
1935 		    &txd->tx_dmamap);
1936 		if (error) {
1937 			device_printf(sc_if->msk_if_dev,
1938 			    "failed to create %dth Tx dmamap\n", i);
1939 
1940 			for (j = 0; j < i; ++j) {
1941 				txd = &sc_if->msk_cdata.msk_txdesc[j];
1942 				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
1943 				    txd->tx_dmamap);
1944 			}
1945 			bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1946 			sc_if->msk_cdata.msk_tx_tag = NULL;
1947 
1948 			return error;
1949 		}
1950 	}
1951 
1952 	/* Create tag for Rx buffers. */
1953 	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1954 	    1, 0,			/* alignment, boundary */
1955 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1956 	    BUS_SPACE_MAXADDR,		/* highaddr */
1957 	    NULL, NULL,			/* filter, filterarg */
1958 	    MCLBYTES,			/* maxsize */
1959 	    1,				/* nsegments */
1960 	    MCLBYTES,			/* maxsegsize */
1961 	    0,				/* flags */
1962 	    &sc_if->msk_cdata.msk_rx_tag);
1963 	if (error) {
1964 		device_printf(sc_if->msk_if_dev,
1965 		    "failed to create Rx DMA tag\n");
1966 		return error;
1967 	}
1968 
1969 	/* Create DMA maps for Rx buffers.
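 * A spare map is created first so that the Rx refill path can load a
 * new mbuf into it and swap maps on success; a failed load then leaves
 * the descriptor's old, still-mapped buffer intact (see the reuse
 * fallback in msk_rxeof()).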
*/ 1970 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 1971 &sc_if->msk_cdata.msk_rx_sparemap); 1972 if (error) { 1973 device_printf(sc_if->msk_if_dev, 1974 "failed to create spare Rx dmamap\n"); 1975 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 1976 sc_if->msk_cdata.msk_rx_tag = NULL; 1977 return error; 1978 } 1979 for (i = 0; i < MSK_RX_RING_CNT; i++) { 1980 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 1981 1982 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 1983 &rxd->rx_dmamap); 1984 if (error) { 1985 device_printf(sc_if->msk_if_dev, 1986 "failed to create %dth Rx dmamap\n", i); 1987 1988 for (j = 0; j < i; ++j) { 1989 rxd = &sc_if->msk_cdata.msk_rxdesc[j]; 1990 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 1991 rxd->rx_dmamap); 1992 } 1993 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 1994 sc_if->msk_cdata.msk_rx_tag = NULL; 1995 1996 return error; 1997 } 1998 } 1999 2000 #ifdef MSK_JUMBO 2001 SLIST_INIT(&sc_if->msk_jfree_listhead); 2002 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2003 2004 /* Create tag for jumbo Rx ring. */ 2005 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2006 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2007 BUS_SPACE_MAXADDR, /* lowaddr */ 2008 BUS_SPACE_MAXADDR, /* highaddr */ 2009 NULL, NULL, /* filter, filterarg */ 2010 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2011 1, /* nsegments */ 2012 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2013 0, /* flags */ 2014 NULL, NULL, /* lockfunc, lockarg */ 2015 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2016 if (error != 0) { 2017 device_printf(sc_if->msk_if_dev, 2018 "failed to create jumbo Rx ring DMA tag\n"); 2019 goto fail; 2020 } 2021 2022 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2023 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2024 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2025 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2026 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2027 if (error != 0) { 2028 device_printf(sc_if->msk_if_dev, 2029 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2030 goto fail; 2031 } 2032 2033 ctx.msk_busaddr = 0; 2034 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2035 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2036 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2037 msk_dmamap_cb, &ctx, 0); 2038 if (error != 0) { 2039 device_printf(sc_if->msk_if_dev, 2040 "failed to load DMA'able memory for jumbo Rx ring\n"); 2041 goto fail; 2042 } 2043 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2044 2045 /* Create tag for jumbo buffer blocks. */ 2046 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2047 PAGE_SIZE, 0, /* alignment, boundary */ 2048 BUS_SPACE_MAXADDR, /* lowaddr */ 2049 BUS_SPACE_MAXADDR, /* highaddr */ 2050 NULL, NULL, /* filter, filterarg */ 2051 MSK_JMEM, /* maxsize */ 2052 1, /* nsegments */ 2053 MSK_JMEM, /* maxsegsize */ 2054 0, /* flags */ 2055 NULL, NULL, /* lockfunc, lockarg */ 2056 &sc_if->msk_cdata.msk_jumbo_tag); 2057 if (error != 0) { 2058 device_printf(sc_if->msk_if_dev, 2059 "failed to create jumbo Rx buffer block DMA tag\n"); 2060 goto fail; 2061 } 2062 2063 /* Create tag for jumbo Rx buffers. 
*/ 2064 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2065 PAGE_SIZE, 0, /* alignment, boundary */ 2066 BUS_SPACE_MAXADDR, /* lowaddr */ 2067 BUS_SPACE_MAXADDR, /* highaddr */ 2068 NULL, NULL, /* filter, filterarg */ 2069 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2070 MSK_MAXRXSEGS, /* nsegments */ 2071 MSK_JLEN, /* maxsegsize */ 2072 0, /* flags */ 2073 NULL, NULL, /* lockfunc, lockarg */ 2074 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2075 if (error != 0) { 2076 device_printf(sc_if->msk_if_dev, 2077 "failed to create jumbo Rx DMA tag\n"); 2078 goto fail; 2079 } 2080 2081 /* Create DMA maps for jumbo Rx buffers. */ 2082 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2083 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2084 device_printf(sc_if->msk_if_dev, 2085 "failed to create spare jumbo Rx dmamap\n"); 2086 goto fail; 2087 } 2088 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2089 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2090 jrxd->rx_m = NULL; 2091 jrxd->rx_dmamap = NULL; 2092 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2093 &jrxd->rx_dmamap); 2094 if (error != 0) { 2095 device_printf(sc_if->msk_if_dev, 2096 "failed to create jumbo Rx dmamap\n"); 2097 goto fail; 2098 } 2099 } 2100 2101 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ 2102 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2103 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2104 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2105 &sc_if->msk_cdata.msk_jumbo_map); 2106 if (error != 0) { 2107 device_printf(sc_if->msk_if_dev, 2108 "failed to allocate DMA'able memory for jumbo buf\n"); 2109 goto fail; 2110 } 2111 2112 ctx.msk_busaddr = 0; 2113 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2114 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2115 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2116 if (error != 0) { 2117 device_printf(sc_if->msk_if_dev, 2118 "failed to load DMA'able memory for jumbobuf\n"); 2119 goto fail; 2120 } 2121 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2122 2123 /* 2124 * Now divide it up into 9K pieces and save the addresses 2125 * in an array. 
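 * msk_jalloc() then hands these MSK_JLEN-sized slots out from the free
 * list and msk_jfree() returns them; in-use slots are tracked on a
 * separate list so teardown can detect still-referenced buffers.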
2126  */
2127 	ptr = sc_if->msk_rdata.msk_jumbo_buf;
2128 	for (i = 0; i < MSK_JSLOTS; i++) {
2129 		sc_if->msk_cdata.msk_jslots[i] = ptr;
2130 		ptr += MSK_JLEN;
2131 		entry = kmalloc(sizeof(struct msk_jpool_entry),
2132 		    M_DEVBUF, M_WAITOK);
2133 		if (entry == NULL) {
2134 			device_printf(sc_if->msk_if_dev,
2135 			    "no memory for jumbo buffers!\n");
2136 			error = ENOMEM;
2137 			goto fail;
2138 		}
2139 		entry->slot = i;
2140 		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2141 		    jpool_entries);
2142 	}
2143 #endif
2144 	return 0;
2145 }
2146 
2147 static void
2148 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2149 {
2150 	struct msk_txdesc *txd;
2151 	struct msk_rxdesc *rxd;
2152 #ifdef MSK_JUMBO
2153 	struct msk_rxdesc *jrxd;
2154 	struct msk_jpool_entry *entry;
2155 #endif
2156 	int i;
2157 
2158 #ifdef MSK_JUMBO
2159 	MSK_JLIST_LOCK(sc_if);
2160 	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2161 		device_printf(sc_if->msk_if_dev,
2162 		    "asked to free buffer that is in use!\n");
2163 		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2164 		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2165 		    jpool_entries);
2166 	}
2167 
2168 	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2169 		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2170 		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2171 		kfree(entry, M_DEVBUF);
2172 	}
2173 	MSK_JLIST_UNLOCK(sc_if);
2174 
2175 	/* Destroy jumbo buffer block. */
2176 	if (sc_if->msk_cdata.msk_jumbo_map)
2177 		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2178 		    sc_if->msk_cdata.msk_jumbo_map);
2179 
2180 	if (sc_if->msk_rdata.msk_jumbo_buf) {
2181 		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2182 		    sc_if->msk_rdata.msk_jumbo_buf,
2183 		    sc_if->msk_cdata.msk_jumbo_map);
2184 		sc_if->msk_rdata.msk_jumbo_buf = NULL;
2185 		sc_if->msk_cdata.msk_jumbo_map = NULL;
2186 	}
2187 
2188 	/* Jumbo Rx ring. */
2189 	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2190 		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2191 			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2192 			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2193 		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2194 		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2195 			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2196 			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2197 			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2198 		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2199 		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2200 		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2201 		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2202 	}
2203 
2204 	/* Jumbo Rx buffers. */
2205 	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2206 		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2207 			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2208 			if (jrxd->rx_dmamap) {
2209 				bus_dmamap_destroy(
2210 				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2211 				    jrxd->rx_dmamap);
2212 				jrxd->rx_dmamap = NULL;
2213 			}
2214 		}
2215 		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2216 			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2217 			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2218 			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2219 		}
2220 		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2221 		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2222 	}
2223 #endif
2224 
2225 	/* Tx ring. */
2226 	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
2227 	    sc_if->msk_rdata.msk_tx_ring,
2228 	    sc_if->msk_cdata.msk_tx_ring_map);
2229 
2230 	/* Rx ring.
 */
2231 	msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
2232 	    sc_if->msk_rdata.msk_rx_ring,
2233 	    sc_if->msk_cdata.msk_rx_ring_map);
2234 
2235 	/* Tx buffers. */
2236 	if (sc_if->msk_cdata.msk_tx_tag) {
2237 		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2238 			txd = &sc_if->msk_cdata.msk_txdesc[i];
2239 			bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2240 			    txd->tx_dmamap);
2241 		}
2242 		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2243 		sc_if->msk_cdata.msk_tx_tag = NULL;
2244 	}
2245 
2246 	/* Rx buffers. */
2247 	if (sc_if->msk_cdata.msk_rx_tag) {
2248 		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2249 			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2250 			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2251 			    rxd->rx_dmamap);
2252 		}
2253 		bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2254 		    sc_if->msk_cdata.msk_rx_sparemap);
2255 		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2256 		sc_if->msk_cdata.msk_rx_tag = NULL;
2257 	}
2258 
2259 	if (sc_if->msk_cdata.msk_parent_tag) {
2260 		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2261 		sc_if->msk_cdata.msk_parent_tag = NULL;
2262 	}
2263 }
2264 
2265 #ifdef MSK_JUMBO
2266 /*
2267  * Allocate a jumbo buffer.
2268  */
2269 static void *
2270 msk_jalloc(struct msk_if_softc *sc_if)
2271 {
2272 	struct msk_jpool_entry *entry;
2273 
2274 	MSK_JLIST_LOCK(sc_if);
2275 
2276 	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2277 
2278 	if (entry == NULL) {
2279 		MSK_JLIST_UNLOCK(sc_if);
2280 		return (NULL);
2281 	}
2282 
2283 	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2284 	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2285 
2286 	MSK_JLIST_UNLOCK(sc_if);
2287 
2288 	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2289 }
2290 
2291 /*
2292  * Release a jumbo buffer.
2293  */
2294 static void
2295 msk_jfree(void *buf, void *args)
2296 {
2297 	struct msk_if_softc *sc_if;
2298 	struct msk_jpool_entry *entry;
2299 	int i;
2300 
2301 	/* Extract the softc struct pointer. */
2302 	sc_if = (struct msk_if_softc *)args;
2303 	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2304 
2305 	MSK_JLIST_LOCK(sc_if);
2306 	/* Calculate the slot this buffer belongs to. */
2307 	i = ((vm_offset_t)buf
2308 	    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2309 	KASSERT(i >= 0 && i < MSK_JSLOTS,
2310 	    ("%s: asked to free buffer that we don't manage!", __func__));
2311 
2312 	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2313 	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2314 	entry->slot = i;
2315 	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2316 	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2317 	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2318 		wakeup(sc_if);
2319 
2320 	MSK_JLIST_UNLOCK(sc_if);
2321 }
2322 #endif
2323 
2324 /*
2325  * This is a copy of ath_defrag() from ath(4).
2326  *
2327  * Defragment an mbuf chain, returning at most maxfrags separate
2328  * mbufs+clusters.  If this is not possible NULL is returned and
2329  * the original mbuf chain is left in its present (potentially
2330  * modified) state.  We use two techniques: collapsing consecutive
2331  * mbufs and replacing consecutive mbufs by a cluster.
2332  */
2333 static struct mbuf *
2334 msk_defrag(struct mbuf *m0, int how, int maxfrags)
2335 {
2336 	struct mbuf *m, *n, *n2, **prev;
2337 	u_int curfrags;
2338 
2339 	/*
2340 	 * Calculate the current number of frags.
2341 	 */
2342 	curfrags = 0;
2343 	for (m = m0; m != NULL; m = m->m_next)
2344 		curfrags++;
2345 	/*
2346 	 * First, try to collapse mbufs.
Note that we always collapse
2347 	 * towards the front so we don't need to deal with moving the
2348 	 * pkthdr.  This may be suboptimal if the first mbuf has much
2349 	 * less data than the following.
2350 	 */
2351 	m = m0;
2352 again:
2353 	for (;;) {
2354 		n = m->m_next;
2355 		if (n == NULL)
2356 			break;
2357 		if (n->m_len < M_TRAILINGSPACE(m)) {
2358 			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2359 			    n->m_len);
2360 			m->m_len += n->m_len;
2361 			m->m_next = n->m_next;
2362 			m_free(n);
2363 			if (--curfrags <= maxfrags)
2364 				return (m0);
2365 		} else
2366 			m = n;
2367 	}
2368 	KASSERT(maxfrags > 1,
2369 	    ("maxfrags %u, but normal collapse failed", maxfrags));
2370 	/*
2371 	 * Collapse consecutive mbufs to a cluster.
2372 	 */
2373 	prev = &m0->m_next;		/* NB: not the first mbuf */
2374 	while ((n = *prev) != NULL) {
2375 		if ((n2 = n->m_next) != NULL &&
2376 		    n->m_len + n2->m_len < MCLBYTES) {
2377 			m = m_getcl(how, MT_DATA, 0);
2378 			if (m == NULL)
2379 				goto bad;
2380 			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2381 			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2382 			    n2->m_len);
2383 			m->m_len = n->m_len + n2->m_len;
2384 			m->m_next = n2->m_next;
2385 			*prev = m;
2386 			m_free(n);
2387 			m_free(n2);
2388 			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
2389 				return m0;
2390 			/*
2391 			 * Still not there, try the normal collapse
2392 			 * again before we allocate another cluster.
2393 			 */
2394 			goto again;
2395 		}
2396 		prev = &n->m_next;
2397 	}
2398 	/*
2399 	 * No place where we can collapse to a cluster; punt.
2400 	 * This can occur if, for example, you request 2 frags
2401 	 * but the packet requires that both be clusters (we
2402 	 * never reallocate the first mbuf to avoid moving the
2403 	 * packet header).
2404 	 */
2405 bad:
2406 	return (NULL);
2407 }
2408 
2409 static int
2410 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2411 {
2412 	struct msk_txdesc *txd, *txd_last;
2413 	struct msk_tx_desc *tx_le;
2414 	struct mbuf *m;
2415 	bus_dmamap_t map;
2416 	struct msk_dmamap_arg ctx;
2417 	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2418 	uint32_t control, prod, si;
2419 	uint16_t offset, tcp_offset;
2420 	int error, i;
2421 
2422 	tcp_offset = offset = 0;
2423 	m = *m_head;
2424 	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2425 		/*
2426 		 * Since the mbuf carries no protocol-specific structure
2427 		 * information, we have to inspect the protocol headers here
2428 		 * to set up TSO and checksum offload.  It is unclear why
2429 		 * Marvell designed the chip this way, since other GigE
2430 		 * hardware normally takes care of all these chores in
2431 		 * hardware.  However, the TSO performance of the Yukon II
2432 		 * is good enough to make it worth implementing.
2433 		 */
2434 		struct ether_header *eh;
2435 		struct ip *ip;
2436 
2437 		/* TODO: check for M_WRITABLE(m) */
2438 
2439 		offset = sizeof(struct ether_header);
2440 		m = m_pullup(m, offset);
2441 		if (m == NULL) {
2442 			*m_head = NULL;
2443 			return (ENOBUFS);
2444 		}
2445 		eh = mtod(m, struct ether_header *);
2446 		/* Check if hardware VLAN insertion is off.
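 * An ETHERTYPE_VLAN ethertype here means the tag was inserted in
 * software, so the IP header lives behind the larger
 * ether_vlan_header instead.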
 */
2447 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2448 			offset = sizeof(struct ether_vlan_header);
2449 			m = m_pullup(m, offset);
2450 			if (m == NULL) {
2451 				*m_head = NULL;
2452 				return (ENOBUFS);
2453 			}
2454 		}
2455 		m = m_pullup(m, offset + sizeof(struct ip));
2456 		if (m == NULL) {
2457 			*m_head = NULL;
2458 			return (ENOBUFS);
2459 		}
2460 		ip = (struct ip *)(mtod(m, char *) + offset);
2461 		offset += (ip->ip_hl << 2);
2462 		tcp_offset = offset;
2463 		/*
2464 		 * The Yukon II seems to have a Tx checksum offload bug for
2465 		 * small TCP packets of less than 60 bytes, e.g. TCP window
2466 		 * probe packets and pure ACK packets.  The common
2467 		 * workaround of padding the frame with zeros up to the
2468 		 * minimum Ethernet frame size did not work at all.
2469 		 * Instead of disabling checksum offload completely we
2470 		 * resort to a software checksum routine when we encounter
2471 		 * short TCP frames.
2472 		 * Short UDP packets appear to be handled correctly by
2473 		 * the Yukon II.
2474 		 */
2475 		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2476 		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2477 			uint16_t csum;
2478 
2479 			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2480 			    (ip->ip_hl << 2), offset);
2481 			*(uint16_t *)(m->m_data + offset +
2482 			    m->m_pkthdr.csum_data) = csum;
2483 			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2484 		}
2485 		*m_head = m;
2486 	}
2487 
2488 	prod = sc_if->msk_cdata.msk_tx_prod;
2489 	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2490 	txd_last = txd;
2491 	map = txd->tx_dmamap;
2492 	bzero(&ctx, sizeof(ctx));
2493 	ctx.nseg = MSK_MAXTXSEGS;
2494 	ctx.segs = txsegs;
2495 	error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag, map,
2496 	    *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
2497 	if (error == 0 && ctx.nseg == 0) {
2498 		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2499 		error = EFBIG;
2500 	}
2501 	if (error == EFBIG) {
2502 		m = msk_defrag(*m_head, MB_DONTWAIT, MSK_MAXTXSEGS);
2503 		if (m == NULL) {
2504 			m_freem(*m_head);
2505 			*m_head = NULL;
2506 			return (ENOBUFS);
2507 		}
2508 		*m_head = m;
2509 
2510 		bzero(&ctx, sizeof(ctx));
2511 		ctx.nseg = MSK_MAXTXSEGS;
2512 		ctx.segs = txsegs;
2513 		error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag,
2514 		    map, *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
2515 		if (error == 0 && ctx.nseg == 0) {
2516 			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2517 			error = EFBIG;
2518 		}
2519 		if (error != 0) {
2520 			m_freem(*m_head);
2521 			*m_head = NULL;
2522 			return (error);
2523 		}
2524 	} else if (error != 0) {
2525 		return (error);
2526 	}
2527 
2528 	/* Check number of available descriptors. */
2529 	if (sc_if->msk_cdata.msk_tx_cnt + ctx.nseg >=
2530 	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2531 		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2532 		return (ENOBUFS);
2533 	}
2534 
2535 	control = 0;
2536 	tx_le = NULL;
2537 
2538 #ifdef notyet
2539 	/* Check if we have a VLAN tag to insert. */
2540 	if ((m->m_flags & M_VLANTAG) != 0) {
2541 		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2542 		tx_le->msk_addr = htole32(0);
2543 		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2544 		    htons(m->m_pkthdr.ether_vtag));
2545 		sc_if->msk_cdata.msk_tx_cnt++;
2546 		MSK_INC(prod, MSK_TX_RING_CNT);
2547 		control |= INS_VLAN;
2548 	}
2549 #endif
2550 	/* Check if we have to handle checksum offload.
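 * The extra list element written below passes the offset at which
 * checksumming starts (tcp_offset) and the offset at which the result
 * is stored (tcp_offset + csum_data) to the chip.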
 */
2551 	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2552 		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2553 		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2554 		    & 0xffff) | ((uint32_t)tcp_offset << 16));
2555 		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2556 		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2557 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2558 			control |= UDPTCP;
2559 		sc_if->msk_cdata.msk_tx_cnt++;
2560 		MSK_INC(prod, MSK_TX_RING_CNT);
2561 	}
2562 
2563 	si = prod;
2564 	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2565 	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2566 	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2567 	    OP_PACKET);
2568 	sc_if->msk_cdata.msk_tx_cnt++;
2569 	MSK_INC(prod, MSK_TX_RING_CNT);
2570 
2571 	for (i = 1; i < ctx.nseg; i++) {
2572 		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2573 		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2574 		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2575 		    OP_BUFFER | HW_OWNER);
2576 		sc_if->msk_cdata.msk_tx_cnt++;
2577 		MSK_INC(prod, MSK_TX_RING_CNT);
2578 	}
2579 	/* Update producer index. */
2580 	sc_if->msk_cdata.msk_tx_prod = prod;
2581 
2582 	/* Set EOP on the last descriptor. */
2583 	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2584 	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2585 	tx_le->msk_control |= htole32(EOP);
2586 
2587 	/* Hand ownership of the first descriptor over to the hardware. */
2588 	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2589 	tx_le->msk_control |= htole32(HW_OWNER);
2590 
2591 	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2592 	map = txd_last->tx_dmamap;
2593 	txd_last->tx_dmamap = txd->tx_dmamap;
2594 	txd->tx_dmamap = map;
2595 	txd->tx_m = m;
2596 
2597 	/* Sync descriptors. */
2598 	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2599 	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2600 	    sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE);
2601 
2602 	return (0);
2603 }
2604 
2605 static void
2606 msk_start(struct ifnet *ifp)
2607 {
2608 	struct msk_if_softc *sc_if;
2609 	struct mbuf *m_head;
2610 	int enq;
2611 
2612 	sc_if = ifp->if_softc;
2613 
2614 	ASSERT_SERIALIZED(ifp->if_serializer);
2615 
2616 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
2617 	    IFF_RUNNING || sc_if->msk_link == 0)
2618 		return;
2619 
2620 	for (enq = 0; !ifq_is_empty(&ifp->if_snd) &&
2621 	    sc_if->msk_cdata.msk_tx_cnt <
2622 	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2623 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
2624 		if (m_head == NULL)
2625 			break;
2626 
2627 		/*
2628 		 * Pack the data into the transmit ring.  If we
2629 		 * don't have room, set the OACTIVE flag and wait
2630 		 * for the NIC to drain the ring.
2631 		 */
2632 		if (msk_encap(sc_if, &m_head) != 0) {
2633 			if (m_head == NULL)
2634 				break;
2635 			m_freem(m_head);
2636 			ifp->if_flags |= IFF_OACTIVE;
2637 			break;
2638 		}
2639 
2640 		enq++;
2641 		/*
2642 		 * If there's a BPF listener, bounce a copy of this frame
2643 		 * to him.
2644 		 */
2645 		BPF_MTAP(ifp, m_head);
2646 	}
2647 
2648 	if (enq > 0) {
2649 		/* Transmit */
2650 		CSR_WRITE_2(sc_if->msk_softc,
2651 		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2652 		    sc_if->msk_cdata.msk_tx_prod);
2653 
2654 		/* Set a timeout in case the chip goes out to lunch.
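 * If the timer expires before the Tx completions arrive,
 * msk_watchdog() first tries to reclaim finished descriptors (a Tx
 * completion interrupt may have been lost) and reinitializes the
 * chip only as a last resort.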
*/ 2655 ifp->if_timer = MSK_TX_TIMEOUT; 2656 } 2657 } 2658 2659 static void 2660 msk_watchdog(struct ifnet *ifp) 2661 { 2662 struct msk_if_softc *sc_if = ifp->if_softc; 2663 uint32_t ridx; 2664 int idx; 2665 2666 ASSERT_SERIALIZED(ifp->if_serializer); 2667 2668 if (sc_if->msk_link == 0) { 2669 if (bootverbose) 2670 if_printf(sc_if->msk_ifp, "watchdog timeout " 2671 "(missed link)\n"); 2672 ifp->if_oerrors++; 2673 msk_init(sc_if); 2674 return; 2675 } 2676 2677 /* 2678 * Reclaim first as there is a possibility of losing Tx completion 2679 * interrupts. 2680 */ 2681 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2682 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2683 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2684 msk_txeof(sc_if, idx); 2685 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2686 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2687 "-- recovering\n"); 2688 if (!ifq_is_empty(&ifp->if_snd)) 2689 ifp->if_start(ifp); 2690 return; 2691 } 2692 } 2693 2694 if_printf(ifp, "watchdog timeout\n"); 2695 ifp->if_oerrors++; 2696 msk_init(sc_if); 2697 if (!ifq_is_empty(&ifp->if_snd)) 2698 ifp->if_start(ifp); 2699 } 2700 2701 static int 2702 mskc_shutdown(device_t dev) 2703 { 2704 struct msk_softc *sc = device_get_softc(dev); 2705 int i; 2706 2707 lwkt_serialize_enter(&sc->msk_serializer); 2708 2709 for (i = 0; i < sc->msk_num_port; i++) { 2710 if (sc->msk_if[i] != NULL) 2711 msk_stop(sc->msk_if[i]); 2712 } 2713 2714 /* Disable all interrupts. */ 2715 CSR_WRITE_4(sc, B0_IMSK, 0); 2716 CSR_READ_4(sc, B0_IMSK); 2717 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2718 CSR_READ_4(sc, B0_HWE_IMSK); 2719 2720 /* Put hardware reset. */ 2721 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2722 2723 lwkt_serialize_exit(&sc->msk_serializer); 2724 return (0); 2725 } 2726 2727 static int 2728 mskc_suspend(device_t dev) 2729 { 2730 struct msk_softc *sc = device_get_softc(dev); 2731 int i; 2732 2733 lwkt_serialize_enter(&sc->msk_serializer); 2734 2735 for (i = 0; i < sc->msk_num_port; i++) { 2736 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2737 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0)) 2738 msk_stop(sc->msk_if[i]); 2739 } 2740 2741 /* Disable all interrupts. */ 2742 CSR_WRITE_4(sc, B0_IMSK, 0); 2743 CSR_READ_4(sc, B0_IMSK); 2744 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2745 CSR_READ_4(sc, B0_HWE_IMSK); 2746 2747 mskc_phy_power(sc, MSK_PHY_POWERDOWN); 2748 2749 /* Put hardware reset. 
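 * CS_RST_SET holds the chip in reset until mskc_resume() brings it
 * back up via mskc_reset().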
*/ 2750 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2751 sc->msk_suspended = 1; 2752 2753 lwkt_serialize_exit(&sc->msk_serializer); 2754 2755 return (0); 2756 } 2757 2758 static int 2759 mskc_resume(device_t dev) 2760 { 2761 struct msk_softc *sc = device_get_softc(dev); 2762 int i; 2763 2764 lwkt_serialize_enter(&sc->msk_serializer); 2765 2766 mskc_reset(sc); 2767 for (i = 0; i < sc->msk_num_port; i++) { 2768 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2769 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2770 msk_init(sc->msk_if[i]); 2771 } 2772 sc->msk_suspended = 0; 2773 2774 lwkt_serialize_exit(&sc->msk_serializer); 2775 2776 return (0); 2777 } 2778 2779 static void 2780 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2781 { 2782 struct mbuf *m; 2783 struct ifnet *ifp; 2784 struct msk_rxdesc *rxd; 2785 int cons, rxlen; 2786 2787 ifp = sc_if->msk_ifp; 2788 2789 cons = sc_if->msk_cdata.msk_rx_cons; 2790 do { 2791 rxlen = status >> 16; 2792 if ((status & GMR_FS_VLAN) != 0 && 2793 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2794 rxlen -= EVL_ENCAPLEN; 2795 if (len > sc_if->msk_framesize || 2796 ((status & GMR_FS_ANY_ERR) != 0) || 2797 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2798 /* Don't count flow-control packet as errors. */ 2799 if ((status & GMR_FS_GOOD_FC) == 0) 2800 ifp->if_ierrors++; 2801 msk_discard_rxbuf(sc_if, cons); 2802 break; 2803 } 2804 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2805 m = rxd->rx_m; 2806 if (msk_newbuf(sc_if, cons) != 0) { 2807 ifp->if_iqdrops++; 2808 /* Reuse old buffer. */ 2809 msk_discard_rxbuf(sc_if, cons); 2810 break; 2811 } 2812 m->m_pkthdr.rcvif = ifp; 2813 m->m_pkthdr.len = m->m_len = len; 2814 ifp->if_ipackets++; 2815 #ifdef notyet 2816 /* Check for VLAN tagged packets. */ 2817 if ((status & GMR_FS_VLAN) != 0 && 2818 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2819 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2820 m->m_flags |= M_VLANTAG; 2821 } 2822 #endif 2823 ifp->if_input(ifp, m); 2824 } while (0); 2825 2826 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2827 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2828 } 2829 2830 #ifdef MSK_JUMBO 2831 static void 2832 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2833 { 2834 struct mbuf *m; 2835 struct ifnet *ifp; 2836 struct msk_rxdesc *jrxd; 2837 int cons, rxlen; 2838 2839 ifp = sc_if->msk_ifp; 2840 2841 MSK_IF_LOCK_ASSERT(sc_if); 2842 2843 cons = sc_if->msk_cdata.msk_rx_cons; 2844 do { 2845 rxlen = status >> 16; 2846 if ((status & GMR_FS_VLAN) != 0 && 2847 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2848 rxlen -= ETHER_VLAN_ENCAP_LEN; 2849 if (len > sc_if->msk_framesize || 2850 ((status & GMR_FS_ANY_ERR) != 0) || 2851 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2852 /* Don't count flow-control packet as errors. */ 2853 if ((status & GMR_FS_GOOD_FC) == 0) 2854 ifp->if_ierrors++; 2855 msk_discard_jumbo_rxbuf(sc_if, cons); 2856 break; 2857 } 2858 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2859 m = jrxd->rx_m; 2860 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2861 ifp->if_iqdrops++; 2862 /* Reuse old buffer. */ 2863 msk_discard_jumbo_rxbuf(sc_if, cons); 2864 break; 2865 } 2866 m->m_pkthdr.rcvif = ifp; 2867 m->m_pkthdr.len = m->m_len = len; 2868 ifp->if_ipackets++; 2869 /* Check for VLAN tagged packets. 
*/ 2870 if ((status & GMR_FS_VLAN) != 0 && 2871 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2872 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2873 m->m_flags |= M_VLANTAG; 2874 } 2875 MSK_IF_UNLOCK(sc_if); 2876 (*ifp->if_input)(ifp, m); 2877 MSK_IF_LOCK(sc_if); 2878 } while (0); 2879 2880 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2881 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2882 } 2883 #endif 2884 2885 static void 2886 msk_txeof(struct msk_if_softc *sc_if, int idx) 2887 { 2888 struct msk_txdesc *txd; 2889 struct msk_tx_desc *cur_tx; 2890 struct ifnet *ifp; 2891 uint32_t control; 2892 int cons, prog; 2893 2894 ifp = sc_if->msk_ifp; 2895 2896 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2897 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_POSTREAD); 2898 2899 /* 2900 * Go through our tx ring and free mbufs for those 2901 * frames that have been sent. 2902 */ 2903 cons = sc_if->msk_cdata.msk_tx_cons; 2904 prog = 0; 2905 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2906 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2907 break; 2908 prog++; 2909 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2910 control = le32toh(cur_tx->msk_control); 2911 sc_if->msk_cdata.msk_tx_cnt--; 2912 ifp->if_flags &= ~IFF_OACTIVE; 2913 if ((control & EOP) == 0) 2914 continue; 2915 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2916 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 2917 BUS_DMASYNC_POSTWRITE); 2918 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2919 2920 ifp->if_opackets++; 2921 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2922 __func__)); 2923 m_freem(txd->tx_m); 2924 txd->tx_m = NULL; 2925 } 2926 2927 if (prog > 0) { 2928 sc_if->msk_cdata.msk_tx_cons = cons; 2929 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2930 ifp->if_timer = 0; 2931 /* No need to sync LEs as we didn't update LEs. */ 2932 } 2933 } 2934 2935 static void 2936 msk_tick(void *xsc_if) 2937 { 2938 struct msk_if_softc *sc_if = xsc_if; 2939 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2940 struct mii_data *mii; 2941 2942 lwkt_serialize_enter(ifp->if_serializer); 2943 2944 mii = device_get_softc(sc_if->msk_miibus); 2945 2946 mii_tick(mii); 2947 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 2948 2949 lwkt_serialize_exit(ifp->if_serializer); 2950 } 2951 2952 static void 2953 msk_intr_phy(struct msk_if_softc *sc_if) 2954 { 2955 uint16_t status; 2956 2957 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2958 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2959 /* Handle FIFO Underrun/Overflow? */ 2960 if (status & PHY_M_IS_FIFO_ERROR) { 2961 device_printf(sc_if->msk_if_dev, 2962 "PHY FIFO underrun/overflow.\n"); 2963 } 2964 } 2965 2966 static void 2967 msk_intr_gmac(struct msk_if_softc *sc_if) 2968 { 2969 struct msk_softc *sc; 2970 uint8_t status; 2971 2972 sc = sc_if->msk_softc; 2973 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 2974 2975 /* GMAC Rx FIFO overrun. */ 2976 if ((status & GM_IS_RX_FF_OR) != 0) { 2977 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 2978 GMF_CLI_RX_FO); 2979 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 2980 } 2981 /* GMAC Tx FIFO underrun. 
 */
2982 	if ((status & GM_IS_TX_FF_UR) != 0) {
2983 		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
2984 		    GMF_CLI_TX_FU);
2985 		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
2986 		/*
2987 		 * XXX
2988 		 * In case of Tx underrun, we may need to flush/reset the
2989 		 * Tx MAC, but that would also require resynchronization
2990 		 * with the status LEs.  Reinitializing the status LEs
2991 		 * would affect the other port in dual-MAC configurations,
2992 		 * so it should be avoided as much as possible.
2993 		 * Due to the lack of documentation this is all guesswork,
2994 		 * and it needs more investigation.
2995 		 */
2996 	}
2997 }
2998 
2999 static void
3000 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3001 {
3002 	struct msk_softc *sc;
3003 
3004 	sc = sc_if->msk_softc;
3005 	if ((status & Y2_IS_PAR_RD1) != 0) {
3006 		device_printf(sc_if->msk_if_dev,
3007 		    "RAM buffer read parity error\n");
3008 		/* Clear IRQ. */
3009 		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3010 		    RI_CLR_RD_PERR);
3011 	}
3012 	if ((status & Y2_IS_PAR_WR1) != 0) {
3013 		device_printf(sc_if->msk_if_dev,
3014 		    "RAM buffer write parity error\n");
3015 		/* Clear IRQ. */
3016 		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3017 		    RI_CLR_WR_PERR);
3018 	}
3019 	if ((status & Y2_IS_PAR_MAC1) != 0) {
3020 		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3021 		/* Clear IRQ. */
3022 		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3023 		    GMF_CLI_TX_PE);
3024 	}
3025 	if ((status & Y2_IS_PAR_RX1) != 0) {
3026 		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3027 		/* Clear IRQ. */
3028 		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3029 	}
3030 	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3031 		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3032 		/* Clear IRQ. */
3033 		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3034 	}
3035 }
3036 
3037 static void
3038 mskc_intr_hwerr(struct msk_softc *sc)
3039 {
3040 	uint32_t status;
3041 	uint32_t tlphead[4];
3042 
3043 	status = CSR_READ_4(sc, B0_HWE_ISRC);
3044 	/* Time Stamp timer overflow. */
3045 	if ((status & Y2_IS_TIST_OV) != 0)
3046 		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3047 	if ((status & Y2_IS_PCI_NEXP) != 0) {
3048 		/*
3049 		 * A PCI Express error occurred which is not described in
3050 		 * the PEX spec.
3051 		 * This error is also mapped to either the Master Abort
3052 		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
3053 		 * and can only be cleared there.
3054 		 */
3055 		device_printf(sc->msk_dev,
3056 		    "PCI Express protocol violation error\n");
3057 	}
3058 
3059 	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3060 		uint16_t v16;
3061 
3062 		if ((status & Y2_IS_MST_ERR) != 0)
3063 			device_printf(sc->msk_dev,
3064 			    "unexpected IRQ Master error\n");
3065 		else
3066 			device_printf(sc->msk_dev,
3067 			    "unexpected IRQ Status error\n");
3068 		/* Reset all bits in the PCI status register. */
3069 		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3070 		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3071 		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3072 		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3073 		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3074 		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3075 	}
3076 
3077 	/* Check for PCI Express Uncorrectable Error. */
3078 	if ((status & Y2_IS_PCI_EXP) != 0) {
3079 		uint32_t v32;
3080 
3081 		/*
3082 		 * On the PCI Express bus, bridges are called root complexes (RC).
3083 		 * PCI Express errors are also recognized by the root
3084 		 * complex, which requests the system to handle the
3085 		 * problem.  After such an error it may no longer be
3086 		 * possible to access the adapter at all.
3087 		 */
3088 
3089 		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3090 		if ((v32 & PEX_UNSUP_REQ) != 0) {
3091 			/* Ignore unsupported request error. */
3092 			if (bootverbose) {
3093 				device_printf(sc->msk_dev,
3094 				    "Uncorrectable PCI Express error\n");
3095 			}
3096 		}
3097 		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3098 			int i;
3099 
3100 			/* Get the TLP header from the Log Registers. */
3101 			for (i = 0; i < 4; i++)
3102 				tlphead[i] = CSR_PCI_READ_4(sc,
3103 				    PEX_HEADER_LOG + i * 4);
3104 			/* Check for vendor defined broadcast message. */
3105 			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3106 				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3107 				CSR_WRITE_4(sc, B0_HWE_IMSK,
3108 				    sc->msk_intrhwemask);
3109 				CSR_READ_4(sc, B0_HWE_IMSK);
3110 			}
3111 		}
3112 		/* Clear the interrupt. */
3113 		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3114 		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3115 		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3116 	}
3117 
3118 	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3119 		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3120 	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3121 		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3122 }
3123 
3124 static __inline void
3125 msk_rxput(struct msk_if_softc *sc_if)
3126 {
3127 	struct msk_softc *sc;
3128 
3129 	sc = sc_if->msk_softc;
3130 #ifdef MSK_JUMBO
3131 	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3132 		bus_dmamap_sync(
3133 		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3134 		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3135 		    BUS_DMASYNC_PREWRITE);
3136 	} else
3137 #endif
3138 	{
3139 		bus_dmamap_sync(
3140 		    sc_if->msk_cdata.msk_rx_ring_tag,
3141 		    sc_if->msk_cdata.msk_rx_ring_map,
3142 		    BUS_DMASYNC_PREWRITE);
3143 	}
3144 	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3145 	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3146 }
3147 
3148 static int
3149 mskc_handle_events(struct msk_softc *sc)
3150 {
3151 	struct msk_if_softc *sc_if;
3152 	int rxput[2];
3153 	struct msk_stat_desc *sd;
3154 	uint32_t control, status;
3155 	int cons, idx, len, port, rxprog;
3156 
3157 	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3158 	if (idx == sc->msk_stat_cons)
3159 		return (0);
3160 
3161 	/* Sync status LEs. */
3162 	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3163 	    BUS_DMASYNC_POSTREAD);
3164 	/* XXX Sync Rx LEs here. */
3165 
3166 	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3167 
3168 	rxprog = 0;
3169 	for (cons = sc->msk_stat_cons; cons != idx;) {
3170 		sd = &sc->msk_stat_ring[cons];
3171 		control = le32toh(sd->msk_control);
3172 		if ((control & HW_OWNER) == 0)
3173 			break;
3174 		/*
3175 		 * Marvell's FreeBSD driver updates the status LE after
3176 		 * clearing HW_OWNER.  However, we have no way to sync a
3177 		 * single LE with the bus_dma(9) API; it can only sync an
3178 		 * entire DMA map.  So don't sync the LE until we have a
3179 		 * better way to sync LEs.
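 * Until then, the LE is rewritten in place below and the sync is
 * deferred; see the rxput batching later in this loop.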
3180 */ 3181 control &= ~HW_OWNER; 3182 sd->msk_control = htole32(control); 3183 status = le32toh(sd->msk_status); 3184 len = control & STLE_LEN_MASK; 3185 port = (control >> 16) & 0x01; 3186 sc_if = sc->msk_if[port]; 3187 if (sc_if == NULL) { 3188 device_printf(sc->msk_dev, "invalid port opcode " 3189 "0x%08x\n", control & STLE_OP_MASK); 3190 continue; 3191 } 3192 3193 switch (control & STLE_OP_MASK) { 3194 case OP_RXVLAN: 3195 sc_if->msk_vtag = ntohs(len); 3196 break; 3197 case OP_RXCHKSVLAN: 3198 sc_if->msk_vtag = ntohs(len); 3199 break; 3200 case OP_RXSTAT: 3201 #ifdef MSK_JUMBO 3202 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3203 msk_jumbo_rxeof(sc_if, status, len); 3204 else 3205 #endif 3206 msk_rxeof(sc_if, status, len); 3207 rxprog++; 3208 /* 3209 * Because there is no way to sync single Rx LE 3210 * put the DMA sync operation off until the end of 3211 * event processing. 3212 */ 3213 rxput[port]++; 3214 /* Update prefetch unit if we've passed water mark. */ 3215 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3216 msk_rxput(sc_if); 3217 rxput[port] = 0; 3218 } 3219 break; 3220 case OP_TXINDEXLE: 3221 if (sc->msk_if[MSK_PORT_A] != NULL) { 3222 msk_txeof(sc->msk_if[MSK_PORT_A], 3223 status & STLE_TXA1_MSKL); 3224 } 3225 if (sc->msk_if[MSK_PORT_B] != NULL) { 3226 msk_txeof(sc->msk_if[MSK_PORT_B], 3227 ((status & STLE_TXA2_MSKL) >> 3228 STLE_TXA2_SHIFTL) | 3229 ((len & STLE_TXA2_MSKH) << 3230 STLE_TXA2_SHIFTH)); 3231 } 3232 break; 3233 default: 3234 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3235 control & STLE_OP_MASK); 3236 break; 3237 } 3238 MSK_INC(cons, MSK_STAT_RING_CNT); 3239 if (rxprog > sc->msk_process_limit) 3240 break; 3241 } 3242 3243 sc->msk_stat_cons = cons; 3244 /* XXX We should sync status LEs here. See above notes. */ 3245 3246 if (rxput[MSK_PORT_A] > 0) 3247 msk_rxput(sc->msk_if[MSK_PORT_A]); 3248 if (rxput[MSK_PORT_B] > 0) 3249 msk_rxput(sc->msk_if[MSK_PORT_B]); 3250 3251 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3252 } 3253 3254 /* Legacy interrupt handler for shared interrupt. */ 3255 static void 3256 mskc_intr(void *xsc) 3257 { 3258 struct msk_softc *sc; 3259 struct msk_if_softc *sc_if0, *sc_if1; 3260 struct ifnet *ifp0, *ifp1; 3261 uint32_t status; 3262 3263 sc = xsc; 3264 ASSERT_SERIALIZED(&sc->msk_serializer); 3265 3266 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. 
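 * They stay masked until the write of 2 to B0_Y2_SP_ICR near the
 * bottom of this handler re-enables them.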
 */
3267 	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3268 	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3269 	    (status & sc->msk_intrmask) == 0) {
3270 		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3271 		return;
3272 	}
3273 
3274 	sc_if0 = sc->msk_if[MSK_PORT_A];
3275 	sc_if1 = sc->msk_if[MSK_PORT_B];
3276 	ifp0 = ifp1 = NULL;
3277 	if (sc_if0 != NULL)
3278 		ifp0 = sc_if0->msk_ifp;
3279 	if (sc_if1 != NULL)
3280 		ifp1 = sc_if1->msk_ifp;
3281 
3282 	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3283 		msk_intr_phy(sc_if0);
3284 	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3285 		msk_intr_phy(sc_if1);
3286 	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3287 		msk_intr_gmac(sc_if0);
3288 	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3289 		msk_intr_gmac(sc_if1);
3290 	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3291 		device_printf(sc->msk_dev, "Rx descriptor error\n");
3292 		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3293 		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3294 		CSR_READ_4(sc, B0_IMSK);
3295 	}
3296 	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3297 		device_printf(sc->msk_dev, "Tx descriptor error\n");
3298 		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3299 		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3300 		CSR_READ_4(sc, B0_IMSK);
3301 	}
3302 	if ((status & Y2_IS_HW_ERR) != 0)
3303 		mskc_intr_hwerr(sc);
3304 
3305 	while (mskc_handle_events(sc) != 0)
3306 		;
3307 	if ((status & Y2_IS_STAT_BMU) != 0)
3308 		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3309 
3310 	/* Reenable interrupts. */
3311 	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3312 
3313 	if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
3314 	    !ifq_is_empty(&ifp0->if_snd))
3315 		ifp0->if_start(ifp0);
3316 	if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
3317 	    !ifq_is_empty(&ifp1->if_snd))
3318 		ifp1->if_start(ifp1);
3319 }
3320 
3321 static void
3322 msk_init(void *xsc)
3323 {
3324 	struct msk_if_softc *sc_if = xsc;
3325 	struct msk_softc *sc = sc_if->msk_softc;
3326 	struct ifnet *ifp = sc_if->msk_ifp;
3327 	struct mii_data *mii;
3328 	uint16_t eaddr[ETHER_ADDR_LEN / 2];
3329 	uint16_t gmac;
3330 	int error, i;
3331 
3332 	ASSERT_SERIALIZED(ifp->if_serializer);
3333 
3334 	mii = device_get_softc(sc_if->msk_miibus);
3335 
3336 	error = 0;
3337 	/* Cancel pending I/O and free all Rx/Tx buffers. */
3338 	msk_stop(sc_if);
3339 
3340 	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
3341 	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3342 	    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3343 		/*
3344 		 * On the Yukon EC Ultra, TSO and checksum offload are
3345 		 * not supported for jumbo frames.
3346 		 */
3347 		ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
3348 		ifp->if_capenable &= ~IFCAP_TXCSUM;
3349 	}
3350 
3351 	/*
3352 	 * Initialize the GMAC first.
3353 	 * Without this initialization the Rx MAC did not work as
3354 	 * expected: it garbled the status LEs, which resulted in
3355 	 * out-of-order or duplicated frame delivery and, in turn,
3356 	 * very poor Rx performance.  (I had to write packet-analysis
3357 	 * code that could be embedded in the driver to diagnose this.)
3358 	 * I spent almost two months fixing this issue; with a
3359 	 * datasheet for the Yukon II I would never have run into it. :-(
3360 	 */
3361 	gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3362 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3363 
3364 	/* Dummy read the Interrupt Source Register. */
3365 	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3366 
3367 	/* Set MIB Clear Counter Mode.
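 * While GM_PAR_MIB_CLR is set, reading a MIB counter also clears it;
 * the loop below walks every counter to zero the statistics.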
 */
3368 	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3369 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3370 	/* Read all MIB Counters with Clear Mode set. */
3371 	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3372 		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3373 	/* Clear MIB Clear Counter Mode. */
3374 	gmac &= ~GM_PAR_MIB_CLR;
3375 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3376 
3377 	/* Disable FCS. */
3378 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3379 
3380 	/* Setup Transmit Control Register. */
3381 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3382 
3383 	/* Setup Transmit Flow Control Register. */
3384 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3385 
3386 	/* Setup Transmit Parameter Register. */
3387 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3388 	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3389 	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3390 
3391 	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3392 	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3393 
3394 	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3395 		gmac |= GM_SMOD_JUMBO_ENA;
3396 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3397 
3398 	/* Set station address. */
3399 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3400 	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3401 		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3402 		    eaddr[i]);
3403 	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3404 		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3405 		    eaddr[i]);
3406 
3407 	/* Disable interrupts for counter overflows. */
3408 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3409 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3410 	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3411 
3412 	/* Configure Rx MAC FIFO. */
3413 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3414 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3415 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3416 	    GMF_OPER_ON | GMF_RX_F_FL_ON);
3417 
3418 	/* Set promiscuous mode. */
3419 	msk_setpromisc(sc_if);
3420 
3421 	/* Set multicast filter. */
3422 	msk_setmulti(sc_if);
3423 
3424 	/* Flush Rx MAC FIFO on any flow control or error. */
3425 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3426 	    GMR_FS_ANY_ERR);
3427 
3428 	/* Set Rx FIFO flush threshold to 64 bytes. */
3429 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3430 	    RX_GMF_FL_THR_DEF);
3431 
3432 	/* Configure Tx MAC FIFO. */
3433 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3434 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3435 	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3436 
3437 	/* Configure hardware VLAN tag insertion/stripping. */
3438 	msk_setvlan(sc_if, ifp);
3439 
3440 	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3441 		/* Set Rx Pause threshold. */
3442 		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3443 		    MSK_ECU_LLPP);
3444 		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3445 		    MSK_ECU_ULPP);
3446 		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3447 			/*
3448 			 * Set Tx GMAC FIFO Almost Empty Threshold.
3449 			 */
3450 			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3451 			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3452 			/* Disable Store & Forward mode for Tx.
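 * (Assumption: the EC Ultra Tx FIFO is smaller than a jumbo frame,
 * so the MAC has to start transmitting before the frame is fully
 * buffered; the almost-empty threshold programmed above covers the
 * resulting underrun window.)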
*/ 3453 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3454 TX_JUMBO_ENA | TX_STFW_DIS); 3455 } else { 3456 /* Enable Store & Forward mode for Tx. */ 3457 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3458 TX_JUMBO_DIS | TX_STFW_ENA); 3459 } 3460 } 3461 3462 /* 3463 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3464 * arbiter as we don't use Sync Tx queue. 3465 */ 3466 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3467 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3468 /* Enable the RAM Interface Arbiter. */ 3469 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3470 3471 /* Setup RAM buffer. */ 3472 msk_set_rambuffer(sc_if); 3473 3474 /* Disable Tx sync Queue. */ 3475 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3476 3477 /* Setup Tx Queue Bus Memory Interface. */ 3478 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3479 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3480 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3481 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3482 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3483 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3484 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3485 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); 3486 } 3487 3488 /* Setup Rx Queue Bus Memory Interface. */ 3489 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3490 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3491 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3492 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3493 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3494 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3495 /* MAC Rx RAM Read is controlled by hardware. */ 3496 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3497 } 3498 3499 msk_set_prefetch(sc, sc_if->msk_txq, 3500 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3501 msk_init_tx_ring(sc_if); 3502 3503 /* Disable Rx checksum offload and RSS hash. */ 3504 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3505 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3506 #ifdef MSK_JUMBO 3507 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3508 msk_set_prefetch(sc, sc_if->msk_rxq, 3509 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3510 MSK_JUMBO_RX_RING_CNT - 1); 3511 error = msk_init_jumbo_rx_ring(sc_if); 3512 } else 3513 #endif 3514 { 3515 msk_set_prefetch(sc, sc_if->msk_rxq, 3516 sc_if->msk_rdata.msk_rx_ring_paddr, 3517 MSK_RX_RING_CNT - 1); 3518 error = msk_init_rx_ring(sc_if); 3519 } 3520 if (error != 0) { 3521 device_printf(sc_if->msk_if_dev, 3522 "initialization failed: no memory for Rx buffers\n"); 3523 msk_stop(sc_if); 3524 return; 3525 } 3526 3527 /* Configure interrupt handling. 
*/ 3528 if (sc_if->msk_port == MSK_PORT_A) { 3529 sc->msk_intrmask |= Y2_IS_PORT_A; 3530 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3531 } else { 3532 sc->msk_intrmask |= Y2_IS_PORT_B; 3533 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3534 } 3535 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3536 CSR_READ_4(sc, B0_HWE_IMSK); 3537 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3538 CSR_READ_4(sc, B0_IMSK); 3539 3540 sc_if->msk_link = 0; 3541 mii_mediachg(mii); 3542 3543 ifp->if_flags |= IFF_RUNNING; 3544 ifp->if_flags &= ~IFF_OACTIVE; 3545 3546 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3547 } 3548 3549 static void 3550 msk_set_rambuffer(struct msk_if_softc *sc_if) 3551 { 3552 struct msk_softc *sc; 3553 int ltpp, utpp; 3554 3555 sc = sc_if->msk_softc; 3556 3557 /* Setup Rx Queue. */ 3558 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3559 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3560 sc->msk_rxqstart[sc_if->msk_port] / 8); 3561 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3562 sc->msk_rxqend[sc_if->msk_port] / 8); 3563 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3564 sc->msk_rxqstart[sc_if->msk_port] / 8); 3565 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3566 sc->msk_rxqstart[sc_if->msk_port] / 8); 3567 3568 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3569 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3570 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3571 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3572 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3573 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3574 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3575 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3576 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3577 3578 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3579 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3580 3581 /* Setup Tx Queue. */ 3582 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3583 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3584 sc->msk_txqstart[sc_if->msk_port] / 8); 3585 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3586 sc->msk_txqend[sc_if->msk_port] / 8); 3587 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3588 sc->msk_txqstart[sc_if->msk_port] / 8); 3589 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3590 sc->msk_txqstart[sc_if->msk_port] / 8); 3591 /* Enable Store & Forward for Tx side. */ 3592 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3593 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3594 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3595 } 3596 3597 static void 3598 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3599 uint32_t count) 3600 { 3601 3602 /* Reset the prefetch unit. */ 3603 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3604 PREF_UNIT_RST_SET); 3605 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3606 PREF_UNIT_RST_CLR); 3607 /* Set LE base address. */ 3608 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3609 MSK_ADDR_LO(addr)); 3610 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3611 MSK_ADDR_HI(addr)); 3612 /* Set the list last index. */ 3613 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3614 count); 3615 /* Turn on prefetch unit. */ 3616 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3617 PREF_UNIT_OP_ON); 3618 /* Dummy read to ensure write. 
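 * (Register writes may be posted; reading the control register back
 * forces them to complete before the queue is used.)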
*/ 3619 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); 3620 } 3621 3622 static void 3623 msk_stop(struct msk_if_softc *sc_if) 3624 { 3625 struct msk_softc *sc = sc_if->msk_softc; 3626 struct ifnet *ifp = sc_if->msk_ifp; 3627 struct msk_txdesc *txd; 3628 struct msk_rxdesc *rxd; 3629 #ifdef MSK_JUMBO 3630 struct msk_rxdesc *jrxd; 3631 #endif 3632 uint32_t val; 3633 int i; 3634 3635 ASSERT_SERIALIZED(ifp->if_serializer); 3636 3637 callout_stop(&sc_if->msk_tick_ch); 3638 ifp->if_timer = 0; 3639 3640 /* Disable interrupts. */ 3641 if (sc_if->msk_port == MSK_PORT_A) { 3642 sc->msk_intrmask &= ~Y2_IS_PORT_A; 3643 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK; 3644 } else { 3645 sc->msk_intrmask &= ~Y2_IS_PORT_B; 3646 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK; 3647 } 3648 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3649 CSR_READ_4(sc, B0_HWE_IMSK); 3650 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3651 CSR_READ_4(sc, B0_IMSK); 3652 3653 /* Disable Tx/Rx MAC. */ 3654 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 3655 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 3656 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); 3657 /* Read again to ensure writing. */ 3658 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 3659 3660 /* Stop Tx BMU. */ 3661 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); 3662 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 3663 for (i = 0; i < MSK_TIMEOUT; i++) { 3664 if ((val & (BMU_STOP | BMU_IDLE)) == 0) { 3665 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 3666 BMU_STOP); 3667 CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 3668 } else 3669 break; 3670 DELAY(1); 3671 } 3672 if (i == MSK_TIMEOUT) 3673 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n"); 3674 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), 3675 RB_RST_SET | RB_DIS_OP_MD); 3676 3677 /* Disable all GMAC interrupt. */ 3678 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); 3679 /* Disable PHY interrupt. */ 3680 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 3681 3682 /* Disable the RAM Interface Arbiter. */ 3683 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); 3684 3685 /* Reset the PCI FIFO of the async Tx queue */ 3686 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 3687 BMU_RST_SET | BMU_FIFO_RST); 3688 3689 /* Reset the Tx prefetch units. */ 3690 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG), 3691 PREF_UNIT_RST_SET); 3692 3693 /* Reset the RAM Buffer async Tx queue. */ 3694 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET); 3695 3696 /* Reset Tx MAC FIFO. */ 3697 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3698 /* Set Pause Off. */ 3699 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF); 3700 3701 /* 3702 * The Rx Stop command will not work for Yukon-2 if the BMU does not 3703 * reach the end of packet and since we can't make sure that we have 3704 * incoming data, we must reset the BMU while it is not during a DMA 3705 * transfer. Since it is possible that the Rx path is still active, 3706 * the Rx RAM buffer will be stopped first, so any possible incoming 3707 * data will not trigger a DMA. After the RAM buffer is stopped, the 3708 * BMU is polled until any DMA in progress is ended and only then it 3709 * will be reset. 3710 */ 3711 3712 /* Disable the RAM Buffer receive queue. 
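 * Per the note above, the loop below waits for the buffer's read and
 * write pointers (Q_RSL/Q_RL) to match, i.e. for any DMA in progress
 * to drain, before the BMU is reset.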

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
#ifdef MSK_JUMBO
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
#endif
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
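
	/*
	 * Added commentary: only the mbufs are released above; the
	 * DMA maps are merely unloaded, not destroyed, so a later
	 * msk_init() can reload fresh mbufs into the same maps.  The
	 * maps themselves are torn down at detach time.
	 */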

	/* Mark the interface down. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc_if->msk_link = 0;
}

#ifdef notyet

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}

#endif

static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
	    msk_dmamap_cb, &ctx, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}
	*paddr = seg.ds_addr;
	return 0;
}

static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}
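
/*
 * Usage sketch (illustrative only; the exact softc field names are
 * assumptions based on if_mskreg.h): a descriptor ring is expected to
 * be allocated through the helper pair above, e.g. for the Tx ring:
 *
 *	error = msk_dmamem_create(dev, MSK_TX_RING_SZ,
 *	    &sc_if->msk_cdata.msk_tx_ring_tag,
 *	    (void **)&sc_if->msk_rdata.msk_tx_ring,
 *	    &sc_if->msk_rdata.msk_tx_ring_paddr,
 *	    &sc_if->msk_cdata.msk_tx_ring_map);
 *
 * with the matching teardown at detach:
 *
 *	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
 *	    sc_if->msk_rdata.msk_tx_ring,
 *	    sc_if->msk_cdata.msk_tx_ring_map);
 */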