/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
/* $DragonFly: src/sys/dev/netif/msk/if_msk.c,v 1.10 2008/11/23 04:28:27 sephe Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
144 */ 145 static const struct msk_product { 146 uint16_t msk_vendorid; 147 uint16_t msk_deviceid; 148 const char *msk_name; 149 } msk_products[] = { 150 { VENDORID_SK, DEVICEID_SK_YUKON2, 151 "SK-9Sxx Gigabit Ethernet" }, 152 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 153 "SK-9Exx Gigabit Ethernet"}, 154 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 155 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 156 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 157 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 158 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 159 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 160 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 161 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 162 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 163 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 164 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 165 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 166 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 167 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 168 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 169 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 170 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 171 "Marvell Yukon 88E8035 Gigabit Ethernet" }, 172 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 173 "Marvell Yukon 88E8036 Gigabit Ethernet" }, 174 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 175 "Marvell Yukon 88E8038 Gigabit Ethernet" }, 176 { VENDORID_MARVELL, DEVICEID_MRVL_8039, 177 "Marvell Yukon 88E8039 Gigabit Ethernet" }, 178 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 179 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 180 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 181 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 182 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 183 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 184 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 185 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 186 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 187 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 188 { VENDORID_MARVELL, DEVICEID_MRVL_436A, 189 "Marvell Yukon 88E8058 Gigabit Ethernet" }, 
190 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 191 "D-Link 550SX Gigabit Ethernet" }, 192 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 193 "D-Link 560T Gigabit Ethernet" }, 194 { 0, 0, NULL } 195 }; 196 197 static const char *model_name[] = { 198 "Yukon XL", 199 "Yukon EC Ultra", 200 "Yukon Unknown", 201 "Yukon EC", 202 "Yukon FE" 203 }; 204 205 static int mskc_probe(device_t); 206 static int mskc_attach(device_t); 207 static int mskc_detach(device_t); 208 static int mskc_shutdown(device_t); 209 static int mskc_suspend(device_t); 210 static int mskc_resume(device_t); 211 static void mskc_intr(void *); 212 213 static void mskc_reset(struct msk_softc *); 214 static void mskc_set_imtimer(struct msk_softc *); 215 static void mskc_intr_hwerr(struct msk_softc *); 216 static int mskc_handle_events(struct msk_softc *); 217 static void mskc_phy_power(struct msk_softc *, int); 218 static int mskc_setup_rambuffer(struct msk_softc *); 219 static int mskc_status_dma_alloc(struct msk_softc *); 220 static void mskc_status_dma_free(struct msk_softc *); 221 static int mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS); 222 static int mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS); 223 224 static int msk_probe(device_t); 225 static int msk_attach(device_t); 226 static int msk_detach(device_t); 227 static int msk_miibus_readreg(device_t, int, int); 228 static int msk_miibus_writereg(device_t, int, int, int); 229 static void msk_miibus_statchg(device_t); 230 231 static void msk_init(void *); 232 static int msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 233 static void msk_start(struct ifnet *); 234 static void msk_watchdog(struct ifnet *); 235 static int msk_mediachange(struct ifnet *); 236 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 237 238 static void msk_tick(void *); 239 static void msk_intr_phy(struct msk_if_softc *); 240 static void msk_intr_gmac(struct msk_if_softc *); 241 static __inline void 242 msk_rxput(struct msk_if_softc *); 243 static void 
msk_handle_hwerr(struct msk_if_softc *, uint32_t); 244 static void msk_rxeof(struct msk_if_softc *, uint32_t, int, 245 struct mbuf_chain *); 246 static void msk_txeof(struct msk_if_softc *, int); 247 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); 248 static void msk_set_rambuffer(struct msk_if_softc *); 249 static void msk_stop(struct msk_if_softc *); 250 251 static int msk_txrx_dma_alloc(struct msk_if_softc *); 252 static void msk_txrx_dma_free(struct msk_if_softc *); 253 static int msk_init_rx_ring(struct msk_if_softc *); 254 static void msk_init_tx_ring(struct msk_if_softc *); 255 static __inline void 256 msk_discard_rxbuf(struct msk_if_softc *, int); 257 static int msk_newbuf(struct msk_if_softc *, int, int); 258 static int msk_encap(struct msk_if_softc *, struct mbuf **); 259 260 #ifdef MSK_JUMBO 261 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 262 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 263 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 264 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int); 265 static void *msk_jalloc(struct msk_if_softc *); 266 static void msk_jfree(void *, void *); 267 #endif 268 269 static int msk_phy_readreg(struct msk_if_softc *, int, int); 270 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 271 272 static void msk_setmulti(struct msk_if_softc *); 273 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 274 static void msk_setpromisc(struct msk_if_softc *); 275 276 static int msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *, 277 void **, bus_addr_t *, bus_dmamap_t *); 278 static void msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 279 280 static device_method_t mskc_methods[] = { 281 /* Device interface */ 282 DEVMETHOD(device_probe, mskc_probe), 283 DEVMETHOD(device_attach, mskc_attach), 284 DEVMETHOD(device_detach, mskc_detach), 285 DEVMETHOD(device_suspend, mskc_suspend), 286 
DEVMETHOD(device_resume, mskc_resume), 287 DEVMETHOD(device_shutdown, mskc_shutdown), 288 289 /* bus interface */ 290 DEVMETHOD(bus_print_child, bus_generic_print_child), 291 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 292 293 { NULL, NULL } 294 }; 295 296 static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc)); 297 static devclass_t mskc_devclass; 298 299 static device_method_t msk_methods[] = { 300 /* Device interface */ 301 DEVMETHOD(device_probe, msk_probe), 302 DEVMETHOD(device_attach, msk_attach), 303 DEVMETHOD(device_detach, msk_detach), 304 DEVMETHOD(device_shutdown, bus_generic_shutdown), 305 306 /* bus interface */ 307 DEVMETHOD(bus_print_child, bus_generic_print_child), 308 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 309 310 /* MII interface */ 311 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 312 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 313 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 314 315 { NULL, NULL } 316 }; 317 318 static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc)); 319 static devclass_t msk_devclass; 320 321 DECLARE_DUMMY_MODULE(if_msk); 322 DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, 0, 0); 323 DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, 0, 0); 324 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0); 325 326 static int mskc_intr_rate = 0; 327 static int mskc_process_limit = MSK_PROC_DEFAULT; 328 329 TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate); 330 TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit); 331 332 static int 333 msk_miibus_readreg(device_t dev, int phy, int reg) 334 { 335 struct msk_if_softc *sc_if; 336 337 if (phy != PHY_ADDR_MARV) 338 return (0); 339 340 sc_if = device_get_softc(dev); 341 342 return (msk_phy_readreg(sc_if, phy, reg)); 343 } 344 345 static int 346 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 347 { 348 struct msk_softc *sc; 349 int i, val; 350 351 sc = sc_if->msk_softc; 
352 353 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 354 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 355 356 for (i = 0; i < MSK_TIMEOUT; i++) { 357 DELAY(1); 358 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 359 if ((val & GM_SMI_CT_RD_VAL) != 0) { 360 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 361 break; 362 } 363 } 364 365 if (i == MSK_TIMEOUT) { 366 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 367 val = 0; 368 } 369 370 return (val); 371 } 372 373 static int 374 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 375 { 376 struct msk_if_softc *sc_if; 377 378 if (phy != PHY_ADDR_MARV) 379 return (0); 380 381 sc_if = device_get_softc(dev); 382 383 return (msk_phy_writereg(sc_if, phy, reg, val)); 384 } 385 386 static int 387 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) 388 { 389 struct msk_softc *sc; 390 int i; 391 392 sc = sc_if->msk_softc; 393 394 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); 395 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 396 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 397 for (i = 0; i < MSK_TIMEOUT; i++) { 398 DELAY(1); 399 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & 400 GM_SMI_CT_BUSY) == 0) 401 break; 402 } 403 if (i == MSK_TIMEOUT) 404 if_printf(sc_if->msk_ifp, "phy write timeout\n"); 405 406 return (0); 407 } 408 409 static void 410 msk_miibus_statchg(device_t dev) 411 { 412 struct msk_if_softc *sc_if; 413 struct msk_softc *sc; 414 struct mii_data *mii; 415 struct ifnet *ifp; 416 uint32_t gmac; 417 418 sc_if = device_get_softc(dev); 419 sc = sc_if->msk_softc; 420 421 mii = device_get_softc(sc_if->msk_miibus); 422 ifp = sc_if->msk_ifp; 423 424 if (mii->mii_media_status & IFM_ACTIVE) { 425 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 426 sc_if->msk_link = 1; 427 } else 428 sc_if->msk_link = 0; 429 430 if (sc_if->msk_link != 0) { 431 /* Enable Tx FIFO Underrun. 
*/ 432 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 433 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); 434 /* 435 * Because mii(4) notify msk(4) that it detected link status 436 * change, there is no need to enable automatic 437 * speed/flow-control/duplex updates. 438 */ 439 gmac = GM_GPCR_AU_ALL_DIS; 440 switch (IFM_SUBTYPE(mii->mii_media_active)) { 441 case IFM_1000_SX: 442 case IFM_1000_T: 443 gmac |= GM_GPCR_SPEED_1000; 444 break; 445 case IFM_100_TX: 446 gmac |= GM_GPCR_SPEED_100; 447 break; 448 case IFM_10_T: 449 break; 450 } 451 452 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) 453 gmac |= GM_GPCR_DUP_FULL; 454 /* Disable Rx flow control. */ 455 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0) 456 gmac |= GM_GPCR_FC_RX_DIS; 457 /* Disable Tx flow control. */ 458 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0) 459 gmac |= GM_GPCR_FC_TX_DIS; 460 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 461 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 462 /* Read again to ensure writing. */ 463 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 464 465 gmac = GMC_PAUSE_ON; 466 if (((mii->mii_media_active & IFM_GMASK) & 467 (IFM_FLAG0 | IFM_FLAG1)) == 0) 468 gmac = GMC_PAUSE_OFF; 469 /* Diable pause for 10/100 Mbps in half-duplex mode. */ 470 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) && 471 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX || 472 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)) 473 gmac = GMC_PAUSE_OFF; 474 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); 475 476 /* Enable PHY interrupt for FIFO underrun/overflow. */ 477 msk_phy_writereg(sc_if, PHY_ADDR_MARV, 478 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); 479 } else { 480 /* 481 * Link state changed to down. 482 * Disable PHY interrupts. 483 */ 484 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 485 /* Disable Rx/Tx MAC. 
*/ 486 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 487 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 488 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 489 /* Read again to ensure writing. */ 490 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 491 } 492 } 493 494 static void 495 msk_setmulti(struct msk_if_softc *sc_if) 496 { 497 struct msk_softc *sc; 498 struct ifnet *ifp; 499 struct ifmultiaddr *ifma; 500 uint32_t mchash[2]; 501 uint32_t crc; 502 uint16_t mode; 503 504 sc = sc_if->msk_softc; 505 ifp = sc_if->msk_ifp; 506 507 bzero(mchash, sizeof(mchash)); 508 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 509 mode |= GM_RXCR_UCF_ENA; 510 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 511 if ((ifp->if_flags & IFF_PROMISC) != 0) 512 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 513 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 514 mchash[0] = 0xffff; 515 mchash[1] = 0xffff; 516 } 517 } else { 518 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 519 if (ifma->ifma_addr->sa_family != AF_LINK) 520 continue; 521 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 522 ifma->ifma_addr), ETHER_ADDR_LEN); 523 /* Just want the 6 least significant bits. */ 524 crc &= 0x3f; 525 /* Set the corresponding bit in the hash table. 
*/ 526 mchash[crc >> 5] |= 1 << (crc & 0x1f); 527 } 528 mode |= GM_RXCR_MCF_ENA; 529 } 530 531 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, 532 mchash[0] & 0xffff); 533 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, 534 (mchash[0] >> 16) & 0xffff); 535 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, 536 mchash[1] & 0xffff); 537 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, 538 (mchash[1] >> 16) & 0xffff); 539 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 540 } 541 542 static void 543 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp) 544 { 545 struct msk_softc *sc; 546 547 sc = sc_if->msk_softc; 548 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 549 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 550 RX_VLAN_STRIP_ON); 551 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 552 TX_VLAN_TAG_ON); 553 } else { 554 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 555 RX_VLAN_STRIP_OFF); 556 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 557 TX_VLAN_TAG_OFF); 558 } 559 } 560 561 static void 562 msk_setpromisc(struct msk_if_softc *sc_if) 563 { 564 struct msk_softc *sc; 565 struct ifnet *ifp; 566 uint16_t mode; 567 568 sc = sc_if->msk_softc; 569 ifp = sc_if->msk_ifp; 570 571 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 572 if (ifp->if_flags & IFF_PROMISC) 573 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 574 else 575 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 576 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 577 } 578 579 static int 580 msk_init_rx_ring(struct msk_if_softc *sc_if) 581 { 582 struct msk_ring_data *rd; 583 struct msk_rxdesc *rxd; 584 int i, prod; 585 586 sc_if->msk_cdata.msk_rx_cons = 0; 587 sc_if->msk_cdata.msk_rx_prod = 0; 588 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 589 590 rd = &sc_if->msk_rdata; 591 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 592 prod = sc_if->msk_cdata.msk_rx_prod; 593 for (i = 0; i < MSK_RX_RING_CNT; i++) { 594 rxd = 
&sc_if->msk_cdata.msk_rxdesc[prod]; 595 rxd->rx_m = NULL; 596 rxd->rx_le = &rd->msk_rx_ring[prod]; 597 if (msk_newbuf(sc_if, prod, 1) != 0) 598 return (ENOBUFS); 599 MSK_INC(prod, MSK_RX_RING_CNT); 600 } 601 602 /* Update prefetch unit. */ 603 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1; 604 CSR_WRITE_2(sc_if->msk_softc, 605 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 606 sc_if->msk_cdata.msk_rx_prod); 607 608 return (0); 609 } 610 611 #ifdef MSK_JUMBO 612 static int 613 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 614 { 615 struct msk_ring_data *rd; 616 struct msk_rxdesc *rxd; 617 int i, prod; 618 619 MSK_IF_LOCK_ASSERT(sc_if); 620 621 sc_if->msk_cdata.msk_rx_cons = 0; 622 sc_if->msk_cdata.msk_rx_prod = 0; 623 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 624 625 rd = &sc_if->msk_rdata; 626 bzero(rd->msk_jumbo_rx_ring, 627 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 628 prod = sc_if->msk_cdata.msk_rx_prod; 629 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 630 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 631 rxd->rx_m = NULL; 632 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 633 if (msk_jumbo_newbuf(sc_if, prod) != 0) 634 return (ENOBUFS); 635 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 636 } 637 638 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 639 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 640 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 641 642 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1; 643 CSR_WRITE_2(sc_if->msk_softc, 644 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 645 sc_if->msk_cdata.msk_rx_prod); 646 647 return (0); 648 } 649 #endif 650 651 static void 652 msk_init_tx_ring(struct msk_if_softc *sc_if) 653 { 654 struct msk_ring_data *rd; 655 struct msk_txdesc *txd; 656 int i; 657 658 sc_if->msk_cdata.msk_tx_prod = 0; 659 sc_if->msk_cdata.msk_tx_cons = 0; 660 sc_if->msk_cdata.msk_tx_cnt = 0; 661 662 rd = &sc_if->msk_rdata; 663 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 664 for 
(i = 0; i < MSK_TX_RING_CNT; i++) { 665 txd = &sc_if->msk_cdata.msk_txdesc[i]; 666 txd->tx_m = NULL; 667 txd->tx_le = &rd->msk_tx_ring[i]; 668 } 669 } 670 671 static __inline void 672 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 673 { 674 struct msk_rx_desc *rx_le; 675 struct msk_rxdesc *rxd; 676 struct mbuf *m; 677 678 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 679 m = rxd->rx_m; 680 rx_le = rxd->rx_le; 681 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 682 } 683 684 #ifdef MSK_JUMBO 685 static __inline void 686 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 687 { 688 struct msk_rx_desc *rx_le; 689 struct msk_rxdesc *rxd; 690 struct mbuf *m; 691 692 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 693 m = rxd->rx_m; 694 rx_le = rxd->rx_le; 695 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 696 } 697 #endif 698 699 static int 700 msk_newbuf(struct msk_if_softc *sc_if, int idx, int init) 701 { 702 struct msk_rx_desc *rx_le; 703 struct msk_rxdesc *rxd; 704 struct mbuf *m; 705 bus_dma_segment_t seg; 706 bus_dmamap_t map; 707 int error, nseg; 708 709 m = m_getcl(init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 710 if (m == NULL) 711 return (ENOBUFS); 712 713 m->m_len = m->m_pkthdr.len = MCLBYTES; 714 m_adj(m, ETHER_ALIGN); 715 716 error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag, 717 sc_if->msk_cdata.msk_rx_sparemap, 718 m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 719 if (error) { 720 m_freem(m); 721 if (init) 722 if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n"); 723 return (error); 724 } 725 726 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 727 if (rxd->rx_m != NULL) { 728 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 729 BUS_DMASYNC_POSTREAD); 730 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 731 } 732 733 map = rxd->rx_dmamap; 734 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 735 sc_if->msk_cdata.msk_rx_sparemap = map; 736 737 rxd->rx_m = m; 738 rx_le = rxd->rx_le; 739 rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr)); 740 rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER); 741 742 return (0); 743 } 744 745 #ifdef MSK_JUMBO 746 static int 747 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 748 { 749 struct msk_rx_desc *rx_le; 750 struct msk_rxdesc *rxd; 751 struct mbuf *m; 752 bus_dma_segment_t segs[1]; 753 bus_dmamap_t map; 754 int nsegs; 755 void *buf; 756 757 MGETHDR(m, M_DONTWAIT, MT_DATA); 758 if (m == NULL) 759 return (ENOBUFS); 760 buf = msk_jalloc(sc_if); 761 if (buf == NULL) { 762 m_freem(m); 763 return (ENOBUFS); 764 } 765 /* Attach the buffer to the mbuf. 
*/ 766 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0, 767 EXT_NET_DRV); 768 if ((m->m_flags & M_EXT) == 0) { 769 m_freem(m); 770 return (ENOBUFS); 771 } 772 m->m_pkthdr.len = m->m_len = MSK_JLEN; 773 m_adj(m, ETHER_ALIGN); 774 775 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 776 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 777 BUS_DMA_NOWAIT) != 0) { 778 m_freem(m); 779 return (ENOBUFS); 780 } 781 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 782 783 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 784 if (rxd->rx_m != NULL) { 785 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 786 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 787 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 788 rxd->rx_dmamap); 789 } 790 map = rxd->rx_dmamap; 791 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 792 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 793 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 794 BUS_DMASYNC_PREREAD); 795 rxd->rx_m = m; 796 rx_le = rxd->rx_le; 797 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 798 rx_le->msk_control = 799 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 800 801 return (0); 802 } 803 #endif 804 805 /* 806 * Set media options. 807 */ 808 static int 809 msk_mediachange(struct ifnet *ifp) 810 { 811 struct msk_if_softc *sc_if = ifp->if_softc; 812 struct mii_data *mii; 813 814 mii = device_get_softc(sc_if->msk_miibus); 815 mii_mediachg(mii); 816 817 return (0); 818 } 819 820 /* 821 * Report current media status. 
822 */ 823 static void 824 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 825 { 826 struct msk_if_softc *sc_if = ifp->if_softc; 827 struct mii_data *mii; 828 829 mii = device_get_softc(sc_if->msk_miibus); 830 mii_pollstat(mii); 831 832 ifmr->ifm_active = mii->mii_media_active; 833 ifmr->ifm_status = mii->mii_media_status; 834 } 835 836 static int 837 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 838 { 839 struct msk_if_softc *sc_if; 840 struct ifreq *ifr; 841 struct mii_data *mii; 842 int error, mask; 843 844 sc_if = ifp->if_softc; 845 ifr = (struct ifreq *)data; 846 error = 0; 847 848 switch(command) { 849 case SIOCSIFMTU: 850 #ifdef MSK_JUMBO 851 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { 852 error = EINVAL; 853 break; 854 } 855 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE && 856 ifr->ifr_mtu > MSK_MAX_FRAMELEN) { 857 error = EINVAL; 858 break; 859 } 860 ifp->if_mtu = ifr->ifr_mtu; 861 if ((ifp->if_flags & IFF_RUNNING) != 0) 862 msk_init(sc_if); 863 #else 864 error = EOPNOTSUPP; 865 #endif 866 break; 867 868 case SIOCSIFFLAGS: 869 if (ifp->if_flags & IFF_UP) { 870 if (ifp->if_flags & IFF_RUNNING) { 871 if (((ifp->if_flags ^ sc_if->msk_if_flags) 872 & IFF_PROMISC) != 0) { 873 msk_setpromisc(sc_if); 874 msk_setmulti(sc_if); 875 } 876 } else { 877 if (sc_if->msk_detach == 0) 878 msk_init(sc_if); 879 } 880 } else { 881 if (ifp->if_flags & IFF_RUNNING) 882 msk_stop(sc_if); 883 } 884 sc_if->msk_if_flags = ifp->if_flags; 885 break; 886 887 case SIOCADDMULTI: 888 case SIOCDELMULTI: 889 if (ifp->if_flags & IFF_RUNNING) 890 msk_setmulti(sc_if); 891 break; 892 893 case SIOCGIFMEDIA: 894 case SIOCSIFMEDIA: 895 mii = device_get_softc(sc_if->msk_miibus); 896 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 897 break; 898 899 case SIOCSIFCAP: 900 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 901 if ((mask & IFCAP_TXCSUM) != 0) { 902 ifp->if_capenable ^= IFCAP_TXCSUM; 903 if ((IFCAP_TXCSUM & 
ifp->if_capenable) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
			ifp->if_hwassist |= MSK_CSUM_FEATURES;
		else
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
	}
#ifdef notyet
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
#endif

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frame.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Device probe: match the PCI vendor/device ID pair against the
 * msk_products table.  On a hit, install the product name as the
 * device description and return 0; return ENXIO when the device
 * is not one of ours.
 */
static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	/* msk_products is NULL-name terminated. */
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}

/*
 * Carve the adapter's on-chip SRAM into per-port Rx/Tx RAM buffer
 * regions.  The receiver gets roughly 2/3 of the memory; both queue
 * sizes are kept 1KB-aligned as the Yukon II requires.  Start/end
 * offsets for each port are recorded in the softc.  Always returns 0.
 */
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;
	uint8_t val;

	/* Get adapter SRAM size. */
	val = CSR_READ_1(sc, B2_E_0);
	/* B2_E_0 counts in 4KB units; 0 encodes 128KB. */
	sc->msk_ramsize = (val == 0) ? 128 : val * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	/*
	 * Give receiver 2/3 of memory and round down to the multiple
	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	/* Lay out [Rx][Tx] regions back to back, one pair per port. */
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

/*
 * Switch the PHY(s) between VCC power (MSK_PHY_POWERUP) and VAUX
 * power-down (MSK_PHY_POWERDOWN).  The register write sequence is
 * hardware-mandated; do not reorder.
 */
static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			uint32_t our;

			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		/* Pulse the per-port GMAC link reset. */
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		/* Force both PHYs into power-down/COMA. */
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		/* Switch power to VAUX. */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		/* Unknown mode: leave power state untouched. */
		break;
	}
}

/*
 * Bring the whole adapter to a known state: software reset, clear PCI
 * error state, power up the PHYs, reset GPHY/GMAC blocks, program RAM
 * interface timeouts, apply PCI-X/PCIe bus workarounds, and initialize
 * and start the status-list unit.  Called from mskc_attach() (and
 * assumes sc->msk_bustype, msk_num_port and the status ring are set up).
 */
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/*
	 * Disable ASF.
	 * NOTE(review): mskc_attach() rejects any chip with
	 * msk_hw_id < CHIP_ID_YUKON_XL, so this branch looks
	 * unreachable as written — confirm the intended condition.
	 */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	/* PCI config writes must be bracketed by TST_CFG_WRITE_ON/OFF. */
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			/* Receiver overflow seen: mask PCIe HW errors. */
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/*
	 * Configure timeout values: reset the RAM interface per port and
	 * program the same write/read timeout for every queue.
	 */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);	/* read back to flush the write */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is an problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;
		uint8_t pcix;

		pcix = pci_get_pcixcap_ptr(sc->msk_dev);

		/* PCI-X command register lives at capability offset + 2. */
		pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~0x70;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		/* Warn if the negotiated link width is below the maximum. */
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width) {
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
		}
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

/*
 * Per-port child probe: build a description string from the parent
 * controller's chip id/revision.  Always succeeds (the parent only
 * creates children for ports it knows exist).
 */
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

/*
 * Per-port attach: wire the port softc to the parent controller,
 * select the port's queue register set, allocate Tx/Rx DMA resources,
 * initialize the ifnet, read the station address from the chip, probe
 * the PHY and call ether_ifattach().  On failure the partially set up
 * port is torn down via msk_detach().
 */
static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	/* The parent passed the port number via ivars; consume and free. */
	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has serious bug in Rx checksum offload for all Yukon II family
	 * hardware. It seems there is a workaround to make it work somtimes.
	 * However, the workaround also have to check OP code sequences to
	 * verify whether the OP code is correct. Sometimes it should compute
	 * IP/TCP/UDP checksum in driver in order to verify correctness of
	 * checksum computed by hardware. If you have to compute checksum
	 * with software to verify the hardware's checksum why have hardware
	 * compute the checksum? I think there is no reason to spend time to
	 * make Rx checksum offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
#if 0
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif

	return 0;
fail:
	/* msk_detach() only frees what was actually allocated. */
	msk_detach(dev);
	sc->msk_if[port] = NULL;
	return (error);
}

/*
 * Attach the interface.
Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port, cpuid;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables from the module tunables.
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense: if the chip was left in a
	 * low-power state, save the BARs/INTLINE, force D0 and restore.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource: try the preferred space first, then
	 * fall back to the other one (BAR(0) = memory, BAR(1) = I/O).
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
	    &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
		    &sc->msk_res_rid,
		    RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ? "memory" :
			    "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->msk_irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Read chip id and revision before touching anything else. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    device_get_nameunit(dev),
	    CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
	    "I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, mskc_sysctl_intr_rate,
	    "I", "max number of interrupt per second");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
	    "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
	    0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
	    "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
	    0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
	    "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
	    0, "# of trailing copies on TX path");

	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	/* PMD type 'L'/'S' indicates fiber media, anything else copper. */
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/*
	 * Check bus type.
	 * NOTE(review): this assumes pci_is_pcie()/pci_is_pcix() return 0
	 * when the corresponding capability is present (find-capability
	 * convention) — verify against the platform's PCI support code.
	 */
	if (pci_is_pcie(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_is_pcix(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	/* Core clock, used by MSK_USECS() timer conversions. */
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 Mhz */
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 Mhz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 Mhz */
		break;
	}

	/* Status ring must exist before mskc_reset() programs it. */
	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	/* Create one child "msk" device per port, port id via ivars. */
	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
	    mskc_intr, sc, &sc->msk_intrhand,
	    &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

	/* Pin both ports' if_cpuid to the interrupt thread's cpu. */
	cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (sc->msk_if[0] != NULL)
		sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
	if (sc->msk_if[1] != NULL)
		sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;
	return 0;
fail:
	mskc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
1671 */ 1672 static int 1673 msk_detach(device_t dev) 1674 { 1675 struct msk_if_softc *sc_if = device_get_softc(dev); 1676 1677 if (device_is_attached(dev)) { 1678 struct msk_softc *sc = sc_if->msk_softc; 1679 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1680 1681 lwkt_serialize_enter(ifp->if_serializer); 1682 1683 if (sc->msk_intrhand != NULL) { 1684 if (sc->msk_if[MSK_PORT_A] != NULL) 1685 msk_stop(sc->msk_if[MSK_PORT_A]); 1686 if (sc->msk_if[MSK_PORT_B] != NULL) 1687 msk_stop(sc->msk_if[MSK_PORT_B]); 1688 1689 bus_teardown_intr(sc->msk_dev, sc->msk_irq, 1690 sc->msk_intrhand); 1691 sc->msk_intrhand = NULL; 1692 } 1693 1694 lwkt_serialize_exit(ifp->if_serializer); 1695 1696 ether_ifdetach(ifp); 1697 } 1698 1699 if (sc_if->msk_miibus != NULL) 1700 device_delete_child(dev, sc_if->msk_miibus); 1701 1702 msk_txrx_dma_free(sc_if); 1703 return (0); 1704 } 1705 1706 static int 1707 mskc_detach(device_t dev) 1708 { 1709 struct msk_softc *sc = device_get_softc(dev); 1710 int *port, i; 1711 1712 #ifdef INVARIANTS 1713 if (device_is_attached(dev)) { 1714 KASSERT(sc->msk_intrhand == NULL, 1715 ("intr is not torn down yet\n")); 1716 } 1717 #endif 1718 1719 for (i = 0; i < sc->msk_num_port; ++i) { 1720 if (sc->msk_devs[i] != NULL) { 1721 port = device_get_ivars(sc->msk_devs[i]); 1722 if (port != NULL) { 1723 kfree(port, M_DEVBUF); 1724 device_set_ivars(sc->msk_devs[i], NULL); 1725 } 1726 device_delete_child(dev, sc->msk_devs[i]); 1727 } 1728 } 1729 1730 /* Disable all interrupts. */ 1731 CSR_WRITE_4(sc, B0_IMSK, 0); 1732 CSR_READ_4(sc, B0_IMSK); 1733 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1734 CSR_READ_4(sc, B0_HWE_IMSK); 1735 1736 /* LED Off. */ 1737 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1738 1739 /* Put hardware reset. 
*/ 1740 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1741 1742 mskc_status_dma_free(sc); 1743 1744 if (sc->msk_irq != NULL) { 1745 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid, 1746 sc->msk_irq); 1747 } 1748 if (sc->msk_res != NULL) { 1749 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid, 1750 sc->msk_res); 1751 } 1752 1753 if (sc->msk_sysctl_tree != NULL) 1754 sysctl_ctx_free(&sc->msk_sysctl_ctx); 1755 1756 return (0); 1757 } 1758 1759 /* Create status DMA region. */ 1760 static int 1761 mskc_status_dma_alloc(struct msk_softc *sc) 1762 { 1763 bus_dmamem_t dmem; 1764 int error; 1765 1766 error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0, 1767 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1768 MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1769 if (error) { 1770 device_printf(sc->msk_dev, 1771 "failed to create status coherent DMA memory\n"); 1772 return error; 1773 } 1774 sc->msk_stat_tag = dmem.dmem_tag; 1775 sc->msk_stat_map = dmem.dmem_map; 1776 sc->msk_stat_ring = dmem.dmem_addr; 1777 sc->msk_stat_ring_paddr = dmem.dmem_busaddr; 1778 1779 return (0); 1780 } 1781 1782 static void 1783 mskc_status_dma_free(struct msk_softc *sc) 1784 { 1785 /* Destroy status block. */ 1786 if (sc->msk_stat_tag) { 1787 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1788 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1789 sc->msk_stat_map); 1790 bus_dma_tag_destroy(sc->msk_stat_tag); 1791 sc->msk_stat_tag = NULL; 1792 } 1793 } 1794 1795 static int 1796 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1797 { 1798 int error, i, j; 1799 #ifdef MSK_JUMBO 1800 struct msk_rxdesc *jrxd; 1801 struct msk_jpool_entry *entry; 1802 uint8_t *ptr; 1803 #endif 1804 1805 /* Create parent DMA tag. */ 1806 /* 1807 * XXX 1808 * It seems that Yukon II supports full 64bits DMA operations. But 1809 * it needs two descriptors(list elements) for 64bits DMA operations. 
1810 * Since we don't know what DMA address mappings(32bits or 64bits) 1811 * would be used in advance for each mbufs, we limits its DMA space 1812 * to be in range of 32bits address space. Otherwise, we should check 1813 * what DMA address is used and chain another descriptor for the 1814 * 64bits DMA operation. This also means descriptor ring size is 1815 * variable. Limiting DMA address to be in 32bit address space greatly 1816 * simplyfies descriptor handling and possibly would increase 1817 * performance a bit due to efficient handling of descriptors. 1818 * Apart from harassing checksum offloading mechanisms, it seems 1819 * it's really bad idea to use a seperate descriptor for 64bit 1820 * DMA operation to save small descriptor memory. Anyway, I've 1821 * never seen these exotic scheme on ethernet interface hardware. 1822 */ 1823 error = bus_dma_tag_create( 1824 NULL, /* parent */ 1825 1, 0, /* alignment, boundary */ 1826 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1827 BUS_SPACE_MAXADDR, /* highaddr */ 1828 NULL, NULL, /* filter, filterarg */ 1829 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1830 0, /* nsegments */ 1831 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1832 0, /* flags */ 1833 &sc_if->msk_cdata.msk_parent_tag); 1834 if (error) { 1835 device_printf(sc_if->msk_if_dev, 1836 "failed to create parent DMA tag\n"); 1837 return error; 1838 } 1839 1840 /* Create DMA stuffs for Tx ring. */ 1841 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ, 1842 &sc_if->msk_cdata.msk_tx_ring_tag, 1843 (void **)&sc_if->msk_rdata.msk_tx_ring, 1844 &sc_if->msk_rdata.msk_tx_ring_paddr, 1845 &sc_if->msk_cdata.msk_tx_ring_map); 1846 if (error) { 1847 device_printf(sc_if->msk_if_dev, 1848 "failed to create TX ring DMA stuffs\n"); 1849 return error; 1850 } 1851 1852 /* Create DMA stuffs for Rx ring. 
*/ 1853 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ, 1854 &sc_if->msk_cdata.msk_rx_ring_tag, 1855 (void **)&sc_if->msk_rdata.msk_rx_ring, 1856 &sc_if->msk_rdata.msk_rx_ring_paddr, 1857 &sc_if->msk_cdata.msk_rx_ring_map); 1858 if (error) { 1859 device_printf(sc_if->msk_if_dev, 1860 "failed to create RX ring DMA stuffs\n"); 1861 return error; 1862 } 1863 1864 /* Create tag for Tx buffers. */ 1865 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1866 1, 0, /* alignment, boundary */ 1867 BUS_SPACE_MAXADDR, /* lowaddr */ 1868 BUS_SPACE_MAXADDR, /* highaddr */ 1869 NULL, NULL, /* filter, filterarg */ 1870 MSK_JUMBO_FRAMELEN, /* maxsize */ 1871 MSK_MAXTXSEGS, /* nsegments */ 1872 MSK_MAXSGSIZE, /* maxsegsize */ 1873 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 1874 BUS_DMA_ONEBPAGE, /* flags */ 1875 &sc_if->msk_cdata.msk_tx_tag); 1876 if (error) { 1877 device_printf(sc_if->msk_if_dev, 1878 "failed to create Tx DMA tag\n"); 1879 return error; 1880 } 1881 1882 /* Create DMA maps for Tx buffers. */ 1883 for (i = 0; i < MSK_TX_RING_CNT; i++) { 1884 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i]; 1885 1886 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 1887 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1888 &txd->tx_dmamap); 1889 if (error) { 1890 device_printf(sc_if->msk_if_dev, 1891 "failed to create %dth Tx dmamap\n", i); 1892 1893 for (j = 0; j < i; ++j) { 1894 txd = &sc_if->msk_cdata.msk_txdesc[j]; 1895 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 1896 txd->tx_dmamap); 1897 } 1898 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 1899 sc_if->msk_cdata.msk_tx_tag = NULL; 1900 1901 return error; 1902 } 1903 } 1904 1905 /* Create tag for Rx buffers. 
*/ 1906 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1907 1, 0, /* alignment, boundary */ 1908 BUS_SPACE_MAXADDR, /* lowaddr */ 1909 BUS_SPACE_MAXADDR, /* highaddr */ 1910 NULL, NULL, /* filter, filterarg */ 1911 MCLBYTES, /* maxsize */ 1912 1, /* nsegments */ 1913 MCLBYTES, /* maxsegsize */ 1914 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,/* flags */ 1915 &sc_if->msk_cdata.msk_rx_tag); 1916 if (error) { 1917 device_printf(sc_if->msk_if_dev, 1918 "failed to create Rx DMA tag\n"); 1919 return error; 1920 } 1921 1922 /* Create DMA maps for Rx buffers. */ 1923 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK, 1924 &sc_if->msk_cdata.msk_rx_sparemap); 1925 if (error) { 1926 device_printf(sc_if->msk_if_dev, 1927 "failed to create spare Rx dmamap\n"); 1928 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 1929 sc_if->msk_cdata.msk_rx_tag = NULL; 1930 return error; 1931 } 1932 for (i = 0; i < MSK_RX_RING_CNT; i++) { 1933 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 1934 1935 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 1936 BUS_DMA_WAITOK, &rxd->rx_dmamap); 1937 if (error) { 1938 device_printf(sc_if->msk_if_dev, 1939 "failed to create %dth Rx dmamap\n", i); 1940 1941 for (j = 0; j < i; ++j) { 1942 rxd = &sc_if->msk_cdata.msk_rxdesc[j]; 1943 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 1944 rxd->rx_dmamap); 1945 } 1946 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 1947 sc_if->msk_cdata.msk_rx_sparemap); 1948 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 1949 sc_if->msk_cdata.msk_rx_tag = NULL; 1950 1951 return error; 1952 } 1953 } 1954 1955 #ifdef MSK_JUMBO 1956 SLIST_INIT(&sc_if->msk_jfree_listhead); 1957 SLIST_INIT(&sc_if->msk_jinuse_listhead); 1958 1959 /* Create tag for jumbo Rx ring. 
*/ 1960 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1961 MSK_RING_ALIGN, 0, /* alignment, boundary */ 1962 BUS_SPACE_MAXADDR, /* lowaddr */ 1963 BUS_SPACE_MAXADDR, /* highaddr */ 1964 NULL, NULL, /* filter, filterarg */ 1965 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 1966 1, /* nsegments */ 1967 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 1968 0, /* flags */ 1969 NULL, NULL, /* lockfunc, lockarg */ 1970 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 1971 if (error != 0) { 1972 device_printf(sc_if->msk_if_dev, 1973 "failed to create jumbo Rx ring DMA tag\n"); 1974 goto fail; 1975 } 1976 1977 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1978 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 1979 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 1980 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1981 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 1982 if (error != 0) { 1983 device_printf(sc_if->msk_if_dev, 1984 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 1985 goto fail; 1986 } 1987 1988 ctx.msk_busaddr = 0; 1989 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 1990 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 1991 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 1992 msk_dmamap_cb, &ctx, 0); 1993 if (error != 0) { 1994 device_printf(sc_if->msk_if_dev, 1995 "failed to load DMA'able memory for jumbo Rx ring\n"); 1996 goto fail; 1997 } 1998 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 1999 2000 /* Create tag for jumbo buffer blocks. 
*/ 2001 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2002 PAGE_SIZE, 0, /* alignment, boundary */ 2003 BUS_SPACE_MAXADDR, /* lowaddr */ 2004 BUS_SPACE_MAXADDR, /* highaddr */ 2005 NULL, NULL, /* filter, filterarg */ 2006 MSK_JMEM, /* maxsize */ 2007 1, /* nsegments */ 2008 MSK_JMEM, /* maxsegsize */ 2009 0, /* flags */ 2010 NULL, NULL, /* lockfunc, lockarg */ 2011 &sc_if->msk_cdata.msk_jumbo_tag); 2012 if (error != 0) { 2013 device_printf(sc_if->msk_if_dev, 2014 "failed to create jumbo Rx buffer block DMA tag\n"); 2015 goto fail; 2016 } 2017 2018 /* Create tag for jumbo Rx buffers. */ 2019 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2020 PAGE_SIZE, 0, /* alignment, boundary */ 2021 BUS_SPACE_MAXADDR, /* lowaddr */ 2022 BUS_SPACE_MAXADDR, /* highaddr */ 2023 NULL, NULL, /* filter, filterarg */ 2024 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2025 MSK_MAXRXSEGS, /* nsegments */ 2026 MSK_JLEN, /* maxsegsize */ 2027 0, /* flags */ 2028 NULL, NULL, /* lockfunc, lockarg */ 2029 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2030 if (error != 0) { 2031 device_printf(sc_if->msk_if_dev, 2032 "failed to create jumbo Rx DMA tag\n"); 2033 goto fail; 2034 } 2035 2036 /* Create DMA maps for jumbo Rx buffers. */ 2037 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2038 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2039 device_printf(sc_if->msk_if_dev, 2040 "failed to create spare jumbo Rx dmamap\n"); 2041 goto fail; 2042 } 2043 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2044 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2045 jrxd->rx_m = NULL; 2046 jrxd->rx_dmamap = NULL; 2047 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2048 &jrxd->rx_dmamap); 2049 if (error != 0) { 2050 device_printf(sc_if->msk_if_dev, 2051 "failed to create jumbo Rx dmamap\n"); 2052 goto fail; 2053 } 2054 } 2055 2056 /* Allocate DMA'able memory and load the DMA map for jumbo buf. 
 */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo buf\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbobuf\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Each slot gets a free-list entry so msk_jalloc()
	 * can hand pieces out.
	 */
	ptr = sc_if->msk_rdata.msk_jumbo_buf;
	for (i = 0; i < MSK_JSLOTS; i++) {
		sc_if->msk_cdata.msk_jslots[i] = ptr;
		ptr += MSK_JLEN;
		/* NOTE(review): M_WAITOK malloc never returns NULL, so the
		 * NULL check below is effectively dead code. */
		entry = malloc(sizeof(struct msk_jpool_entry),
		    M_DEVBUF, M_WAITOK);
		if (entry == NULL) {
			device_printf(sc_if->msk_if_dev,
			    "no memory for jumbo buffers!\n");
			error = ENOMEM;
			goto fail;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
		    jpool_entries);
	}
#endif
	return 0;
}

/*
 * Release every Tx/Rx DMA resource owned by the interface: jumbo pool
 * entries and maps (when MSK_JUMBO), then the Tx/Rx rings, per-descriptor
 * buffer maps, and finally the parent DMA tag.  Safe to call on a
 * partially initialized softc; every teardown is guarded by a
 * tag/map NULL check.
 */
static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
	struct msk_jpool_entry *entry;
#endif
	int i;

#ifdef MSK_JUMBO
	MSK_JLIST_LOCK(sc_if);
	/* Reclaim any entries still marked in-use; complain loudly. */
	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
		device_printf(sc_if->msk_if_dev,
		    "asked to free buffer that is in use!\n");
		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
		    jpool_entries);
	}

	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}
	MSK_JLIST_UNLOCK(sc_if);

	/* Destroy jumbo buffer block. */
	if (sc_if->msk_cdata.msk_jumbo_map)
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
		    sc_if->msk_cdata.msk_jumbo_map);

	if (sc_if->msk_rdata.msk_jumbo_buf) {
		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
		    sc_if->msk_rdata.msk_jumbo_buf,
		    sc_if->msk_cdata.msk_jumbo_map);
		sc_if->msk_rdata.msk_jumbo_buf = NULL;
		sc_if->msk_cdata.msk_jumbo_map = NULL;
	}

	/* Jumbo Rx ring: unload, free memory, then destroy the tag. */
	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
		    sc_if->msk_rdata.msk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_rdata.msk_jumbo_rx_ring,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
	}

	/* Jumbo Rx buffers: per-slot maps, spare map, then the tag. */
	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}
#endif

	/* Tx ring. */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_rdata.msk_tx_ring,
	    sc_if->msk_cdata.msk_tx_ring_map);

	/* Rx ring. */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_rdata.msk_rx_ring,
	    sc_if->msk_cdata.msk_rx_ring_map);

	/* Tx buffers. */
	if (sc_if->msk_cdata.msk_tx_tag) {
		for (i = 0; i < MSK_TX_RING_CNT; i++) {
			txd = &sc_if->msk_cdata.msk_txdesc[i];
			bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
		sc_if->msk_cdata.msk_tx_tag = NULL;
	}

	/* Rx buffers. */
	if (sc_if->msk_cdata.msk_rx_tag) {
		for (i = 0; i < MSK_RX_RING_CNT; i++) {
			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
		    sc_if->msk_cdata.msk_rx_sparemap);
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;
	}

	/* Parent tag last, after all children are destroyed. */
	if (sc_if->msk_cdata.msk_parent_tag) {
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
		sc_if->msk_cdata.msk_parent_tag = NULL;
	}
}

#ifdef MSK_JUMBO
/*
 * Allocate a jumbo buffer.
 */
/*
 * Pop one slot off the jumbo free list and return its KVA, or NULL when
 * the pool is exhausted.  The entry is moved to the in-use list so
 * msk_jfree() can return it later.
 */
static void *
msk_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_jpool_entry *entry;

	MSK_JLIST_LOCK(sc_if);

	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);

	if (entry == NULL) {
		MSK_JLIST_UNLOCK(sc_if);
		return (NULL);
	}

	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);

	MSK_JLIST_UNLOCK(sc_if);

	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.  Invoked as an external-storage free callback,
 * so `args' carries the softc pointer.  Wakes up anyone sleeping for the
 * pool to drain once no buffer remains in use.
 */
static void
msk_jfree(void *buf, void *args)
{
	struct msk_if_softc *sc_if;
	struct msk_jpool_entry *entry;
	int i;

	/* Extract the softc struct pointer. */
	sc_if = (struct msk_if_softc *)args;
	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));

	MSK_JLIST_LOCK(sc_if);
	/* Calculate the slot this buffer belongs to. */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
	KASSERT(i >= 0 && i < MSK_JSLOTS,
	    ("%s: asked to free buffer that we don't manage!", __func__));

	/* Recycle an arbitrary in-use entry; only its slot field matters. */
	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
		wakeup(sc_if);

	MSK_JLIST_UNLOCK(sc_if);
}
#endif

/*
 * Encapsulate *m_head into the Tx ring: align/defragment the mbuf,
 * set up checksum-offload list elements when requested, load the DMA
 * map, and fill OP_PACKET/OP_BUFFER descriptors.  On failure the mbuf
 * is freed and *m_head is set to NULL.
 */
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, prod, si;
	uint16_t offset, tcp_offset;
	int error, i, nsegs, maxsegs, defrag;

	/* Leave reserved descriptors free so the ring never fully wraps. */
	maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
	    MSK_RESERVED_TX_DESC_CNT;
	KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
	    ("not enough spare TX desc\n"));
	if (maxsegs > MSK_MAXTXSEGS)
		maxsegs = MSK_MAXTXSEGS;

	/*
	 * Align TX buffer to 64 bytes boundary.  This greatly improves
	 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
	 * Try avoiding m_defrag(), if the mbufs are not chained together
	 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
	 */

#define MSK_TXBUF_ALIGN	64
#define MSK_TXBUF_MASK	(MSK_TXBUF_ALIGN - 1)

	defrag = 1;
	m = *m_head;
	if (m->m_len == m->m_pkthdr.len) {
		int space;

		space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
		if (space) {
			if (M_WRITABLE(m)) {
				if (M_TRAILINGSPACE(m) >= space) {
					/* e.g. TCP ACKs */
					bcopy(m->m_data, m->m_data + space,
					    m->m_len);
					m->m_data += space;
					defrag = 0;
					sc_if->msk_softc->msk_trailing_copied++;
				} else {
					space = MSK_TXBUF_ALIGN - space;
					if (M_LEADINGSPACE(m) >= space) {
						/* e.g. Small UDP datagrams */
						bcopy(m->m_data,
						    m->m_data - space,
						    m->m_len);
						m->m_data -= space;
						defrag = 0;
						sc_if->msk_softc->
						msk_leading_copied++;
					}
				}
			}
		} else {
			/* e.g. on forwarding path */
			defrag = 0;
		}
	}
	if (defrag) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return ENOBUFS;
		}
		*m_head = m;
	} else {
		sc_if->msk_softc->msk_defrag_avoided++;
	}

#undef MSK_TXBUF_MASK
#undef MSK_TXBUF_ALIGN

	tcp_offset = offset = 0;
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		/*
		 * Since mbuf has no protocol specific structure information
		 * in it we have to inspect protocol information here to
		 * setup TSO and checksum offload.
 I don't know why Marvell
		 * made such a decision in chip design because other GigE
		 * hardware normally takes care of all these chores in
		 * hardware. However, TSO performance of Yukon II is very
		 * good such that it's worth to implement it.
		 */
		struct ether_header *eh;
		struct ip *ip;

		/* TODO check for M_WRITABLE(m) */

		/* Pull up the Ethernet (or VLAN) header so we can read it. */
		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		/* offset now points at the transport header. */
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		/*
		 * It seems that Yukon II has Tx checksum offload bug for
		 * small TCP packets that's less than 60 bytes in size
		 * (e.g. TCP window probe packet, pure ACK packet).
		 * Common work around like padding with zeros to make the
		 * frame minimum ethernet frame size didn't work at all.
		 * Instead of disabling checksum offload completely we
		 * resort to S/W checksum routine when we encounter short
		 * TCP frames.
		 * Short UDP packets appear to be handled correctly by
		 * Yukon II.
		 */
		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			uint16_t csum;

			/* Software checksum over the TCP segment only. */
			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
			    (ip->ip_hl << 2), offset);
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = csum;
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}

	/* Remember the first descriptor's map; it is swapped to the last
	 * descriptor below so the unload in msk_txeof() hits the right map. */
	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
	    m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return error;
	}
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	control = 0;
	tx_le = NULL;

#ifdef notyet
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(0);
		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
		    htons(m->m_pkthdr.ether_vtag));
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
		control |= INS_VLAN;
	}
#endif
	/* Check if we have to handle checksum offload.
 */
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		/* Emit an OP_TCPLISW list element describing the csum job. */
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
		    & 0xffff) | ((uint32_t)tcp_offset << 16));
		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			control |= UDPTCP;
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}

	/* First data descriptor; HW_OWNER is set last (see below). */
	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
	    OP_PACKET);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nsegs; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
	/* Update producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_control |= htole32(EOP);

	/*
	 * Turn the first descriptor ownership to hardware only now, so the
	 * chip never sees a partially built chain.
	 */
	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
	tx_le->msk_control |= htole32(HW_OWNER);

	/* Swap maps so txd of the last descriptor owns the loaded map. */
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}

/*
 * ifnet if_start method: drain the send queue into the Tx ring and kick
 * the prefetch unit.  Runs under the interface serializer.
 */
static void
msk_start(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc_if->msk_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		if (MSK_IS_OACTIVE(sc_if)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			ifp->if_oerrors++;
			if (sc_if->msk_cdata.msk_tx_cnt == 0) {
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch.
 */
		ifp->if_timer = MSK_TX_TIMEOUT;
	}
}

/*
 * Tx watchdog: first try to reclaim completed descriptors (a Tx
 * completion interrupt may have been lost); only reinitialize the
 * interface when real work is still stuck.
 */
static void
msk_watchdog(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	uint32_t ridx;
	int idx;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc_if->msk_link == 0) {
		if (bootverbose)
			if_printf(sc_if->msk_ifp, "watchdog timeout "
			    "(missed link)\n");
		ifp->if_oerrors++;
		msk_init(sc_if);
		return;
	}

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
	idx = CSR_READ_2(sc_if->msk_softc, ridx);
	if (sc_if->msk_cdata.msk_tx_cons != idx) {
		msk_txeof(sc_if, idx);
		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			    "-- recovering\n");
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			return;
		}
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	msk_init(sc_if);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Device shutdown: stop both ports, mask all interrupts and put the
 * chip into reset.
 */
static int
mskc_shutdown(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL)
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts; the reads flush the writes. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	lwkt_serialize_exit(&sc->msk_serializer);
	return (0);
}

/*
 * Suspend: like shutdown, but also power down the PHY and remember the
 * suspended state so the interrupt handler ignores spurious interrupts.
 */
static int
mskc_suspend(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	mskc_phy_power(sc, MSK_PHY_POWERDOWN);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	sc->msk_suspended = 1;

	lwkt_serialize_exit(&sc->msk_serializer);

	return (0);
}

/*
 * Resume: reset the chip and reinitialize every port that was up.
 */
static int
mskc_resume(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	mskc_reset(sc);
	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
			msk_init(sc->msk_if[i]);
	}
	sc->msk_suspended = 0;

	lwkt_serialize_exit(&sc->msk_serializer);

	return (0);
}

/*
 * Receive one frame described by a status LE: validate it, replace the
 * ring buffer, and queue the mbuf on the per-CPU input chain.  The
 * do { } while (0) lets error paths `break' to the common ring-index
 * advance at the bottom.
 */
static void
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len,
    struct mbuf_chain *chain)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *rxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= EVL_ENCAPLEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packet as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				ifp->if_ierrors++;
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
		m = rxd->rx_m;
		if (msk_newbuf(sc_if, cons, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse old buffer. */
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;
#ifdef notyet
		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ether_input_chain(ifp, m, NULL, chain);
	} while (0);

	/* Always consume the LE; producer follows consumer one-for-one. */
	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}

#ifdef MSK_JUMBO
/*
 * Jumbo-frame variant of msk_rxeof().  Drops the interface lock around
 * if_input since the upper stack may recurse into the driver.
 */
static void
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *jrxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packet as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				ifp->if_ierrors++;
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
		m = jrxd->rx_m;
		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* Reuse old buffer. */
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;
		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}
#endif

/*
 * Reclaim Tx descriptors up to hardware read index `idx': free the
 * mbuf on each EOP descriptor and clear OACTIVE/watchdog when room
 * opens up.
 */
static void
msk_txeof(struct msk_if_softc *sc_if, int idx)
{
	struct msk_txdesc *txd;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t control;
	int cons, prog;

	ifp = sc_if->msk_ifp;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	prog = 0;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;
		prog++;
		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		/* Only the EOP descriptor carries the mbuf/dmamap. */
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		ifp->if_opackets++;
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc_if->msk_cdata.msk_tx_cons = cons;
		if (!MSK_IS_OACTIVE(sc_if))
			ifp->if_flags &= ~IFF_OACTIVE;
		if (sc_if->msk_cdata.msk_tx_cnt == 0)
			ifp->if_timer = 0;
		/* No need to sync LEs as we didn't update LEs.
 */
	}
}

/*
 * Periodic (1 Hz) MII tick callout, run under the interface serializer.
 */
static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if = xsc_if;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);

	mii_tick(mii);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);

	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * PHY interrupt: reading PHY_MARV_INT_STAT acknowledges the interrupt;
 * the double read discards a potentially stale first value.
 */
static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	/* Handle FIFO Underrun/Overflow? */
	if (status & PHY_M_IS_FIFO_ERROR) {
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
	}
}

/*
 * GMAC interrupt: clear and report Rx FIFO overruns and Tx FIFO
 * underruns for this port.
 */
static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
		device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
	}
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of Tx underrun, we may need to flush/reset
		 * Tx MAC but that would also require resynchronization
		 * with status LEs. Reinitializing status LEs would
		 * affect other port in dual MAC configuration so it
		 * should be avoided as much as we can.
		 * Due to lack of documentation it's all vague guess but
		 * it needs more investigation.
		 */
	}
}

/*
 * Per-port hardware error handler: report and clear RAM buffer parity,
 * MAC parity, Rx parity, and TCP segmentation errors.  `status' has
 * already been shifted so port B uses the same bit definitions.
 */
static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

/*
 * Chip-level hardware error interrupt: timestamp overflow, PCI/PCIe
 * errors, then per-port errors via msk_handle_hwerr().
 */
static void
mskc_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * PCI Express Error occurred which is not described in PEX
		 * spec.
		 * This error is also mapped either to Master Abort(
		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		/* Config writes must be bracketed by TST_CFG_WRITE_ON/OFF. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem. After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			if (bootverbose) {
				device_printf(sc->msk_dev,
				    "Uncorrectable PCI Express error\n");
			}
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header form Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/*
			 * Check for vendor defined broadcast message;
			 * anything else masks further PEX error interrupts.
			 */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Port B shares the port A bit layout shifted left by 8. */
	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}

/*
 * Hand the current Rx producer index to the prefetch unit so the chip
 * can fetch newly replenished descriptors.
 */
static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
#ifdef MSK_JUMBO
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	}
#endif
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}

/*
 * Drain the status LE ring shared by both ports and dispatch each
 * event (Rx frame, Tx completion, VLAN tag).  Returns nonzero when the
 * hardware produced more events while we were processing.
 */
static int
mskc_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	int rxput[2];
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, idx, len, port, rxprog;
	struct mbuf_chain chain[MAXCPU];

	idx = CSR_READ_2(sc, STAT_PUT_IDX);
	if (idx == sc->msk_stat_cons)
		return (0);

	ether_input_chain_init(chain);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;

	rxprog = 0;
	for (cons = sc->msk_stat_cons; cons != idx;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		/*
		 * Marvell's FreeBSD driver updates status LE after clearing
		 * HW_OWNER. However we don't have a way to sync single LE
		 * with bus_dma(9) API. bus_dma(9) provides a way to sync
		 * an entire DMA map. So don't sync LE until we have a better
		 * way to sync LEs.
3077 */ 3078 control &= ~HW_OWNER; 3079 sd->msk_control = htole32(control); 3080 status = le32toh(sd->msk_status); 3081 len = control & STLE_LEN_MASK; 3082 port = (control >> 16) & 0x01; 3083 sc_if = sc->msk_if[port]; 3084 if (sc_if == NULL) { 3085 device_printf(sc->msk_dev, "invalid port opcode " 3086 "0x%08x\n", control & STLE_OP_MASK); 3087 continue; 3088 } 3089 3090 switch (control & STLE_OP_MASK) { 3091 case OP_RXVLAN: 3092 sc_if->msk_vtag = ntohs(len); 3093 break; 3094 case OP_RXCHKSVLAN: 3095 sc_if->msk_vtag = ntohs(len); 3096 break; 3097 case OP_RXSTAT: 3098 #ifdef MSK_JUMBO 3099 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3100 msk_jumbo_rxeof(sc_if, status, len); 3101 else 3102 #endif 3103 msk_rxeof(sc_if, status, len, chain); 3104 rxprog++; 3105 /* 3106 * Because there is no way to sync single Rx LE 3107 * put the DMA sync operation off until the end of 3108 * event processing. 3109 */ 3110 rxput[port]++; 3111 /* Update prefetch unit if we've passed water mark. */ 3112 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3113 msk_rxput(sc_if); 3114 rxput[port] = 0; 3115 } 3116 break; 3117 case OP_TXINDEXLE: 3118 if (sc->msk_if[MSK_PORT_A] != NULL) { 3119 msk_txeof(sc->msk_if[MSK_PORT_A], 3120 status & STLE_TXA1_MSKL); 3121 } 3122 if (sc->msk_if[MSK_PORT_B] != NULL) { 3123 msk_txeof(sc->msk_if[MSK_PORT_B], 3124 ((status & STLE_TXA2_MSKL) >> 3125 STLE_TXA2_SHIFTL) | 3126 ((len & STLE_TXA2_MSKH) << 3127 STLE_TXA2_SHIFTH)); 3128 } 3129 break; 3130 default: 3131 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3132 control & STLE_OP_MASK); 3133 break; 3134 } 3135 MSK_INC(cons, MSK_STAT_RING_CNT); 3136 if (rxprog > sc->msk_process_limit) 3137 break; 3138 } 3139 3140 if (rxprog > 0) 3141 ether_input_dispatch(chain); 3142 3143 sc->msk_stat_cons = cons; 3144 /* XXX We should sync status LEs here. See above notes. 
*/ 3145 3146 if (rxput[MSK_PORT_A] > 0) 3147 msk_rxput(sc->msk_if[MSK_PORT_A]); 3148 if (rxput[MSK_PORT_B] > 0) 3149 msk_rxput(sc->msk_if[MSK_PORT_B]); 3150 3151 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3152 } 3153 3154 /* Legacy interrupt handler for shared interrupt. */ 3155 static void 3156 mskc_intr(void *xsc) 3157 { 3158 struct msk_softc *sc; 3159 struct msk_if_softc *sc_if0, *sc_if1; 3160 struct ifnet *ifp0, *ifp1; 3161 uint32_t status; 3162 3163 sc = xsc; 3164 ASSERT_SERIALIZED(&sc->msk_serializer); 3165 3166 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3167 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3168 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3169 (status & sc->msk_intrmask) == 0) { 3170 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3171 return; 3172 } 3173 3174 sc_if0 = sc->msk_if[MSK_PORT_A]; 3175 sc_if1 = sc->msk_if[MSK_PORT_B]; 3176 ifp0 = ifp1 = NULL; 3177 if (sc_if0 != NULL) 3178 ifp0 = sc_if0->msk_ifp; 3179 if (sc_if1 != NULL) 3180 ifp1 = sc_if1->msk_ifp; 3181 3182 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3183 msk_intr_phy(sc_if0); 3184 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3185 msk_intr_phy(sc_if1); 3186 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3187 msk_intr_gmac(sc_if0); 3188 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3189 msk_intr_gmac(sc_if1); 3190 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3191 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3192 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3193 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3194 CSR_READ_4(sc, B0_IMSK); 3195 } 3196 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3197 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3198 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3199 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3200 CSR_READ_4(sc, B0_IMSK); 3201 } 3202 if ((status & Y2_IS_HW_ERR) != 0) 3203 mskc_intr_hwerr(sc); 3204 3205 while 
(mskc_handle_events(sc) != 0) 3206 ; 3207 if ((status & Y2_IS_STAT_BMU) != 0) 3208 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3209 3210 /* Reenable interrupts. */ 3211 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3212 3213 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 && 3214 !ifq_is_empty(&ifp0->if_snd)) 3215 if_devstart(ifp0); 3216 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 && 3217 !ifq_is_empty(&ifp1->if_snd)) 3218 if_devstart(ifp1); 3219 } 3220 3221 static void 3222 msk_init(void *xsc) 3223 { 3224 struct msk_if_softc *sc_if = xsc; 3225 struct msk_softc *sc = sc_if->msk_softc; 3226 struct ifnet *ifp = sc_if->msk_ifp; 3227 struct mii_data *mii; 3228 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3229 uint16_t gmac; 3230 int error, i; 3231 3232 ASSERT_SERIALIZED(ifp->if_serializer); 3233 3234 mii = device_get_softc(sc_if->msk_miibus); 3235 3236 error = 0; 3237 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3238 msk_stop(sc_if); 3239 3240 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3241 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 3242 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3243 /* 3244 * In Yukon EC Ultra, TSO & checksum offload is not 3245 * supported for jumbo frame. 3246 */ 3247 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 3248 ifp->if_capenable &= ~IFCAP_TXCSUM; 3249 } 3250 3251 /* 3252 * Initialize GMAC first. 3253 * Without this initialization, Rx MAC did not work as expected 3254 * and Rx MAC garbled status LEs and it resulted in out-of-order 3255 * or duplicated frame delivery which in turn showed very poor 3256 * Rx performance.(I had to write a packet analysis code that 3257 * could be embeded in driver to diagnose this issue.) 3258 * I've spent almost 2 months to fix this issue. If I have had 3259 * datasheet for Yukon II I wouldn't have encountered this. 
:-( 3260 */ 3261 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL; 3262 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 3263 3264 /* Dummy read the Interrupt Source Register. */ 3265 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3266 3267 /* Set MIB Clear Counter Mode. */ 3268 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3269 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3270 /* Read all MIB Counters with Clear Mode set. */ 3271 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3272 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3273 /* Clear MIB Clear Counter Mode. */ 3274 gmac &= ~GM_PAR_MIB_CLR; 3275 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3276 3277 /* Disable FCS. */ 3278 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3279 3280 /* Setup Transmit Control Register. */ 3281 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3282 3283 /* Setup Transmit Flow Control Register. */ 3284 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3285 3286 /* Setup Transmit Parameter Register. */ 3287 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3288 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3289 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3290 3291 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3292 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3293 3294 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) 3295 gmac |= GM_SMOD_JUMBO_ENA; 3296 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3297 3298 /* Set station address. */ 3299 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3300 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3301 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3302 eaddr[i]); 3303 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3304 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3305 eaddr[i]); 3306 3307 /* Disable interrupts for counter overflows. 
*/ 3308 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3309 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3310 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3311 3312 /* Configure Rx MAC FIFO. */ 3313 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3314 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3315 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3316 GMF_OPER_ON | GMF_RX_F_FL_ON); 3317 3318 /* Set promiscuous mode. */ 3319 msk_setpromisc(sc_if); 3320 3321 /* Set multicast filter. */ 3322 msk_setmulti(sc_if); 3323 3324 /* Flush Rx MAC FIFO on any flow control or error. */ 3325 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3326 GMR_FS_ANY_ERR); 3327 3328 /* Set Rx FIFO flush threshold to 64 bytes. */ 3329 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), 3330 RX_GMF_FL_THR_DEF); 3331 3332 /* Configure Tx MAC FIFO. */ 3333 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3334 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3335 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3336 3337 /* Configure hardware VLAN tag insertion/stripping. */ 3338 msk_setvlan(sc_if, ifp); 3339 3340 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3341 /* Set Rx Pause threshould. */ 3342 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3343 MSK_ECU_LLPP); 3344 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3345 MSK_ECU_ULPP); 3346 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) { 3347 /* 3348 * Set Tx GMAC FIFO Almost Empty Threshold. 3349 */ 3350 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3351 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3352 /* Disable Store & Forward mode for Tx. */ 3353 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3354 TX_JUMBO_ENA | TX_STFW_DIS); 3355 } else { 3356 /* Enable Store & Forward mode for Tx. 
*/ 3357 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3358 TX_JUMBO_DIS | TX_STFW_ENA); 3359 } 3360 } 3361 3362 /* 3363 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3364 * arbiter as we don't use Sync Tx queue. 3365 */ 3366 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3367 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3368 /* Enable the RAM Interface Arbiter. */ 3369 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3370 3371 /* Setup RAM buffer. */ 3372 msk_set_rambuffer(sc_if); 3373 3374 /* Disable Tx sync Queue. */ 3375 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3376 3377 /* Setup Tx Queue Bus Memory Interface. */ 3378 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3379 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3380 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3381 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3382 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3383 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3384 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3385 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); 3386 } 3387 3388 /* Setup Rx Queue Bus Memory Interface. */ 3389 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3390 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3391 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3392 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3393 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3394 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3395 /* MAC Rx RAM Read is controlled by hardware. */ 3396 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3397 } 3398 3399 msk_set_prefetch(sc, sc_if->msk_txq, 3400 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3401 msk_init_tx_ring(sc_if); 3402 3403 /* Disable Rx checksum offload and RSS hash. 
*/ 3404 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3405 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3406 #ifdef MSK_JUMBO 3407 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3408 msk_set_prefetch(sc, sc_if->msk_rxq, 3409 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3410 MSK_JUMBO_RX_RING_CNT - 1); 3411 error = msk_init_jumbo_rx_ring(sc_if); 3412 } else 3413 #endif 3414 { 3415 msk_set_prefetch(sc, sc_if->msk_rxq, 3416 sc_if->msk_rdata.msk_rx_ring_paddr, 3417 MSK_RX_RING_CNT - 1); 3418 error = msk_init_rx_ring(sc_if); 3419 } 3420 if (error != 0) { 3421 device_printf(sc_if->msk_if_dev, 3422 "initialization failed: no memory for Rx buffers\n"); 3423 msk_stop(sc_if); 3424 return; 3425 } 3426 3427 /* Configure interrupt handling. */ 3428 if (sc_if->msk_port == MSK_PORT_A) { 3429 sc->msk_intrmask |= Y2_IS_PORT_A; 3430 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3431 } else { 3432 sc->msk_intrmask |= Y2_IS_PORT_B; 3433 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3434 } 3435 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3436 CSR_READ_4(sc, B0_HWE_IMSK); 3437 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3438 CSR_READ_4(sc, B0_IMSK); 3439 3440 sc_if->msk_link = 0; 3441 mii_mediachg(mii); 3442 3443 mskc_set_imtimer(sc); 3444 3445 ifp->if_flags |= IFF_RUNNING; 3446 ifp->if_flags &= ~IFF_OACTIVE; 3447 3448 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3449 } 3450 3451 static void 3452 msk_set_rambuffer(struct msk_if_softc *sc_if) 3453 { 3454 struct msk_softc *sc; 3455 int ltpp, utpp; 3456 3457 sc = sc_if->msk_softc; 3458 3459 /* Setup Rx Queue. 
*/ 3460 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3461 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3462 sc->msk_rxqstart[sc_if->msk_port] / 8); 3463 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3464 sc->msk_rxqend[sc_if->msk_port] / 8); 3465 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3466 sc->msk_rxqstart[sc_if->msk_port] / 8); 3467 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3468 sc->msk_rxqstart[sc_if->msk_port] / 8); 3469 3470 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3471 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3472 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3473 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3474 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3475 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3476 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3477 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3478 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3479 3480 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3481 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3482 3483 /* Setup Tx Queue. */ 3484 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3485 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3486 sc->msk_txqstart[sc_if->msk_port] / 8); 3487 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3488 sc->msk_txqend[sc_if->msk_port] / 8); 3489 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3490 sc->msk_txqstart[sc_if->msk_port] / 8); 3491 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3492 sc->msk_txqstart[sc_if->msk_port] / 8); 3493 /* Enable Store & Forward for Tx side. 
*/ 3494 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3495 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3496 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3497 } 3498 3499 static void 3500 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3501 uint32_t count) 3502 { 3503 3504 /* Reset the prefetch unit. */ 3505 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3506 PREF_UNIT_RST_SET); 3507 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3508 PREF_UNIT_RST_CLR); 3509 /* Set LE base address. */ 3510 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3511 MSK_ADDR_LO(addr)); 3512 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3513 MSK_ADDR_HI(addr)); 3514 /* Set the list last index. */ 3515 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3516 count); 3517 /* Turn on prefetch unit. */ 3518 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3519 PREF_UNIT_OP_ON); 3520 /* Dummy read to ensure write. */ 3521 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); 3522 } 3523 3524 static void 3525 msk_stop(struct msk_if_softc *sc_if) 3526 { 3527 struct msk_softc *sc = sc_if->msk_softc; 3528 struct ifnet *ifp = sc_if->msk_ifp; 3529 struct msk_txdesc *txd; 3530 struct msk_rxdesc *rxd; 3531 #ifdef MSK_JUMBO 3532 struct msk_rxdesc *jrxd; 3533 #endif 3534 uint32_t val; 3535 int i; 3536 3537 ASSERT_SERIALIZED(ifp->if_serializer); 3538 3539 callout_stop(&sc_if->msk_tick_ch); 3540 ifp->if_timer = 0; 3541 3542 /* Disable interrupts. 
*/ 3543 if (sc_if->msk_port == MSK_PORT_A) { 3544 sc->msk_intrmask &= ~Y2_IS_PORT_A; 3545 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK; 3546 } else { 3547 sc->msk_intrmask &= ~Y2_IS_PORT_B; 3548 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK; 3549 } 3550 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3551 CSR_READ_4(sc, B0_HWE_IMSK); 3552 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3553 CSR_READ_4(sc, B0_IMSK); 3554 3555 /* Disable Tx/Rx MAC. */ 3556 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 3557 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 3558 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); 3559 /* Read again to ensure writing. */ 3560 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 3561 3562 /* Stop Tx BMU. */ 3563 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); 3564 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 3565 for (i = 0; i < MSK_TIMEOUT; i++) { 3566 if ((val & (BMU_STOP | BMU_IDLE)) == 0) { 3567 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 3568 BMU_STOP); 3569 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 3570 } else 3571 break; 3572 DELAY(1); 3573 } 3574 if (i == MSK_TIMEOUT) 3575 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n"); 3576 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), 3577 RB_RST_SET | RB_DIS_OP_MD); 3578 3579 /* Disable all GMAC interrupt. */ 3580 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); 3581 /* Disable PHY interrupt. */ 3582 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 3583 3584 /* Disable the RAM Interface Arbiter. */ 3585 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); 3586 3587 /* Reset the PCI FIFO of the async Tx queue */ 3588 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 3589 BMU_RST_SET | BMU_FIFO_RST); 3590 3591 /* Reset the Tx prefetch units. */ 3592 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG), 3593 PREF_UNIT_RST_SET); 3594 3595 /* Reset the RAM Buffer async Tx queue. 
*/ 3596 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET); 3597 3598 /* Reset Tx MAC FIFO. */ 3599 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3600 /* Set Pause Off. */ 3601 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF); 3602 3603 /* 3604 * The Rx Stop command will not work for Yukon-2 if the BMU does not 3605 * reach the end of packet and since we can't make sure that we have 3606 * incoming data, we must reset the BMU while it is not during a DMA 3607 * transfer. Since it is possible that the Rx path is still active, 3608 * the Rx RAM buffer will be stopped first, so any possible incoming 3609 * data will not trigger a DMA. After the RAM buffer is stopped, the 3610 * BMU is polled until any DMA in progress is ended and only then it 3611 * will be reset. 3612 */ 3613 3614 /* Disable the RAM Buffer receive queue. */ 3615 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD); 3616 for (i = 0; i < MSK_TIMEOUT; i++) { 3617 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) == 3618 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL))) 3619 break; 3620 DELAY(1); 3621 } 3622 if (i == MSK_TIMEOUT) 3623 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n"); 3624 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3625 BMU_RST_SET | BMU_FIFO_RST); 3626 /* Reset the Rx prefetch unit. */ 3627 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG), 3628 PREF_UNIT_RST_SET); 3629 /* Reset the RAM Buffer receive queue. */ 3630 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET); 3631 /* Reset Rx MAC FIFO. */ 3632 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3633 3634 /* Free Rx and Tx mbufs still in the queues. 
*/ 3635 for (i = 0; i < MSK_RX_RING_CNT; i++) { 3636 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 3637 if (rxd->rx_m != NULL) { 3638 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, 3639 rxd->rx_dmamap); 3640 m_freem(rxd->rx_m); 3641 rxd->rx_m = NULL; 3642 } 3643 } 3644 #ifdef MSK_JUMBO 3645 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 3646 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 3647 if (jrxd->rx_m != NULL) { 3648 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 3649 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3650 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 3651 jrxd->rx_dmamap); 3652 m_freem(jrxd->rx_m); 3653 jrxd->rx_m = NULL; 3654 } 3655 } 3656 #endif 3657 for (i = 0; i < MSK_TX_RING_CNT; i++) { 3658 txd = &sc_if->msk_cdata.msk_txdesc[i]; 3659 if (txd->tx_m != NULL) { 3660 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, 3661 txd->tx_dmamap); 3662 m_freem(txd->tx_m); 3663 txd->tx_m = NULL; 3664 } 3665 } 3666 3667 /* 3668 * Mark the interface down. 3669 */ 3670 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3671 sc_if->msk_link = 0; 3672 } 3673 3674 static int 3675 mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS) 3676 { 3677 return sysctl_int_range(oidp, arg1, arg2, req, 3678 MSK_PROC_MIN, MSK_PROC_MAX); 3679 } 3680 3681 static int 3682 mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3683 { 3684 struct msk_softc *sc = arg1; 3685 struct lwkt_serialize *serializer = &sc->msk_serializer; 3686 int error = 0, v; 3687 3688 lwkt_serialize_enter(serializer); 3689 3690 v = sc->msk_intr_rate; 3691 error = sysctl_handle_int(oidp, &v, 0, req); 3692 if (error || req->newptr == NULL) 3693 goto back; 3694 if (v < 0) { 3695 error = EINVAL; 3696 goto back; 3697 } 3698 3699 if (sc->msk_intr_rate != v) { 3700 int flag = 0, i; 3701 3702 sc->msk_intr_rate = v; 3703 for (i = 0; i < 2; ++i) { 3704 if (sc->msk_if[i] != NULL) { 3705 flag |= sc->msk_if[i]-> 3706 arpcom.ac_if.if_flags & IFF_RUNNING; 3707 } 3708 } 3709 if (flag) 3710 mskc_set_imtimer(sc); 3711 } 3712 back: 3713 
lwkt_serialize_exit(serializer); 3714 return error; 3715 } 3716 3717 static int 3718 msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag, 3719 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap) 3720 { 3721 struct msk_if_softc *sc_if = device_get_softc(dev); 3722 bus_dmamem_t dmem; 3723 int error; 3724 3725 error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag, 3726 MSK_RING_ALIGN, 0, 3727 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3728 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3729 if (error) { 3730 device_printf(dev, "can't create coherent DMA memory\n"); 3731 return error; 3732 } 3733 3734 *dtag = dmem.dmem_tag; 3735 *dmap = dmem.dmem_map; 3736 *addr = dmem.dmem_addr; 3737 *paddr = dmem.dmem_busaddr; 3738 3739 return 0; 3740 } 3741 3742 static void 3743 msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 3744 { 3745 if (dtag != NULL) { 3746 bus_dmamap_unload(dtag, dmap); 3747 bus_dmamem_free(dtag, addr, dmap); 3748 bus_dma_tag_destroy(dtag); 3749 } 3750 } 3751 3752 static void 3753 mskc_set_imtimer(struct msk_softc *sc) 3754 { 3755 if (sc->msk_intr_rate > 0) { 3756 /* 3757 * XXX myk(4) seems to use 125MHz for EC/FE/XL 3758 * and 78.125MHz for rest of chip types 3759 */ 3760 CSR_WRITE_4(sc, B2_IRQM_INI, 3761 MSK_USECS(sc, 1000000 / sc->msk_intr_rate)); 3762 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask); 3763 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START); 3764 } else { 3765 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP); 3766 } 3767 } 3768