/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
/* $DragonFly: src/sys/dev/netif/msk/if_msk.c,v 1.7 2008/08/03 11:00:32 sephe Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include "opt_ethernet.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

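/*
 * mskc_probe() matches the PCI vendor/device pair against the table
 * below; the sentinel entry with a NULL name terminates the search.
 */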
/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet"},
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Unknown",
	"Yukon EC",
	"Yukon FE"
};
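/*
 * model_name[] is indexed by (msk_hw_id - CHIP_ID_YUKON_XL); msk_probe()
 * relies on this ordering when it builds the device description string.
 */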
static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int,
			  struct mbuf_chain *);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static void	msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	msk_dmamap_mbuf_cb(void *, bus_dma_segment_t *, int,
				   bus_size_t, int);
static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int);
static struct mbuf *
		msk_defrag(struct mbuf *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void	msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_setmulti(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_setpromisc(struct msk_if_softc *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;
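/*
 * The driver is split in two halves: mskc(4) attaches to the PCI device
 * and owns the shared resources (register window, interrupt, status
 * ring), while one msk child device is created per MAC port and owns the
 * per-port rings and network interface.
 */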
static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);

static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		     GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
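/*
 * msk_miibus_statchg() is called by mii(4) whenever the PHY reports a
 * link state change.  Since the resolved speed/duplex/flow-control
 * settings come from mii(4), the GMAC's automatic update modes are
 * disabled and GM_GP_CTRL is programmed by hand below.
 */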
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc_if->msk_link = 1;
	} else
		sc_if->msk_link = 0;

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) of link state changes,
		 * there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		     (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		     IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	}
}
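/*
 * The GMAC filters multicast through a 64-bit hash: the low 6 bits of
 * the big-endian CRC32 of the destination address select one bit, and
 * the table is loaded via the four 16-bit registers GM_MC_ADDR_H1..H4.
 */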
static void
msk_setmulti(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	mode |= GM_RXCR_UCF_ENA;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			mchash[0] = 0xffff;
			mchash[1] = 0xffff;
		}
	} else {
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static void
msk_setpromisc(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREWRITE);
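	/*
	 * The put index written below tells the prefetch unit how far it
	 * may fetch descriptors; it is left one entry short of the ring
	 * size, presumably so the hardware never catches up with the
	 * software producer index (the usual ring convention; not
	 * documented by Marvell).
	 */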
	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif
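/*
 * msk_newbuf() loads the replacement mbuf through a spare DMA map first,
 * so an allocation or load failure leaves the old buffer and its mapping
 * untouched; only on success are the maps swapped.
 */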
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	if (bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, msk_dmamap_mbuf_cb, &ctx,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(ctx.nseg == 1,
		("%s: %d segments returned!", __func__, ctx.nseg));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control = htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_mediachg(mii);

	return (0);
}
/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
#ifdef MSK_JUMBO
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
#else
		error = EOPNOTSUPP;
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				     & IFF_PROMISC) != 0) {
					msk_setpromisc(sc_if);
					msk_setmulti(sc_if);
				}
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_setmulti(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
#ifdef notyet
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
#endif

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * On Yukon EC Ultra, TSO and checksum offload are
			 * not supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;
	uint8_t val;
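	/*
	 * A sketch of the arithmetic below: with, say, msk_ramsize = 48
	 * (KB), msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768
	 * bytes and msk_txqsize = 49152 - 32768 = 16384 bytes
	 * (illustrative figures only).
	 */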
	/* Get adapter SRAM size. */
	val = CSR_READ_1(sc, B2_E_0);
	sc->msk_ramsize = (val == 0) ? 128 : val * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	/*
	 * Give the receiver 2/3 of the memory and round down to a
	 * multiple of 1024.  The Tx/Rx RAM buffer sizes of the Yukon II
	 * should be multiples of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
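/*
 * mskc_phy_power() switches the PHYs between the VCC and VAUX supplies
 * and gates the PCI/core clocks accordingly.  Note that on Yukon XL
 * revisions newer than A1 the clock gate enable bits are inverted, as
 * flagged inline below.
 */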
static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			uint32_t our;

			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}
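/*
 * mskc_reset() brings the controller to a known state: ASF firmware is
 * disabled, PCI/PEX error bits are cleared, the PHYs are powered up,
 * RAM interface timeouts are programmed and the status unit (the chip's
 * DMA-based event reporting mechanism) is re-armed.
 */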
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set cache line size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;
		uint8_t pcix;

		pcix = pci_get_pcixcap_ptr(sc->msk_dev);

		pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~0x70;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width) {
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
		}
	}
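	/*
	 * The status list below is the chip's event channel: the hardware
	 * DMAs status list elements describing Rx/Tx completions into
	 * this host ring and raises Y2_IS_STAT_BMU; mskc_handle_events()
	 * then consumes them.
	 */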
	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
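	/*
	 * The timer initial values below are given in chip clock ticks;
	 * MSK_USECS() presumably scales microseconds by msk_clock (set in
	 * MHz per chip type in mskc_attach()) -- see if_mskreg.h for the
	 * actual definition.
	 */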
	/*
	 * Use default values for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware.  It seems there is a workaround to make it work
	 * sometimes.  However, the workaround also has to check OP code
	 * sequences to verify whether the OP code is correct.  Sometimes it
	 * should compute the IP/TCP/UDP checksum in the driver in order to
	 * verify the correctness of the checksum computed by hardware.  If
	 * you have to compute the checksum with software to verify the
	 * hardware's checksum, why have the hardware compute the checksum?
	 * I think there is no reason to spend time making Rx checksum
	 * offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
#if 0
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif

	return 0;
fail:
	msk_detach(dev);
	sc->msk_if[port] = NULL;
	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port, cpuid;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables.
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;
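	/*
	 * Both values are seeded from the loader tunables
	 * hw.mskc.process_limit and hw.mskc.intr_rate and can be changed
	 * at runtime through the per-device sysctl handlers registered
	 * below.
	 */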
#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource.
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
	    &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
		    &sc->msk_res_rid, RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ.
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->msk_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree.
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(dev),
					      CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
			&sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
			"I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, mskc_sysctl_intr_rate,
			"I", "max number of interrupts per second");
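	/*
	 * Soft-reset the chip and probe its basic parameters.  The PMD
	 * type byte read below identifies the media: 'L' and 'S' are
	 * taken to mean LX/SX fiber, anything else copper (inferred from
	 * the test that follows, not from documentation).
	 */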
	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_is_pcix(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
			      Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
			       mskc_intr, sc, &sc->msk_intrhand,
			       &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

	cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (sc->msk_if[0] != NULL)
		sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
	if (sc->msk_if[1] != NULL)
		sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;
	return 0;
fail:
	mskc_detach(dev);
	return (error);
}
/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct msk_softc *sc = sc_if->msk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		if (sc->msk_intrhand != NULL) {
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_A]);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_B]);

			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
					  sc->msk_intrhand);
			sc->msk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->msk_miibus != NULL)
		device_delete_child(dev, sc_if->msk_miibus);

	msk_txrx_dma_free(sc_if);
	return (0);
}

static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int *port, i;

#ifdef INVARIANTS
	if (device_is_attached(dev)) {
		KASSERT(sc->msk_intrhand == NULL,
			("intr is not torn down yet\n"));
	}
#endif

	for (i = 0; i < sc->msk_num_port; ++i) {
		if (sc->msk_devs[i] != NULL) {
			port = device_get_ivars(sc->msk_devs[i]);
			if (port != NULL) {
				kfree(port, M_DEVBUF);
				device_set_ivars(sc->msk_devs[i], NULL);
			}
			device_delete_child(dev, sc->msk_devs[i]);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	mskc_status_dma_free(sc);

	if (sc->msk_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
				     sc->msk_irq);
	}
	if (sc->msk_res != NULL) {
		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
				     sc->msk_res);
	}

	if (sc->msk_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->msk_sysctl_ctx);

	return (0);
}

static void
msk_dmamap_mbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg,
		   bus_size_t mapsz __unused, int error)
{
	struct msk_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	if (ctx->nseg < nseg) {
		ctx->nseg = 0;
		return;
	}

	ctx->nseg = nseg;
	for (i = 0; i < ctx->nseg; ++i)
		ctx->segs[i] = segs[i];
}

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	KKASSERT(nseg <= ctx->nseg);

	ctx->nseg = nseg;
	for (i = 0; i < ctx->nseg; ++i)
		ctx->segs[i] = segs[i];
}
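/*
 * Note on the busdma callbacks above: the segment array passed to a
 * load callback is only valid for the duration of the call, so the
 * callbacks copy it out.  msk_dmamap_arg.nseg is used in both
 * directions: callers set it to the capacity of ctx->segs, and the
 * callbacks store the actual segment count back (msk_dmamap_mbuf_cb()
 * stores 0 on overflow).
 */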
 */
static int
mskc_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	error = bus_dma_tag_create(
		    NULL,			/* XXX parent */
		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_STAT_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    MSK_STAT_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->msk_stat_tag);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
				 (void **)&sc->msk_stat_ring,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->msk_stat_map);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
		return (error);
	}

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
				sc->msk_stat_ring, MSK_STAT_RING_SZ,
				msk_dmamap_cb, &ctx, 0);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
				sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
		return (error);
	}
	sc->msk_stat_ring_paddr = seg.ds_addr;

	return (0);
}

static void
mskc_status_dma_free(struct msk_softc *sc)
{
	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
				sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}

static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
	int error, i, j;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
	struct msk_jpool_entry *entry;
	uint8_t *ptr;
#endif

	/* Create parent DMA tag. */
	/*
	 * XXX
	 * It seems that the Yukon II supports full 64-bit DMA operations,
	 * but it needs two descriptors (list elements) for a 64-bit DMA
	 * transfer. Since we don't know in advance which DMA address
	 * mapping (32-bit or 64-bit) would be used for each mbuf, we limit
	 * its DMA space to the 32-bit address range. Otherwise, we would
	 * have to check which DMA address is used and chain another
	 * descriptor for the 64-bit DMA operation, which also means the
	 * descriptor ring size becomes variable. Limiting DMA addresses to
	 * the 32-bit address space greatly simplifies descriptor handling
	 * and may even increase performance a bit due to more efficient
	 * handling of descriptors. Apart from complicating the checksum
	 * offload mechanism, it seems a really bad idea to use a separate
	 * descriptor for a 64-bit DMA operation just to save a little
	 * descriptor memory. Anyway, I've never seen such an exotic scheme
	 * on other Ethernet hardware.
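 *
 * For illustration only (not driver code): had the 64-bit path been
 * taken, each time the upper 32 address bits of a segment changed, an
 * extra address LE would have to be chained in front of the buffer LE,
 * roughly like this (OP_ADDR64 and the MSK_ADDR_* macros are assumed
 * to come from if_mskreg.h):
 *
 *	if (MSK_ADDR_HI(seg->ds_addr) != last_addr_hi) {
 *		tx_le = &ring[prod];
 *		tx_le->msk_addr = htole32(MSK_ADDR_HI(seg->ds_addr));
 *		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 *		last_addr_hi = MSK_ADDR_HI(seg->ds_addr);
 *		MSK_INC(prod, MSK_TX_RING_CNT);
 *	}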
1898 */ 1899 error = bus_dma_tag_create( 1900 NULL, /* parent */ 1901 1, 0, /* alignment, boundary */ 1902 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1903 BUS_SPACE_MAXADDR, /* highaddr */ 1904 NULL, NULL, /* filter, filterarg */ 1905 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1906 0, /* nsegments */ 1907 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1908 0, /* flags */ 1909 &sc_if->msk_cdata.msk_parent_tag); 1910 if (error) { 1911 device_printf(sc_if->msk_if_dev, 1912 "failed to create parent DMA tag\n"); 1913 return error; 1914 } 1915 1916 /* Create DMA stuffs for Tx ring. */ 1917 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ, 1918 &sc_if->msk_cdata.msk_tx_ring_tag, 1919 (void **)&sc_if->msk_rdata.msk_tx_ring, 1920 &sc_if->msk_rdata.msk_tx_ring_paddr, 1921 &sc_if->msk_cdata.msk_tx_ring_map); 1922 if (error) { 1923 device_printf(sc_if->msk_if_dev, 1924 "failed to create TX ring DMA stuffs\n"); 1925 return error; 1926 } 1927 1928 /* Create DMA stuffs for Rx ring. */ 1929 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ, 1930 &sc_if->msk_cdata.msk_rx_ring_tag, 1931 (void **)&sc_if->msk_rdata.msk_rx_ring, 1932 &sc_if->msk_rdata.msk_rx_ring_paddr, 1933 &sc_if->msk_cdata.msk_rx_ring_map); 1934 if (error) { 1935 device_printf(sc_if->msk_if_dev, 1936 "failed to create RX ring DMA stuffs\n"); 1937 return error; 1938 } 1939 1940 /* Create tag for Tx buffers. */ 1941 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1942 1, 0, /* alignment, boundary */ 1943 BUS_SPACE_MAXADDR, /* lowaddr */ 1944 BUS_SPACE_MAXADDR, /* highaddr */ 1945 NULL, NULL, /* filter, filterarg */ 1946 MSK_TSO_MAXSIZE, /* maxsize */ 1947 MSK_MAXTXSEGS, /* nsegments */ 1948 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 1949 0, /* flags */ 1950 &sc_if->msk_cdata.msk_tx_tag); 1951 if (error) { 1952 device_printf(sc_if->msk_if_dev, 1953 "failed to create Tx DMA tag\n"); 1954 return error; 1955 } 1956 1957 /* Create DMA maps for Tx buffers. */ 1958 for (i = 0; i < MSK_TX_RING_CNT; i++) { 1959 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i]; 1960 1961 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 1962 &txd->tx_dmamap); 1963 if (error) { 1964 device_printf(sc_if->msk_if_dev, 1965 "failed to create %dth Tx dmamap\n", i); 1966 1967 for (j = 0; j < i; ++j) { 1968 txd = &sc_if->msk_cdata.msk_txdesc[j]; 1969 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 1970 txd->tx_dmamap); 1971 } 1972 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 1973 sc_if->msk_cdata.msk_tx_tag = NULL; 1974 1975 return error; 1976 } 1977 } 1978 1979 /* Create tag for Rx buffers. */ 1980 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1981 1, 0, /* alignment, boundary */ 1982 BUS_SPACE_MAXADDR, /* lowaddr */ 1983 BUS_SPACE_MAXADDR, /* highaddr */ 1984 NULL, NULL, /* filter, filterarg */ 1985 MCLBYTES, /* maxsize */ 1986 1, /* nsegments */ 1987 MCLBYTES, /* maxsegsize */ 1988 0, /* flags */ 1989 &sc_if->msk_cdata.msk_rx_tag); 1990 if (error) { 1991 device_printf(sc_if->msk_if_dev, 1992 "failed to create Rx DMA tag\n"); 1993 return error; 1994 } 1995 1996 /* Create DMA maps for Rx buffers. 
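 *
 * The spare map created first enables a swap trick in the Rx refill
 * path: the replacement mbuf is loaded into the spare map, and only
 * when that succeeds are the descriptor's map and the spare exchanged,
 * so a failed allocation never costs us the mapping of the old buffer.
 * A simplified sketch of the idea (the real implementation is
 * msk_newbuf(), elsewhere in this file):
 *
 *	if (bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_rx_tag,
 *	    sc_if->msk_cdata.msk_rx_sparemap, m, msk_dmamap_mbuf_cb,
 *	    &ctx, BUS_DMA_NOWAIT) != 0)
 *		return (ENOBUFS);	// old buffer still mapped and intact
 *	map = rxd->rx_dmamap;		// swap descriptor map and spare
 *	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
 *	sc_if->msk_cdata.msk_rx_sparemap = map;
 *	rxd->rx_m = m;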
*/ 1997 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 1998 &sc_if->msk_cdata.msk_rx_sparemap); 1999 if (error) { 2000 device_printf(sc_if->msk_if_dev, 2001 "failed to create spare Rx dmamap\n"); 2002 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2003 sc_if->msk_cdata.msk_rx_tag = NULL; 2004 return error; 2005 } 2006 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2007 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2008 2009 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2010 &rxd->rx_dmamap); 2011 if (error) { 2012 device_printf(sc_if->msk_if_dev, 2013 "failed to create %dth Rx dmamap\n", i); 2014 2015 for (j = 0; j < i; ++j) { 2016 rxd = &sc_if->msk_cdata.msk_rxdesc[j]; 2017 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2018 rxd->rx_dmamap); 2019 } 2020 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2021 sc_if->msk_cdata.msk_rx_tag = NULL; 2022 2023 return error; 2024 } 2025 } 2026 2027 #ifdef MSK_JUMBO 2028 SLIST_INIT(&sc_if->msk_jfree_listhead); 2029 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2030 2031 /* Create tag for jumbo Rx ring. */ 2032 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2033 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2034 BUS_SPACE_MAXADDR, /* lowaddr */ 2035 BUS_SPACE_MAXADDR, /* highaddr */ 2036 NULL, NULL, /* filter, filterarg */ 2037 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2038 1, /* nsegments */ 2039 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2040 0, /* flags */ 2041 NULL, NULL, /* lockfunc, lockarg */ 2042 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2043 if (error != 0) { 2044 device_printf(sc_if->msk_if_dev, 2045 "failed to create jumbo Rx ring DMA tag\n"); 2046 goto fail; 2047 } 2048 2049 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2050 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2051 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2052 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2053 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2054 if (error != 0) { 2055 device_printf(sc_if->msk_if_dev, 2056 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2057 goto fail; 2058 } 2059 2060 ctx.msk_busaddr = 0; 2061 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2062 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2063 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2064 msk_dmamap_cb, &ctx, 0); 2065 if (error != 0) { 2066 device_printf(sc_if->msk_if_dev, 2067 "failed to load DMA'able memory for jumbo Rx ring\n"); 2068 goto fail; 2069 } 2070 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2071 2072 /* Create tag for jumbo buffer blocks. */ 2073 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2074 PAGE_SIZE, 0, /* alignment, boundary */ 2075 BUS_SPACE_MAXADDR, /* lowaddr */ 2076 BUS_SPACE_MAXADDR, /* highaddr */ 2077 NULL, NULL, /* filter, filterarg */ 2078 MSK_JMEM, /* maxsize */ 2079 1, /* nsegments */ 2080 MSK_JMEM, /* maxsegsize */ 2081 0, /* flags */ 2082 NULL, NULL, /* lockfunc, lockarg */ 2083 &sc_if->msk_cdata.msk_jumbo_tag); 2084 if (error != 0) { 2085 device_printf(sc_if->msk_if_dev, 2086 "failed to create jumbo Rx buffer block DMA tag\n"); 2087 goto fail; 2088 } 2089 2090 /* Create tag for jumbo Rx buffers. 
*/ 2091 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2092 PAGE_SIZE, 0, /* alignment, boundary */ 2093 BUS_SPACE_MAXADDR, /* lowaddr */ 2094 BUS_SPACE_MAXADDR, /* highaddr */ 2095 NULL, NULL, /* filter, filterarg */ 2096 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2097 MSK_MAXRXSEGS, /* nsegments */ 2098 MSK_JLEN, /* maxsegsize */ 2099 0, /* flags */ 2100 NULL, NULL, /* lockfunc, lockarg */ 2101 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2102 if (error != 0) { 2103 device_printf(sc_if->msk_if_dev, 2104 "failed to create jumbo Rx DMA tag\n"); 2105 goto fail; 2106 } 2107 2108 /* Create DMA maps for jumbo Rx buffers. */ 2109 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2110 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2111 device_printf(sc_if->msk_if_dev, 2112 "failed to create spare jumbo Rx dmamap\n"); 2113 goto fail; 2114 } 2115 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2116 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2117 jrxd->rx_m = NULL; 2118 jrxd->rx_dmamap = NULL; 2119 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2120 &jrxd->rx_dmamap); 2121 if (error != 0) { 2122 device_printf(sc_if->msk_if_dev, 2123 "failed to create jumbo Rx dmamap\n"); 2124 goto fail; 2125 } 2126 } 2127 2128 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ 2129 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2130 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2131 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2132 &sc_if->msk_cdata.msk_jumbo_map); 2133 if (error != 0) { 2134 device_printf(sc_if->msk_if_dev, 2135 "failed to allocate DMA'able memory for jumbo buf\n"); 2136 goto fail; 2137 } 2138 2139 ctx.msk_busaddr = 0; 2140 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2141 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2142 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2143 if (error != 0) { 2144 device_printf(sc_if->msk_if_dev, 2145 "failed to load DMA'able memory for jumbobuf\n"); 2146 goto fail; 2147 } 2148 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2149 2150 /* 2151 * Now divide it up into 9K pieces and save the addresses 2152 * in an array. 
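 *
 * This assumes the buffer sizes were defined so that
 * MSK_JMEM == MSK_JSLOTS * MSK_JLEN, which makes slot i start at
 * msk_jumbo_buf + i * MSK_JLEN:
 *
 *	slot 0:  [buf,                buf + MSK_JLEN)
 *	slot 1:  [buf + MSK_JLEN,     buf + 2 * MSK_JLEN)
 *	...
 *	slot i:  [buf + i * MSK_JLEN, buf + (i + 1) * MSK_JLEN)
 *
 * msk_jfree() later inverts the mapping with
 * (addr - msk_jumbo_buf) / MSK_JLEN to recover the slot index.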
2153 */ 2154 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2155 for (i = 0; i < MSK_JSLOTS; i++) { 2156 sc_if->msk_cdata.msk_jslots[i] = ptr; 2157 ptr += MSK_JLEN; 2158 entry = malloc(sizeof(struct msk_jpool_entry), 2159 M_DEVBUF, M_WAITOK); 2160 if (entry == NULL) { 2161 device_printf(sc_if->msk_if_dev, 2162 "no memory for jumbo buffers!\n"); 2163 error = ENOMEM; 2164 goto fail; 2165 } 2166 entry->slot = i; 2167 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2168 jpool_entries); 2169 } 2170 #endif 2171 return 0; 2172 } 2173 2174 static void 2175 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2176 { 2177 struct msk_txdesc *txd; 2178 struct msk_rxdesc *rxd; 2179 #ifdef MSK_JUMBO 2180 struct msk_rxdesc *jrxd; 2181 struct msk_jpool_entry *entry; 2182 #endif 2183 int i; 2184 2185 #ifdef MSK_JUMBO 2186 MSK_JLIST_LOCK(sc_if); 2187 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2188 device_printf(sc_if->msk_if_dev, 2189 "asked to free buffer that is in use!\n"); 2190 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2191 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2192 jpool_entries); 2193 } 2194 2195 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2196 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2197 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2198 free(entry, M_DEVBUF); 2199 } 2200 MSK_JLIST_UNLOCK(sc_if); 2201 2202 /* Destroy jumbo buffer block. */ 2203 if (sc_if->msk_cdata.msk_jumbo_map) 2204 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2205 sc_if->msk_cdata.msk_jumbo_map); 2206 2207 if (sc_if->msk_rdata.msk_jumbo_buf) { 2208 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2209 sc_if->msk_rdata.msk_jumbo_buf, 2210 sc_if->msk_cdata.msk_jumbo_map); 2211 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2212 sc_if->msk_cdata.msk_jumbo_map = NULL; 2213 } 2214 2215 /* Jumbo Rx ring. */ 2216 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2217 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2218 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2219 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2220 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2221 sc_if->msk_rdata.msk_jumbo_rx_ring) 2222 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2223 sc_if->msk_rdata.msk_jumbo_rx_ring, 2224 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2225 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2226 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2227 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2228 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2229 } 2230 2231 /* Jumbo Rx buffers. */ 2232 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2233 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2234 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2235 if (jrxd->rx_dmamap) { 2236 bus_dmamap_destroy( 2237 sc_if->msk_cdata.msk_jumbo_rx_tag, 2238 jrxd->rx_dmamap); 2239 jrxd->rx_dmamap = NULL; 2240 } 2241 } 2242 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2243 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2244 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2245 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2246 } 2247 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2248 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2249 } 2250 #endif 2251 2252 /* Tx ring. */ 2253 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag, 2254 sc_if->msk_rdata.msk_tx_ring, 2255 sc_if->msk_cdata.msk_tx_ring_map); 2256 2257 /* Rx ring. 
*/ 2258 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag, 2259 sc_if->msk_rdata.msk_rx_ring, 2260 sc_if->msk_cdata.msk_rx_ring_map); 2261 2262 /* Tx buffers. */ 2263 if (sc_if->msk_cdata.msk_tx_tag) { 2264 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2265 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2266 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2267 txd->tx_dmamap); 2268 } 2269 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2270 sc_if->msk_cdata.msk_tx_tag = NULL; 2271 } 2272 2273 /* Rx buffers. */ 2274 if (sc_if->msk_cdata.msk_rx_tag) { 2275 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2276 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2277 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2278 rxd->rx_dmamap); 2279 } 2280 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2281 sc_if->msk_cdata.msk_rx_sparemap); 2282 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2283 sc_if->msk_cdata.msk_rx_tag = NULL; 2284 } 2285 2286 if (sc_if->msk_cdata.msk_parent_tag) { 2287 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2288 sc_if->msk_cdata.msk_parent_tag = NULL; 2289 } 2290 } 2291 2292 #ifdef MSK_JUMBO 2293 /* 2294 * Allocate a jumbo buffer. 2295 */ 2296 static void * 2297 msk_jalloc(struct msk_if_softc *sc_if) 2298 { 2299 struct msk_jpool_entry *entry; 2300 2301 MSK_JLIST_LOCK(sc_if); 2302 2303 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2304 2305 if (entry == NULL) { 2306 MSK_JLIST_UNLOCK(sc_if); 2307 return (NULL); 2308 } 2309 2310 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2311 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2312 2313 MSK_JLIST_UNLOCK(sc_if); 2314 2315 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2316 } 2317 2318 /* 2319 * Release a jumbo buffer. 2320 */ 2321 static void 2322 msk_jfree(void *buf, void *args) 2323 { 2324 struct msk_if_softc *sc_if; 2325 struct msk_jpool_entry *entry; 2326 int i; 2327 2328 /* Extract the softc struct pointer. */ 2329 sc_if = (struct msk_if_softc *)args; 2330 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2331 2332 MSK_JLIST_LOCK(sc_if); 2333 /* Calculate the slot this buffer belongs to. */ 2334 i = ((vm_offset_t)buf 2335 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN; 2336 KASSERT(i >= 0 && i < MSK_JSLOTS, 2337 ("%s: asked to free buffer that we don't manage!", __func__)); 2338 2339 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead); 2340 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2341 entry->slot = i; 2342 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2343 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries); 2344 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) 2345 wakeup(sc_if); 2346 2347 MSK_JLIST_UNLOCK(sc_if); 2348 } 2349 #endif 2350 2351 /* 2352 * It's copy of ath_defrag(ath(4)). 2353 * 2354 * Defragment an mbuf chain, returning at most maxfrags separate 2355 * mbufs+clusters. If this is not possible NULL is returned and 2356 * the original mbuf chain is left in it's present (potentially 2357 * modified) state. We use two techniques: collapsing consecutive 2358 * mbufs and replacing consecutive mbufs by a cluster. 2359 */ 2360 static struct mbuf * 2361 msk_defrag(struct mbuf *m0, int how, int maxfrags) 2362 { 2363 struct mbuf *m, *n, *n2, **prev; 2364 u_int curfrags; 2365 2366 /* 2367 * Calculate the current number of frags. 2368 */ 2369 curfrags = 0; 2370 for (m = m0; m != NULL; m = m->m_next) 2371 curfrags++; 2372 /* 2373 * First, try to collapse mbufs. 
Note that we always collapse
 * towards the front so we don't need to deal with moving the
 * pkthdr. This may be suboptimal if the first mbuf has much
 * less data than the following.
 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			      n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return (m0);
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			      n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return (m0);
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return (NULL);
}

static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, prod, si;
	uint16_t offset, tcp_offset;
	int error, i;

	tcp_offset = offset = 0;
	m = *m_head;
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		/*
		 * Since an mbuf carries no protocol-specific structure
		 * information, we have to inspect the protocol headers
		 * here to set up TSO and checksum offload. I don't know
		 * why Marvell made such a decision in the chip design,
		 * because other GigE hardware normally takes care of all
		 * these chores in hardware. However, the TSO performance
		 * of the Yukon II is good enough that it's worth
		 * implementing.
		 */
		struct ether_header *eh;
		struct ip *ip;

		/* TODO check for M_WRITABLE(m) */

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off.
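		 * If so, the frame carries an in-band 802.1Q tag and the
		 * IP header starts after the larger ether_vlan_header, so
		 * 'offset' must be adjusted before the IP header can be
		 * located.  The offset arithmetic performed here, in
		 * sketch form (byte counts assume the untagged 14-byte
		 * and tagged 18-byte Ethernet headers):
		 *
		 *	offset = sizeof(struct ether_header);		   // 14
		 *	if (tagged)
		 *		offset = sizeof(struct ether_vlan_header); // 18
		 *	ip = (struct ip *)(mtod(m, char *) + offset);
		 *	offset += ip->ip_hl << 2; // ip_hl counts 32-bit words
		 *	// 'offset' now points at the TCP/UDP header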
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		/*
		 * It seems that the Yukon II has a Tx checksum offload bug
		 * for small TCP packets less than 60 bytes in size
		 * (e.g. TCP window probe packets, pure ACK packets).
		 * The common workaround of padding the frame with zeros to
		 * the minimum Ethernet frame size didn't work at all.
		 * Instead of disabling checksum offload completely, we
		 * resort to a software checksum routine when we encounter
		 * short TCP frames.
		 * Short UDP packets appear to be handled correctly by the
		 * Yukon II.
		 */
		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			uint16_t csum;

			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
			    (ip->ip_hl << 2), offset);
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = csum;
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}

	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	bzero(&ctx, sizeof(ctx));
	ctx.nseg = MSK_MAXTXSEGS;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag, map,
	    *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
	if (error == 0 && ctx.nseg == 0) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
		error = EFBIG;
	}
	if (error == EFBIG) {
		m = msk_defrag(*m_head, MB_DONTWAIT, MSK_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;

		bzero(&ctx, sizeof(ctx));
		ctx.nseg = MSK_MAXTXSEGS;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag,
		    map, *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
		if (error == 0 && ctx.nseg == 0) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
			error = EFBIG;
		}
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}

	/* Check number of available descriptors. */
	if (sc_if->msk_cdata.msk_tx_cnt + ctx.nseg >=
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
		return (ENOBUFS);
	}

	control = 0;
	tx_le = NULL;

#ifdef notyet
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(0);
		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
		    htons(m->m_pkthdr.ether_vtag));
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
		control |= INS_VLAN;
	}
#endif
	/* Check if we have to handle checksum offload.
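	 * The checksum LE built below packs two 16-bit byte offsets into
	 * msk_addr: the low half tells the hardware where to store the
	 * computed checksum (start of the L4 header plus the csum_data
	 * field offset supplied by the stack), the high half tells it
	 * where to start summing.  A worked example for an untagged TCP
	 * frame with a 20-byte IP header:
	 *
	 *	tcp_offset = 14 + 20 = 34  // Ethernet + IP headers
	 *	csum_data  = 16            // offset of th_sum in the TCP header
	 *	msk_addr   = (34 + 16) | (34 << 16) = 0x00220032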
*/ 2578 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) { 2579 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2580 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) 2581 & 0xffff) | ((uint32_t)tcp_offset << 16)); 2582 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); 2583 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2584 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2585 control |= UDPTCP; 2586 sc_if->msk_cdata.msk_tx_cnt++; 2587 MSK_INC(prod, MSK_TX_RING_CNT); 2588 } 2589 2590 si = prod; 2591 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2592 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2593 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2594 OP_PACKET); 2595 sc_if->msk_cdata.msk_tx_cnt++; 2596 MSK_INC(prod, MSK_TX_RING_CNT); 2597 2598 for (i = 1; i < ctx.nseg; i++) { 2599 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2600 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2601 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2602 OP_BUFFER | HW_OWNER); 2603 sc_if->msk_cdata.msk_tx_cnt++; 2604 MSK_INC(prod, MSK_TX_RING_CNT); 2605 } 2606 /* Update producer index. */ 2607 sc_if->msk_cdata.msk_tx_prod = prod; 2608 2609 /* Set EOP on the last desciptor. */ 2610 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2611 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2612 tx_le->msk_control |= htole32(EOP); 2613 2614 /* Turn the first descriptor ownership to hardware. */ 2615 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2616 tx_le->msk_control |= htole32(HW_OWNER); 2617 2618 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2619 map = txd_last->tx_dmamap; 2620 txd_last->tx_dmamap = txd->tx_dmamap; 2621 txd->tx_dmamap = map; 2622 txd->tx_m = m; 2623 2624 /* Sync descriptors. */ 2625 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2626 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2627 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE); 2628 2629 return (0); 2630 } 2631 2632 static void 2633 msk_start(struct ifnet *ifp) 2634 { 2635 struct msk_if_softc *sc_if; 2636 struct mbuf *m_head; 2637 int enq; 2638 2639 sc_if = ifp->if_softc; 2640 2641 ASSERT_SERIALIZED(ifp->if_serializer); 2642 2643 if (!sc_if->msk_link) { 2644 ifq_purge(&ifp->if_snd); 2645 return; 2646 } 2647 2648 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2649 return; 2650 2651 for (enq = 0; !ifq_is_empty(&ifp->if_snd) && 2652 sc_if->msk_cdata.msk_tx_cnt < 2653 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2654 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2655 if (m_head == NULL) 2656 break; 2657 2658 /* 2659 * Pack the data into the transmit ring. If we 2660 * don't have room, set the OACTIVE flag and wait 2661 * for the NIC to drain the ring. 2662 */ 2663 if (msk_encap(sc_if, &m_head) != 0) { 2664 if (m_head == NULL) 2665 break; 2666 m_freem(m_head); 2667 ifp->if_flags |= IFF_OACTIVE; 2668 break; 2669 } 2670 2671 enq++; 2672 /* 2673 * If there's a BPF listener, bounce a copy of this frame 2674 * to him. 2675 */ 2676 BPF_MTAP(ifp, m_head); 2677 } 2678 2679 if (enq > 0) { 2680 /* Transmit */ 2681 CSR_WRITE_2(sc_if->msk_softc, 2682 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2683 sc_if->msk_cdata.msk_tx_prod); 2684 2685 /* Set a timeout in case the chip goes out to lunch. 
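	 * if_timer is the stack's one-shot Tx watchdog: it is decremented
	 * once per second and msk_watchdog() below runs when it reaches
	 * zero, so the completion path has to keep re-arming or clearing
	 * it.  The pairing, in sketch form:
	 *
	 *	ifp->if_timer = MSK_TX_TIMEOUT;	// armed when frames are queued
	 *	...
	 *	if (sc_if->msk_cdata.msk_tx_cnt == 0)
	 *		ifp->if_timer = 0;	// disarmed once the ring drains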
*/ 2686 ifp->if_timer = MSK_TX_TIMEOUT; 2687 } 2688 } 2689 2690 static void 2691 msk_watchdog(struct ifnet *ifp) 2692 { 2693 struct msk_if_softc *sc_if = ifp->if_softc; 2694 uint32_t ridx; 2695 int idx; 2696 2697 ASSERT_SERIALIZED(ifp->if_serializer); 2698 2699 if (sc_if->msk_link == 0) { 2700 if (bootverbose) 2701 if_printf(sc_if->msk_ifp, "watchdog timeout " 2702 "(missed link)\n"); 2703 ifp->if_oerrors++; 2704 msk_init(sc_if); 2705 return; 2706 } 2707 2708 /* 2709 * Reclaim first as there is a possibility of losing Tx completion 2710 * interrupts. 2711 */ 2712 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2713 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2714 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2715 msk_txeof(sc_if, idx); 2716 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2717 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2718 "-- recovering\n"); 2719 if (!ifq_is_empty(&ifp->if_snd)) 2720 if_devstart(ifp); 2721 return; 2722 } 2723 } 2724 2725 if_printf(ifp, "watchdog timeout\n"); 2726 ifp->if_oerrors++; 2727 msk_init(sc_if); 2728 if (!ifq_is_empty(&ifp->if_snd)) 2729 if_devstart(ifp); 2730 } 2731 2732 static int 2733 mskc_shutdown(device_t dev) 2734 { 2735 struct msk_softc *sc = device_get_softc(dev); 2736 int i; 2737 2738 lwkt_serialize_enter(&sc->msk_serializer); 2739 2740 for (i = 0; i < sc->msk_num_port; i++) { 2741 if (sc->msk_if[i] != NULL) 2742 msk_stop(sc->msk_if[i]); 2743 } 2744 2745 /* Disable all interrupts. */ 2746 CSR_WRITE_4(sc, B0_IMSK, 0); 2747 CSR_READ_4(sc, B0_IMSK); 2748 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2749 CSR_READ_4(sc, B0_HWE_IMSK); 2750 2751 /* Put hardware reset. */ 2752 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2753 2754 lwkt_serialize_exit(&sc->msk_serializer); 2755 return (0); 2756 } 2757 2758 static int 2759 mskc_suspend(device_t dev) 2760 { 2761 struct msk_softc *sc = device_get_softc(dev); 2762 int i; 2763 2764 lwkt_serialize_enter(&sc->msk_serializer); 2765 2766 for (i = 0; i < sc->msk_num_port; i++) { 2767 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2768 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0)) 2769 msk_stop(sc->msk_if[i]); 2770 } 2771 2772 /* Disable all interrupts. */ 2773 CSR_WRITE_4(sc, B0_IMSK, 0); 2774 CSR_READ_4(sc, B0_IMSK); 2775 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2776 CSR_READ_4(sc, B0_HWE_IMSK); 2777 2778 mskc_phy_power(sc, MSK_PHY_POWERDOWN); 2779 2780 /* Put hardware reset. 
*/ 2781 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2782 sc->msk_suspended = 1; 2783 2784 lwkt_serialize_exit(&sc->msk_serializer); 2785 2786 return (0); 2787 } 2788 2789 static int 2790 mskc_resume(device_t dev) 2791 { 2792 struct msk_softc *sc = device_get_softc(dev); 2793 int i; 2794 2795 lwkt_serialize_enter(&sc->msk_serializer); 2796 2797 mskc_reset(sc); 2798 for (i = 0; i < sc->msk_num_port; i++) { 2799 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2800 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2801 msk_init(sc->msk_if[i]); 2802 } 2803 sc->msk_suspended = 0; 2804 2805 lwkt_serialize_exit(&sc->msk_serializer); 2806 2807 return (0); 2808 } 2809 2810 static void 2811 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len, 2812 struct mbuf_chain *chain) 2813 { 2814 struct mbuf *m; 2815 struct ifnet *ifp; 2816 struct msk_rxdesc *rxd; 2817 int cons, rxlen; 2818 2819 ifp = sc_if->msk_ifp; 2820 2821 cons = sc_if->msk_cdata.msk_rx_cons; 2822 do { 2823 rxlen = status >> 16; 2824 if ((status & GMR_FS_VLAN) != 0 && 2825 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2826 rxlen -= EVL_ENCAPLEN; 2827 if (len > sc_if->msk_framesize || 2828 ((status & GMR_FS_ANY_ERR) != 0) || 2829 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2830 /* Don't count flow-control packet as errors. */ 2831 if ((status & GMR_FS_GOOD_FC) == 0) 2832 ifp->if_ierrors++; 2833 msk_discard_rxbuf(sc_if, cons); 2834 break; 2835 } 2836 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2837 m = rxd->rx_m; 2838 if (msk_newbuf(sc_if, cons) != 0) { 2839 ifp->if_iqdrops++; 2840 /* Reuse old buffer. */ 2841 msk_discard_rxbuf(sc_if, cons); 2842 break; 2843 } 2844 m->m_pkthdr.rcvif = ifp; 2845 m->m_pkthdr.len = m->m_len = len; 2846 ifp->if_ipackets++; 2847 #ifdef notyet 2848 /* Check for VLAN tagged packets. */ 2849 if ((status & GMR_FS_VLAN) != 0 && 2850 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2851 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2852 m->m_flags |= M_VLANTAG; 2853 } 2854 #endif 2855 2856 #ifdef ETHER_INPUT_CHAIN 2857 ether_input_chain2(ifp, m, chain); 2858 #else 2859 ifp->if_input(ifp, m); 2860 #endif 2861 } while (0); 2862 2863 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2864 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2865 } 2866 2867 #ifdef MSK_JUMBO 2868 static void 2869 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2870 { 2871 struct mbuf *m; 2872 struct ifnet *ifp; 2873 struct msk_rxdesc *jrxd; 2874 int cons, rxlen; 2875 2876 ifp = sc_if->msk_ifp; 2877 2878 MSK_IF_LOCK_ASSERT(sc_if); 2879 2880 cons = sc_if->msk_cdata.msk_rx_cons; 2881 do { 2882 rxlen = status >> 16; 2883 if ((status & GMR_FS_VLAN) != 0 && 2884 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2885 rxlen -= ETHER_VLAN_ENCAP_LEN; 2886 if (len > sc_if->msk_framesize || 2887 ((status & GMR_FS_ANY_ERR) != 0) || 2888 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2889 /* Don't count flow-control packet as errors. */ 2890 if ((status & GMR_FS_GOOD_FC) == 0) 2891 ifp->if_ierrors++; 2892 msk_discard_jumbo_rxbuf(sc_if, cons); 2893 break; 2894 } 2895 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2896 m = jrxd->rx_m; 2897 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2898 ifp->if_iqdrops++; 2899 /* Reuse old buffer. */ 2900 msk_discard_jumbo_rxbuf(sc_if, cons); 2901 break; 2902 } 2903 m->m_pkthdr.rcvif = ifp; 2904 m->m_pkthdr.len = m->m_len = len; 2905 ifp->if_ipackets++; 2906 /* Check for VLAN tagged packets. 
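		 * The Rx status word decoded in this loop packs the frame
		 * length into its high 16 bits and the GMR_FS_* frame
		 * status flags into the low bits; when the MAC strips a
		 * VLAN tag the length it reports still appears to count
		 * the four tag bytes, hence the rxlen adjustment above.
		 * The decode, in sketch form:
		 *
		 *	rxlen = status >> 16;		// length seen by the MAC
		 *	if (status & GMR_FS_VLAN)
		 *		rxlen -= ETHER_VLAN_ENCAP_LEN; // tag stripped
		 *	ok = (status & GMR_FS_RX_OK) != 0 &&
		 *	     (status & GMR_FS_ANY_ERR) == 0;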
*/ 2907 if ((status & GMR_FS_VLAN) != 0 && 2908 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2909 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2910 m->m_flags |= M_VLANTAG; 2911 } 2912 MSK_IF_UNLOCK(sc_if); 2913 (*ifp->if_input)(ifp, m); 2914 MSK_IF_LOCK(sc_if); 2915 } while (0); 2916 2917 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2918 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2919 } 2920 #endif 2921 2922 static void 2923 msk_txeof(struct msk_if_softc *sc_if, int idx) 2924 { 2925 struct msk_txdesc *txd; 2926 struct msk_tx_desc *cur_tx; 2927 struct ifnet *ifp; 2928 uint32_t control; 2929 int cons, prog; 2930 2931 ifp = sc_if->msk_ifp; 2932 2933 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2934 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_POSTREAD); 2935 2936 /* 2937 * Go through our tx ring and free mbufs for those 2938 * frames that have been sent. 2939 */ 2940 cons = sc_if->msk_cdata.msk_tx_cons; 2941 prog = 0; 2942 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2943 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2944 break; 2945 prog++; 2946 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2947 control = le32toh(cur_tx->msk_control); 2948 sc_if->msk_cdata.msk_tx_cnt--; 2949 ifp->if_flags &= ~IFF_OACTIVE; 2950 if ((control & EOP) == 0) 2951 continue; 2952 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2953 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 2954 BUS_DMASYNC_POSTWRITE); 2955 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2956 2957 ifp->if_opackets++; 2958 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2959 __func__)); 2960 m_freem(txd->tx_m); 2961 txd->tx_m = NULL; 2962 } 2963 2964 if (prog > 0) { 2965 sc_if->msk_cdata.msk_tx_cons = cons; 2966 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2967 ifp->if_timer = 0; 2968 /* No need to sync LEs as we didn't update LEs. */ 2969 } 2970 } 2971 2972 static void 2973 msk_tick(void *xsc_if) 2974 { 2975 struct msk_if_softc *sc_if = xsc_if; 2976 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2977 struct mii_data *mii; 2978 2979 lwkt_serialize_enter(ifp->if_serializer); 2980 2981 mii = device_get_softc(sc_if->msk_miibus); 2982 2983 mii_tick(mii); 2984 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 2985 2986 lwkt_serialize_exit(ifp->if_serializer); 2987 } 2988 2989 static void 2990 msk_intr_phy(struct msk_if_softc *sc_if) 2991 { 2992 uint16_t status; 2993 2994 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2995 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2996 /* Handle FIFO Underrun/Overflow? */ 2997 if (status & PHY_M_IS_FIFO_ERROR) { 2998 device_printf(sc_if->msk_if_dev, 2999 "PHY FIFO underrun/overflow.\n"); 3000 } 3001 } 3002 3003 static void 3004 msk_intr_gmac(struct msk_if_softc *sc_if) 3005 { 3006 struct msk_softc *sc; 3007 uint8_t status; 3008 3009 sc = sc_if->msk_softc; 3010 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3011 3012 /* GMAC Rx FIFO overrun. */ 3013 if ((status & GM_IS_RX_FF_OR) != 0) { 3014 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3015 GMF_CLI_RX_FO); 3016 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 3017 } 3018 /* GMAC Tx FIFO underrun. 
 */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of a Tx underrun, we may need to flush/reset the
		 * Tx MAC, but that would also require resynchronization
		 * with the status LEs. Reinitializing the status LEs would
		 * affect the other port in a dual-MAC configuration, so it
		 * should be avoided as much as possible.
		 * Due to lack of documentation this is all guesswork; it
		 * needs more investigation.
		 */
	}
}

static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

static void
mskc_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec.
		 * This error is also mapped to either the Master Abort
		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express, bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex
		 * too, which requests the system to handle the problem.
		 * After an error occurs, it may be that no further access
		 * to the adapter can be performed.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			if (bootverbose) {
				device_printf(sc->msk_dev,
				    "Uncorrectable PCI Express error\n");
			}
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get the TLP header from the Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}

static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
#ifdef MSK_JUMBO
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	} else
#endif
	{
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_rx_ring_tag,
		    sc_if->msk_cdata.msk_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	}
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}

static int
mskc_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	int rxput[2];
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, idx, len, port, rxprog;
#ifdef ETHER_INPUT_CHAIN
	struct mbuf_chain chain0[MAXCPU];
#endif
	struct mbuf_chain *chain;

	idx = CSR_READ_2(sc, STAT_PUT_IDX);
	if (idx == sc->msk_stat_cons)
		return (0);

#ifdef ETHER_INPUT_CHAIN
	chain = chain0;
	ether_input_chain_init(chain);
#else
	chain = NULL;
#endif

	/* Sync status LEs. */
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_POSTREAD);
	/* XXX Sync Rx LEs here. */

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;

	rxprog = 0;
	for (cons = sc->msk_stat_cons; cons != idx;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		/*
		 * Marvell's FreeBSD driver updates the status LE after
		 * clearing HW_OWNER. However, we don't have a way to sync
		 * a single LE with the bus_dma(9) API; bus_dma(9) only
		 * provides a way to sync an entire DMA map. So don't sync
		 * the LE until we have a better way to sync LEs.
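		 *
		 * In other words, the only portable sync granularity is
		 * the whole map, which is why the sync calls bracket the
		 * entire event loop instead of wrapping each LE:
		 *
		 *	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
		 *	    BUS_DMASYNC_POSTREAD); // once, before any LE is read
		 *	... process all ready status LEs ...
		 *	// a matching sync before handing the LEs back to the
		 *	// hardware would go here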
		 */
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			continue;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXSTAT:
#ifdef MSK_JUMBO
			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
				msk_jumbo_rxeof(sc_if, status, len);
			else
#endif
				msk_rxeof(sc_if, status, len, chain);
			rxprog++;
			/*
			 * Because there is no way to sync a single Rx LE,
			 * put off the DMA sync operation until the end of
			 * event processing.
			 */
			rxput[port]++;
			/* Update prefetch unit if we've passed the watermark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			if (sc->msk_if[MSK_PORT_A] != NULL) {
				msk_txeof(sc->msk_if[MSK_PORT_A],
				    status & STLE_TXA1_MSKL);
			}
			if (sc->msk_if[MSK_PORT_B] != NULL) {
				msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				     STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				     STLE_TXA2_SHIFTH));
			}
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
			    control & STLE_OP_MASK);
			break;
		}
		MSK_INC(cons, MSK_STAT_RING_CNT);
		if (rxprog > sc->msk_process_limit)
			break;
	}

#ifdef ETHER_INPUT_CHAIN
	if (rxprog > 0)
		ether_input_dispatch(chain);
#endif

	sc->msk_stat_cons = cons;
	/* XXX We should sync status LEs here. See above notes. */

	if (rxput[MSK_PORT_A] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_A]);
	if (rxput[MSK_PORT_B] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_B]);

	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
}

/* Legacy interrupt handler for shared interrupt. */
static void
mskc_intr(void *xsc)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if0, *sc_if1;
	struct ifnet *ifp0, *ifp1;
	uint32_t status;

	sc = xsc;
	ASSERT_SERIALIZED(&sc->msk_serializer);

	/* Reading B0_Y2_SP_ISRC2 masks further interrupts.
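	 * The handler therefore runs with the chip's interrupt gate
	 * closed: the read below latches and masks all sources, and the
	 * write of 2 to B0_Y2_SP_ICR at the end re-opens the gate.  The
	 * skeleton of the pattern:
	 *
	 *	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);	// read = mask
	 *	if (status == 0 || status == 0xffffffff) {
	 *		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);	// not ours; unmask
	 *		return;
	 *	}
	 *	... service the sources recorded in 'status' ...
	 *	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);		// unmask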
	 */
	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
	    (status & sc->msk_intrmask) == 0) {
		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		return;
	}

	sc_if0 = sc->msk_if[MSK_PORT_A];
	sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;

	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
		msk_intr_phy(sc_if0);
	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
		msk_intr_phy(sc_if1);
	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
		msk_intr_gmac(sc_if0);
	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
		msk_intr_gmac(sc_if1);
	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		device_printf(sc->msk_dev, "Rx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		device_printf(sc->msk_dev, "Tx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & Y2_IS_HW_ERR) != 0)
		mskc_intr_hwerr(sc);

	while (mskc_handle_events(sc) != 0)
		;
	if ((status & Y2_IS_STAT_BMU) != 0)
		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

	if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
	    !ifq_is_empty(&ifp0->if_snd))
		if_devstart(ifp0);
	if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
	    !ifq_is_empty(&ifp1->if_snd))
		if_devstart(ifp1);
}

static void
msk_init(void *xsc)
{
	struct msk_if_softc *sc_if = xsc;
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct mii_data *mii;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	uint16_t gmac;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);

	error = 0;
	/* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
	    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
		/*
		 * On the Yukon EC Ultra, TSO and checksum offload are not
		 * supported for jumbo frames.
		 */
		ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		ifp->if_capenable &= ~IFCAP_TXCSUM;
	}

	/*
	 * Initialize the GMAC first.
	 * Without this initialization, the Rx MAC did not work as expected:
	 * it garbled status LEs, which resulted in out-of-order or
	 * duplicated frame delivery and, in turn, very poor Rx performance.
	 * (I had to write packet-analysis code that could be embedded in
	 * the driver to diagnose this issue.) I spent almost two months
	 * fixing this issue. If I had had a datasheet for the Yukon II, I
	 * wouldn't have encountered this. :-(
	 */
	gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);

	/* Dummy read the Interrupt Source Register. */
	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* Set MIB Clear Counter Mode.
*/ 3421 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3422 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3423 /* Read all MIB Counters with Clear Mode set. */ 3424 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3425 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3426 /* Clear MIB Clear Counter Mode. */ 3427 gmac &= ~GM_PAR_MIB_CLR; 3428 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3429 3430 /* Disable FCS. */ 3431 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3432 3433 /* Setup Transmit Control Register. */ 3434 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3435 3436 /* Setup Transmit Flow Control Register. */ 3437 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3438 3439 /* Setup Transmit Parameter Register. */ 3440 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3441 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3442 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3443 3444 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3445 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3446 3447 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) 3448 gmac |= GM_SMOD_JUMBO_ENA; 3449 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3450 3451 /* Set station address. */ 3452 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3453 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3454 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3455 eaddr[i]); 3456 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3457 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3458 eaddr[i]); 3459 3460 /* Disable interrupts for counter overflows. */ 3461 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3462 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3463 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3464 3465 /* Configure Rx MAC FIFO. */ 3466 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3467 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3468 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3469 GMF_OPER_ON | GMF_RX_F_FL_ON); 3470 3471 /* Set promiscuous mode. */ 3472 msk_setpromisc(sc_if); 3473 3474 /* Set multicast filter. */ 3475 msk_setmulti(sc_if); 3476 3477 /* Flush Rx MAC FIFO on any flow control or error. */ 3478 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3479 GMR_FS_ANY_ERR); 3480 3481 /* Set Rx FIFO flush threshold to 64 bytes. */ 3482 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), 3483 RX_GMF_FL_THR_DEF); 3484 3485 /* Configure Tx MAC FIFO. */ 3486 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3487 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3488 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3489 3490 /* Configure hardware VLAN tag insertion/stripping. */ 3491 msk_setvlan(sc_if, ifp); 3492 3493 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3494 /* Set Rx Pause threshould. */ 3495 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3496 MSK_ECU_LLPP); 3497 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3498 MSK_ECU_ULPP); 3499 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) { 3500 /* 3501 * Set Tx GMAC FIFO Almost Empty Threshold. 3502 */ 3503 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3504 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3505 /* Disable Store & Forward mode for Tx. 
*/ 3506 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3507 TX_JUMBO_ENA | TX_STFW_DIS); 3508 } else { 3509 /* Enable Store & Forward mode for Tx. */ 3510 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3511 TX_JUMBO_DIS | TX_STFW_ENA); 3512 } 3513 } 3514 3515 /* 3516 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3517 * arbiter as we don't use Sync Tx queue. 3518 */ 3519 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3520 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3521 /* Enable the RAM Interface Arbiter. */ 3522 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3523 3524 /* Setup RAM buffer. */ 3525 msk_set_rambuffer(sc_if); 3526 3527 /* Disable Tx sync Queue. */ 3528 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3529 3530 /* Setup Tx Queue Bus Memory Interface. */ 3531 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3532 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3533 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3534 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3535 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3536 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3537 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3538 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); 3539 } 3540 3541 /* Setup Rx Queue Bus Memory Interface. */ 3542 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3543 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3544 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3545 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3546 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3547 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3548 /* MAC Rx RAM Read is controlled by hardware. */ 3549 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3550 } 3551 3552 msk_set_prefetch(sc, sc_if->msk_txq, 3553 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3554 msk_init_tx_ring(sc_if); 3555 3556 /* Disable Rx checksum offload and RSS hash. */ 3557 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3558 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3559 #ifdef MSK_JUMBO 3560 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3561 msk_set_prefetch(sc, sc_if->msk_rxq, 3562 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3563 MSK_JUMBO_RX_RING_CNT - 1); 3564 error = msk_init_jumbo_rx_ring(sc_if); 3565 } else 3566 #endif 3567 { 3568 msk_set_prefetch(sc, sc_if->msk_rxq, 3569 sc_if->msk_rdata.msk_rx_ring_paddr, 3570 MSK_RX_RING_CNT - 1); 3571 error = msk_init_rx_ring(sc_if); 3572 } 3573 if (error != 0) { 3574 device_printf(sc_if->msk_if_dev, 3575 "initialization failed: no memory for Rx buffers\n"); 3576 msk_stop(sc_if); 3577 return; 3578 } 3579 3580 /* Configure interrupt handling. 
*/ 3581 if (sc_if->msk_port == MSK_PORT_A) { 3582 sc->msk_intrmask |= Y2_IS_PORT_A; 3583 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3584 } else { 3585 sc->msk_intrmask |= Y2_IS_PORT_B; 3586 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3587 } 3588 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3589 CSR_READ_4(sc, B0_HWE_IMSK); 3590 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3591 CSR_READ_4(sc, B0_IMSK); 3592 3593 sc_if->msk_link = 0; 3594 mii_mediachg(mii); 3595 3596 mskc_set_imtimer(sc); 3597 3598 ifp->if_flags |= IFF_RUNNING; 3599 ifp->if_flags &= ~IFF_OACTIVE; 3600 3601 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3602 } 3603 3604 static void 3605 msk_set_rambuffer(struct msk_if_softc *sc_if) 3606 { 3607 struct msk_softc *sc; 3608 int ltpp, utpp; 3609 3610 sc = sc_if->msk_softc; 3611 3612 /* Setup Rx Queue. */ 3613 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3614 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3615 sc->msk_rxqstart[sc_if->msk_port] / 8); 3616 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3617 sc->msk_rxqend[sc_if->msk_port] / 8); 3618 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3619 sc->msk_rxqstart[sc_if->msk_port] / 8); 3620 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3621 sc->msk_rxqstart[sc_if->msk_port] / 8); 3622 3623 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3624 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3625 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3626 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3627 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3628 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3629 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3630 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3631 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3632 3633 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3634 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3635 3636 /* Setup Tx Queue. */ 3637 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3638 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3639 sc->msk_txqstart[sc_if->msk_port] / 8); 3640 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3641 sc->msk_txqend[sc_if->msk_port] / 8); 3642 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3643 sc->msk_txqstart[sc_if->msk_port] / 8); 3644 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3645 sc->msk_txqstart[sc_if->msk_port] / 8); 3646 /* Enable Store & Forward for Tx side. */ 3647 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3648 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3649 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3650 } 3651 3652 static void 3653 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3654 uint32_t count) 3655 { 3656 3657 /* Reset the prefetch unit. */ 3658 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3659 PREF_UNIT_RST_SET); 3660 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3661 PREF_UNIT_RST_CLR); 3662 /* Set LE base address. */ 3663 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3664 MSK_ADDR_LO(addr)); 3665 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3666 MSK_ADDR_HI(addr)); 3667 /* Set the list last index. */ 3668 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3669 count); 3670 /* Turn on prefetch unit. */ 3671 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3672 PREF_UNIT_OP_ON); 3673 /* Dummy read to ensure write. 
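 * PCI(e) writes are posted: they may still sit in a bridge's write
 * buffer when the CPU moves on.  Reading any register back from the
 * same device forces the posted writes to complete first, which is why
 * this read-back idiom appears throughout the file:
 *
 *	CSR_WRITE_4(sc, reg, val);	// may be buffered on the bus
 *	CSR_READ_4(sc, reg);		// read-back flushes posted writes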
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
#endif
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);
	ifp->if_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable all PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work on Yukon-2 unless the BMU
	 * reaches the end of a packet, and since we cannot guarantee that
	 * incoming data is pending, the BMU must be reset while no DMA
	 * transfer is in progress. Because the Rx path may still be
	 * active, the Rx RAM buffer is stopped first, so any incoming
	 * data cannot trigger a DMA. Once the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress has ended, and only
	 * then is it reset.
	 */
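	/*
	 * Concretely, the sequence below is: take the Rx RAM buffer out
	 * of operational mode, poll until its level registers (Q_RSL and
	 * Q_RL) agree, i.e. any DMA in progress has drained, then reset
	 * the Rx BMU, the Rx prefetch unit, the RAM buffer and the Rx
	 * MAC FIFO.
	 */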
	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
#ifdef MSK_JUMBO
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
#endif
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
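	/*
	 * Note the teardown order above: each DMA map is synced with the
	 * appropriate POSTREAD/POSTWRITE op and unloaded before its mbuf
	 * is freed, and this happens only after the BMUs and prefetch
	 * units have been reset, so the hardware can no longer touch the
	 * buffers.
	 */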
	/*
	 * Mark the interface down.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc_if->msk_link = 0;
}

static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}

static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error = 0, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}

static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
	    msk_dmamap_cb, &ctx, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}
	*paddr = seg.ds_addr;
	return 0;
}

static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
mskc_set_imtimer(struct msk_softc *sc)
{
	if (sc->msk_intr_rate > 0) {
		/*
		 * XXX myk(4) seems to use 125MHz for EC/FE/XL
		 * and 78.125MHz for the rest of the chip types.
		 */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
	} else {
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
	}
}
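/*
 * Worked example of the moderation timer math above, assuming
 * MSK_USECS() scales microseconds by the core clock in MHz as the
 * XXX note suggests: msk_intr_rate = 4000 interrupts/s gives a
 * 1000000 / 4000 = 250us window, i.e. 250 * 125 = 31250 B2_IRQM_INI
 * ticks on a 125MHz chip, or roughly 250 * 78.125 = 19531 ticks at
 * 78.125MHz.  A hypothetical helper, kept disabled, that makes the
 * conversion explicit:
 */
#ifdef notdef
static uint32_t
mskc_imtimer_ticks(struct msk_softc *sc)
{
	/* Caller must ensure msk_intr_rate > 0, as mskc_set_imtimer() does. */
	uint32_t us = 1000000 / sc->msk_intr_rate;	/* e.g. 250us at 4000/s */

	return MSK_USECS(sc, us);	/* e.g. 31250 ticks at 125MHz */
}
#endif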