1 /****************************************************************************** 2 * 3 * Name : sky2.c 4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x 5 * Version: $Revision: 1.23 $ 6 * Date : $Date: 2005/12/22 09:04:11 $ 7 * Purpose: Main driver source file 8 * 9 *****************************************************************************/ 10 11 /****************************************************************************** 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 *****************************************************************************/ 47 48 /*- 49 * Copyright (c) 1997, 1998, 1999, 2000 50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 3. All advertising materials mentioning features or use of this software 61 * must display the following acknowledgement: 62 * This product includes software developed by Bill Paul. 63 * 4. Neither the name of the author nor the names of any co-contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 66 * 67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 77 * THE POSSIBILITY OF SUCH DAMAGE. 78 */ 79 /*- 80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 81 * 82 * Permission to use, copy, modify, and distribute this software for any 83 * purpose with or without fee is hereby granted, provided that the above 84 * copyright notice and this permission notice appear in all copies. 85 * 86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 93 */ 94 95 /* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */ 96 /* $DragonFly: src/sys/dev/netif/msk/if_msk.c,v 1.10 2008/11/23 04:28:27 sephe Exp $ */ 97 98 /* 99 * Device driver for the Marvell Yukon II Ethernet controller. 100 * Due to lack of documentation, this driver is based on the code from 101 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. 102 */ 103 104 #include <sys/param.h> 105 #include <sys/endian.h> 106 #include <sys/kernel.h> 107 #include <sys/bus.h> 108 #include <sys/in_cksum.h> 109 #include <sys/interrupt.h> 110 #include <sys/malloc.h> 111 #include <sys/proc.h> 112 #include <sys/rman.h> 113 #include <sys/serialize.h> 114 #include <sys/socket.h> 115 #include <sys/sockio.h> 116 #include <sys/sysctl.h> 117 118 #include <net/ethernet.h> 119 #include <net/if.h> 120 #include <net/bpf.h> 121 #include <net/if_arp.h> 122 #include <net/if_dl.h> 123 #include <net/if_media.h> 124 #include <net/ifq_var.h> 125 #include <net/vlan/if_vlan_var.h> 126 127 #include <netinet/ip.h> 128 #include <netinet/ip_var.h> 129 130 #include <dev/netif/mii_layer/miivar.h> 131 132 #include <bus/pci/pcireg.h> 133 #include <bus/pci/pcivar.h> 134 135 #include "if_mskreg.h" 136 137 /* "device miibus" required. See GENERIC if you get errors here. */ 138 #include "miibus_if.h" 139 140 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 141 142 /* 143 * Devices supported by this driver. 144 */ 145 static const struct msk_product { 146 uint16_t msk_vendorid; 147 uint16_t msk_deviceid; 148 const char *msk_name; 149 } msk_products[] = { 150 { VENDORID_SK, DEVICEID_SK_YUKON2, 151 "SK-9Sxx Gigabit Ethernet" }, 152 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 153 "SK-9Exx Gigabit Ethernet"}, 154 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 155 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 156 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 157 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 158 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 159 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 160 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 161 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 162 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 163 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 164 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 165 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 166 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 167 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 168 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 169 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 170 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 171 "Marvell Yukon 88E8035 Gigabit Ethernet" }, 172 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 173 "Marvell Yukon 88E8036 Gigabit Ethernet" }, 174 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 175 "Marvell Yukon 88E8038 Gigabit Ethernet" }, 176 { VENDORID_MARVELL, DEVICEID_MRVL_8039, 177 "Marvell Yukon 88E8039 Gigabit Ethernet" }, 178 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 179 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 180 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 181 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 182 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 183 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 184 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 185 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 186 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 187 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 188 { VENDORID_MARVELL, DEVICEID_MRVL_436A, 189 "Marvell Yukon 88E8058 Gigabit Ethernet" }, 190 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 191 "D-Link 550SX Gigabit Ethernet" }, 192 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 193 "D-Link 560T Gigabit Ethernet" }, 194 { 0, 0, NULL } 195 }; 196 197 static const char *model_name[] = { 198 "Yukon XL", 199 "Yukon EC Ultra", 200 "Yukon Unknown", 201 "Yukon EC", 202 "Yukon FE" 203 }; 204 205 static int mskc_probe(device_t); 206 static int mskc_attach(device_t); 207 static int mskc_detach(device_t); 208 static int mskc_shutdown(device_t); 209 static int mskc_suspend(device_t); 210 static int mskc_resume(device_t); 211 static void mskc_intr(void *); 212 213 static void mskc_reset(struct msk_softc *); 214 static void mskc_set_imtimer(struct msk_softc *); 215 static void mskc_intr_hwerr(struct msk_softc *); 216 static int mskc_handle_events(struct msk_softc *); 217 static void mskc_phy_power(struct msk_softc *, int); 218 static int mskc_setup_rambuffer(struct msk_softc *); 219 static int mskc_status_dma_alloc(struct msk_softc *); 220 static void mskc_status_dma_free(struct msk_softc *); 221 static int mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS); 222 static int mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS); 223 224 static int msk_probe(device_t); 225 static int msk_attach(device_t); 226 static int msk_detach(device_t); 227 static int msk_miibus_readreg(device_t, int, int); 228 static int msk_miibus_writereg(device_t, int, int, int); 229 static void msk_miibus_statchg(device_t); 230 231 static void msk_init(void *); 232 static int msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 233 static void msk_start(struct ifnet *); 234 static void msk_watchdog(struct ifnet *); 235 static int msk_mediachange(struct ifnet *); 236 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 237 238 static void msk_tick(void *); 239 static void msk_intr_phy(struct msk_if_softc *); 240 static void msk_intr_gmac(struct msk_if_softc *); 241 static __inline void 242 msk_rxput(struct msk_if_softc *); 243 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); 244 static void msk_rxeof(struct msk_if_softc *, uint32_t, int, 245 struct mbuf_chain *); 246 static void msk_txeof(struct msk_if_softc *, int); 247 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); 248 static void msk_set_rambuffer(struct msk_if_softc *); 249 static void msk_stop(struct msk_if_softc *); 250 251 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 252 static void msk_dmamap_mbuf_cb(void *, bus_dma_segment_t *, int, 253 bus_size_t, int); 254 static int msk_txrx_dma_alloc(struct msk_if_softc *); 255 static void msk_txrx_dma_free(struct msk_if_softc *); 256 static int msk_init_rx_ring(struct msk_if_softc *); 257 static void msk_init_tx_ring(struct msk_if_softc *); 258 static __inline void 259 msk_discard_rxbuf(struct msk_if_softc *, int); 260 static int msk_newbuf(struct msk_if_softc *, int); 261 static struct mbuf * 262 msk_defrag(struct mbuf *, int, int); 263 static int msk_encap(struct msk_if_softc *, struct mbuf **); 264 265 #ifdef MSK_JUMBO 266 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 267 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 268 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 269 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int); 270 static void *msk_jalloc(struct msk_if_softc *); 271 static void msk_jfree(void *, void *); 272 #endif 273 274 static int msk_phy_readreg(struct msk_if_softc *, int, int); 275 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 276 277 static void msk_setmulti(struct msk_if_softc *); 278 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 279 static void msk_setpromisc(struct msk_if_softc *); 280 281 static int msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *, 282 void **, bus_addr_t *, bus_dmamap_t *); 283 static void msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 284 285 static device_method_t mskc_methods[] = { 286 /* Device interface */ 287 DEVMETHOD(device_probe, mskc_probe), 288 DEVMETHOD(device_attach, mskc_attach), 289 DEVMETHOD(device_detach, mskc_detach), 290 DEVMETHOD(device_suspend, mskc_suspend), 291 DEVMETHOD(device_resume, mskc_resume), 292 DEVMETHOD(device_shutdown, mskc_shutdown), 293 294 /* bus interface */ 295 DEVMETHOD(bus_print_child, bus_generic_print_child), 296 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 297 298 { NULL, NULL } 299 }; 300 301 static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc)); 302 static devclass_t mskc_devclass; 303 304 static device_method_t msk_methods[] = { 305 /* Device interface */ 306 DEVMETHOD(device_probe, msk_probe), 307 DEVMETHOD(device_attach, msk_attach), 308 DEVMETHOD(device_detach, msk_detach), 309 DEVMETHOD(device_shutdown, bus_generic_shutdown), 310 311 /* bus interface */ 312 DEVMETHOD(bus_print_child, bus_generic_print_child), 313 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 314 315 /* MII interface */ 316 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 317 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 318 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 319 320 { NULL, NULL } 321 }; 322 323 static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc)); 324 static devclass_t msk_devclass; 325 326 DECLARE_DUMMY_MODULE(if_msk); 327 DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, 0, 0); 328 DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, 0, 0); 329 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0); 330 331 static int mskc_intr_rate = 0; 332 static int mskc_process_limit = MSK_PROC_DEFAULT; 333 334 TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate); 335 TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit); 336 337 static int 338 msk_miibus_readreg(device_t dev, int phy, int reg) 339 { 340 struct msk_if_softc *sc_if; 341 342 if (phy != PHY_ADDR_MARV) 343 return (0); 344 345 sc_if = device_get_softc(dev); 346 347 return (msk_phy_readreg(sc_if, phy, reg)); 348 } 349 350 static int 351 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 352 { 353 struct msk_softc *sc; 354 int i, val; 355 356 sc = sc_if->msk_softc; 357 358 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 359 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 360 361 for (i = 0; i < MSK_TIMEOUT; i++) { 362 DELAY(1); 363 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 364 if ((val & GM_SMI_CT_RD_VAL) != 0) { 365 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 366 break; 367 } 368 } 369 370 if (i == MSK_TIMEOUT) { 371 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 372 val = 0; 373 } 374 375 return (val); 376 } 377 378 static int 379 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 380 { 381 struct msk_if_softc *sc_if; 382 383 if (phy != PHY_ADDR_MARV) 384 return (0); 385 386 sc_if = device_get_softc(dev); 387 388 return (msk_phy_writereg(sc_if, phy, reg, val)); 389 } 390 391 static int 392 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) 393 { 394 struct msk_softc *sc; 395 int i; 396 397 sc = sc_if->msk_softc; 398 399 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); 400 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 401 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 402 for (i = 0; i < MSK_TIMEOUT; i++) { 403 DELAY(1); 404 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & 405 GM_SMI_CT_BUSY) == 0) 406 break; 407 } 408 if (i == MSK_TIMEOUT) 409 if_printf(sc_if->msk_ifp, "phy write timeout\n"); 410 411 return (0); 412 } 413 414 static void 415 msk_miibus_statchg(device_t dev) 416 { 417 struct msk_if_softc *sc_if; 418 struct msk_softc *sc; 419 struct mii_data *mii; 420 struct ifnet *ifp; 421 uint32_t gmac; 422 423 sc_if = device_get_softc(dev); 424 sc = sc_if->msk_softc; 425 426 mii = device_get_softc(sc_if->msk_miibus); 427 ifp = sc_if->msk_ifp; 428 429 if (mii->mii_media_status & IFM_ACTIVE) { 430 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 431 sc_if->msk_link = 1; 432 } else 433 sc_if->msk_link = 0; 434 435 if (sc_if->msk_link != 0) { 436 /* Enable Tx FIFO Underrun. */ 437 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 438 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); 439 /* 440 * Because mii(4) notify msk(4) that it detected link status 441 * change, there is no need to enable automatic 442 * speed/flow-control/duplex updates. 443 */ 444 gmac = GM_GPCR_AU_ALL_DIS; 445 switch (IFM_SUBTYPE(mii->mii_media_active)) { 446 case IFM_1000_SX: 447 case IFM_1000_T: 448 gmac |= GM_GPCR_SPEED_1000; 449 break; 450 case IFM_100_TX: 451 gmac |= GM_GPCR_SPEED_100; 452 break; 453 case IFM_10_T: 454 break; 455 } 456 457 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) 458 gmac |= GM_GPCR_DUP_FULL; 459 /* Disable Rx flow control. */ 460 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0) 461 gmac |= GM_GPCR_FC_RX_DIS; 462 /* Disable Tx flow control. */ 463 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0) 464 gmac |= GM_GPCR_FC_TX_DIS; 465 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 466 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 467 /* Read again to ensure writing. */ 468 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 469 470 gmac = GMC_PAUSE_ON; 471 if (((mii->mii_media_active & IFM_GMASK) & 472 (IFM_FLAG0 | IFM_FLAG1)) == 0) 473 gmac = GMC_PAUSE_OFF; 474 /* Diable pause for 10/100 Mbps in half-duplex mode. */ 475 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) && 476 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX || 477 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)) 478 gmac = GMC_PAUSE_OFF; 479 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); 480 481 /* Enable PHY interrupt for FIFO underrun/overflow. */ 482 msk_phy_writereg(sc_if, PHY_ADDR_MARV, 483 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); 484 } else { 485 /* 486 * Link state changed to down. 487 * Disable PHY interrupts. 488 */ 489 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 490 /* Disable Rx/Tx MAC. */ 491 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 492 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 493 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 494 /* Read again to ensure writing. */ 495 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 496 } 497 } 498 499 static void 500 msk_setmulti(struct msk_if_softc *sc_if) 501 { 502 struct msk_softc *sc; 503 struct ifnet *ifp; 504 struct ifmultiaddr *ifma; 505 uint32_t mchash[2]; 506 uint32_t crc; 507 uint16_t mode; 508 509 sc = sc_if->msk_softc; 510 ifp = sc_if->msk_ifp; 511 512 bzero(mchash, sizeof(mchash)); 513 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 514 mode |= GM_RXCR_UCF_ENA; 515 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 516 if ((ifp->if_flags & IFF_PROMISC) != 0) 517 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 518 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 519 mchash[0] = 0xffff; 520 mchash[1] = 0xffff; 521 } 522 } else { 523 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 524 if (ifma->ifma_addr->sa_family != AF_LINK) 525 continue; 526 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 527 ifma->ifma_addr), ETHER_ADDR_LEN); 528 /* Just want the 6 least significant bits. */ 529 crc &= 0x3f; 530 /* Set the corresponding bit in the hash table. */ 531 mchash[crc >> 5] |= 1 << (crc & 0x1f); 532 } 533 mode |= GM_RXCR_MCF_ENA; 534 } 535 536 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, 537 mchash[0] & 0xffff); 538 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, 539 (mchash[0] >> 16) & 0xffff); 540 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, 541 mchash[1] & 0xffff); 542 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, 543 (mchash[1] >> 16) & 0xffff); 544 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 545 } 546 547 static void 548 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp) 549 { 550 struct msk_softc *sc; 551 552 sc = sc_if->msk_softc; 553 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 554 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 555 RX_VLAN_STRIP_ON); 556 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 557 TX_VLAN_TAG_ON); 558 } else { 559 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 560 RX_VLAN_STRIP_OFF); 561 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 562 TX_VLAN_TAG_OFF); 563 } 564 } 565 566 static void 567 msk_setpromisc(struct msk_if_softc *sc_if) 568 { 569 struct msk_softc *sc; 570 struct ifnet *ifp; 571 uint16_t mode; 572 573 sc = sc_if->msk_softc; 574 ifp = sc_if->msk_ifp; 575 576 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 577 if (ifp->if_flags & IFF_PROMISC) 578 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 579 else 580 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 581 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 582 } 583 584 static int 585 msk_init_rx_ring(struct msk_if_softc *sc_if) 586 { 587 struct msk_ring_data *rd; 588 struct msk_rxdesc *rxd; 589 int i, prod; 590 591 sc_if->msk_cdata.msk_rx_cons = 0; 592 sc_if->msk_cdata.msk_rx_prod = 0; 593 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 594 595 rd = &sc_if->msk_rdata; 596 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 597 prod = sc_if->msk_cdata.msk_rx_prod; 598 for (i = 0; i < MSK_RX_RING_CNT; i++) { 599 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 600 rxd->rx_m = NULL; 601 rxd->rx_le = &rd->msk_rx_ring[prod]; 602 if (msk_newbuf(sc_if, prod) != 0) 603 return (ENOBUFS); 604 MSK_INC(prod, MSK_RX_RING_CNT); 605 } 606 607 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, 608 sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREWRITE); 609 610 /* Update prefetch unit. */ 611 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1; 612 CSR_WRITE_2(sc_if->msk_softc, 613 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 614 sc_if->msk_cdata.msk_rx_prod); 615 616 return (0); 617 } 618 619 #ifdef MSK_JUMBO 620 static int 621 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 622 { 623 struct msk_ring_data *rd; 624 struct msk_rxdesc *rxd; 625 int i, prod; 626 627 MSK_IF_LOCK_ASSERT(sc_if); 628 629 sc_if->msk_cdata.msk_rx_cons = 0; 630 sc_if->msk_cdata.msk_rx_prod = 0; 631 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 632 633 rd = &sc_if->msk_rdata; 634 bzero(rd->msk_jumbo_rx_ring, 635 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 636 prod = sc_if->msk_cdata.msk_rx_prod; 637 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 638 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 639 rxd->rx_m = NULL; 640 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 641 if (msk_jumbo_newbuf(sc_if, prod) != 0) 642 return (ENOBUFS); 643 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 644 } 645 646 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 647 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 649 650 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1; 651 CSR_WRITE_2(sc_if->msk_softc, 652 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 653 sc_if->msk_cdata.msk_rx_prod); 654 655 return (0); 656 } 657 #endif 658 659 static void 660 msk_init_tx_ring(struct msk_if_softc *sc_if) 661 { 662 struct msk_ring_data *rd; 663 struct msk_txdesc *txd; 664 int i; 665 666 sc_if->msk_cdata.msk_tx_prod = 0; 667 sc_if->msk_cdata.msk_tx_cons = 0; 668 sc_if->msk_cdata.msk_tx_cnt = 0; 669 670 rd = &sc_if->msk_rdata; 671 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 672 for (i = 0; i < MSK_TX_RING_CNT; i++) { 673 txd = &sc_if->msk_cdata.msk_txdesc[i]; 674 txd->tx_m = NULL; 675 txd->tx_le = &rd->msk_tx_ring[i]; 676 } 677 678 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 679 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE); 680 } 681 682 static __inline void 683 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 684 { 685 struct msk_rx_desc *rx_le; 686 struct msk_rxdesc *rxd; 687 struct mbuf *m; 688 689 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 690 m = rxd->rx_m; 691 rx_le = rxd->rx_le; 692 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 693 } 694 695 #ifdef MSK_JUMBO 696 static __inline void 697 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 698 { 699 struct msk_rx_desc *rx_le; 700 struct msk_rxdesc *rxd; 701 struct mbuf *m; 702 703 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 704 m = rxd->rx_m; 705 rx_le = rxd->rx_le; 706 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 707 } 708 #endif 709 710 static int 711 msk_newbuf(struct msk_if_softc *sc_if, int idx) 712 { 713 struct msk_rx_desc *rx_le; 714 struct msk_rxdesc *rxd; 715 struct mbuf *m; 716 struct msk_dmamap_arg ctx; 717 bus_dma_segment_t seg; 718 bus_dmamap_t map; 719 720 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 721 if (m == NULL) 722 return (ENOBUFS); 723 724 m->m_len = m->m_pkthdr.len = MCLBYTES; 725 m_adj(m, ETHER_ALIGN); 726 727 bzero(&ctx, sizeof(ctx)); 728 ctx.nseg = 1; 729 ctx.segs = &seg; 730 if (bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_rx_tag, 731 sc_if->msk_cdata.msk_rx_sparemap, m, msk_dmamap_mbuf_cb, &ctx, 732 BUS_DMA_NOWAIT) != 0) { 733 m_freem(m); 734 return (ENOBUFS); 735 } 736 KASSERT(ctx.nseg == 1, 737 ("%s: %d segments returned!", __func__, ctx.nseg)); 738 739 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 740 if (rxd->rx_m != NULL) { 741 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 742 BUS_DMASYNC_POSTREAD); 743 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 744 } 745 map = rxd->rx_dmamap; 746 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 747 sc_if->msk_cdata.msk_rx_sparemap = map; 748 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 749 BUS_DMASYNC_PREREAD); 750 rxd->rx_m = m; 751 rx_le = rxd->rx_le; 752 rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr)); 753 rx_le->msk_control = 754 htole32(seg.ds_len | OP_PACKET | HW_OWNER); 755 756 return (0); 757 } 758 759 #ifdef MSK_JUMBO 760 static int 761 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 762 { 763 struct msk_rx_desc *rx_le; 764 struct msk_rxdesc *rxd; 765 struct mbuf *m; 766 bus_dma_segment_t segs[1]; 767 bus_dmamap_t map; 768 int nsegs; 769 void *buf; 770 771 MGETHDR(m, M_DONTWAIT, MT_DATA); 772 if (m == NULL) 773 return (ENOBUFS); 774 buf = msk_jalloc(sc_if); 775 if (buf == NULL) { 776 m_freem(m); 777 return (ENOBUFS); 778 } 779 /* Attach the buffer to the mbuf. */ 780 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0, 781 EXT_NET_DRV); 782 if ((m->m_flags & M_EXT) == 0) { 783 m_freem(m); 784 return (ENOBUFS); 785 } 786 m->m_pkthdr.len = m->m_len = MSK_JLEN; 787 m_adj(m, ETHER_ALIGN); 788 789 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 790 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 791 BUS_DMA_NOWAIT) != 0) { 792 m_freem(m); 793 return (ENOBUFS); 794 } 795 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 796 797 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 798 if (rxd->rx_m != NULL) { 799 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 800 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 801 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 802 rxd->rx_dmamap); 803 } 804 map = rxd->rx_dmamap; 805 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 806 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 807 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 808 BUS_DMASYNC_PREREAD); 809 rxd->rx_m = m; 810 rx_le = rxd->rx_le; 811 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 812 rx_le->msk_control = 813 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 814 815 return (0); 816 } 817 #endif 818 819 /* 820 * Set media options. 821 */ 822 static int 823 msk_mediachange(struct ifnet *ifp) 824 { 825 struct msk_if_softc *sc_if = ifp->if_softc; 826 struct mii_data *mii; 827 828 mii = device_get_softc(sc_if->msk_miibus); 829 mii_mediachg(mii); 830 831 return (0); 832 } 833 834 /* 835 * Report current media status. 836 */ 837 static void 838 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 839 { 840 struct msk_if_softc *sc_if = ifp->if_softc; 841 struct mii_data *mii; 842 843 mii = device_get_softc(sc_if->msk_miibus); 844 mii_pollstat(mii); 845 846 ifmr->ifm_active = mii->mii_media_active; 847 ifmr->ifm_status = mii->mii_media_status; 848 } 849 850 static int 851 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 852 { 853 struct msk_if_softc *sc_if; 854 struct ifreq *ifr; 855 struct mii_data *mii; 856 int error, mask; 857 858 sc_if = ifp->if_softc; 859 ifr = (struct ifreq *)data; 860 error = 0; 861 862 switch(command) { 863 case SIOCSIFMTU: 864 #ifdef MSK_JUMBO 865 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { 866 error = EINVAL; 867 break; 868 } 869 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE && 870 ifr->ifr_mtu > MSK_MAX_FRAMELEN) { 871 error = EINVAL; 872 break; 873 } 874 ifp->if_mtu = ifr->ifr_mtu; 875 if ((ifp->if_flags & IFF_RUNNING) != 0) 876 msk_init(sc_if); 877 #else 878 error = EOPNOTSUPP; 879 #endif 880 break; 881 882 case SIOCSIFFLAGS: 883 if (ifp->if_flags & IFF_UP) { 884 if (ifp->if_flags & IFF_RUNNING) { 885 if (((ifp->if_flags ^ sc_if->msk_if_flags) 886 & IFF_PROMISC) != 0) { 887 msk_setpromisc(sc_if); 888 msk_setmulti(sc_if); 889 } 890 } else { 891 if (sc_if->msk_detach == 0) 892 msk_init(sc_if); 893 } 894 } else { 895 if (ifp->if_flags & IFF_RUNNING) 896 msk_stop(sc_if); 897 } 898 sc_if->msk_if_flags = ifp->if_flags; 899 break; 900 901 case SIOCADDMULTI: 902 case SIOCDELMULTI: 903 if (ifp->if_flags & IFF_RUNNING) 904 msk_setmulti(sc_if); 905 break; 906 907 case SIOCGIFMEDIA: 908 case SIOCSIFMEDIA: 909 mii = device_get_softc(sc_if->msk_miibus); 910 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 911 break; 912 913 case SIOCSIFCAP: 914 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 915 if ((mask & IFCAP_TXCSUM) != 0) { 916 ifp->if_capenable ^= IFCAP_TXCSUM; 917 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 918 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 919 ifp->if_hwassist |= MSK_CSUM_FEATURES; 920 else 921 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 922 } 923 #ifdef notyet 924 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 925 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 926 msk_setvlan(sc_if, ifp); 927 } 928 #endif 929 930 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 931 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 932 /* 933 * In Yukon EC Ultra, TSO & checksum offload is not 934 * supported for jumbo frame. 935 */ 936 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 937 ifp->if_capenable &= ~IFCAP_TXCSUM; 938 } 939 break; 940 941 default: 942 error = ether_ioctl(ifp, command, data); 943 break; 944 } 945 946 return (error); 947 } 948 949 static int 950 mskc_probe(device_t dev) 951 { 952 const struct msk_product *mp; 953 uint16_t vendor, devid; 954 955 vendor = pci_get_vendor(dev); 956 devid = pci_get_device(dev); 957 for (mp = msk_products; mp->msk_name != NULL; ++mp) { 958 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { 959 device_set_desc(dev, mp->msk_name); 960 return (0); 961 } 962 } 963 return (ENXIO); 964 } 965 966 static int 967 mskc_setup_rambuffer(struct msk_softc *sc) 968 { 969 int next; 970 int i; 971 uint8_t val; 972 973 /* Get adapter SRAM size. */ 974 val = CSR_READ_1(sc, B2_E_0); 975 sc->msk_ramsize = (val == 0) ? 128 : val * 4; 976 if (bootverbose) { 977 device_printf(sc->msk_dev, 978 "RAM buffer size : %dKB\n", sc->msk_ramsize); 979 } 980 /* 981 * Give receiver 2/3 of memory and round down to the multiple 982 * of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple 983 * of 1024. 984 */ 985 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024); 986 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize; 987 for (i = 0, next = 0; i < sc->msk_num_port; i++) { 988 sc->msk_rxqstart[i] = next; 989 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1; 990 next = sc->msk_rxqend[i] + 1; 991 sc->msk_txqstart[i] = next; 992 sc->msk_txqend[i] = next + sc->msk_txqsize - 1; 993 next = sc->msk_txqend[i] + 1; 994 if (bootverbose) { 995 device_printf(sc->msk_dev, 996 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, 997 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], 998 sc->msk_rxqend[i]); 999 device_printf(sc->msk_dev, 1000 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, 1001 sc->msk_txqsize / 1024, sc->msk_txqstart[i], 1002 sc->msk_txqend[i]); 1003 } 1004 } 1005 1006 return (0); 1007 } 1008 1009 static void 1010 mskc_phy_power(struct msk_softc *sc, int mode) 1011 { 1012 uint32_t val; 1013 int i; 1014 1015 switch (mode) { 1016 case MSK_PHY_POWERUP: 1017 /* Switch power to VCC (WA for VAUX problem). */ 1018 CSR_WRITE_1(sc, B0_POWER_CTRL, 1019 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 1020 /* Disable Core Clock Division, set Clock Select to 0. */ 1021 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 1022 1023 val = 0; 1024 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1025 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1026 /* Enable bits are inverted. */ 1027 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1028 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1029 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1030 } 1031 /* 1032 * Enable PCI & Core Clock, enable clock gating for both Links. 1033 */ 1034 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1035 1036 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1037 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 1038 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1039 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1040 /* Deassert Low Power for 1st PHY. */ 1041 val |= PCI_Y2_PHY1_COMA; 1042 if (sc->msk_num_port > 1) 1043 val |= PCI_Y2_PHY2_COMA; 1044 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 1045 uint32_t our; 1046 1047 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); 1048 1049 /* Enable all clocks. */ 1050 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4); 1051 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4); 1052 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 1053 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 1054 /* Set all bits to 0 except bits 15..12. */ 1055 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4); 1056 /* Set to default value. */ 1057 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4); 1058 } 1059 /* Release PHY from PowerDown/COMA mode. */ 1060 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1061 for (i = 0; i < sc->msk_num_port; i++) { 1062 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1063 GMLC_RST_SET); 1064 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1065 GMLC_RST_CLR); 1066 } 1067 break; 1068 case MSK_PHY_POWERDOWN: 1069 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1070 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1071 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1072 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1073 val &= ~PCI_Y2_PHY1_COMA; 1074 if (sc->msk_num_port > 1) 1075 val &= ~PCI_Y2_PHY2_COMA; 1076 } 1077 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1078 1079 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1080 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1081 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1082 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1083 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1084 /* Enable bits are inverted. */ 1085 val = 0; 1086 } 1087 /* 1088 * Disable PCI & Core Clock, disable clock gating for 1089 * both Links. 1090 */ 1091 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1092 CSR_WRITE_1(sc, B0_POWER_CTRL, 1093 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1094 break; 1095 default: 1096 break; 1097 } 1098 } 1099 1100 static void 1101 mskc_reset(struct msk_softc *sc) 1102 { 1103 bus_addr_t addr; 1104 uint16_t status; 1105 uint32_t val; 1106 int i; 1107 1108 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1109 1110 /* Disable ASF. */ 1111 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) { 1112 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1113 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1114 } 1115 /* 1116 * Since we disabled ASF, S/W reset is required for Power Management. 1117 */ 1118 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1119 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1120 1121 /* Clear all error bits in the PCI status register. */ 1122 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1123 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1124 1125 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1126 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1127 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 1128 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1129 1130 switch (sc->msk_bustype) { 1131 case MSK_PEX_BUS: 1132 /* Clear all PEX errors. */ 1133 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1134 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1135 if ((val & PEX_RX_OV) != 0) { 1136 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1137 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1138 } 1139 break; 1140 case MSK_PCI_BUS: 1141 case MSK_PCIX_BUS: 1142 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1143 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1144 if (val == 0) 1145 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1146 if (sc->msk_bustype == MSK_PCIX_BUS) { 1147 /* Set Cache Line Size opt. */ 1148 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1149 val |= PCI_CLS_OPT; 1150 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1151 } 1152 break; 1153 } 1154 /* Set PHY power state. */ 1155 mskc_phy_power(sc, MSK_PHY_POWERUP); 1156 1157 /* Reset GPHY/GMAC Control */ 1158 for (i = 0; i < sc->msk_num_port; i++) { 1159 /* GPHY Control reset. */ 1160 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 1161 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 1162 /* GMAC Control reset. */ 1163 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 1164 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 1165 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 1166 } 1167 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1168 1169 /* LED On. */ 1170 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); 1171 1172 /* Clear TWSI IRQ. */ 1173 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); 1174 1175 /* Turn off hardware timer. */ 1176 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); 1177 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); 1178 1179 /* Turn off descriptor polling. */ 1180 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); 1181 1182 /* Turn off time stamps. */ 1183 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1184 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1185 1186 /* Configure timeout values. */ 1187 for (i = 0; i < sc->msk_num_port; i++) { 1188 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET); 1189 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 1190 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), 1191 MSK_RI_TO_53); 1192 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), 1193 MSK_RI_TO_53); 1194 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), 1195 MSK_RI_TO_53); 1196 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), 1197 MSK_RI_TO_53); 1198 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), 1199 MSK_RI_TO_53); 1200 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), 1201 MSK_RI_TO_53); 1202 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), 1203 MSK_RI_TO_53); 1204 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), 1205 MSK_RI_TO_53); 1206 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), 1207 MSK_RI_TO_53); 1208 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), 1209 MSK_RI_TO_53); 1210 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), 1211 MSK_RI_TO_53); 1212 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), 1213 MSK_RI_TO_53); 1214 } 1215 1216 /* Disable all interrupts. */ 1217 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1218 CSR_READ_4(sc, B0_HWE_IMSK); 1219 CSR_WRITE_4(sc, B0_IMSK, 0); 1220 CSR_READ_4(sc, B0_IMSK); 1221 1222 /* 1223 * On dual port PCI-X card, there is an problem where status 1224 * can be received out of order due to split transactions. 1225 */ 1226 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) { 1227 uint16_t pcix_cmd; 1228 uint8_t pcix; 1229 1230 pcix = pci_get_pcixcap_ptr(sc->msk_dev); 1231 1232 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2); 1233 /* Clear Max Outstanding Split Transactions. */ 1234 pcix_cmd &= ~0x70; 1235 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1236 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2); 1237 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1238 } 1239 if (sc->msk_bustype == MSK_PEX_BUS) { 1240 uint16_t v, width; 1241 1242 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2); 1243 /* Change Max. Read Request Size to 4096 bytes. */ 1244 v &= ~PEX_DC_MAX_RRS_MSK; 1245 v |= PEX_DC_MAX_RD_RQ_SIZE(5); 1246 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2); 1247 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2); 1248 width = (width & PEX_LS_LINK_WI_MSK) >> 4; 1249 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2); 1250 v = (v & PEX_LS_LINK_WI_MSK) >> 4; 1251 if (v != width) { 1252 device_printf(sc->msk_dev, 1253 "negotiated width of link(x%d) != " 1254 "max. width of link(x%d)\n", width, v); 1255 } 1256 } 1257 1258 /* Clear status list. */ 1259 bzero(sc->msk_stat_ring, 1260 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT); 1261 sc->msk_stat_cons = 0; 1262 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 1263 BUS_DMASYNC_PREWRITE); 1264 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1265 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1266 /* Set the status list base address. */ 1267 addr = sc->msk_stat_ring_paddr; 1268 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1269 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1270 /* Set the status list last index. */ 1271 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); 1272 if (sc->msk_hw_id == CHIP_ID_YUKON_EC && 1273 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1274 /* WA for dev. #4.3 */ 1275 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1276 /* WA for dev. #4.18 */ 1277 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1278 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1279 } else { 1280 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1281 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1282 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1283 sc->msk_hw_rev == CHIP_REV_YU_XL_A0) 1284 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); 1285 else 1286 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); 1287 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1288 } 1289 /* 1290 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. 1291 */ 1292 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); 1293 1294 /* Enable status unit. */ 1295 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); 1296 1297 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); 1298 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); 1299 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); 1300 } 1301 1302 static int 1303 msk_probe(device_t dev) 1304 { 1305 struct msk_softc *sc = device_get_softc(device_get_parent(dev)); 1306 char desc[100]; 1307 1308 /* 1309 * Not much to do here. We always know there will be 1310 * at least one GMAC present, and if there are two, 1311 * mskc_attach() will create a second device instance 1312 * for us. 1313 */ 1314 ksnprintf(desc, sizeof(desc), 1315 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x", 1316 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, 1317 sc->msk_hw_rev); 1318 device_set_desc_copy(dev, desc); 1319 1320 return (0); 1321 } 1322 1323 static int 1324 msk_attach(device_t dev) 1325 { 1326 struct msk_softc *sc = device_get_softc(device_get_parent(dev)); 1327 struct msk_if_softc *sc_if = device_get_softc(dev); 1328 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1329 int i, port, error; 1330 uint8_t eaddr[ETHER_ADDR_LEN]; 1331 1332 port = *(int *)device_get_ivars(dev); 1333 KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B); 1334 1335 kfree(device_get_ivars(dev), M_DEVBUF); 1336 device_set_ivars(dev, NULL); 1337 1338 callout_init(&sc_if->msk_tick_ch); 1339 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1340 1341 sc_if->msk_if_dev = dev; 1342 sc_if->msk_port = port; 1343 sc_if->msk_softc = sc; 1344 sc_if->msk_ifp = ifp; 1345 sc->msk_if[port] = sc_if; 1346 1347 /* Setup Tx/Rx queue register offsets. */ 1348 if (port == MSK_PORT_A) { 1349 sc_if->msk_txq = Q_XA1; 1350 sc_if->msk_txsq = Q_XS1; 1351 sc_if->msk_rxq = Q_R1; 1352 } else { 1353 sc_if->msk_txq = Q_XA2; 1354 sc_if->msk_txsq = Q_XS2; 1355 sc_if->msk_rxq = Q_R2; 1356 } 1357 1358 error = msk_txrx_dma_alloc(sc_if); 1359 if (error) 1360 goto fail; 1361 1362 ifp->if_softc = sc_if; 1363 ifp->if_mtu = ETHERMTU; 1364 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1365 ifp->if_init = msk_init; 1366 ifp->if_ioctl = msk_ioctl; 1367 ifp->if_start = msk_start; 1368 ifp->if_watchdog = msk_watchdog; 1369 ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1370 ifq_set_ready(&ifp->if_snd); 1371 1372 #ifdef notyet 1373 /* 1374 * IFCAP_RXCSUM capability is intentionally disabled as the hardware 1375 * has serious bug in Rx checksum offload for all Yukon II family 1376 * hardware. It seems there is a workaround to make it work somtimes. 1377 * However, the workaround also have to check OP code sequences to 1378 * verify whether the OP code is correct. Sometimes it should compute 1379 * IP/TCP/UDP checksum in driver in order to verify correctness of 1380 * checksum computed by hardware. If you have to compute checksum 1381 * with software to verify the hardware's checksum why have hardware 1382 * compute the checksum? I think there is no reason to spend time to 1383 * make Rx checksum offload work on Yukon II hardware. 1384 */ 1385 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU | 1386 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 1387 ifp->if_hwassist = MSK_CSUM_FEATURES; 1388 ifp->if_capenable = ifp->if_capabilities; 1389 #endif 1390 1391 /* 1392 * Get station address for this interface. Note that 1393 * dual port cards actually come with three station 1394 * addresses: one for each port, plus an extra. The 1395 * extra one is used by the SysKonnect driver software 1396 * as a 'virtual' station address for when both ports 1397 * are operating in failover mode. Currently we don't 1398 * use this extra address. 1399 */ 1400 for (i = 0; i < ETHER_ADDR_LEN; i++) 1401 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i); 1402 1403 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 1404 1405 /* 1406 * Do miibus setup. 1407 */ 1408 error = mii_phy_probe(dev, &sc_if->msk_miibus, 1409 msk_mediachange, msk_mediastatus); 1410 if (error) { 1411 device_printf(sc_if->msk_if_dev, "no PHY found!\n"); 1412 goto fail; 1413 } 1414 1415 /* 1416 * Call MI attach routine. Can't hold locks when calling into ether_*. 1417 */ 1418 ether_ifattach(ifp, eaddr, &sc->msk_serializer); 1419 #if 0 1420 /* 1421 * Tell the upper layer(s) we support long frames. 1422 * Must appear after the call to ether_ifattach() because 1423 * ether_ifattach() sets ifi_hdrlen to the default value. 1424 */ 1425 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1426 #endif 1427 1428 return 0; 1429 fail: 1430 msk_detach(dev); 1431 sc->msk_if[port] = NULL; 1432 return (error); 1433 } 1434 1435 /* 1436 * Attach the interface. Allocate softc structures, do ifmedia 1437 * setup and ethernet/BPF attach. 1438 */ 1439 static int 1440 mskc_attach(device_t dev) 1441 { 1442 struct msk_softc *sc; 1443 int error, *port, cpuid; 1444 1445 sc = device_get_softc(dev); 1446 sc->msk_dev = dev; 1447 lwkt_serialize_init(&sc->msk_serializer); 1448 1449 /* 1450 * Initailize sysctl variables 1451 */ 1452 sc->msk_process_limit = mskc_process_limit; 1453 sc->msk_intr_rate = mskc_intr_rate; 1454 1455 #ifndef BURN_BRIDGES 1456 /* 1457 * Handle power management nonsense. 1458 */ 1459 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1460 uint32_t irq, bar0, bar1; 1461 1462 /* Save important PCI config data. */ 1463 bar0 = pci_read_config(dev, PCIR_BAR(0), 4); 1464 bar1 = pci_read_config(dev, PCIR_BAR(1), 4); 1465 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1466 1467 /* Reset the power state. */ 1468 device_printf(dev, "chip is in D%d power mode " 1469 "-- setting to D0\n", pci_get_powerstate(dev)); 1470 1471 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1472 1473 /* Restore PCI config data. */ 1474 pci_write_config(dev, PCIR_BAR(0), bar0, 4); 1475 pci_write_config(dev, PCIR_BAR(1), bar1, 4); 1476 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1477 } 1478 #endif /* BURN_BRIDGES */ 1479 1480 /* 1481 * Map control/status registers. 1482 */ 1483 pci_enable_busmaster(dev); 1484 1485 /* 1486 * Allocate I/O resource 1487 */ 1488 #ifdef MSK_USEIOSPACE 1489 sc->msk_res_type = SYS_RES_IOPORT; 1490 sc->msk_res_rid = PCIR_BAR(1); 1491 #else 1492 sc->msk_res_type = SYS_RES_MEMORY; 1493 sc->msk_res_rid = PCIR_BAR(0); 1494 #endif 1495 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type, 1496 &sc->msk_res_rid, RF_ACTIVE); 1497 if (sc->msk_res == NULL) { 1498 if (sc->msk_res_type == SYS_RES_MEMORY) { 1499 sc->msk_res_type = SYS_RES_IOPORT; 1500 sc->msk_res_rid = PCIR_BAR(1); 1501 } else { 1502 sc->msk_res_type = SYS_RES_MEMORY; 1503 sc->msk_res_rid = PCIR_BAR(0); 1504 } 1505 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type, 1506 &sc->msk_res_rid, 1507 RF_ACTIVE); 1508 if (sc->msk_res == NULL) { 1509 device_printf(dev, "couldn't allocate %s resources\n", 1510 sc->msk_res_type == SYS_RES_MEMORY ? "memory" : "I/O"); 1511 return (ENXIO); 1512 } 1513 } 1514 sc->msk_res_bt = rman_get_bustag(sc->msk_res); 1515 sc->msk_res_bh = rman_get_bushandle(sc->msk_res); 1516 1517 /* 1518 * Allocate IRQ 1519 */ 1520 sc->msk_irq_rid = 0; 1521 sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1522 &sc->msk_irq_rid, 1523 RF_SHAREABLE | RF_ACTIVE); 1524 if (sc->msk_irq == NULL) { 1525 device_printf(dev, "couldn't allocate IRQ resources\n"); 1526 error = ENXIO; 1527 goto fail; 1528 } 1529 1530 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1531 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1532 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1533 /* Bail out if chip is not recognized. */ 1534 if (sc->msk_hw_id < CHIP_ID_YUKON_XL || 1535 sc->msk_hw_id > CHIP_ID_YUKON_FE) { 1536 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", 1537 sc->msk_hw_id, sc->msk_hw_rev); 1538 error = ENXIO; 1539 goto fail; 1540 } 1541 1542 /* 1543 * Create sysctl tree 1544 */ 1545 sysctl_ctx_init(&sc->msk_sysctl_ctx); 1546 sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx, 1547 SYSCTL_STATIC_CHILDREN(_hw), 1548 OID_AUTO, 1549 device_get_nameunit(dev), 1550 CTLFLAG_RD, 0, ""); 1551 if (sc->msk_sysctl_tree == NULL) { 1552 device_printf(dev, "can't add sysctl node\n"); 1553 error = ENXIO; 1554 goto fail; 1555 } 1556 1557 SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx, 1558 SYSCTL_CHILDREN(sc->msk_sysctl_tree), 1559 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 1560 &sc->msk_process_limit, 0, mskc_sysctl_proc_limit, 1561 "I", "max number of Rx events to process"); 1562 SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx, 1563 SYSCTL_CHILDREN(sc->msk_sysctl_tree), 1564 OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW, 1565 sc, 0, mskc_sysctl_intr_rate, 1566 "I", "max number of interrupt per second"); 1567 1568 /* Soft reset. */ 1569 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1570 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1571 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); 1572 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') 1573 sc->msk_coppertype = 0; 1574 else 1575 sc->msk_coppertype = 1; 1576 /* Check number of MACs. */ 1577 sc->msk_num_port = 1; 1578 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == 1579 CFG_DUAL_MAC_MSK) { 1580 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 1581 sc->msk_num_port++; 1582 } 1583 1584 /* Check bus type. */ 1585 if (pci_is_pcie(sc->msk_dev) == 0) 1586 sc->msk_bustype = MSK_PEX_BUS; 1587 else if (pci_is_pcix(sc->msk_dev) == 0) 1588 sc->msk_bustype = MSK_PCIX_BUS; 1589 else 1590 sc->msk_bustype = MSK_PCI_BUS; 1591 1592 switch (sc->msk_hw_id) { 1593 case CHIP_ID_YUKON_EC: 1594 case CHIP_ID_YUKON_EC_U: 1595 sc->msk_clock = 125; /* 125 Mhz */ 1596 break; 1597 case CHIP_ID_YUKON_FE: 1598 sc->msk_clock = 100; /* 100 Mhz */ 1599 break; 1600 case CHIP_ID_YUKON_XL: 1601 sc->msk_clock = 156; /* 156 Mhz */ 1602 break; 1603 default: 1604 sc->msk_clock = 156; /* 156 Mhz */ 1605 break; 1606 } 1607 1608 error = mskc_status_dma_alloc(sc); 1609 if (error) 1610 goto fail; 1611 1612 /* Set base interrupt mask. */ 1613 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; 1614 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | 1615 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; 1616 1617 /* Reset the adapter. */ 1618 mskc_reset(sc); 1619 1620 error = mskc_setup_rambuffer(sc); 1621 if (error) 1622 goto fail; 1623 1624 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); 1625 if (sc->msk_devs[MSK_PORT_A] == NULL) { 1626 device_printf(dev, "failed to add child for PORT_A\n"); 1627 error = ENXIO; 1628 goto fail; 1629 } 1630 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK); 1631 *port = MSK_PORT_A; 1632 device_set_ivars(sc->msk_devs[MSK_PORT_A], port); 1633 1634 if (sc->msk_num_port > 1) { 1635 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); 1636 if (sc->msk_devs[MSK_PORT_B] == NULL) { 1637 device_printf(dev, "failed to add child for PORT_B\n"); 1638 error = ENXIO; 1639 goto fail; 1640 } 1641 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK); 1642 *port = MSK_PORT_B; 1643 device_set_ivars(sc->msk_devs[MSK_PORT_B], port); 1644 } 1645 1646 bus_generic_attach(dev); 1647 1648 error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE, 1649 mskc_intr, sc, &sc->msk_intrhand, 1650 &sc->msk_serializer); 1651 if (error) { 1652 device_printf(dev, "couldn't set up interrupt handler\n"); 1653 goto fail; 1654 } 1655 1656 cpuid = ithread_cpuid(rman_get_start(sc->msk_irq)); 1657 KKASSERT(cpuid >= 0 && cpuid < ncpus); 1658 1659 if (sc->msk_if[0] != NULL) 1660 sc->msk_if[0]->msk_ifp->if_cpuid = cpuid; 1661 if (sc->msk_if[1] != NULL) 1662 sc->msk_if[1]->msk_ifp->if_cpuid = cpuid; 1663 return 0; 1664 fail: 1665 mskc_detach(dev); 1666 return (error); 1667 } 1668 1669 /* 1670 * Shutdown hardware and free up resources. This can be called any 1671 * time after the mutex has been initialized. It is called in both 1672 * the error case in attach and the normal detach case so it needs 1673 * to be careful about only freeing resources that have actually been 1674 * allocated. 1675 */ 1676 static int 1677 msk_detach(device_t dev) 1678 { 1679 struct msk_if_softc *sc_if = device_get_softc(dev); 1680 1681 if (device_is_attached(dev)) { 1682 struct msk_softc *sc = sc_if->msk_softc; 1683 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1684 1685 lwkt_serialize_enter(ifp->if_serializer); 1686 1687 if (sc->msk_intrhand != NULL) { 1688 if (sc->msk_if[MSK_PORT_A] != NULL) 1689 msk_stop(sc->msk_if[MSK_PORT_A]); 1690 if (sc->msk_if[MSK_PORT_B] != NULL) 1691 msk_stop(sc->msk_if[MSK_PORT_B]); 1692 1693 bus_teardown_intr(sc->msk_dev, sc->msk_irq, 1694 sc->msk_intrhand); 1695 sc->msk_intrhand = NULL; 1696 } 1697 1698 lwkt_serialize_exit(ifp->if_serializer); 1699 1700 ether_ifdetach(ifp); 1701 } 1702 1703 if (sc_if->msk_miibus != NULL) 1704 device_delete_child(dev, sc_if->msk_miibus); 1705 1706 msk_txrx_dma_free(sc_if); 1707 return (0); 1708 } 1709 1710 static int 1711 mskc_detach(device_t dev) 1712 { 1713 struct msk_softc *sc = device_get_softc(dev); 1714 int *port, i; 1715 1716 #ifdef INVARIANTS 1717 if (device_is_attached(dev)) { 1718 KASSERT(sc->msk_intrhand == NULL, 1719 ("intr is not torn down yet\n")); 1720 } 1721 #endif 1722 1723 for (i = 0; i < sc->msk_num_port; ++i) { 1724 if (sc->msk_devs[i] != NULL) { 1725 port = device_get_ivars(sc->msk_devs[i]); 1726 if (port != NULL) { 1727 kfree(port, M_DEVBUF); 1728 device_set_ivars(sc->msk_devs[i], NULL); 1729 } 1730 device_delete_child(dev, sc->msk_devs[i]); 1731 } 1732 } 1733 1734 /* Disable all interrupts. */ 1735 CSR_WRITE_4(sc, B0_IMSK, 0); 1736 CSR_READ_4(sc, B0_IMSK); 1737 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1738 CSR_READ_4(sc, B0_HWE_IMSK); 1739 1740 /* LED Off. */ 1741 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1742 1743 /* Put hardware reset. */ 1744 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1745 1746 mskc_status_dma_free(sc); 1747 1748 if (sc->msk_irq != NULL) { 1749 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid, 1750 sc->msk_irq); 1751 } 1752 if (sc->msk_res != NULL) { 1753 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid, 1754 sc->msk_res); 1755 } 1756 1757 if (sc->msk_sysctl_tree != NULL) 1758 sysctl_ctx_free(&sc->msk_sysctl_ctx); 1759 1760 return (0); 1761 } 1762 1763 static void 1764 msk_dmamap_mbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg, 1765 bus_size_t mapsz __unused, int error) 1766 { 1767 struct msk_dmamap_arg *ctx = arg; 1768 int i; 1769 1770 if (error) 1771 return; 1772 1773 if (ctx->nseg < nseg) { 1774 ctx->nseg = 0; 1775 return; 1776 } 1777 1778 ctx->nseg = nseg; 1779 for (i = 0; i < ctx->nseg; ++i) 1780 ctx->segs[i] = segs[i]; 1781 } 1782 1783 static void 1784 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1785 { 1786 struct msk_dmamap_arg *ctx = arg; 1787 int i; 1788 1789 if (error) 1790 return; 1791 1792 KKASSERT(nseg <= ctx->nseg); 1793 1794 ctx->nseg = nseg; 1795 for (i = 0; i < ctx->nseg; ++i) 1796 ctx->segs[i] = segs[i]; 1797 } 1798 1799 /* Create status DMA region. */ 1800 static int 1801 mskc_status_dma_alloc(struct msk_softc *sc) 1802 { 1803 struct msk_dmamap_arg ctx; 1804 bus_dma_segment_t seg; 1805 int error; 1806 1807 error = bus_dma_tag_create( 1808 NULL, /* XXX parent */ 1809 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1810 BUS_SPACE_MAXADDR, /* lowaddr */ 1811 BUS_SPACE_MAXADDR, /* highaddr */ 1812 NULL, NULL, /* filter, filterarg */ 1813 MSK_STAT_RING_SZ, /* maxsize */ 1814 1, /* nsegments */ 1815 MSK_STAT_RING_SZ, /* maxsegsize */ 1816 0, /* flags */ 1817 &sc->msk_stat_tag); 1818 if (error) { 1819 device_printf(sc->msk_dev, 1820 "failed to create status DMA tag\n"); 1821 return (error); 1822 } 1823 1824 /* Allocate DMA'able memory and load the DMA map for status ring. */ 1825 error = bus_dmamem_alloc(sc->msk_stat_tag, 1826 (void **)&sc->msk_stat_ring, 1827 BUS_DMA_WAITOK | BUS_DMA_ZERO, 1828 &sc->msk_stat_map); 1829 if (error) { 1830 device_printf(sc->msk_dev, 1831 "failed to allocate DMA'able memory for status ring\n"); 1832 bus_dma_tag_destroy(sc->msk_stat_tag); 1833 sc->msk_stat_tag = NULL; 1834 return (error); 1835 } 1836 1837 bzero(&ctx, sizeof(ctx)); 1838 ctx.nseg = 1; 1839 ctx.segs = &seg; 1840 error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map, 1841 sc->msk_stat_ring, MSK_STAT_RING_SZ, 1842 msk_dmamap_cb, &ctx, 0); 1843 if (error) { 1844 device_printf(sc->msk_dev, 1845 "failed to load DMA'able memory for status ring\n"); 1846 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1847 sc->msk_stat_map); 1848 bus_dma_tag_destroy(sc->msk_stat_tag); 1849 sc->msk_stat_tag = NULL; 1850 return (error); 1851 } 1852 sc->msk_stat_ring_paddr = seg.ds_addr; 1853 1854 return (0); 1855 } 1856 1857 static void 1858 mskc_status_dma_free(struct msk_softc *sc) 1859 { 1860 /* Destroy status block. */ 1861 if (sc->msk_stat_tag) { 1862 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1863 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1864 sc->msk_stat_map); 1865 bus_dma_tag_destroy(sc->msk_stat_tag); 1866 sc->msk_stat_tag = NULL; 1867 } 1868 } 1869 1870 static int 1871 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1872 { 1873 int error, i, j; 1874 #ifdef MSK_JUMBO 1875 struct msk_rxdesc *jrxd; 1876 struct msk_jpool_entry *entry; 1877 uint8_t *ptr; 1878 #endif 1879 1880 /* Create parent DMA tag. */ 1881 /* 1882 * XXX 1883 * It seems that Yukon II supports full 64bits DMA operations. But 1884 * it needs two descriptors(list elements) for 64bits DMA operations. 1885 * Since we don't know what DMA address mappings(32bits or 64bits) 1886 * would be used in advance for each mbufs, we limits its DMA space 1887 * to be in range of 32bits address space. Otherwise, we should check 1888 * what DMA address is used and chain another descriptor for the 1889 * 64bits DMA operation. This also means descriptor ring size is 1890 * variable. Limiting DMA address to be in 32bit address space greatly 1891 * simplyfies descriptor handling and possibly would increase 1892 * performance a bit due to efficient handling of descriptors. 1893 * Apart from harassing checksum offloading mechanisms, it seems 1894 * it's really bad idea to use a seperate descriptor for 64bit 1895 * DMA operation to save small descriptor memory. Anyway, I've 1896 * never seen these exotic scheme on ethernet interface hardware. 1897 */ 1898 error = bus_dma_tag_create( 1899 NULL, /* parent */ 1900 1, 0, /* alignment, boundary */ 1901 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1902 BUS_SPACE_MAXADDR, /* highaddr */ 1903 NULL, NULL, /* filter, filterarg */ 1904 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1905 0, /* nsegments */ 1906 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1907 0, /* flags */ 1908 &sc_if->msk_cdata.msk_parent_tag); 1909 if (error) { 1910 device_printf(sc_if->msk_if_dev, 1911 "failed to create parent DMA tag\n"); 1912 return error; 1913 } 1914 1915 /* Create DMA stuffs for Tx ring. */ 1916 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ, 1917 &sc_if->msk_cdata.msk_tx_ring_tag, 1918 (void **)&sc_if->msk_rdata.msk_tx_ring, 1919 &sc_if->msk_rdata.msk_tx_ring_paddr, 1920 &sc_if->msk_cdata.msk_tx_ring_map); 1921 if (error) { 1922 device_printf(sc_if->msk_if_dev, 1923 "failed to create TX ring DMA stuffs\n"); 1924 return error; 1925 } 1926 1927 /* Create DMA stuffs for Rx ring. */ 1928 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ, 1929 &sc_if->msk_cdata.msk_rx_ring_tag, 1930 (void **)&sc_if->msk_rdata.msk_rx_ring, 1931 &sc_if->msk_rdata.msk_rx_ring_paddr, 1932 &sc_if->msk_cdata.msk_rx_ring_map); 1933 if (error) { 1934 device_printf(sc_if->msk_if_dev, 1935 "failed to create RX ring DMA stuffs\n"); 1936 return error; 1937 } 1938 1939 /* Create tag for Tx buffers. */ 1940 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1941 1, 0, /* alignment, boundary */ 1942 BUS_SPACE_MAXADDR, /* lowaddr */ 1943 BUS_SPACE_MAXADDR, /* highaddr */ 1944 NULL, NULL, /* filter, filterarg */ 1945 MSK_TSO_MAXSIZE, /* maxsize */ 1946 MSK_MAXTXSEGS, /* nsegments */ 1947 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 1948 0, /* flags */ 1949 &sc_if->msk_cdata.msk_tx_tag); 1950 if (error) { 1951 device_printf(sc_if->msk_if_dev, 1952 "failed to create Tx DMA tag\n"); 1953 return error; 1954 } 1955 1956 /* Create DMA maps for Tx buffers. */ 1957 for (i = 0; i < MSK_TX_RING_CNT; i++) { 1958 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i]; 1959 1960 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 1961 &txd->tx_dmamap); 1962 if (error) { 1963 device_printf(sc_if->msk_if_dev, 1964 "failed to create %dth Tx dmamap\n", i); 1965 1966 for (j = 0; j < i; ++j) { 1967 txd = &sc_if->msk_cdata.msk_txdesc[j]; 1968 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 1969 txd->tx_dmamap); 1970 } 1971 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 1972 sc_if->msk_cdata.msk_tx_tag = NULL; 1973 1974 return error; 1975 } 1976 } 1977 1978 /* Create tag for Rx buffers. */ 1979 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1980 1, 0, /* alignment, boundary */ 1981 BUS_SPACE_MAXADDR, /* lowaddr */ 1982 BUS_SPACE_MAXADDR, /* highaddr */ 1983 NULL, NULL, /* filter, filterarg */ 1984 MCLBYTES, /* maxsize */ 1985 1, /* nsegments */ 1986 MCLBYTES, /* maxsegsize */ 1987 0, /* flags */ 1988 &sc_if->msk_cdata.msk_rx_tag); 1989 if (error) { 1990 device_printf(sc_if->msk_if_dev, 1991 "failed to create Rx DMA tag\n"); 1992 return error; 1993 } 1994 1995 /* Create DMA maps for Rx buffers. */ 1996 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 1997 &sc_if->msk_cdata.msk_rx_sparemap); 1998 if (error) { 1999 device_printf(sc_if->msk_if_dev, 2000 "failed to create spare Rx dmamap\n"); 2001 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2002 sc_if->msk_cdata.msk_rx_tag = NULL; 2003 return error; 2004 } 2005 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2006 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2007 2008 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2009 &rxd->rx_dmamap); 2010 if (error) { 2011 device_printf(sc_if->msk_if_dev, 2012 "failed to create %dth Rx dmamap\n", i); 2013 2014 for (j = 0; j < i; ++j) { 2015 rxd = &sc_if->msk_cdata.msk_rxdesc[j]; 2016 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2017 rxd->rx_dmamap); 2018 } 2019 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2020 sc_if->msk_cdata.msk_rx_tag = NULL; 2021 2022 return error; 2023 } 2024 } 2025 2026 #ifdef MSK_JUMBO 2027 SLIST_INIT(&sc_if->msk_jfree_listhead); 2028 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2029 2030 /* Create tag for jumbo Rx ring. */ 2031 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2032 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2033 BUS_SPACE_MAXADDR, /* lowaddr */ 2034 BUS_SPACE_MAXADDR, /* highaddr */ 2035 NULL, NULL, /* filter, filterarg */ 2036 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2037 1, /* nsegments */ 2038 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2039 0, /* flags */ 2040 NULL, NULL, /* lockfunc, lockarg */ 2041 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2042 if (error != 0) { 2043 device_printf(sc_if->msk_if_dev, 2044 "failed to create jumbo Rx ring DMA tag\n"); 2045 goto fail; 2046 } 2047 2048 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2049 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2050 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2051 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2052 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2053 if (error != 0) { 2054 device_printf(sc_if->msk_if_dev, 2055 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2056 goto fail; 2057 } 2058 2059 ctx.msk_busaddr = 0; 2060 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2061 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2062 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2063 msk_dmamap_cb, &ctx, 0); 2064 if (error != 0) { 2065 device_printf(sc_if->msk_if_dev, 2066 "failed to load DMA'able memory for jumbo Rx ring\n"); 2067 goto fail; 2068 } 2069 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2070 2071 /* Create tag for jumbo buffer blocks. */ 2072 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2073 PAGE_SIZE, 0, /* alignment, boundary */ 2074 BUS_SPACE_MAXADDR, /* lowaddr */ 2075 BUS_SPACE_MAXADDR, /* highaddr */ 2076 NULL, NULL, /* filter, filterarg */ 2077 MSK_JMEM, /* maxsize */ 2078 1, /* nsegments */ 2079 MSK_JMEM, /* maxsegsize */ 2080 0, /* flags */ 2081 NULL, NULL, /* lockfunc, lockarg */ 2082 &sc_if->msk_cdata.msk_jumbo_tag); 2083 if (error != 0) { 2084 device_printf(sc_if->msk_if_dev, 2085 "failed to create jumbo Rx buffer block DMA tag\n"); 2086 goto fail; 2087 } 2088 2089 /* Create tag for jumbo Rx buffers. */ 2090 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2091 PAGE_SIZE, 0, /* alignment, boundary */ 2092 BUS_SPACE_MAXADDR, /* lowaddr */ 2093 BUS_SPACE_MAXADDR, /* highaddr */ 2094 NULL, NULL, /* filter, filterarg */ 2095 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2096 MSK_MAXRXSEGS, /* nsegments */ 2097 MSK_JLEN, /* maxsegsize */ 2098 0, /* flags */ 2099 NULL, NULL, /* lockfunc, lockarg */ 2100 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2101 if (error != 0) { 2102 device_printf(sc_if->msk_if_dev, 2103 "failed to create jumbo Rx DMA tag\n"); 2104 goto fail; 2105 } 2106 2107 /* Create DMA maps for jumbo Rx buffers. */ 2108 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2109 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2110 device_printf(sc_if->msk_if_dev, 2111 "failed to create spare jumbo Rx dmamap\n"); 2112 goto fail; 2113 } 2114 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2115 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2116 jrxd->rx_m = NULL; 2117 jrxd->rx_dmamap = NULL; 2118 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2119 &jrxd->rx_dmamap); 2120 if (error != 0) { 2121 device_printf(sc_if->msk_if_dev, 2122 "failed to create jumbo Rx dmamap\n"); 2123 goto fail; 2124 } 2125 } 2126 2127 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ 2128 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2129 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2130 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2131 &sc_if->msk_cdata.msk_jumbo_map); 2132 if (error != 0) { 2133 device_printf(sc_if->msk_if_dev, 2134 "failed to allocate DMA'able memory for jumbo buf\n"); 2135 goto fail; 2136 } 2137 2138 ctx.msk_busaddr = 0; 2139 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2140 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2141 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2142 if (error != 0) { 2143 device_printf(sc_if->msk_if_dev, 2144 "failed to load DMA'able memory for jumbobuf\n"); 2145 goto fail; 2146 } 2147 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2148 2149 /* 2150 * Now divide it up into 9K pieces and save the addresses 2151 * in an array. 2152 */ 2153 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2154 for (i = 0; i < MSK_JSLOTS; i++) { 2155 sc_if->msk_cdata.msk_jslots[i] = ptr; 2156 ptr += MSK_JLEN; 2157 entry = malloc(sizeof(struct msk_jpool_entry), 2158 M_DEVBUF, M_WAITOK); 2159 if (entry == NULL) { 2160 device_printf(sc_if->msk_if_dev, 2161 "no memory for jumbo buffers!\n"); 2162 error = ENOMEM; 2163 goto fail; 2164 } 2165 entry->slot = i; 2166 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2167 jpool_entries); 2168 } 2169 #endif 2170 return 0; 2171 } 2172 2173 static void 2174 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2175 { 2176 struct msk_txdesc *txd; 2177 struct msk_rxdesc *rxd; 2178 #ifdef MSK_JUMBO 2179 struct msk_rxdesc *jrxd; 2180 struct msk_jpool_entry *entry; 2181 #endif 2182 int i; 2183 2184 #ifdef MSK_JUMBO 2185 MSK_JLIST_LOCK(sc_if); 2186 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2187 device_printf(sc_if->msk_if_dev, 2188 "asked to free buffer that is in use!\n"); 2189 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2190 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2191 jpool_entries); 2192 } 2193 2194 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2195 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2196 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2197 free(entry, M_DEVBUF); 2198 } 2199 MSK_JLIST_UNLOCK(sc_if); 2200 2201 /* Destroy jumbo buffer block. */ 2202 if (sc_if->msk_cdata.msk_jumbo_map) 2203 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2204 sc_if->msk_cdata.msk_jumbo_map); 2205 2206 if (sc_if->msk_rdata.msk_jumbo_buf) { 2207 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2208 sc_if->msk_rdata.msk_jumbo_buf, 2209 sc_if->msk_cdata.msk_jumbo_map); 2210 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2211 sc_if->msk_cdata.msk_jumbo_map = NULL; 2212 } 2213 2214 /* Jumbo Rx ring. */ 2215 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2216 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2217 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2218 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2219 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2220 sc_if->msk_rdata.msk_jumbo_rx_ring) 2221 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2222 sc_if->msk_rdata.msk_jumbo_rx_ring, 2223 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2224 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2225 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2226 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2227 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2228 } 2229 2230 /* Jumbo Rx buffers. */ 2231 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2232 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2233 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2234 if (jrxd->rx_dmamap) { 2235 bus_dmamap_destroy( 2236 sc_if->msk_cdata.msk_jumbo_rx_tag, 2237 jrxd->rx_dmamap); 2238 jrxd->rx_dmamap = NULL; 2239 } 2240 } 2241 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2242 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2243 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2244 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2245 } 2246 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2247 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2248 } 2249 #endif 2250 2251 /* Tx ring. */ 2252 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag, 2253 sc_if->msk_rdata.msk_tx_ring, 2254 sc_if->msk_cdata.msk_tx_ring_map); 2255 2256 /* Rx ring. */ 2257 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag, 2258 sc_if->msk_rdata.msk_rx_ring, 2259 sc_if->msk_cdata.msk_rx_ring_map); 2260 2261 /* Tx buffers. */ 2262 if (sc_if->msk_cdata.msk_tx_tag) { 2263 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2264 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2265 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2266 txd->tx_dmamap); 2267 } 2268 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2269 sc_if->msk_cdata.msk_tx_tag = NULL; 2270 } 2271 2272 /* Rx buffers. */ 2273 if (sc_if->msk_cdata.msk_rx_tag) { 2274 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2275 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2276 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2277 rxd->rx_dmamap); 2278 } 2279 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2280 sc_if->msk_cdata.msk_rx_sparemap); 2281 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2282 sc_if->msk_cdata.msk_rx_tag = NULL; 2283 } 2284 2285 if (sc_if->msk_cdata.msk_parent_tag) { 2286 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2287 sc_if->msk_cdata.msk_parent_tag = NULL; 2288 } 2289 } 2290 2291 #ifdef MSK_JUMBO 2292 /* 2293 * Allocate a jumbo buffer. 2294 */ 2295 static void * 2296 msk_jalloc(struct msk_if_softc *sc_if) 2297 { 2298 struct msk_jpool_entry *entry; 2299 2300 MSK_JLIST_LOCK(sc_if); 2301 2302 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2303 2304 if (entry == NULL) { 2305 MSK_JLIST_UNLOCK(sc_if); 2306 return (NULL); 2307 } 2308 2309 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2310 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2311 2312 MSK_JLIST_UNLOCK(sc_if); 2313 2314 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2315 } 2316 2317 /* 2318 * Release a jumbo buffer. 2319 */ 2320 static void 2321 msk_jfree(void *buf, void *args) 2322 { 2323 struct msk_if_softc *sc_if; 2324 struct msk_jpool_entry *entry; 2325 int i; 2326 2327 /* Extract the softc struct pointer. */ 2328 sc_if = (struct msk_if_softc *)args; 2329 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2330 2331 MSK_JLIST_LOCK(sc_if); 2332 /* Calculate the slot this buffer belongs to. */ 2333 i = ((vm_offset_t)buf 2334 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN; 2335 KASSERT(i >= 0 && i < MSK_JSLOTS, 2336 ("%s: asked to free buffer that we don't manage!", __func__)); 2337 2338 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead); 2339 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2340 entry->slot = i; 2341 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2342 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries); 2343 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) 2344 wakeup(sc_if); 2345 2346 MSK_JLIST_UNLOCK(sc_if); 2347 } 2348 #endif 2349 2350 /* 2351 * It's copy of ath_defrag(ath(4)). 2352 * 2353 * Defragment an mbuf chain, returning at most maxfrags separate 2354 * mbufs+clusters. If this is not possible NULL is returned and 2355 * the original mbuf chain is left in it's present (potentially 2356 * modified) state. We use two techniques: collapsing consecutive 2357 * mbufs and replacing consecutive mbufs by a cluster. 2358 */ 2359 static struct mbuf * 2360 msk_defrag(struct mbuf *m0, int how, int maxfrags) 2361 { 2362 struct mbuf *m, *n, *n2, **prev; 2363 u_int curfrags; 2364 2365 /* 2366 * Calculate the current number of frags. 2367 */ 2368 curfrags = 0; 2369 for (m = m0; m != NULL; m = m->m_next) 2370 curfrags++; 2371 /* 2372 * First, try to collapse mbufs. Note that we always collapse 2373 * towards the front so we don't need to deal with moving the 2374 * pkthdr. This may be suboptimal if the first mbuf has much 2375 * less data than the following. 2376 */ 2377 m = m0; 2378 again: 2379 for (;;) { 2380 n = m->m_next; 2381 if (n == NULL) 2382 break; 2383 if (n->m_len < M_TRAILINGSPACE(m)) { 2384 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 2385 n->m_len); 2386 m->m_len += n->m_len; 2387 m->m_next = n->m_next; 2388 m_free(n); 2389 if (--curfrags <= maxfrags) 2390 return (m0); 2391 } else 2392 m = n; 2393 } 2394 KASSERT(maxfrags > 1, 2395 ("maxfrags %u, but normal collapse failed", maxfrags)); 2396 /* 2397 * Collapse consecutive mbufs to a cluster. 2398 */ 2399 prev = &m0->m_next; /* NB: not the first mbuf */ 2400 while ((n = *prev) != NULL) { 2401 if ((n2 = n->m_next) != NULL && 2402 n->m_len + n2->m_len < MCLBYTES) { 2403 m = m_getcl(how, MT_DATA, 0); 2404 if (m == NULL) 2405 goto bad; 2406 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 2407 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 2408 n2->m_len); 2409 m->m_len = n->m_len + n2->m_len; 2410 m->m_next = n2->m_next; 2411 *prev = m; 2412 m_free(n); 2413 m_free(n2); 2414 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 2415 return m0; 2416 /* 2417 * Still not there, try the normal collapse 2418 * again before we allocate another cluster. 2419 */ 2420 goto again; 2421 } 2422 prev = &n->m_next; 2423 } 2424 /* 2425 * No place where we can collapse to a cluster; punt. 2426 * This can occur if, for example, you request 2 frags 2427 * but the packet requires that both be clusters (we 2428 * never reallocate the first mbuf to avoid moving the 2429 * packet header). 2430 */ 2431 bad: 2432 return (NULL); 2433 } 2434 2435 static int 2436 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2437 { 2438 struct msk_txdesc *txd, *txd_last; 2439 struct msk_tx_desc *tx_le; 2440 struct mbuf *m; 2441 bus_dmamap_t map; 2442 struct msk_dmamap_arg ctx; 2443 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2444 uint32_t control, prod, si; 2445 uint16_t offset, tcp_offset; 2446 int error, i; 2447 2448 tcp_offset = offset = 0; 2449 m = *m_head; 2450 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) { 2451 /* 2452 * Since mbuf has no protocol specific structure information 2453 * in it we have to inspect protocol information here to 2454 * setup TSO and checksum offload. I don't know why Marvell 2455 * made a such decision in chip design because other GigE 2456 * hardwares normally takes care of all these chores in 2457 * hardware. However, TSO performance of Yukon II is very 2458 * good such that it's worth to implement it. 2459 */ 2460 struct ether_header *eh; 2461 struct ip *ip; 2462 2463 /* TODO check for M_WRITABLE(m) */ 2464 2465 offset = sizeof(struct ether_header); 2466 m = m_pullup(m, offset); 2467 if (m == NULL) { 2468 *m_head = NULL; 2469 return (ENOBUFS); 2470 } 2471 eh = mtod(m, struct ether_header *); 2472 /* Check if hardware VLAN insertion is off. */ 2473 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2474 offset = sizeof(struct ether_vlan_header); 2475 m = m_pullup(m, offset); 2476 if (m == NULL) { 2477 *m_head = NULL; 2478 return (ENOBUFS); 2479 } 2480 } 2481 m = m_pullup(m, offset + sizeof(struct ip)); 2482 if (m == NULL) { 2483 *m_head = NULL; 2484 return (ENOBUFS); 2485 } 2486 ip = (struct ip *)(mtod(m, char *) + offset); 2487 offset += (ip->ip_hl << 2); 2488 tcp_offset = offset; 2489 /* 2490 * It seems that Yukon II has Tx checksum offload bug for 2491 * small TCP packets that's less than 60 bytes in size 2492 * (e.g. TCP window probe packet, pure ACK packet). 2493 * Common work around like padding with zeros to make the 2494 * frame minimum ethernet frame size didn't work at all. 2495 * Instead of disabling checksum offload completely we 2496 * resort to S/W checksum routine when we encounter short 2497 * TCP frames. 2498 * Short UDP packets appear to be handled correctly by 2499 * Yukon II. 2500 */ 2501 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN && 2502 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2503 uint16_t csum; 2504 2505 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset - 2506 (ip->ip_hl << 2), offset); 2507 *(uint16_t *)(m->m_data + offset + 2508 m->m_pkthdr.csum_data) = csum; 2509 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2510 } 2511 *m_head = m; 2512 } 2513 2514 prod = sc_if->msk_cdata.msk_tx_prod; 2515 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2516 txd_last = txd; 2517 map = txd->tx_dmamap; 2518 bzero(&ctx, sizeof(ctx)); 2519 ctx.nseg = MSK_MAXTXSEGS; 2520 ctx.segs = txsegs; 2521 error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag, map, 2522 *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT); 2523 if (error == 0 && ctx.nseg == 0) { 2524 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2525 error = EFBIG; 2526 } 2527 if (error == EFBIG) { 2528 m = msk_defrag(*m_head, MB_DONTWAIT, MSK_MAXTXSEGS); 2529 if (m == NULL) { 2530 m_freem(*m_head); 2531 *m_head = NULL; 2532 return (ENOBUFS); 2533 } 2534 *m_head = m; 2535 2536 bzero(&ctx, sizeof(ctx)); 2537 ctx.nseg = MSK_MAXTXSEGS; 2538 ctx.segs = txsegs; 2539 error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag, 2540 map, *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT); 2541 if (error == 0 && ctx.nseg == 0) { 2542 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2543 error = EFBIG; 2544 } 2545 if (error != 0) { 2546 m_freem(*m_head); 2547 *m_head = NULL; 2548 return (error); 2549 } 2550 } else if (error != 0) { 2551 return (error); 2552 } 2553 2554 /* Check number of available descriptors. */ 2555 if (sc_if->msk_cdata.msk_tx_cnt + ctx.nseg >= 2556 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2557 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2558 return (ENOBUFS); 2559 } 2560 2561 control = 0; 2562 tx_le = NULL; 2563 2564 #ifdef notyet 2565 /* Check if we have a VLAN tag to insert. */ 2566 if ((m->m_flags & M_VLANTAG) != 0) { 2567 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2568 tx_le->msk_addr = htole32(0); 2569 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2570 htons(m->m_pkthdr.ether_vtag)); 2571 sc_if->msk_cdata.msk_tx_cnt++; 2572 MSK_INC(prod, MSK_TX_RING_CNT); 2573 control |= INS_VLAN; 2574 } 2575 #endif 2576 /* Check if we have to handle checksum offload. */ 2577 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) { 2578 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2579 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) 2580 & 0xffff) | ((uint32_t)tcp_offset << 16)); 2581 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); 2582 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2583 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2584 control |= UDPTCP; 2585 sc_if->msk_cdata.msk_tx_cnt++; 2586 MSK_INC(prod, MSK_TX_RING_CNT); 2587 } 2588 2589 si = prod; 2590 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2591 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2592 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2593 OP_PACKET); 2594 sc_if->msk_cdata.msk_tx_cnt++; 2595 MSK_INC(prod, MSK_TX_RING_CNT); 2596 2597 for (i = 1; i < ctx.nseg; i++) { 2598 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2599 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2600 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2601 OP_BUFFER | HW_OWNER); 2602 sc_if->msk_cdata.msk_tx_cnt++; 2603 MSK_INC(prod, MSK_TX_RING_CNT); 2604 } 2605 /* Update producer index. */ 2606 sc_if->msk_cdata.msk_tx_prod = prod; 2607 2608 /* Set EOP on the last desciptor. */ 2609 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2610 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2611 tx_le->msk_control |= htole32(EOP); 2612 2613 /* Turn the first descriptor ownership to hardware. */ 2614 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2615 tx_le->msk_control |= htole32(HW_OWNER); 2616 2617 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2618 map = txd_last->tx_dmamap; 2619 txd_last->tx_dmamap = txd->tx_dmamap; 2620 txd->tx_dmamap = map; 2621 txd->tx_m = m; 2622 2623 /* Sync descriptors. */ 2624 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2625 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2626 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE); 2627 2628 return (0); 2629 } 2630 2631 static void 2632 msk_start(struct ifnet *ifp) 2633 { 2634 struct msk_if_softc *sc_if; 2635 struct mbuf *m_head; 2636 int enq; 2637 2638 sc_if = ifp->if_softc; 2639 2640 ASSERT_SERIALIZED(ifp->if_serializer); 2641 2642 if (!sc_if->msk_link) { 2643 ifq_purge(&ifp->if_snd); 2644 return; 2645 } 2646 2647 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2648 return; 2649 2650 for (enq = 0; !ifq_is_empty(&ifp->if_snd) && 2651 sc_if->msk_cdata.msk_tx_cnt < 2652 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2653 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2654 if (m_head == NULL) 2655 break; 2656 2657 /* 2658 * Pack the data into the transmit ring. If we 2659 * don't have room, set the OACTIVE flag and wait 2660 * for the NIC to drain the ring. 2661 */ 2662 if (msk_encap(sc_if, &m_head) != 0) { 2663 if (m_head == NULL) 2664 break; 2665 m_freem(m_head); 2666 ifp->if_flags |= IFF_OACTIVE; 2667 break; 2668 } 2669 2670 enq++; 2671 /* 2672 * If there's a BPF listener, bounce a copy of this frame 2673 * to him. 2674 */ 2675 BPF_MTAP(ifp, m_head); 2676 } 2677 2678 if (enq > 0) { 2679 /* Transmit */ 2680 CSR_WRITE_2(sc_if->msk_softc, 2681 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2682 sc_if->msk_cdata.msk_tx_prod); 2683 2684 /* Set a timeout in case the chip goes out to lunch. */ 2685 ifp->if_timer = MSK_TX_TIMEOUT; 2686 } 2687 } 2688 2689 static void 2690 msk_watchdog(struct ifnet *ifp) 2691 { 2692 struct msk_if_softc *sc_if = ifp->if_softc; 2693 uint32_t ridx; 2694 int idx; 2695 2696 ASSERT_SERIALIZED(ifp->if_serializer); 2697 2698 if (sc_if->msk_link == 0) { 2699 if (bootverbose) 2700 if_printf(sc_if->msk_ifp, "watchdog timeout " 2701 "(missed link)\n"); 2702 ifp->if_oerrors++; 2703 msk_init(sc_if); 2704 return; 2705 } 2706 2707 /* 2708 * Reclaim first as there is a possibility of losing Tx completion 2709 * interrupts. 2710 */ 2711 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2712 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2713 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2714 msk_txeof(sc_if, idx); 2715 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2716 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2717 "-- recovering\n"); 2718 if (!ifq_is_empty(&ifp->if_snd)) 2719 if_devstart(ifp); 2720 return; 2721 } 2722 } 2723 2724 if_printf(ifp, "watchdog timeout\n"); 2725 ifp->if_oerrors++; 2726 msk_init(sc_if); 2727 if (!ifq_is_empty(&ifp->if_snd)) 2728 if_devstart(ifp); 2729 } 2730 2731 static int 2732 mskc_shutdown(device_t dev) 2733 { 2734 struct msk_softc *sc = device_get_softc(dev); 2735 int i; 2736 2737 lwkt_serialize_enter(&sc->msk_serializer); 2738 2739 for (i = 0; i < sc->msk_num_port; i++) { 2740 if (sc->msk_if[i] != NULL) 2741 msk_stop(sc->msk_if[i]); 2742 } 2743 2744 /* Disable all interrupts. */ 2745 CSR_WRITE_4(sc, B0_IMSK, 0); 2746 CSR_READ_4(sc, B0_IMSK); 2747 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2748 CSR_READ_4(sc, B0_HWE_IMSK); 2749 2750 /* Put hardware reset. */ 2751 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2752 2753 lwkt_serialize_exit(&sc->msk_serializer); 2754 return (0); 2755 } 2756 2757 static int 2758 mskc_suspend(device_t dev) 2759 { 2760 struct msk_softc *sc = device_get_softc(dev); 2761 int i; 2762 2763 lwkt_serialize_enter(&sc->msk_serializer); 2764 2765 for (i = 0; i < sc->msk_num_port; i++) { 2766 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2767 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0)) 2768 msk_stop(sc->msk_if[i]); 2769 } 2770 2771 /* Disable all interrupts. */ 2772 CSR_WRITE_4(sc, B0_IMSK, 0); 2773 CSR_READ_4(sc, B0_IMSK); 2774 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2775 CSR_READ_4(sc, B0_HWE_IMSK); 2776 2777 mskc_phy_power(sc, MSK_PHY_POWERDOWN); 2778 2779 /* Put hardware reset. */ 2780 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2781 sc->msk_suspended = 1; 2782 2783 lwkt_serialize_exit(&sc->msk_serializer); 2784 2785 return (0); 2786 } 2787 2788 static int 2789 mskc_resume(device_t dev) 2790 { 2791 struct msk_softc *sc = device_get_softc(dev); 2792 int i; 2793 2794 lwkt_serialize_enter(&sc->msk_serializer); 2795 2796 mskc_reset(sc); 2797 for (i = 0; i < sc->msk_num_port; i++) { 2798 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2799 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2800 msk_init(sc->msk_if[i]); 2801 } 2802 sc->msk_suspended = 0; 2803 2804 lwkt_serialize_exit(&sc->msk_serializer); 2805 2806 return (0); 2807 } 2808 2809 static void 2810 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len, 2811 struct mbuf_chain *chain) 2812 { 2813 struct mbuf *m; 2814 struct ifnet *ifp; 2815 struct msk_rxdesc *rxd; 2816 int cons, rxlen; 2817 2818 ifp = sc_if->msk_ifp; 2819 2820 cons = sc_if->msk_cdata.msk_rx_cons; 2821 do { 2822 rxlen = status >> 16; 2823 if ((status & GMR_FS_VLAN) != 0 && 2824 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2825 rxlen -= EVL_ENCAPLEN; 2826 if (len > sc_if->msk_framesize || 2827 ((status & GMR_FS_ANY_ERR) != 0) || 2828 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2829 /* Don't count flow-control packet as errors. */ 2830 if ((status & GMR_FS_GOOD_FC) == 0) 2831 ifp->if_ierrors++; 2832 msk_discard_rxbuf(sc_if, cons); 2833 break; 2834 } 2835 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2836 m = rxd->rx_m; 2837 if (msk_newbuf(sc_if, cons) != 0) { 2838 ifp->if_iqdrops++; 2839 /* Reuse old buffer. */ 2840 msk_discard_rxbuf(sc_if, cons); 2841 break; 2842 } 2843 m->m_pkthdr.rcvif = ifp; 2844 m->m_pkthdr.len = m->m_len = len; 2845 ifp->if_ipackets++; 2846 #ifdef notyet 2847 /* Check for VLAN tagged packets. */ 2848 if ((status & GMR_FS_VLAN) != 0 && 2849 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2850 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2851 m->m_flags |= M_VLANTAG; 2852 } 2853 #endif 2854 2855 ether_input_chain(ifp, m, chain); 2856 } while (0); 2857 2858 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2859 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2860 } 2861 2862 #ifdef MSK_JUMBO 2863 static void 2864 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2865 { 2866 struct mbuf *m; 2867 struct ifnet *ifp; 2868 struct msk_rxdesc *jrxd; 2869 int cons, rxlen; 2870 2871 ifp = sc_if->msk_ifp; 2872 2873 MSK_IF_LOCK_ASSERT(sc_if); 2874 2875 cons = sc_if->msk_cdata.msk_rx_cons; 2876 do { 2877 rxlen = status >> 16; 2878 if ((status & GMR_FS_VLAN) != 0 && 2879 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2880 rxlen -= ETHER_VLAN_ENCAP_LEN; 2881 if (len > sc_if->msk_framesize || 2882 ((status & GMR_FS_ANY_ERR) != 0) || 2883 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2884 /* Don't count flow-control packet as errors. */ 2885 if ((status & GMR_FS_GOOD_FC) == 0) 2886 ifp->if_ierrors++; 2887 msk_discard_jumbo_rxbuf(sc_if, cons); 2888 break; 2889 } 2890 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2891 m = jrxd->rx_m; 2892 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2893 ifp->if_iqdrops++; 2894 /* Reuse old buffer. */ 2895 msk_discard_jumbo_rxbuf(sc_if, cons); 2896 break; 2897 } 2898 m->m_pkthdr.rcvif = ifp; 2899 m->m_pkthdr.len = m->m_len = len; 2900 ifp->if_ipackets++; 2901 /* Check for VLAN tagged packets. */ 2902 if ((status & GMR_FS_VLAN) != 0 && 2903 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2904 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2905 m->m_flags |= M_VLANTAG; 2906 } 2907 MSK_IF_UNLOCK(sc_if); 2908 (*ifp->if_input)(ifp, m); 2909 MSK_IF_LOCK(sc_if); 2910 } while (0); 2911 2912 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2913 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2914 } 2915 #endif 2916 2917 static void 2918 msk_txeof(struct msk_if_softc *sc_if, int idx) 2919 { 2920 struct msk_txdesc *txd; 2921 struct msk_tx_desc *cur_tx; 2922 struct ifnet *ifp; 2923 uint32_t control; 2924 int cons, prog; 2925 2926 ifp = sc_if->msk_ifp; 2927 2928 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2929 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_POSTREAD); 2930 2931 /* 2932 * Go through our tx ring and free mbufs for those 2933 * frames that have been sent. 2934 */ 2935 cons = sc_if->msk_cdata.msk_tx_cons; 2936 prog = 0; 2937 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2938 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2939 break; 2940 prog++; 2941 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2942 control = le32toh(cur_tx->msk_control); 2943 sc_if->msk_cdata.msk_tx_cnt--; 2944 ifp->if_flags &= ~IFF_OACTIVE; 2945 if ((control & EOP) == 0) 2946 continue; 2947 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2948 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 2949 BUS_DMASYNC_POSTWRITE); 2950 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2951 2952 ifp->if_opackets++; 2953 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2954 __func__)); 2955 m_freem(txd->tx_m); 2956 txd->tx_m = NULL; 2957 } 2958 2959 if (prog > 0) { 2960 sc_if->msk_cdata.msk_tx_cons = cons; 2961 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2962 ifp->if_timer = 0; 2963 /* No need to sync LEs as we didn't update LEs. */ 2964 } 2965 } 2966 2967 static void 2968 msk_tick(void *xsc_if) 2969 { 2970 struct msk_if_softc *sc_if = xsc_if; 2971 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2972 struct mii_data *mii; 2973 2974 lwkt_serialize_enter(ifp->if_serializer); 2975 2976 mii = device_get_softc(sc_if->msk_miibus); 2977 2978 mii_tick(mii); 2979 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 2980 2981 lwkt_serialize_exit(ifp->if_serializer); 2982 } 2983 2984 static void 2985 msk_intr_phy(struct msk_if_softc *sc_if) 2986 { 2987 uint16_t status; 2988 2989 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2990 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2991 /* Handle FIFO Underrun/Overflow? */ 2992 if (status & PHY_M_IS_FIFO_ERROR) { 2993 device_printf(sc_if->msk_if_dev, 2994 "PHY FIFO underrun/overflow.\n"); 2995 } 2996 } 2997 2998 static void 2999 msk_intr_gmac(struct msk_if_softc *sc_if) 3000 { 3001 struct msk_softc *sc; 3002 uint8_t status; 3003 3004 sc = sc_if->msk_softc; 3005 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3006 3007 /* GMAC Rx FIFO overrun. */ 3008 if ((status & GM_IS_RX_FF_OR) != 0) { 3009 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3010 GMF_CLI_RX_FO); 3011 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 3012 } 3013 /* GMAC Tx FIFO underrun. */ 3014 if ((status & GM_IS_TX_FF_UR) != 0) { 3015 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3016 GMF_CLI_TX_FU); 3017 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 3018 /* 3019 * XXX 3020 * In case of Tx underrun, we may need to flush/reset 3021 * Tx MAC but that would also require resynchronization 3022 * with status LEs. Reintializing status LEs would 3023 * affect other port in dual MAC configuration so it 3024 * should be avoided as possible as we can. 3025 * Due to lack of documentation it's all vague guess but 3026 * it needs more investigation. 3027 */ 3028 } 3029 } 3030 3031 static void 3032 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3033 { 3034 struct msk_softc *sc; 3035 3036 sc = sc_if->msk_softc; 3037 if ((status & Y2_IS_PAR_RD1) != 0) { 3038 device_printf(sc_if->msk_if_dev, 3039 "RAM buffer read parity error\n"); 3040 /* Clear IRQ. */ 3041 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3042 RI_CLR_RD_PERR); 3043 } 3044 if ((status & Y2_IS_PAR_WR1) != 0) { 3045 device_printf(sc_if->msk_if_dev, 3046 "RAM buffer write parity error\n"); 3047 /* Clear IRQ. */ 3048 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3049 RI_CLR_WR_PERR); 3050 } 3051 if ((status & Y2_IS_PAR_MAC1) != 0) { 3052 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3053 /* Clear IRQ. */ 3054 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3055 GMF_CLI_TX_PE); 3056 } 3057 if ((status & Y2_IS_PAR_RX1) != 0) { 3058 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3059 /* Clear IRQ. */ 3060 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3061 } 3062 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3063 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3064 /* Clear IRQ. */ 3065 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3066 } 3067 } 3068 3069 static void 3070 mskc_intr_hwerr(struct msk_softc *sc) 3071 { 3072 uint32_t status; 3073 uint32_t tlphead[4]; 3074 3075 status = CSR_READ_4(sc, B0_HWE_ISRC); 3076 /* Time Stamp timer overflow. */ 3077 if ((status & Y2_IS_TIST_OV) != 0) 3078 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3079 if ((status & Y2_IS_PCI_NEXP) != 0) { 3080 /* 3081 * PCI Express Error occured which is not described in PEX 3082 * spec. 3083 * This error is also mapped either to Master Abort( 3084 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3085 * can only be cleared there. 3086 */ 3087 device_printf(sc->msk_dev, 3088 "PCI Express protocol violation error\n"); 3089 } 3090 3091 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3092 uint16_t v16; 3093 3094 if ((status & Y2_IS_MST_ERR) != 0) 3095 device_printf(sc->msk_dev, 3096 "unexpected IRQ Status error\n"); 3097 else 3098 device_printf(sc->msk_dev, 3099 "unexpected IRQ Master error\n"); 3100 /* Reset all bits in the PCI status register. */ 3101 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3102 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3103 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3104 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3105 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 3106 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3107 } 3108 3109 /* Check for PCI Express Uncorrectable Error. */ 3110 if ((status & Y2_IS_PCI_EXP) != 0) { 3111 uint32_t v32; 3112 3113 /* 3114 * On PCI Express bus bridges are called root complexes (RC). 3115 * PCI Express errors are recognized by the root complex too, 3116 * which requests the system to handle the problem. After 3117 * error occurence it may be that no access to the adapter 3118 * may be performed any longer. 3119 */ 3120 3121 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3122 if ((v32 & PEX_UNSUP_REQ) != 0) { 3123 /* Ignore unsupported request error. */ 3124 if (bootverbose) { 3125 device_printf(sc->msk_dev, 3126 "Uncorrectable PCI Express error\n"); 3127 } 3128 } 3129 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3130 int i; 3131 3132 /* Get TLP header form Log Registers. */ 3133 for (i = 0; i < 4; i++) 3134 tlphead[i] = CSR_PCI_READ_4(sc, 3135 PEX_HEADER_LOG + i * 4); 3136 /* Check for vendor defined broadcast message. */ 3137 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3138 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3139 CSR_WRITE_4(sc, B0_HWE_IMSK, 3140 sc->msk_intrhwemask); 3141 CSR_READ_4(sc, B0_HWE_IMSK); 3142 } 3143 } 3144 /* Clear the interrupt. */ 3145 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3146 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3147 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3148 } 3149 3150 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3151 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3152 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3153 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3154 } 3155 3156 static __inline void 3157 msk_rxput(struct msk_if_softc *sc_if) 3158 { 3159 struct msk_softc *sc; 3160 3161 sc = sc_if->msk_softc; 3162 #ifdef MSK_JUMBO 3163 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3164 bus_dmamap_sync( 3165 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3166 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3167 BUS_DMASYNC_PREWRITE); 3168 } else 3169 #endif 3170 { 3171 bus_dmamap_sync( 3172 sc_if->msk_cdata.msk_rx_ring_tag, 3173 sc_if->msk_cdata.msk_rx_ring_map, 3174 BUS_DMASYNC_PREWRITE); 3175 } 3176 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3177 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3178 } 3179 3180 static int 3181 mskc_handle_events(struct msk_softc *sc) 3182 { 3183 struct msk_if_softc *sc_if; 3184 int rxput[2]; 3185 struct msk_stat_desc *sd; 3186 uint32_t control, status; 3187 int cons, idx, len, port, rxprog; 3188 struct mbuf_chain chain[MAXCPU]; 3189 3190 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3191 if (idx == sc->msk_stat_cons) 3192 return (0); 3193 3194 ether_input_chain_init(chain); 3195 3196 /* Sync status LEs. */ 3197 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3198 BUS_DMASYNC_POSTREAD); 3199 /* XXX Sync Rx LEs here. */ 3200 3201 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3202 3203 rxprog = 0; 3204 for (cons = sc->msk_stat_cons; cons != idx;) { 3205 sd = &sc->msk_stat_ring[cons]; 3206 control = le32toh(sd->msk_control); 3207 if ((control & HW_OWNER) == 0) 3208 break; 3209 /* 3210 * Marvell's FreeBSD driver updates status LE after clearing 3211 * HW_OWNER. However we don't have a way to sync single LE 3212 * with bus_dma(9) API. bus_dma(9) provides a way to sync 3213 * an entire DMA map. So don't sync LE until we have a better 3214 * way to sync LEs. 3215 */ 3216 control &= ~HW_OWNER; 3217 sd->msk_control = htole32(control); 3218 status = le32toh(sd->msk_status); 3219 len = control & STLE_LEN_MASK; 3220 port = (control >> 16) & 0x01; 3221 sc_if = sc->msk_if[port]; 3222 if (sc_if == NULL) { 3223 device_printf(sc->msk_dev, "invalid port opcode " 3224 "0x%08x\n", control & STLE_OP_MASK); 3225 continue; 3226 } 3227 3228 switch (control & STLE_OP_MASK) { 3229 case OP_RXVLAN: 3230 sc_if->msk_vtag = ntohs(len); 3231 break; 3232 case OP_RXCHKSVLAN: 3233 sc_if->msk_vtag = ntohs(len); 3234 break; 3235 case OP_RXSTAT: 3236 #ifdef MSK_JUMBO 3237 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3238 msk_jumbo_rxeof(sc_if, status, len); 3239 else 3240 #endif 3241 msk_rxeof(sc_if, status, len, chain); 3242 rxprog++; 3243 /* 3244 * Because there is no way to sync single Rx LE 3245 * put the DMA sync operation off until the end of 3246 * event processing. 3247 */ 3248 rxput[port]++; 3249 /* Update prefetch unit if we've passed water mark. */ 3250 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3251 msk_rxput(sc_if); 3252 rxput[port] = 0; 3253 } 3254 break; 3255 case OP_TXINDEXLE: 3256 if (sc->msk_if[MSK_PORT_A] != NULL) { 3257 msk_txeof(sc->msk_if[MSK_PORT_A], 3258 status & STLE_TXA1_MSKL); 3259 } 3260 if (sc->msk_if[MSK_PORT_B] != NULL) { 3261 msk_txeof(sc->msk_if[MSK_PORT_B], 3262 ((status & STLE_TXA2_MSKL) >> 3263 STLE_TXA2_SHIFTL) | 3264 ((len & STLE_TXA2_MSKH) << 3265 STLE_TXA2_SHIFTH)); 3266 } 3267 break; 3268 default: 3269 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3270 control & STLE_OP_MASK); 3271 break; 3272 } 3273 MSK_INC(cons, MSK_STAT_RING_CNT); 3274 if (rxprog > sc->msk_process_limit) 3275 break; 3276 } 3277 3278 if (rxprog > 0) 3279 ether_input_dispatch(chain); 3280 3281 sc->msk_stat_cons = cons; 3282 /* XXX We should sync status LEs here. See above notes. */ 3283 3284 if (rxput[MSK_PORT_A] > 0) 3285 msk_rxput(sc->msk_if[MSK_PORT_A]); 3286 if (rxput[MSK_PORT_B] > 0) 3287 msk_rxput(sc->msk_if[MSK_PORT_B]); 3288 3289 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3290 } 3291 3292 /* Legacy interrupt handler for shared interrupt. */ 3293 static void 3294 mskc_intr(void *xsc) 3295 { 3296 struct msk_softc *sc; 3297 struct msk_if_softc *sc_if0, *sc_if1; 3298 struct ifnet *ifp0, *ifp1; 3299 uint32_t status; 3300 3301 sc = xsc; 3302 ASSERT_SERIALIZED(&sc->msk_serializer); 3303 3304 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3305 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3306 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3307 (status & sc->msk_intrmask) == 0) { 3308 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3309 return; 3310 } 3311 3312 sc_if0 = sc->msk_if[MSK_PORT_A]; 3313 sc_if1 = sc->msk_if[MSK_PORT_B]; 3314 ifp0 = ifp1 = NULL; 3315 if (sc_if0 != NULL) 3316 ifp0 = sc_if0->msk_ifp; 3317 if (sc_if1 != NULL) 3318 ifp1 = sc_if1->msk_ifp; 3319 3320 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3321 msk_intr_phy(sc_if0); 3322 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3323 msk_intr_phy(sc_if1); 3324 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3325 msk_intr_gmac(sc_if0); 3326 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3327 msk_intr_gmac(sc_if1); 3328 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3329 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3330 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3331 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3332 CSR_READ_4(sc, B0_IMSK); 3333 } 3334 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3335 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3336 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3337 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3338 CSR_READ_4(sc, B0_IMSK); 3339 } 3340 if ((status & Y2_IS_HW_ERR) != 0) 3341 mskc_intr_hwerr(sc); 3342 3343 while (mskc_handle_events(sc) != 0) 3344 ; 3345 if ((status & Y2_IS_STAT_BMU) != 0) 3346 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3347 3348 /* Reenable interrupts. */ 3349 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3350 3351 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 && 3352 !ifq_is_empty(&ifp0->if_snd)) 3353 if_devstart(ifp0); 3354 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 && 3355 !ifq_is_empty(&ifp1->if_snd)) 3356 if_devstart(ifp1); 3357 } 3358 3359 static void 3360 msk_init(void *xsc) 3361 { 3362 struct msk_if_softc *sc_if = xsc; 3363 struct msk_softc *sc = sc_if->msk_softc; 3364 struct ifnet *ifp = sc_if->msk_ifp; 3365 struct mii_data *mii; 3366 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3367 uint16_t gmac; 3368 int error, i; 3369 3370 ASSERT_SERIALIZED(ifp->if_serializer); 3371 3372 mii = device_get_softc(sc_if->msk_miibus); 3373 3374 error = 0; 3375 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3376 msk_stop(sc_if); 3377 3378 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3379 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 3380 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3381 /* 3382 * In Yukon EC Ultra, TSO & checksum offload is not 3383 * supported for jumbo frame. 3384 */ 3385 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 3386 ifp->if_capenable &= ~IFCAP_TXCSUM; 3387 } 3388 3389 /* 3390 * Initialize GMAC first. 3391 * Without this initialization, Rx MAC did not work as expected 3392 * and Rx MAC garbled status LEs and it resulted in out-of-order 3393 * or duplicated frame delivery which in turn showed very poor 3394 * Rx performance.(I had to write a packet analysis code that 3395 * could be embeded in driver to diagnose this issue.) 3396 * I've spent almost 2 months to fix this issue. If I have had 3397 * datasheet for Yukon II I wouldn't have encountered this. :-( 3398 */ 3399 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL; 3400 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 3401 3402 /* Dummy read the Interrupt Source Register. */ 3403 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3404 3405 /* Set MIB Clear Counter Mode. */ 3406 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3407 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3408 /* Read all MIB Counters with Clear Mode set. */ 3409 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3410 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3411 /* Clear MIB Clear Counter Mode. */ 3412 gmac &= ~GM_PAR_MIB_CLR; 3413 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3414 3415 /* Disable FCS. */ 3416 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3417 3418 /* Setup Transmit Control Register. */ 3419 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3420 3421 /* Setup Transmit Flow Control Register. */ 3422 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3423 3424 /* Setup Transmit Parameter Register. */ 3425 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3426 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3427 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3428 3429 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3430 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3431 3432 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) 3433 gmac |= GM_SMOD_JUMBO_ENA; 3434 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3435 3436 /* Set station address. */ 3437 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3438 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3439 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3440 eaddr[i]); 3441 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3442 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3443 eaddr[i]); 3444 3445 /* Disable interrupts for counter overflows. */ 3446 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3447 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3448 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3449 3450 /* Configure Rx MAC FIFO. */ 3451 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3452 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3453 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3454 GMF_OPER_ON | GMF_RX_F_FL_ON); 3455 3456 /* Set promiscuous mode. */ 3457 msk_setpromisc(sc_if); 3458 3459 /* Set multicast filter. */ 3460 msk_setmulti(sc_if); 3461 3462 /* Flush Rx MAC FIFO on any flow control or error. */ 3463 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3464 GMR_FS_ANY_ERR); 3465 3466 /* Set Rx FIFO flush threshold to 64 bytes. */ 3467 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), 3468 RX_GMF_FL_THR_DEF); 3469 3470 /* Configure Tx MAC FIFO. */ 3471 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3472 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3473 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3474 3475 /* Configure hardware VLAN tag insertion/stripping. */ 3476 msk_setvlan(sc_if, ifp); 3477 3478 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3479 /* Set Rx Pause threshould. */ 3480 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3481 MSK_ECU_LLPP); 3482 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3483 MSK_ECU_ULPP); 3484 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) { 3485 /* 3486 * Set Tx GMAC FIFO Almost Empty Threshold. 3487 */ 3488 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3489 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3490 /* Disable Store & Forward mode for Tx. */ 3491 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3492 TX_JUMBO_ENA | TX_STFW_DIS); 3493 } else { 3494 /* Enable Store & Forward mode for Tx. */ 3495 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3496 TX_JUMBO_DIS | TX_STFW_ENA); 3497 } 3498 } 3499 3500 /* 3501 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3502 * arbiter as we don't use Sync Tx queue. 3503 */ 3504 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3505 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3506 /* Enable the RAM Interface Arbiter. */ 3507 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3508 3509 /* Setup RAM buffer. */ 3510 msk_set_rambuffer(sc_if); 3511 3512 /* Disable Tx sync Queue. */ 3513 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3514 3515 /* Setup Tx Queue Bus Memory Interface. */ 3516 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3517 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3518 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3519 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3520 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3521 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3522 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3523 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); 3524 } 3525 3526 /* Setup Rx Queue Bus Memory Interface. */ 3527 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3528 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3529 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3530 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3531 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3532 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3533 /* MAC Rx RAM Read is controlled by hardware. */ 3534 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3535 } 3536 3537 msk_set_prefetch(sc, sc_if->msk_txq, 3538 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3539 msk_init_tx_ring(sc_if); 3540 3541 /* Disable Rx checksum offload and RSS hash. */ 3542 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3543 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3544 #ifdef MSK_JUMBO 3545 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3546 msk_set_prefetch(sc, sc_if->msk_rxq, 3547 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3548 MSK_JUMBO_RX_RING_CNT - 1); 3549 error = msk_init_jumbo_rx_ring(sc_if); 3550 } else 3551 #endif 3552 { 3553 msk_set_prefetch(sc, sc_if->msk_rxq, 3554 sc_if->msk_rdata.msk_rx_ring_paddr, 3555 MSK_RX_RING_CNT - 1); 3556 error = msk_init_rx_ring(sc_if); 3557 } 3558 if (error != 0) { 3559 device_printf(sc_if->msk_if_dev, 3560 "initialization failed: no memory for Rx buffers\n"); 3561 msk_stop(sc_if); 3562 return; 3563 } 3564 3565 /* Configure interrupt handling. */ 3566 if (sc_if->msk_port == MSK_PORT_A) { 3567 sc->msk_intrmask |= Y2_IS_PORT_A; 3568 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3569 } else { 3570 sc->msk_intrmask |= Y2_IS_PORT_B; 3571 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3572 } 3573 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3574 CSR_READ_4(sc, B0_HWE_IMSK); 3575 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3576 CSR_READ_4(sc, B0_IMSK); 3577 3578 sc_if->msk_link = 0; 3579 mii_mediachg(mii); 3580 3581 mskc_set_imtimer(sc); 3582 3583 ifp->if_flags |= IFF_RUNNING; 3584 ifp->if_flags &= ~IFF_OACTIVE; 3585 3586 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3587 } 3588 3589 static void 3590 msk_set_rambuffer(struct msk_if_softc *sc_if) 3591 { 3592 struct msk_softc *sc; 3593 int ltpp, utpp; 3594 3595 sc = sc_if->msk_softc; 3596 3597 /* Setup Rx Queue. */ 3598 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3599 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3600 sc->msk_rxqstart[sc_if->msk_port] / 8); 3601 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3602 sc->msk_rxqend[sc_if->msk_port] / 8); 3603 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3604 sc->msk_rxqstart[sc_if->msk_port] / 8); 3605 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3606 sc->msk_rxqstart[sc_if->msk_port] / 8); 3607 3608 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3609 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3610 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3611 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3612 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3613 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3614 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3615 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3616 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3617 3618 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3619 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3620 3621 /* Setup Tx Queue. */ 3622 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3623 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3624 sc->msk_txqstart[sc_if->msk_port] / 8); 3625 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3626 sc->msk_txqend[sc_if->msk_port] / 8); 3627 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3628 sc->msk_txqstart[sc_if->msk_port] / 8); 3629 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3630 sc->msk_txqstart[sc_if->msk_port] / 8); 3631 /* Enable Store & Forward for Tx side. */ 3632 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3633 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3634 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3635 } 3636 3637 static void 3638 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3639 uint32_t count) 3640 { 3641 3642 /* Reset the prefetch unit. */ 3643 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3644 PREF_UNIT_RST_SET); 3645 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3646 PREF_UNIT_RST_CLR); 3647 /* Set LE base address. */ 3648 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3649 MSK_ADDR_LO(addr)); 3650 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3651 MSK_ADDR_HI(addr)); 3652 /* Set the list last index. */ 3653 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3654 count); 3655 /* Turn on prefetch unit. */ 3656 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3657 PREF_UNIT_OP_ON); 3658 /* Dummy read to ensure write. */ 3659 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); 3660 } 3661 3662 static void 3663 msk_stop(struct msk_if_softc *sc_if) 3664 { 3665 struct msk_softc *sc = sc_if->msk_softc; 3666 struct ifnet *ifp = sc_if->msk_ifp; 3667 struct msk_txdesc *txd; 3668 struct msk_rxdesc *rxd; 3669 #ifdef MSK_JUMBO 3670 struct msk_rxdesc *jrxd; 3671 #endif 3672 uint32_t val; 3673 int i; 3674 3675 ASSERT_SERIALIZED(ifp->if_serializer); 3676 3677 callout_stop(&sc_if->msk_tick_ch); 3678 ifp->if_timer = 0; 3679 3680 /* Disable interrupts. */ 3681 if (sc_if->msk_port == MSK_PORT_A) { 3682 sc->msk_intrmask &= ~Y2_IS_PORT_A; 3683 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK; 3684 } else { 3685 sc->msk_intrmask &= ~Y2_IS_PORT_B; 3686 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK; 3687 } 3688 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3689 CSR_READ_4(sc, B0_HWE_IMSK); 3690 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3691 CSR_READ_4(sc, B0_IMSK); 3692 3693 /* Disable Tx/Rx MAC. */ 3694 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 3695 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 3696 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); 3697 /* Read again to ensure writing. */ 3698 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 3699 3700 /* Stop Tx BMU. */ 3701 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); 3702 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 3703 for (i = 0; i < MSK_TIMEOUT; i++) { 3704 if ((val & (BMU_STOP | BMU_IDLE)) == 0) { 3705 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 3706 BMU_STOP); 3707 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 3708 } else 3709 break; 3710 DELAY(1); 3711 } 3712 if (i == MSK_TIMEOUT) 3713 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n"); 3714 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), 3715 RB_RST_SET | RB_DIS_OP_MD); 3716 3717 /* Disable all GMAC interrupt. */ 3718 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); 3719 /* Disable PHY interrupt. */ 3720 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 3721 3722 /* Disable the RAM Interface Arbiter. */ 3723 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); 3724 3725 /* Reset the PCI FIFO of the async Tx queue */ 3726 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 3727 BMU_RST_SET | BMU_FIFO_RST); 3728 3729 /* Reset the Tx prefetch units. */ 3730 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG), 3731 PREF_UNIT_RST_SET); 3732 3733 /* Reset the RAM Buffer async Tx queue. */ 3734 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET); 3735 3736 /* Reset Tx MAC FIFO. */ 3737 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3738 /* Set Pause Off. */ 3739 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF); 3740 3741 /* 3742 * The Rx Stop command will not work for Yukon-2 if the BMU does not 3743 * reach the end of packet and since we can't make sure that we have 3744 * incoming data, we must reset the BMU while it is not during a DMA 3745 * transfer. Since it is possible that the Rx path is still active, 3746 * the Rx RAM buffer will be stopped first, so any possible incoming 3747 * data will not trigger a DMA. After the RAM buffer is stopped, the 3748 * BMU is polled until any DMA in progress is ended and only then it 3749 * will be reset. 3750 */ 3751 3752 /* Disable the RAM Buffer receive queue. */ 3753 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD); 3754 for (i = 0; i < MSK_TIMEOUT; i++) { 3755 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) == 3756 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL))) 3757 break; 3758 DELAY(1); 3759 } 3760 if (i == MSK_TIMEOUT) 3761 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n"); 3762 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3763 BMU_RST_SET | BMU_FIFO_RST); 3764 /* Reset the Rx prefetch unit. */ 3765 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG), 3766 PREF_UNIT_RST_SET); 3767 /* Reset the RAM Buffer receive queue. */ 3768 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET); 3769 /* Reset Rx MAC FIFO. */ 3770 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3771 3772 /* Free Rx and Tx mbufs still in the queues. */ 3773 for (i = 0; i < MSK_RX_RING_CNT; i++) { 3774 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 3775 if (rxd->rx_m != NULL) { 3776 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, 3777 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3778 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, 3779 rxd->rx_dmamap); 3780 m_freem(rxd->rx_m); 3781 rxd->rx_m = NULL; 3782 } 3783 } 3784 #ifdef MSK_JUMBO 3785 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 3786 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 3787 if (jrxd->rx_m != NULL) { 3788 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 3789 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3790 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 3791 jrxd->rx_dmamap); 3792 m_freem(jrxd->rx_m); 3793 jrxd->rx_m = NULL; 3794 } 3795 } 3796 #endif 3797 for (i = 0; i < MSK_TX_RING_CNT; i++) { 3798 txd = &sc_if->msk_cdata.msk_txdesc[i]; 3799 if (txd->tx_m != NULL) { 3800 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, 3801 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3802 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, 3803 txd->tx_dmamap); 3804 m_freem(txd->tx_m); 3805 txd->tx_m = NULL; 3806 } 3807 } 3808 3809 /* 3810 * Mark the interface down. 3811 */ 3812 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3813 sc_if->msk_link = 0; 3814 } 3815 3816 static int 3817 mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS) 3818 { 3819 return sysctl_int_range(oidp, arg1, arg2, req, 3820 MSK_PROC_MIN, MSK_PROC_MAX); 3821 } 3822 3823 static int 3824 mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3825 { 3826 struct msk_softc *sc = arg1; 3827 struct lwkt_serialize *serializer = &sc->msk_serializer; 3828 int error = 0, v; 3829 3830 lwkt_serialize_enter(serializer); 3831 3832 v = sc->msk_intr_rate; 3833 error = sysctl_handle_int(oidp, &v, 0, req); 3834 if (error || req->newptr == NULL) 3835 goto back; 3836 if (v < 0) { 3837 error = EINVAL; 3838 goto back; 3839 } 3840 3841 if (sc->msk_intr_rate != v) { 3842 int flag = 0, i; 3843 3844 sc->msk_intr_rate = v; 3845 for (i = 0; i < 2; ++i) { 3846 if (sc->msk_if[i] != NULL) { 3847 flag |= sc->msk_if[i]-> 3848 arpcom.ac_if.if_flags & IFF_RUNNING; 3849 } 3850 } 3851 if (flag) 3852 mskc_set_imtimer(sc); 3853 } 3854 back: 3855 lwkt_serialize_exit(serializer); 3856 return error; 3857 } 3858 3859 static int 3860 msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag, 3861 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap) 3862 { 3863 struct msk_if_softc *sc_if = device_get_softc(dev); 3864 struct msk_dmamap_arg ctx; 3865 bus_dma_segment_t seg; 3866 int error; 3867 3868 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag, 3869 MSK_RING_ALIGN, 0, 3870 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3871 NULL, NULL, 3872 size, 1, BUS_SPACE_MAXSIZE_32BIT, 3873 0, dtag); 3874 if (error) { 3875 device_printf(dev, "can't create DMA tag\n"); 3876 return error; 3877 } 3878 3879 error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO, 3880 dmap); 3881 if (error) { 3882 device_printf(dev, "can't allocate DMA mem\n"); 3883 bus_dma_tag_destroy(*dtag); 3884 *dtag = NULL; 3885 return error; 3886 } 3887 3888 bzero(&ctx, sizeof(ctx)); 3889 ctx.nseg = 1; 3890 ctx.segs = &seg; 3891 error = bus_dmamap_load(*dtag, *dmap, *addr, size, 3892 msk_dmamap_cb, &ctx, BUS_DMA_WAITOK); 3893 if (error) { 3894 device_printf(dev, "can't load DMA mem\n"); 3895 bus_dmamem_free(*dtag, *addr, *dmap); 3896 bus_dma_tag_destroy(*dtag); 3897 *dtag = NULL; 3898 return error; 3899 } 3900 *paddr = seg.ds_addr; 3901 return 0; 3902 } 3903 3904 static void 3905 msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 3906 { 3907 if (dtag != NULL) { 3908 bus_dmamap_unload(dtag, dmap); 3909 bus_dmamem_free(dtag, addr, dmap); 3910 bus_dma_tag_destroy(dtag); 3911 } 3912 } 3913 3914 static void 3915 mskc_set_imtimer(struct msk_softc *sc) 3916 { 3917 if (sc->msk_intr_rate > 0) { 3918 /* 3919 * XXX myk(4) seems to use 125MHz for EC/FE/XL 3920 * and 78.125MHz for rest of chip types 3921 */ 3922 CSR_WRITE_4(sc, B2_IRQM_INI, 3923 MSK_USECS(sc, 1000000 / sc->msk_intr_rate)); 3924 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask); 3925 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START); 3926 } else { 3927 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP); 3928 } 3929 } 3930