/******************************************************************************
 *
 * Name    : sky2.c
 * Project : Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version : $Revision: 1.23 $
 * Date    : $Date: 2005/12/22 09:04:11 $
 * Purpose : Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
/* $DragonFly: src/sys/dev/netif/msk/if_msk.c,v 1.5 2008/06/26 13:08:55 sephe Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include "opt_ethernet.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Unknown",
	"Yukon EC",
	"Yukon FE"
};

static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int,
		    struct mbuf_chain *);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static void	msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	msk_dmamap_mbuf_cb(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);
static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int);
static struct mbuf *
		msk_defrag(struct mbuf *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void	msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_setmulti(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_setpromisc(struct msk_if_softc *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
		    void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);

static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc_if->msk_link = 1;
	} else
		sc_if->msk_link = 0;

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * status change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
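		/*
		 * Added note (illustrative walk-through, not additional
		 * driver logic): for a link resolved to 1000baseT
		 * full-duplex with both pause directions agreed on
		 * (IFM_FLAG0 and IFM_FLAG1 set), the assembly below
		 * produces
		 *
		 *   gmac = GM_GPCR_AU_ALL_DIS | GM_GPCR_SPEED_1000 |
		 *          GM_GPCR_DUP_FULL | GM_GPCR_RX_ENA |
		 *          GM_GPCR_TX_ENA;
		 *
		 * i.e. neither GM_GPCR_FC_RX_DIS nor GM_GPCR_FC_TX_DIS
		 * is set, so the MAC keeps flow control enabled in both
		 * directions.
		 */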
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	}
}

static void
msk_setmulti(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	mode |= GM_RXCR_UCF_ENA;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			mchash[0] = 0xffff;
			mchash[1] = 0xffff;
		}
	} else {
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
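/*
 * Worked example of the hash arithmetic in msk_setmulti() above
 * (illustrative only; the helper below is not compiled into the driver,
 * and the CRC value is hypothetical).  If ether_crc32_be() yields a CRC
 * whose low 6 bits are 0x27 (39), then bit (39 & 0x1f) = 7 of
 * mchash[39 >> 5] = mchash[1] is set, which reaches the GM_MC_ADDR_H3
 * register when the 64-bit table is written back 16 bits at a time.
 */
#if 0
static __inline void
msk_hash_example(uint32_t crc, uint32_t mchash[2])
{
	crc &= 0x3f;				/* 64-bin filter index */
	mchash[crc >> 5] |= 1 << (crc & 0x1f);	/* word 0 or 1, bit 0..31 */
}
#endif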
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static void
msk_setpromisc(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
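/*
 * Added note on the put index above (assumption: MSK_INC() is the usual
 * modular increment from if_mskreg.h, roughly (x) = ((x) + 1) % (y)):
 * after the loop every one of the MSK_RX_RING_CNT descriptors owns a
 * buffer, so the producer index handed to the prefetch unit is the last
 * valid slot, MSK_RX_RING_CNT - 1, and subsequent increments wrap back
 * to slot 0.
 */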
#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	struct msk_dmamap_arg ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	bzero(&ctx, sizeof(ctx));
	ctx.nseg = 1;
	ctx.segs = &seg;
	if (bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, msk_dmamap_mbuf_cb, &ctx,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(ctx.nseg == 1,
	    ("%s: %d segments returned!", __func__, ctx.nseg));
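	/*
	 * Added note: the replacement mbuf was loaded into the spare map
	 * above, so the swap below never leaves the ring slot without a
	 * mapped buffer; the old mapping is only torn down once the new
	 * load has already succeeded, and the slot's former map becomes
	 * the new spare.  On a load failure the caller can keep recycling
	 * the old buffer instead (see msk_discard_rxbuf()).
	 */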
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control = htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_mediachg(mii);

	return (0);
}
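/*
 * Added note: msk_mediachange() above and msk_mediastatus() below are
 * the ifmedia callbacks handed to mii_phy_probe() in msk_attach();
 * msk_ioctl() reaches them indirectly by passing SIOCSIFMEDIA and
 * SIOCGIFMEDIA to ifmedia_ioctl().
 */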
/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
#ifdef MSK_JUMBO
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
#else
		error = EOPNOTSUPP;
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & IFF_PROMISC) != 0) {
					msk_setpromisc(sc_if);
					msk_setmulti(sc_if);
				}
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_setmulti(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
#ifdef notyet
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
#endif

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * On Yukon EC Ultra, TSO and checksum offload are
			 * not supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;
	uint8_t val;

	/* Get adapter SRAM size. */
	val = CSR_READ_1(sc, B2_E_0);
	sc->msk_ramsize = (val == 0) ? 128 : val * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	/*
	 * Give the receiver 2/3 of the memory and round down to a
	 * multiple of 1024.  The Tx/Rx RAM buffer sizes of the Yukon II
	 * should be multiples of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
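/*
 * Worked example for mskc_setup_rambuffer() above, with a hypothetical
 * SRAM size: a chip reporting B2_E_0 = 12 has 12 * 4 = 48KB of SRAM.
 * Two thirds of 49152 bytes is 32768, already a multiple of 1024, so a
 * single-port board gets an Rx queue at 0x00000000-0x00007fff (32KB)
 * and a Tx queue at 0x00008000-0x0000bfff (16KB).
 */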
static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			uint32_t our;

			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;
		uint8_t pcix;

		pcix = pci_get_pcixcap_ptr(sc->msk_dev);

		pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~0x70;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width) {
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
		}
	}
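	/*
	 * Added note on PEX_DC_MAX_RD_RQ_SIZE(5) above: PCI Express
	 * encodes the maximum read request size as 128 << n bytes, so
	 * n = 5 selects the 4096 bytes mentioned in the comment.  The
	 * link width check reuses PEX_LS_LINK_WI_MSK for both the link
	 * status and link capability registers, which share the same
	 * field layout.
	 */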
	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use the default values for STAT_ISR_TIMER_INI and
	 * STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled, as the
	 * hardware has a serious bug in Rx checksum offload for all
	 * Yukon II family hardware.  It seems there is a workaround to
	 * make it work sometimes, but the workaround also has to check
	 * OP code sequences to verify whether the OP code is correct.
	 * Sometimes it would have to compute the IP/TCP/UDP checksum in
	 * the driver to verify the correctness of the checksum computed
	 * by the hardware.  If you have to compute the checksum in
	 * software to verify the hardware's checksum, why have the
	 * hardware compute it at all?  So there is no reason to spend
	 * time making Rx checksum offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
#if 0
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif

	return 0;
fail:
	msk_detach(dev);
	sc->msk_if[port] = NULL;
	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port, cpuid;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables.
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
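	/*
	 * Added background: bringing a device out of D3hot can clear
	 * soft-configured state such as the BARs and the interrupt line,
	 * which is why the block below snapshots PCIR_BAR(0)/PCIR_BAR(1)
	 * and PCIR_INTLINE before forcing the chip back to D0, then
	 * writes them back afterwards.
	 */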
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource.
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
	    &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
		    &sc->msk_res_rid, RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ.
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->msk_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree.
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
	    "I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, mskc_sysctl_intr_rate,
	    "I", "max number of interrupts per second");
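	/*
	 * Usage sketch for the knobs above (the values and the unit
	 * number are hypothetical).  Boot-time defaults come from the
	 * loader tunables declared near the top of this file:
	 *
	 *   hw.mskc.intr_rate="4000"	(in /boot/loader.conf)
	 *   hw.mskc.process_limit="100"
	 *
	 * and the per-device copies can be changed at runtime through
	 * the nodes registered here, e.g.
	 *
	 *   sysctl hw.mskc0.intr_rate=4000
	 *
	 * where the node name follows device_get_nameunit(dev).
	 */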
	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_is_pcix(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
	    mskc_intr, sc, &sc->msk_intrhand, &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

	cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (sc->msk_if[0] != NULL)
		sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
	if (sc->msk_if[1] != NULL)
		sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;
	return 0;
fail:
	mskc_detach(dev);
	return (error);
}
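/*
 * Added note on the msk_clock values chosen in mskc_attach() above:
 * they feed the MSK_USECS() macro used when programming the status-unit
 * timers in mskc_reset().  Assuming the usual definition from
 * if_mskreg.h, MSK_USECS(sc, us) is simply (sc)->msk_clock * (us),
 * i.e. microseconds scaled to core clock ticks, so at 125MHz the
 * MSK_USECS(sc, 1000) written to STAT_TX_TIMER_INI comes to 125000
 * ticks for a 1ms period.
 */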
/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct msk_softc *sc = sc_if->msk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		if (sc->msk_intrhand != NULL) {
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_A]);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_B]);

			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
			    sc->msk_intrhand);
			sc->msk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->msk_miibus != NULL)
		device_delete_child(dev, sc_if->msk_miibus);

	msk_txrx_dma_free(sc_if);
	return (0);
}

static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int *port, i;

#ifdef INVARIANTS
	if (device_is_attached(dev)) {
		KASSERT(sc->msk_intrhand == NULL,
		    ("intr is not torn down yet\n"));
	}
#endif

	for (i = 0; i < sc->msk_num_port; ++i) {
		if (sc->msk_devs[i] != NULL) {
			port = device_get_ivars(sc->msk_devs[i]);
			if (port != NULL) {
				kfree(port, M_DEVBUF);
				device_set_ivars(sc->msk_devs[i], NULL);
			}
			device_delete_child(dev, sc->msk_devs[i]);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	mskc_status_dma_free(sc);

	if (sc->msk_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
		    sc->msk_irq);
	}
	if (sc->msk_res != NULL) {
		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
		    sc->msk_res);
	}

	if (sc->msk_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->msk_sysctl_ctx);

	return (0);
}

static void
msk_dmamap_mbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsz __unused, int error)
{
	struct msk_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	if (ctx->nseg < nseg) {
		ctx->nseg = 0;
		return;
	}

	ctx->nseg = nseg;
	for (i = 0; i < ctx->nseg; ++i)
		ctx->segs[i] = segs[i];
}

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	KKASSERT(nseg <= ctx->nseg);

	ctx->nseg = nseg;
	for (i = 0; i < ctx->nseg; ++i)
		ctx->segs[i] = segs[i];
}
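/*
 * A note on the two callbacks above: bus_dmamap_load() and
 * bus_dmamap_load_mbuf() hand the resulting physical segments to a
 * callback instead of returning them, so callers pass a struct
 * msk_dmamap_arg whose segs/nseg fields the callback fills in.  The
 * status-ring load in mskc_status_dma_alloc() below is the minimal
 * usage pattern: point ctx.segs at local storage, set ctx.nseg to the
 * capacity, load, then read the physical address back out of ctx.
 */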
*/ 1803 static int 1804 mskc_status_dma_alloc(struct msk_softc *sc) 1805 {
1806 struct msk_dmamap_arg ctx; 1807 bus_dma_segment_t seg; 1808 int error; 1809
1810 error = bus_dma_tag_create( 1811 NULL, /* XXX parent */
1812 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1813 BUS_SPACE_MAXADDR, /* lowaddr */
1814 BUS_SPACE_MAXADDR, /* highaddr */ 1815 NULL, NULL, /* filter, filterarg */
1816 MSK_STAT_RING_SZ, /* maxsize */ 1817 1, /* nsegments */
1818 MSK_STAT_RING_SZ, /* maxsegsize */ 1819 0, /* flags */
1820 &sc->msk_stat_tag); 1821 if (error) { 1822 device_printf(sc->msk_dev,
1823 "failed to create status DMA tag\n"); 1824 return (error); 1825 } 1826
1827 /* Allocate DMA'able memory and load the DMA map for status ring. */
1828 error = bus_dmamem_alloc(sc->msk_stat_tag, 1829 (void **)&sc->msk_stat_ring,
1830 BUS_DMA_WAITOK | BUS_DMA_ZERO, 1831 &sc->msk_stat_map);
1832 if (error) { 1833 device_printf(sc->msk_dev,
1834 "failed to allocate DMA'able memory for status ring\n");
1835 bus_dma_tag_destroy(sc->msk_stat_tag); 1836 sc->msk_stat_tag = NULL;
1837 return (error); 1838 } 1839
1840 bzero(&ctx, sizeof(ctx)); 1841 ctx.nseg = 1; 1842 ctx.segs = &seg;
1843 error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
1844 sc->msk_stat_ring, MSK_STAT_RING_SZ, 1845 msk_dmamap_cb, &ctx, 0);
1846 if (error) { 1847 device_printf(sc->msk_dev,
1848 "failed to load DMA'able memory for status ring\n");
1849 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1850 sc->msk_stat_map);
1851 bus_dma_tag_destroy(sc->msk_stat_tag); 1852 sc->msk_stat_tag = NULL;
1853 return (error); 1854 } 1855 sc->msk_stat_ring_paddr = seg.ds_addr; 1856
1857 return (0); 1858 } 1859
1860 static void 1861 mskc_status_dma_free(struct msk_softc *sc) 1862 {
1863 /* Destroy status block. */ 1864 if (sc->msk_stat_tag) {
1865 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1866 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1867 sc->msk_stat_map);
1868 bus_dma_tag_destroy(sc->msk_stat_tag); 1869 sc->msk_stat_tag = NULL; 1870 } 1871 } 1872
1873 static int 1874 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1875 {
1876 int error, i, j; 1877 #ifdef MSK_JUMBO 1878 struct msk_rxdesc *jrxd;
1879 struct msk_jpool_entry *entry; 1880 uint8_t *ptr; 1881 #endif 1882
1883 /* Create parent DMA tag. */ 1884 /* 1885 * XXX
1886 * It seems that the Yukon II supports full 64-bit DMA operations, but
1887 * it needs two descriptors (list elements) per 64-bit DMA transfer.
1888 * Since we don't know in advance whether a 32-bit or 64-bit DMA
1889 * mapping will be used for each mbuf, we limit its DMA space to the
1890 * 32-bit address range. Otherwise, we would have to check which DMA
1891 * address is in use and chain another descriptor for the 64-bit
1892 * DMA operation, which also means the descriptor ring size becomes
1893 * variable. Limiting DMA addresses to the 32-bit address space greatly
1894 * simplifies descriptor handling and may even increase
1895 * performance a bit due to more efficient handling of descriptors.
1896 * Apart from harassing the checksum offloading mechanism, it seems
1897 * like a really bad idea to use a separate descriptor for 64-bit
1898 * DMA operations just to save a little descriptor memory. Anyway, I've
1899 * never seen such an exotic scheme on other Ethernet interface hardware.
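*
* To illustrate the cost (a hypothetical sketch only, not driver code;
* "ring", "prod" and "last_hi" are made-up names): with 64-bit addressing,
* an extra OP_ADDR64 list element would be needed whenever the upper 32
* address bits of a segment change, roughly:
*
*	if (MSK_ADDR_HI(segs[i].ds_addr) != last_hi) {
*		tx_le = &ring[prod];
*		tx_le->msk_addr = htole32(MSK_ADDR_HI(segs[i].ds_addr));
*		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
*		last_hi = MSK_ADDR_HI(segs[i].ds_addr);
*	}
*
* so the number of list elements consumed per packet would no longer
* be fixed.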
1900 */ 1901 error = bus_dma_tag_create( 1902 NULL, /* parent */
1903 1, 0, /* alignment, boundary */ 1904 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1905 BUS_SPACE_MAXADDR, /* highaddr */ 1906 NULL, NULL, /* filter, filterarg */
1907 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1908 0, /* nsegments */
1909 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1910 0, /* flags */
1911 &sc_if->msk_cdata.msk_parent_tag); 1912 if (error) {
1913 device_printf(sc_if->msk_if_dev, 1914 "failed to create parent DMA tag\n");
1915 return error; 1916 } 1917
1918 /* Create DMA resources for the Tx ring. */
1919 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1920 &sc_if->msk_cdata.msk_tx_ring_tag, 1921 (void **)&sc_if->msk_rdata.msk_tx_ring,
1922 &sc_if->msk_rdata.msk_tx_ring_paddr, 1923 &sc_if->msk_cdata.msk_tx_ring_map);
1924 if (error) { 1925 device_printf(sc_if->msk_if_dev,
1926 "failed to create TX ring DMA resources\n"); 1927 return error; 1928 } 1929
1930 /* Create DMA resources for the Rx ring. */
1931 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1932 &sc_if->msk_cdata.msk_rx_ring_tag, 1933 (void **)&sc_if->msk_rdata.msk_rx_ring,
1934 &sc_if->msk_rdata.msk_rx_ring_paddr, 1935 &sc_if->msk_cdata.msk_rx_ring_map);
1936 if (error) { 1937 device_printf(sc_if->msk_if_dev,
1938 "failed to create RX ring DMA resources\n"); 1939 return error; 1940 } 1941
1942 /* Create tag for Tx buffers. */
1943 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1944 1, 0, /* alignment, boundary */ 1945 BUS_SPACE_MAXADDR, /* lowaddr */
1946 BUS_SPACE_MAXADDR, /* highaddr */ 1947 NULL, NULL, /* filter, filterarg */
1948 MSK_TSO_MAXSIZE, /* maxsize */ 1949 MSK_MAXTXSEGS, /* nsegments */
1950 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 1951 0, /* flags */
1952 &sc_if->msk_cdata.msk_tx_tag); 1953 if (error) {
1954 device_printf(sc_if->msk_if_dev, 1955 "failed to create Tx DMA tag\n");
1956 return error; 1957 } 1958
1959 /* Create DMA maps for Tx buffers. */ 1960 for (i = 0; i < MSK_TX_RING_CNT; i++) {
1961 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i]; 1962
1963 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 1964 &txd->tx_dmamap);
1965 if (error) { 1966 device_printf(sc_if->msk_if_dev,
1967 "failed to create %dth Tx dmamap\n", i); 1968
1969 for (j = 0; j < i; ++j) { 1970 txd = &sc_if->msk_cdata.msk_txdesc[j];
1971 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 1972 txd->tx_dmamap); 1973 }
1974 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1975 sc_if->msk_cdata.msk_tx_tag = NULL; 1976 1977 return error; 1978 } 1979 } 1980
1981 /* Create tag for Rx buffers. */
1982 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1983 1, 0, /* alignment, boundary */ 1984 BUS_SPACE_MAXADDR, /* lowaddr */
1985 BUS_SPACE_MAXADDR, /* highaddr */ 1986 NULL, NULL, /* filter, filterarg */
1987 MCLBYTES, /* maxsize */ 1988 1, /* nsegments */ 1989 MCLBYTES, /* maxsegsize */
1990 0, /* flags */ 1991 &sc_if->msk_cdata.msk_rx_tag); 1992 if (error) {
1993 device_printf(sc_if->msk_if_dev, 1994 "failed to create Rx DMA tag\n");
1995 return error; 1996 } 1997
1998 /* Create DMA maps for Rx buffers.
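A dedicated spare map is created first so that the Rx path can load a
replacement mbuf before committing: on success the slot and spare maps can be
swapped, and on failure the old mbuf with its mapping stays in place and the
frame is simply dropped (see msk_newbuf() and msk_discard_rxbuf()).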
*/ 1999 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2000 &sc_if->msk_cdata.msk_rx_sparemap); 2001 if (error) { 2002 device_printf(sc_if->msk_if_dev, 2003 "failed to create spare Rx dmamap\n"); 2004 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2005 sc_if->msk_cdata.msk_rx_tag = NULL; 2006 return error; 2007 } 2008 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2009 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2010 2011 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2012 &rxd->rx_dmamap); 2013 if (error) { 2014 device_printf(sc_if->msk_if_dev, 2015 "failed to create %dth Rx dmamap\n", i); 2016 2017 for (j = 0; j < i; ++j) { 2018 rxd = &sc_if->msk_cdata.msk_rxdesc[j]; 2019 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2020 rxd->rx_dmamap); 2021 } 2022 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2023 sc_if->msk_cdata.msk_rx_tag = NULL; 2024 2025 return error; 2026 } 2027 } 2028 2029 #ifdef MSK_JUMBO 2030 SLIST_INIT(&sc_if->msk_jfree_listhead); 2031 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2032 2033 /* Create tag for jumbo Rx ring. */ 2034 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2035 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2036 BUS_SPACE_MAXADDR, /* lowaddr */ 2037 BUS_SPACE_MAXADDR, /* highaddr */ 2038 NULL, NULL, /* filter, filterarg */ 2039 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2040 1, /* nsegments */ 2041 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2042 0, /* flags */ 2043 NULL, NULL, /* lockfunc, lockarg */ 2044 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2045 if (error != 0) { 2046 device_printf(sc_if->msk_if_dev, 2047 "failed to create jumbo Rx ring DMA tag\n"); 2048 goto fail; 2049 } 2050 2051 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2052 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2053 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2054 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2055 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2056 if (error != 0) { 2057 device_printf(sc_if->msk_if_dev, 2058 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2059 goto fail; 2060 } 2061 2062 ctx.msk_busaddr = 0; 2063 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2064 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2065 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2066 msk_dmamap_cb, &ctx, 0); 2067 if (error != 0) { 2068 device_printf(sc_if->msk_if_dev, 2069 "failed to load DMA'able memory for jumbo Rx ring\n"); 2070 goto fail; 2071 } 2072 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2073 2074 /* Create tag for jumbo buffer blocks. */ 2075 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2076 PAGE_SIZE, 0, /* alignment, boundary */ 2077 BUS_SPACE_MAXADDR, /* lowaddr */ 2078 BUS_SPACE_MAXADDR, /* highaddr */ 2079 NULL, NULL, /* filter, filterarg */ 2080 MSK_JMEM, /* maxsize */ 2081 1, /* nsegments */ 2082 MSK_JMEM, /* maxsegsize */ 2083 0, /* flags */ 2084 NULL, NULL, /* lockfunc, lockarg */ 2085 &sc_if->msk_cdata.msk_jumbo_tag); 2086 if (error != 0) { 2087 device_printf(sc_if->msk_if_dev, 2088 "failed to create jumbo Rx buffer block DMA tag\n"); 2089 goto fail; 2090 } 2091 2092 /* Create tag for jumbo Rx buffers. 
*/ 2093 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2094 PAGE_SIZE, 0, /* alignment, boundary */ 2095 BUS_SPACE_MAXADDR, /* lowaddr */ 2096 BUS_SPACE_MAXADDR, /* highaddr */ 2097 NULL, NULL, /* filter, filterarg */ 2098 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2099 MSK_MAXRXSEGS, /* nsegments */ 2100 MSK_JLEN, /* maxsegsize */ 2101 0, /* flags */ 2102 NULL, NULL, /* lockfunc, lockarg */ 2103 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2104 if (error != 0) { 2105 device_printf(sc_if->msk_if_dev, 2106 "failed to create jumbo Rx DMA tag\n"); 2107 goto fail; 2108 } 2109 2110 /* Create DMA maps for jumbo Rx buffers. */ 2111 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2112 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2113 device_printf(sc_if->msk_if_dev, 2114 "failed to create spare jumbo Rx dmamap\n"); 2115 goto fail; 2116 } 2117 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2118 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2119 jrxd->rx_m = NULL; 2120 jrxd->rx_dmamap = NULL; 2121 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2122 &jrxd->rx_dmamap); 2123 if (error != 0) { 2124 device_printf(sc_if->msk_if_dev, 2125 "failed to create jumbo Rx dmamap\n"); 2126 goto fail; 2127 } 2128 } 2129 2130 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ 2131 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2132 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2133 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2134 &sc_if->msk_cdata.msk_jumbo_map); 2135 if (error != 0) { 2136 device_printf(sc_if->msk_if_dev, 2137 "failed to allocate DMA'able memory for jumbo buf\n"); 2138 goto fail; 2139 } 2140 2141 ctx.msk_busaddr = 0; 2142 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2143 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2144 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2145 if (error != 0) { 2146 device_printf(sc_if->msk_if_dev, 2147 "failed to load DMA'able memory for jumbobuf\n"); 2148 goto fail; 2149 } 2150 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2151 2152 /* 2153 * Now divide it up into 9K pieces and save the addresses 2154 * in an array. 
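* Slot i then starts at msk_jumbo_buf + i * MSK_JLEN, so msk_jalloc()
* can hand out msk_jslots[entry->slot] directly and msk_jfree() can
* recover a slot index from a bare buffer pointer with one division.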
2155 */ 2156 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2157 for (i = 0; i < MSK_JSLOTS; i++) { 2158 sc_if->msk_cdata.msk_jslots[i] = ptr; 2159 ptr += MSK_JLEN; 2160 entry = malloc(sizeof(struct msk_jpool_entry), 2161 M_DEVBUF, M_WAITOK); 2162 if (entry == NULL) { 2163 device_printf(sc_if->msk_if_dev, 2164 "no memory for jumbo buffers!\n"); 2165 error = ENOMEM; 2166 goto fail; 2167 } 2168 entry->slot = i; 2169 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2170 jpool_entries); 2171 } 2172 #endif 2173 return 0; 2174 } 2175 2176 static void 2177 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2178 { 2179 struct msk_txdesc *txd; 2180 struct msk_rxdesc *rxd; 2181 #ifdef MSK_JUMBO 2182 struct msk_rxdesc *jrxd; 2183 struct msk_jpool_entry *entry; 2184 #endif 2185 int i; 2186 2187 #ifdef MSK_JUMBO 2188 MSK_JLIST_LOCK(sc_if); 2189 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2190 device_printf(sc_if->msk_if_dev, 2191 "asked to free buffer that is in use!\n"); 2192 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2193 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2194 jpool_entries); 2195 } 2196 2197 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2198 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2199 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2200 free(entry, M_DEVBUF); 2201 } 2202 MSK_JLIST_UNLOCK(sc_if); 2203 2204 /* Destroy jumbo buffer block. */ 2205 if (sc_if->msk_cdata.msk_jumbo_map) 2206 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2207 sc_if->msk_cdata.msk_jumbo_map); 2208 2209 if (sc_if->msk_rdata.msk_jumbo_buf) { 2210 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2211 sc_if->msk_rdata.msk_jumbo_buf, 2212 sc_if->msk_cdata.msk_jumbo_map); 2213 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2214 sc_if->msk_cdata.msk_jumbo_map = NULL; 2215 } 2216 2217 /* Jumbo Rx ring. */ 2218 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2219 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2220 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2221 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2222 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2223 sc_if->msk_rdata.msk_jumbo_rx_ring) 2224 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2225 sc_if->msk_rdata.msk_jumbo_rx_ring, 2226 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2227 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2228 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2229 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2230 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2231 } 2232 2233 /* Jumbo Rx buffers. */ 2234 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2235 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2236 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2237 if (jrxd->rx_dmamap) { 2238 bus_dmamap_destroy( 2239 sc_if->msk_cdata.msk_jumbo_rx_tag, 2240 jrxd->rx_dmamap); 2241 jrxd->rx_dmamap = NULL; 2242 } 2243 } 2244 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2245 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2246 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2247 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2248 } 2249 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2250 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2251 } 2252 #endif 2253 2254 /* Tx ring. */ 2255 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag, 2256 sc_if->msk_rdata.msk_tx_ring, 2257 sc_if->msk_cdata.msk_tx_ring_map); 2258 2259 /* Rx ring. 
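Like the Tx ring above; teardown mirrors msk_txrx_dma_alloc(): ring memory
goes first, then the per-buffer maps and their tags, and the parent tag last,
since it must outlive every tag derived from it.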
*/ 2260 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
2261 sc_if->msk_rdata.msk_rx_ring, 2262 sc_if->msk_cdata.msk_rx_ring_map); 2263
2264 /* Tx buffers. */ 2265 if (sc_if->msk_cdata.msk_tx_tag) {
2266 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2267 txd = &sc_if->msk_cdata.msk_txdesc[i];
2268 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2269 txd->tx_dmamap); 2270 }
2271 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2272 sc_if->msk_cdata.msk_tx_tag = NULL; 2273 } 2274
2275 /* Rx buffers. */ 2276 if (sc_if->msk_cdata.msk_rx_tag) {
2277 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2278 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2279 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2280 rxd->rx_dmamap); 2281 }
2282 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2283 sc_if->msk_cdata.msk_rx_sparemap);
2284 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2285 sc_if->msk_cdata.msk_rx_tag = NULL; 2286 } 2287
2288 if (sc_if->msk_cdata.msk_parent_tag) {
2289 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2290 sc_if->msk_cdata.msk_parent_tag = NULL; 2291 } 2292 } 2293
2294 #ifdef MSK_JUMBO 2295 /* 2296 * Allocate a jumbo buffer. 2297 */
2298 static void * 2299 msk_jalloc(struct msk_if_softc *sc_if) 2300 {
2301 struct msk_jpool_entry *entry; 2302 2303 MSK_JLIST_LOCK(sc_if); 2304
2305 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2306
2307 if (entry == NULL) { 2308 MSK_JLIST_UNLOCK(sc_if); 2309 return (NULL); 2310 } 2311
2312 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2313 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2314
2315 MSK_JLIST_UNLOCK(sc_if); 2316
2317 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2318 } 2319
2320 /* 2321 * Release a jumbo buffer. 2322 */ 2323 static void
2324 msk_jfree(void *buf, void *args) 2325 { 2326 struct msk_if_softc *sc_if;
2327 struct msk_jpool_entry *entry; 2328 int i; 2329
2330 /* Extract the softc struct pointer. */ 2331 sc_if = (struct msk_if_softc *)args;
2332 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2333
2334 MSK_JLIST_LOCK(sc_if); 2335 /* Calculate the slot this buffer belongs to. */
2336 i = ((vm_offset_t)buf 2337 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2338 KASSERT(i >= 0 && i < MSK_JSLOTS,
2339 ("%s: asked to free buffer that we don't manage!", __func__)); 2340
2341 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2342 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2343 entry->slot = i;
2344 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2345 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2346 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) 2347 wakeup(sc_if); 2348
2349 MSK_JLIST_UNLOCK(sc_if); 2350 } 2351 #endif 2352
2353 /* 2354 * This is a copy of ath_defrag() from ath(4). 2355 *
2356 * Defragment an mbuf chain, returning at most maxfrags separate
2357 * mbufs+clusters. If this is not possible NULL is returned and
2358 * the original mbuf chain is left in its present (potentially
2359 * modified) state. We use two techniques: collapsing consecutive
2360 * mbufs and replacing consecutive mbufs by a cluster. 2361 */
2362 static struct mbuf * 2363 msk_defrag(struct mbuf *m0, int how, int maxfrags) 2364 {
2365 struct mbuf *m, *n, *n2, **prev; 2366 u_int curfrags; 2367
2368 /* 2369 * Calculate the current number of frags. 2370 */ 2371 curfrags = 0;
2372 for (m = m0; m != NULL; m = m->m_next) 2373 curfrags++;
2374 /* 2375 * First, try to collapse mbufs.
Note that we always collapse 2376 * towards the front so we don't need to deal with moving the
2377 * pkthdr. This may be suboptimal if the first mbuf has much
2378 * less data than the following. 2379 */ 2380 m = m0; 2381 again:
2382 for (;;) { 2383 n = m->m_next; 2384 if (n == NULL) 2385 break;
2386 if (n->m_len < M_TRAILINGSPACE(m)) {
2387 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 2388 n->m_len);
2389 m->m_len += n->m_len; 2390 m->m_next = n->m_next; 2391 m_free(n);
2392 if (--curfrags <= maxfrags) 2393 return (m0); 2394 } else 2395 m = n; 2396 }
2397 KASSERT(maxfrags > 1, 2398 ("maxfrags %u, but normal collapse failed", maxfrags));
2399 /* 2400 * Collapse consecutive mbufs to a cluster. 2401 */
2402 prev = &m0->m_next; /* NB: not the first mbuf */
2403 while ((n = *prev) != NULL) { 2404 if ((n2 = n->m_next) != NULL &&
2405 n->m_len + n2->m_len < MCLBYTES) { 2406 m = m_getcl(how, MT_DATA, 0);
2407 if (m == NULL) 2408 goto bad;
2409 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2410 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 2411 n2->m_len);
2412 m->m_len = n->m_len + n2->m_len; 2413 m->m_next = n2->m_next; 2414 *prev = m;
2415 m_free(n); 2416 m_free(n2);
2417 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 2418 return m0;
2419 /* 2420 * Still not there, try the normal collapse
2421 * again before we allocate another cluster. 2422 */ 2423 goto again; 2424 }
2425 prev = &n->m_next; 2426 }
2427 /* 2428 * No place where we can collapse to a cluster; punt.
2429 * This can occur if, for example, you request 2 frags
2430 * but the packet requires that both be clusters (we
2431 * never reallocate the first mbuf to avoid moving the
2432 * packet header). 2433 */ 2434 bad: 2435 return (NULL); 2436 } 2437
2438 static int 2439 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2440 {
2441 struct msk_txdesc *txd, *txd_last; 2442 struct msk_tx_desc *tx_le;
2443 struct mbuf *m; 2444 bus_dmamap_t map; 2445 struct msk_dmamap_arg ctx;
2446 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2447 uint32_t control, prod, si;
2448 uint16_t offset, tcp_offset; 2449 int error, i; 2450
2451 tcp_offset = offset = 0; 2452 m = *m_head;
2453 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2454 /* 2455 * Since the mbuf carries no protocol-specific structure
2456 * information, we have to inspect the protocol headers here to
2457 * set up TSO and checksum offload. I don't know why Marvell
2458 * made such a decision in the chip design, because other GigE
2459 * hardware normally takes care of all these chores itself.
2460 * However, the TSO performance of the Yukon II is good
2461 * enough that it is worth implementing. 2462 */
2463 struct ether_header *eh; 2464 struct ip *ip; 2465
2466 /* TODO check for M_WRITABLE(m) */ 2467
2468 offset = sizeof(struct ether_header); 2469 m = m_pullup(m, offset);
2470 if (m == NULL) { 2471 *m_head = NULL; 2472 return (ENOBUFS); 2473 }
2474 eh = mtod(m, struct ether_header *);
2475 /* Check if hardware VLAN insertion is off.
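If VLAN tagging is done in software the tag is already in the frame, so the
IP header starts sizeof(struct ether_vlan_header) (18) bytes in rather than
sizeof(struct ether_header) (14), and every offset computed below shifts by
the 4-byte encapsulation accordingly.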
*/ 2476 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2477 offset = sizeof(struct ether_vlan_header); 2478 m = m_pullup(m, offset);
2479 if (m == NULL) { 2480 *m_head = NULL; 2481 return (ENOBUFS); 2482 } 2483 }
2484 m = m_pullup(m, offset + sizeof(struct ip));
2485 if (m == NULL) { 2486 *m_head = NULL; 2487 return (ENOBUFS); 2488 }
2489 ip = (struct ip *)(mtod(m, char *) + offset); 2490 offset += (ip->ip_hl << 2);
2491 tcp_offset = offset;
2492 /* 2493 * It seems that the Yukon II has a Tx checksum offload bug for
2494 * small TCP packets of less than 60 bytes
2495 * (e.g. TCP window probe packets and pure ACK packets).
2496 * The common workaround of zero-padding the frame to the
2497 * minimum Ethernet frame size didn't work at all.
2498 * Instead of disabling checksum offload completely, we
2499 * resort to a software checksum routine when we encounter
2500 * short TCP frames.
2501 * Short UDP packets appear to be handled correctly by the
2502 * Yukon II. 2503 */
2504 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2505 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2506 uint16_t csum; 2507
2508 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2509 (ip->ip_hl << 2), offset);
2510 *(uint16_t *)(m->m_data + offset + 2511 m->m_pkthdr.csum_data) = csum;
2512 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2513 } 2514 *m_head = m; 2515 } 2516
2517 prod = sc_if->msk_cdata.msk_tx_prod;
2518 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2519 txd_last = txd;
2520 map = txd->tx_dmamap; 2521 bzero(&ctx, sizeof(ctx));
2522 ctx.nseg = MSK_MAXTXSEGS; 2523 ctx.segs = txsegs;
2524 error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag, map,
2525 *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
2526 if (error == 0 && ctx.nseg == 0) {
2527 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2528 error = EFBIG; 2529 }
2530 if (error == EFBIG) { 2531 m = msk_defrag(*m_head, MB_DONTWAIT, MSK_MAXTXSEGS);
2532 if (m == NULL) { 2533 m_freem(*m_head); 2534 *m_head = NULL;
2535 return (ENOBUFS); 2536 } 2537 *m_head = m; 2538
2539 bzero(&ctx, sizeof(ctx)); 2540 ctx.nseg = MSK_MAXTXSEGS; 2541 ctx.segs = txsegs;
2542 error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag,
2543 map, *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
2544 if (error == 0 && ctx.nseg == 0) {
2545 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2546 error = EFBIG; 2547 }
2548 if (error != 0) { 2549 m_freem(*m_head); 2550 *m_head = NULL;
2551 return (error); 2552 } 2553 } else if (error != 0) { 2554 return (error); 2555 } 2556
2557 /* Check number of available descriptors. */
2558 if (sc_if->msk_cdata.msk_tx_cnt + ctx.nseg >=
2559 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2560 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2561 return (ENOBUFS); 2562 } 2563
2564 control = 0; 2565 tx_le = NULL; 2566
2567 #ifdef notyet 2568 /* Check if we have a VLAN tag to insert. */
2569 if ((m->m_flags & M_VLANTAG) != 0) {
2570 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2571 tx_le->msk_addr = htole32(0);
2572 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2573 htons(m->m_pkthdr.ether_vtag)); 2574 sc_if->msk_cdata.msk_tx_cnt++;
2575 MSK_INC(prod, MSK_TX_RING_CNT); 2576 control |= INS_VLAN; 2577 } 2578 #endif
2579 /* Check if we have to handle checksum offload.
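The checksum LE encodes two frame offsets: the low 16 bits of msk_addr hold
where the computed sum is to be stored (tcp_offset + csum_data) and the high
16 bits where summing starts (tcp_offset). As a worked example (illustrative
only), a plain TCP/IPv4 frame without a VLAN tag gives tcp_offset = 14 + 20 =
34 and csum_data = offsetof(struct tcphdr, th_sum) = 16, so the MAC sums from
byte 34 onward and writes the result at byte 50.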
*/ 2580 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2581 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2582 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2583 & 0xffff) | ((uint32_t)tcp_offset << 16));
2584 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2585 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2586 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2587 control |= UDPTCP;
2588 sc_if->msk_cdata.msk_tx_cnt++; 2589 MSK_INC(prod, MSK_TX_RING_CNT); 2590 } 2591
2592 si = prod; 2593 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2594 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2595 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2596 OP_PACKET);
2597 sc_if->msk_cdata.msk_tx_cnt++; 2598 MSK_INC(prod, MSK_TX_RING_CNT); 2599
2600 for (i = 1; i < ctx.nseg; i++) {
2601 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2602 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2603 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2604 OP_BUFFER | HW_OWNER); 2605 sc_if->msk_cdata.msk_tx_cnt++;
2606 MSK_INC(prod, MSK_TX_RING_CNT); 2607 }
2608 /* Update producer index. */ 2609 sc_if->msk_cdata.msk_tx_prod = prod; 2610
2611 /* Set EOP on the last descriptor. */
2612 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2613 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2614 tx_le->msk_control |= htole32(EOP); 2615
2616 /* Hand ownership of the first descriptor to the hardware. */
2617 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2618 tx_le->msk_control |= htole32(HW_OWNER); 2619
2620 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2621 map = txd_last->tx_dmamap;
2622 txd_last->tx_dmamap = txd->tx_dmamap; 2623 txd->tx_dmamap = map;
2624 txd->tx_m = m; 2625
2626 /* Sync descriptors. */
2627 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2628 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2629 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE); 2630
2631 return (0); 2632 } 2633
2634 static void 2635 msk_start(struct ifnet *ifp) 2636 {
2637 struct msk_if_softc *sc_if; 2638 struct mbuf *m_head; 2639 int enq; 2640
2641 sc_if = ifp->if_softc; 2642 2643 ASSERT_SERIALIZED(ifp->if_serializer); 2644
2645 if (!sc_if->msk_link) { 2646 ifq_purge(&ifp->if_snd); 2647 return; 2648 } 2649
2650 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2651 return; 2652
2653 for (enq = 0; !ifq_is_empty(&ifp->if_snd) &&
2654 sc_if->msk_cdata.msk_tx_cnt <
2655 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2656 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2657 if (m_head == NULL) 2658 break; 2659
2660 /* 2661 * Pack the data into the transmit ring. If we
2662 * don't have room, set the OACTIVE flag and wait
2663 * for the NIC to drain the ring. 2664 */
2665 if (msk_encap(sc_if, &m_head) != 0) { 2666 if (m_head == NULL) 2667 break;
2668 m_freem(m_head); 2669 ifp->if_flags |= IFF_OACTIVE; 2670 break; 2671 } 2672
2673 enq++;
2674 /* 2675 * If there's a BPF listener, bounce a copy of this frame
2676 * to him. 2677 */ 2678 BPF_MTAP(ifp, m_head); 2679 } 2680
2681 if (enq > 0) { 2682 /* Transmit */ 2683 CSR_WRITE_2(sc_if->msk_softc,
2684 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2685 sc_if->msk_cdata.msk_tx_prod); 2686
2687 /* Set a timeout in case the chip goes out to lunch.
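if_timer counts down once per second; if it hits zero before msk_txeof() has
reclaimed the posted descriptors, msk_watchdog() runs and first re-reads the
consumer index, since it is possible that only the Tx completion interrupt
was lost.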
*/ 2688 ifp->if_timer = MSK_TX_TIMEOUT; 2689 } 2690 } 2691 2692 static void 2693 msk_watchdog(struct ifnet *ifp) 2694 { 2695 struct msk_if_softc *sc_if = ifp->if_softc; 2696 uint32_t ridx; 2697 int idx; 2698 2699 ASSERT_SERIALIZED(ifp->if_serializer); 2700 2701 if (sc_if->msk_link == 0) { 2702 if (bootverbose) 2703 if_printf(sc_if->msk_ifp, "watchdog timeout " 2704 "(missed link)\n"); 2705 ifp->if_oerrors++; 2706 msk_init(sc_if); 2707 return; 2708 } 2709 2710 /* 2711 * Reclaim first as there is a possibility of losing Tx completion 2712 * interrupts. 2713 */ 2714 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2715 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2716 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2717 msk_txeof(sc_if, idx); 2718 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2719 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2720 "-- recovering\n"); 2721 if (!ifq_is_empty(&ifp->if_snd)) 2722 if_devstart(ifp); 2723 return; 2724 } 2725 } 2726 2727 if_printf(ifp, "watchdog timeout\n"); 2728 ifp->if_oerrors++; 2729 msk_init(sc_if); 2730 if (!ifq_is_empty(&ifp->if_snd)) 2731 if_devstart(ifp); 2732 } 2733 2734 static int 2735 mskc_shutdown(device_t dev) 2736 { 2737 struct msk_softc *sc = device_get_softc(dev); 2738 int i; 2739 2740 lwkt_serialize_enter(&sc->msk_serializer); 2741 2742 for (i = 0; i < sc->msk_num_port; i++) { 2743 if (sc->msk_if[i] != NULL) 2744 msk_stop(sc->msk_if[i]); 2745 } 2746 2747 /* Disable all interrupts. */ 2748 CSR_WRITE_4(sc, B0_IMSK, 0); 2749 CSR_READ_4(sc, B0_IMSK); 2750 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2751 CSR_READ_4(sc, B0_HWE_IMSK); 2752 2753 /* Put hardware reset. */ 2754 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2755 2756 lwkt_serialize_exit(&sc->msk_serializer); 2757 return (0); 2758 } 2759 2760 static int 2761 mskc_suspend(device_t dev) 2762 { 2763 struct msk_softc *sc = device_get_softc(dev); 2764 int i; 2765 2766 lwkt_serialize_enter(&sc->msk_serializer); 2767 2768 for (i = 0; i < sc->msk_num_port; i++) { 2769 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2770 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0)) 2771 msk_stop(sc->msk_if[i]); 2772 } 2773 2774 /* Disable all interrupts. */ 2775 CSR_WRITE_4(sc, B0_IMSK, 0); 2776 CSR_READ_4(sc, B0_IMSK); 2777 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2778 CSR_READ_4(sc, B0_HWE_IMSK); 2779 2780 mskc_phy_power(sc, MSK_PHY_POWERDOWN); 2781 2782 /* Put hardware reset. 
*/ 2783 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2784 sc->msk_suspended = 1; 2785 2786 lwkt_serialize_exit(&sc->msk_serializer); 2787 2788 return (0); 2789 } 2790 2791 static int 2792 mskc_resume(device_t dev) 2793 { 2794 struct msk_softc *sc = device_get_softc(dev); 2795 int i; 2796 2797 lwkt_serialize_enter(&sc->msk_serializer); 2798 2799 mskc_reset(sc); 2800 for (i = 0; i < sc->msk_num_port; i++) { 2801 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2802 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2803 msk_init(sc->msk_if[i]); 2804 } 2805 sc->msk_suspended = 0; 2806 2807 lwkt_serialize_exit(&sc->msk_serializer); 2808 2809 return (0); 2810 } 2811 2812 static void 2813 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len, 2814 struct mbuf_chain *chain) 2815 { 2816 struct mbuf *m; 2817 struct ifnet *ifp; 2818 struct msk_rxdesc *rxd; 2819 int cons, rxlen; 2820 2821 ifp = sc_if->msk_ifp; 2822 2823 cons = sc_if->msk_cdata.msk_rx_cons; 2824 do { 2825 rxlen = status >> 16; 2826 if ((status & GMR_FS_VLAN) != 0 && 2827 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2828 rxlen -= EVL_ENCAPLEN; 2829 if (len > sc_if->msk_framesize || 2830 ((status & GMR_FS_ANY_ERR) != 0) || 2831 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2832 /* Don't count flow-control packet as errors. */ 2833 if ((status & GMR_FS_GOOD_FC) == 0) 2834 ifp->if_ierrors++; 2835 msk_discard_rxbuf(sc_if, cons); 2836 break; 2837 } 2838 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2839 m = rxd->rx_m; 2840 if (msk_newbuf(sc_if, cons) != 0) { 2841 ifp->if_iqdrops++; 2842 /* Reuse old buffer. */ 2843 msk_discard_rxbuf(sc_if, cons); 2844 break; 2845 } 2846 m->m_pkthdr.rcvif = ifp; 2847 m->m_pkthdr.len = m->m_len = len; 2848 ifp->if_ipackets++; 2849 #ifdef notyet 2850 /* Check for VLAN tagged packets. */ 2851 if ((status & GMR_FS_VLAN) != 0 && 2852 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2853 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2854 m->m_flags |= M_VLANTAG; 2855 } 2856 #endif 2857 2858 #ifdef ETHER_INPUT_CHAIN 2859 #ifdef ETHER_INPUT2 2860 ether_input_chain2(ifp, m, chain); 2861 #else 2862 ether_input_chain(ifp, m, chain); 2863 #endif 2864 #else 2865 ifp->if_input(ifp, m); 2866 #endif 2867 } while (0); 2868 2869 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2870 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2871 } 2872 2873 #ifdef MSK_JUMBO 2874 static void 2875 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2876 { 2877 struct mbuf *m; 2878 struct ifnet *ifp; 2879 struct msk_rxdesc *jrxd; 2880 int cons, rxlen; 2881 2882 ifp = sc_if->msk_ifp; 2883 2884 MSK_IF_LOCK_ASSERT(sc_if); 2885 2886 cons = sc_if->msk_cdata.msk_rx_cons; 2887 do { 2888 rxlen = status >> 16; 2889 if ((status & GMR_FS_VLAN) != 0 && 2890 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2891 rxlen -= ETHER_VLAN_ENCAP_LEN; 2892 if (len > sc_if->msk_framesize || 2893 ((status & GMR_FS_ANY_ERR) != 0) || 2894 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2895 /* Don't count flow-control packet as errors. */ 2896 if ((status & GMR_FS_GOOD_FC) == 0) 2897 ifp->if_ierrors++; 2898 msk_discard_jumbo_rxbuf(sc_if, cons); 2899 break; 2900 } 2901 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2902 m = jrxd->rx_m; 2903 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2904 ifp->if_iqdrops++; 2905 /* Reuse old buffer. 
*/ 2906 msk_discard_jumbo_rxbuf(sc_if, cons); 2907 break; 2908 } 2909 m->m_pkthdr.rcvif = ifp; 2910 m->m_pkthdr.len = m->m_len = len; 2911 ifp->if_ipackets++; 2912 /* Check for VLAN tagged packets. */ 2913 if ((status & GMR_FS_VLAN) != 0 && 2914 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2915 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2916 m->m_flags |= M_VLANTAG; 2917 } 2918 MSK_IF_UNLOCK(sc_if); 2919 (*ifp->if_input)(ifp, m); 2920 MSK_IF_LOCK(sc_if); 2921 } while (0); 2922 2923 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2924 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2925 } 2926 #endif 2927 2928 static void 2929 msk_txeof(struct msk_if_softc *sc_if, int idx) 2930 { 2931 struct msk_txdesc *txd; 2932 struct msk_tx_desc *cur_tx; 2933 struct ifnet *ifp; 2934 uint32_t control; 2935 int cons, prog; 2936 2937 ifp = sc_if->msk_ifp; 2938 2939 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2940 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_POSTREAD); 2941 2942 /* 2943 * Go through our tx ring and free mbufs for those 2944 * frames that have been sent. 2945 */ 2946 cons = sc_if->msk_cdata.msk_tx_cons; 2947 prog = 0; 2948 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2949 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2950 break; 2951 prog++; 2952 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2953 control = le32toh(cur_tx->msk_control); 2954 sc_if->msk_cdata.msk_tx_cnt--; 2955 ifp->if_flags &= ~IFF_OACTIVE; 2956 if ((control & EOP) == 0) 2957 continue; 2958 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2959 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 2960 BUS_DMASYNC_POSTWRITE); 2961 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2962 2963 ifp->if_opackets++; 2964 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2965 __func__)); 2966 m_freem(txd->tx_m); 2967 txd->tx_m = NULL; 2968 } 2969 2970 if (prog > 0) { 2971 sc_if->msk_cdata.msk_tx_cons = cons; 2972 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2973 ifp->if_timer = 0; 2974 /* No need to sync LEs as we didn't update LEs. */ 2975 } 2976 } 2977 2978 static void 2979 msk_tick(void *xsc_if) 2980 { 2981 struct msk_if_softc *sc_if = xsc_if; 2982 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2983 struct mii_data *mii; 2984 2985 lwkt_serialize_enter(ifp->if_serializer); 2986 2987 mii = device_get_softc(sc_if->msk_miibus); 2988 2989 mii_tick(mii); 2990 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 2991 2992 lwkt_serialize_exit(ifp->if_serializer); 2993 } 2994 2995 static void 2996 msk_intr_phy(struct msk_if_softc *sc_if) 2997 { 2998 uint16_t status; 2999 3000 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3001 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3002 /* Handle FIFO Underrun/Overflow? */ 3003 if (status & PHY_M_IS_FIFO_ERROR) { 3004 device_printf(sc_if->msk_if_dev, 3005 "PHY FIFO underrun/overflow.\n"); 3006 } 3007 } 3008 3009 static void 3010 msk_intr_gmac(struct msk_if_softc *sc_if) 3011 { 3012 struct msk_softc *sc; 3013 uint8_t status; 3014 3015 sc = sc_if->msk_softc; 3016 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3017 3018 /* GMAC Rx FIFO overrun. */ 3019 if ((status & GM_IS_RX_FF_OR) != 0) { 3020 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3021 GMF_CLI_RX_FO); 3022 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 3023 } 3024 /* GMAC Tx FIFO underrun. 
*/ 3025 if ((status & GM_IS_TX_FF_UR) != 0) {
3026 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3027 GMF_CLI_TX_FU);
3028 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3029 /* 3030 * XXX
3031 * In case of a Tx underrun, we may need to flush/reset the
3032 * Tx MAC, but that would also require resynchronization
3033 * with the status LEs. Reinitializing the status LEs would
3034 * affect the other port in a dual-MAC configuration, so it
3035 * should be avoided as much as we can.
3036 * Due to the lack of documentation it's all vague guesswork,
3037 * but it needs more investigation. 3038 */ 3039 } 3040 } 3041
3042 static void 3043 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3044 {
3045 struct msk_softc *sc; 3046 3047 sc = sc_if->msk_softc;
3048 if ((status & Y2_IS_PAR_RD1) != 0) { 3049 device_printf(sc_if->msk_if_dev,
3050 "RAM buffer read parity error\n"); 3051 /* Clear IRQ. */
3052 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3053 RI_CLR_RD_PERR); 3054 }
3055 if ((status & Y2_IS_PAR_WR1) != 0) { 3056 device_printf(sc_if->msk_if_dev,
3057 "RAM buffer write parity error\n"); 3058 /* Clear IRQ. */
3059 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3060 RI_CLR_WR_PERR); 3061 }
3062 if ((status & Y2_IS_PAR_MAC1) != 0) {
3063 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3064 /* Clear IRQ. */
3065 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3066 GMF_CLI_TX_PE); 3067 }
3068 if ((status & Y2_IS_PAR_RX1) != 0) {
3069 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3070 /* Clear IRQ. */
3071 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3072 }
3073 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3074 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3075 /* Clear IRQ. */
3076 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3077 } 3078 } 3079
3080 static void 3081 mskc_intr_hwerr(struct msk_softc *sc) 3082 {
3083 uint32_t status; 3084 uint32_t tlphead[4]; 3085
3086 status = CSR_READ_4(sc, B0_HWE_ISRC);
3087 /* Time Stamp timer overflow. */ 3088 if ((status & Y2_IS_TIST_OV) != 0)
3089 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3090 if ((status & Y2_IS_PCI_NEXP) != 0) {
3091 /* 3092 * A PCI Express error occurred which is not described in the
3093 * PEX spec.
3094 * This error is also mapped to either the Master Abort
3095 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3096 * can only be cleared there. 3097 */
3098 device_printf(sc->msk_dev, 3099 "PCI Express protocol violation error\n"); 3100 } 3101
3102 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3103 uint16_t v16; 3104
3105 if ((status & Y2_IS_MST_ERR) != 0) 3106 device_printf(sc->msk_dev,
3107 "unexpected IRQ Master error\n"); 3108 else 3109 device_printf(sc->msk_dev,
3110 "unexpected IRQ Status error\n");
3111 /* Reset all bits in the PCI status register. */
3112 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3113 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3114 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3115 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3116 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3117 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3118 } 3119
3120 /* Check for PCI Express Uncorrectable Error. */
3121 if ((status & Y2_IS_PCI_EXP) != 0) { 3122 uint32_t v32; 3123
3124 /* 3125 * On a PCI Express bus, bridges are called root complexes (RC).
3126 * PCI Express errors are recognized by the root complex as
3127 * well, which asks the system to handle the problem. After the
3128 * error occurs, it may be that no further access to the
3129 * adapter can be performed. 3130 */ 3131
3132 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3133 if ((v32 & PEX_UNSUP_REQ) != 0) {
3134 /* Ignore unsupported request error. */ 3135 if (bootverbose) {
3136 device_printf(sc->msk_dev, 3137 "Uncorrectable PCI Express error\n"); 3138 } 3139 }
3140 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3141 int i; 3142
3143 /* Get the TLP header from the Log Registers. */
3144 for (i = 0; i < 4; i++) 3145 tlphead[i] = CSR_PCI_READ_4(sc,
3146 PEX_HEADER_LOG + i * 4);
3147 /* Check for vendor defined broadcast message. */
3148 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3149 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3150 CSR_WRITE_4(sc, B0_HWE_IMSK, 3151 sc->msk_intrhwemask);
3152 CSR_READ_4(sc, B0_HWE_IMSK); 3153 } 3154 }
3155 /* Clear the interrupt. */
3156 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3157 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3158 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3159 } 3160
3161 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3162 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3163 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3164 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3165 } 3166
3167 static __inline void 3168 msk_rxput(struct msk_if_softc *sc_if) 3169 {
3170 struct msk_softc *sc; 3171 3172 sc = sc_if->msk_softc;
3173 #ifdef MSK_JUMBO 3174 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3175 bus_dmamap_sync( 3176 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3177 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3178 BUS_DMASYNC_PREWRITE);
3179 } else 3180 #endif 3181 { 3182 bus_dmamap_sync(
3183 sc_if->msk_cdata.msk_rx_ring_tag, 3184 sc_if->msk_cdata.msk_rx_ring_map,
3185 BUS_DMASYNC_PREWRITE); 3186 }
3187 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3188 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3189 } 3190
3191 static int 3192 mskc_handle_events(struct msk_softc *sc) 3193 {
3194 struct msk_if_softc *sc_if; 3195 int rxput[2]; 3196 struct msk_stat_desc *sd;
3197 uint32_t control, status; 3198 int cons, idx, len, port, rxprog;
3199 #ifdef ETHER_INPUT_CHAIN 3200 struct mbuf_chain chain0[MAXCPU]; 3201 #endif
3202 struct mbuf_chain *chain; 3203
3204 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3205 if (idx == sc->msk_stat_cons) 3206 return (0); 3207
3208 #ifdef ETHER_INPUT_CHAIN 3209 chain = chain0; 3210 ether_input_chain_init(chain);
3211 #else 3212 chain = NULL; 3213 #endif 3214
3215 /* Sync status LEs. */ 3216 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3217 BUS_DMASYNC_POSTREAD); 3218 /* XXX Sync Rx LEs here. */ 3219
3220 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3221
3222 rxprog = 0; 3223 for (cons = sc->msk_stat_cons; cons != idx;) {
3224 sd = &sc->msk_stat_ring[cons]; 3225 control = le32toh(sd->msk_control);
3226 if ((control & HW_OWNER) == 0) 3227 break;
3228 /* 3229 * Marvell's FreeBSD driver updates the status LE after clearing
3230 * HW_OWNER. However, we don't have a way to sync a single LE
3231 * with the bus_dma(9) API; bus_dma(9) only provides a way to
3232 * sync an entire DMA map. So don't sync the LE until we have a
3233 * better way to sync LEs.
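*
* The ownership handshake itself is what the code below implements:
* the chip sets HW_OWNER in msk_control when it fills in a status LE,
* the driver processes an LE only while the bit is set and clears it
* afterwards, so a stale entry is never consumed twice.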
3234 */ 3235 control &= ~HW_OWNER; 3236 sd->msk_control = htole32(control); 3237 status = le32toh(sd->msk_status); 3238 len = control & STLE_LEN_MASK; 3239 port = (control >> 16) & 0x01; 3240 sc_if = sc->msk_if[port]; 3241 if (sc_if == NULL) { 3242 device_printf(sc->msk_dev, "invalid port opcode " 3243 "0x%08x\n", control & STLE_OP_MASK); 3244 continue; 3245 } 3246 3247 switch (control & STLE_OP_MASK) { 3248 case OP_RXVLAN: 3249 sc_if->msk_vtag = ntohs(len); 3250 break; 3251 case OP_RXCHKSVLAN: 3252 sc_if->msk_vtag = ntohs(len); 3253 break; 3254 case OP_RXSTAT: 3255 #ifdef MSK_JUMBO 3256 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3257 msk_jumbo_rxeof(sc_if, status, len); 3258 else 3259 #endif 3260 msk_rxeof(sc_if, status, len, chain); 3261 rxprog++; 3262 /* 3263 * Because there is no way to sync single Rx LE 3264 * put the DMA sync operation off until the end of 3265 * event processing. 3266 */ 3267 rxput[port]++; 3268 /* Update prefetch unit if we've passed water mark. */ 3269 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3270 msk_rxput(sc_if); 3271 rxput[port] = 0; 3272 } 3273 break; 3274 case OP_TXINDEXLE: 3275 if (sc->msk_if[MSK_PORT_A] != NULL) { 3276 msk_txeof(sc->msk_if[MSK_PORT_A], 3277 status & STLE_TXA1_MSKL); 3278 } 3279 if (sc->msk_if[MSK_PORT_B] != NULL) { 3280 msk_txeof(sc->msk_if[MSK_PORT_B], 3281 ((status & STLE_TXA2_MSKL) >> 3282 STLE_TXA2_SHIFTL) | 3283 ((len & STLE_TXA2_MSKH) << 3284 STLE_TXA2_SHIFTH)); 3285 } 3286 break; 3287 default: 3288 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3289 control & STLE_OP_MASK); 3290 break; 3291 } 3292 MSK_INC(cons, MSK_STAT_RING_CNT); 3293 if (rxprog > sc->msk_process_limit) 3294 break; 3295 } 3296 3297 #ifdef ETHER_INPUT_CHAIN 3298 if (rxprog > 0) 3299 ether_input_dispatch(chain); 3300 #endif 3301 3302 sc->msk_stat_cons = cons; 3303 /* XXX We should sync status LEs here. See above notes. */ 3304 3305 if (rxput[MSK_PORT_A] > 0) 3306 msk_rxput(sc->msk_if[MSK_PORT_A]); 3307 if (rxput[MSK_PORT_B] > 0) 3308 msk_rxput(sc->msk_if[MSK_PORT_B]); 3309 3310 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3311 } 3312 3313 /* Legacy interrupt handler for shared interrupt. */ 3314 static void 3315 mskc_intr(void *xsc) 3316 { 3317 struct msk_softc *sc; 3318 struct msk_if_softc *sc_if0, *sc_if1; 3319 struct ifnet *ifp0, *ifp1; 3320 uint32_t status; 3321 3322 sc = xsc; 3323 ASSERT_SERIALIZED(&sc->msk_serializer); 3324 3325 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. 
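They stay masked until 2 is written to B0_Y2_SP_ICR, which every exit path of
this handler does, so the body can run without masking each source
individually.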
*/ 3326 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3327 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3328 (status & sc->msk_intrmask) == 0) {
3329 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3330 return; 3331 } 3332
3333 sc_if0 = sc->msk_if[MSK_PORT_A]; 3334 sc_if1 = sc->msk_if[MSK_PORT_B];
3335 ifp0 = ifp1 = NULL; 3336 if (sc_if0 != NULL) 3337 ifp0 = sc_if0->msk_ifp;
3338 if (sc_if1 != NULL) 3339 ifp1 = sc_if1->msk_ifp; 3340
3341 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3342 msk_intr_phy(sc_if0);
3343 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3344 msk_intr_phy(sc_if1);
3345 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3346 msk_intr_gmac(sc_if0);
3347 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3348 msk_intr_gmac(sc_if1);
3349 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3350 device_printf(sc->msk_dev, "Rx descriptor error\n");
3351 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3352 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3353 CSR_READ_4(sc, B0_IMSK); 3354 }
3355 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3356 device_printf(sc->msk_dev, "Tx descriptor error\n");
3357 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3358 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3359 CSR_READ_4(sc, B0_IMSK); 3360 }
3361 if ((status & Y2_IS_HW_ERR) != 0) 3362 mskc_intr_hwerr(sc); 3363
3364 while (mskc_handle_events(sc) != 0) 3365 ;
3366 if ((status & Y2_IS_STAT_BMU) != 0)
3367 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3368
3369 /* Reenable interrupts. */ 3370 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3371
3372 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
3373 !ifq_is_empty(&ifp0->if_snd)) 3374 if_devstart(ifp0);
3375 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
3376 !ifq_is_empty(&ifp1->if_snd)) 3377 if_devstart(ifp1); 3378 } 3379
3380 static void 3381 msk_init(void *xsc) 3382 {
3383 struct msk_if_softc *sc_if = xsc; 3384 struct msk_softc *sc = sc_if->msk_softc;
3385 struct ifnet *ifp = sc_if->msk_ifp; 3386 struct mii_data *mii;
3387 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3388 uint16_t gmac; 3389 int error, i; 3390
3391 ASSERT_SERIALIZED(ifp->if_serializer); 3392
3393 mii = device_get_softc(sc_if->msk_miibus); 3394
3395 error = 0; 3396 /* Cancel pending I/O and free all Rx/Tx buffers. */
3397 msk_stop(sc_if); 3398
3399 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
3400 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3401 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3402 /* 3403 * On the Yukon EC Ultra, TSO and checksum offload are not
3404 * supported for jumbo frames. 3405 */
3406 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
3407 ifp->if_capenable &= ~IFCAP_TXCSUM; 3408 } 3409
3410 /* 3411 * Initialize the GMAC first.
3412 * Without this initialization the Rx MAC did not work as
3413 * expected: it garbled the status LEs, which resulted in
3414 * out-of-order or duplicated frame delivery and in turn very
3415 * poor Rx performance. (I had to write packet analysis code
3416 * that could be embedded in the driver to diagnose this issue.)
3417 * I spent almost two months fixing this issue. If I had had a
3418 * datasheet for the Yukon II I wouldn't have encountered it. :-( 3419 */
3420 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3421 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 3422
3423 /* Dummy read of the Interrupt Source Register. */
3424 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3425
3426 /* Set MIB Clear Counter Mode.
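The MIB counters are cleared by reading them while GM_PAR_MIB_CLR is set,
which is why the loop below walks every counter once before the mode bit is
cleared again.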
*/ 3427 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3428 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3429 /* Read all MIB Counters with Clear Mode set. */
3430 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3431 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3432 /* Clear MIB Clear Counter Mode. */ 3433 gmac &= ~GM_PAR_MIB_CLR;
3434 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3435
3436 /* Disable FCS. */
3437 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3438
3439 /* Setup Transmit Control Register. */
3440 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3441
3442 /* Setup Transmit Flow Control Register. */
3443 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3444
3445 /* Setup Transmit Parameter Register. */
3446 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3447 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3448 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3449
3450 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3451 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3452
3453 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3454 gmac |= GM_SMOD_JUMBO_ENA;
3455 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3456
3457 /* Set station address. */
3458 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3459 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3460 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3461 eaddr[i]);
3462 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3463 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3464 eaddr[i]); 3465
3466 /* Disable interrupts for counter overflows. */
3467 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3468 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3469 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3470
3471 /* Configure Rx MAC FIFO. */
3472 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3473 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3474 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3475 GMF_OPER_ON | GMF_RX_F_FL_ON); 3476
3477 /* Set promiscuous mode. */ 3478 msk_setpromisc(sc_if); 3479
3480 /* Set multicast filter. */ 3481 msk_setmulti(sc_if); 3482
3483 /* Flush Rx MAC FIFO on any flow control or error. */
3484 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3485 GMR_FS_ANY_ERR); 3486
3487 /* Set Rx FIFO flush threshold to 64 bytes. */
3488 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3489 RX_GMF_FL_THR_DEF); 3490
3491 /* Configure Tx MAC FIFO. */
3492 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3493 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3494 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3495
3496 /* Configure hardware VLAN tag insertion/stripping. */
3497 msk_setvlan(sc_if, ifp); 3498
3499 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3500 /* Set the Rx pause thresholds. */
3501 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3502 MSK_ECU_LLPP);
3503 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3504 MSK_ECU_ULPP);
3505 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3506 /* 3507 * Set Tx GMAC FIFO Almost Empty Threshold. 3508 */
3509 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3510 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3511 /* Disable Store & Forward mode for Tx.
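A jumbo frame may not fit into the Tx FIFO of the EC Ultra, so the FIFO
apparently cannot buffer a complete frame before transmission starts;
TX_STFW_DIS below switches it to cut-through operation, at the price of
possible underruns (reported in msk_intr_gmac()).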
*/ 3512 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3513 TX_JUMBO_ENA | TX_STFW_DIS); 3514 } else { 3515 /* Enable Store & Forward mode for Tx. */ 3516 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3517 TX_JUMBO_DIS | TX_STFW_ENA); 3518 } 3519 } 3520 3521 /* 3522 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3523 * arbiter as we don't use Sync Tx queue. 3524 */ 3525 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3526 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3527 /* Enable the RAM Interface Arbiter. */ 3528 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3529 3530 /* Setup RAM buffer. */ 3531 msk_set_rambuffer(sc_if); 3532 3533 /* Disable Tx sync Queue. */ 3534 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3535 3536 /* Setup Tx Queue Bus Memory Interface. */ 3537 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3538 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3539 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3540 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3541 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3542 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3543 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3544 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); 3545 } 3546 3547 /* Setup Rx Queue Bus Memory Interface. */ 3548 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3549 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3550 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3551 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3552 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3553 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3554 /* MAC Rx RAM Read is controlled by hardware. */ 3555 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3556 } 3557 3558 msk_set_prefetch(sc, sc_if->msk_txq, 3559 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3560 msk_init_tx_ring(sc_if); 3561 3562 /* Disable Rx checksum offload and RSS hash. */ 3563 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3564 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3565 #ifdef MSK_JUMBO 3566 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3567 msk_set_prefetch(sc, sc_if->msk_rxq, 3568 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3569 MSK_JUMBO_RX_RING_CNT - 1); 3570 error = msk_init_jumbo_rx_ring(sc_if); 3571 } else 3572 #endif 3573 { 3574 msk_set_prefetch(sc, sc_if->msk_rxq, 3575 sc_if->msk_rdata.msk_rx_ring_paddr, 3576 MSK_RX_RING_CNT - 1); 3577 error = msk_init_rx_ring(sc_if); 3578 } 3579 if (error != 0) { 3580 device_printf(sc_if->msk_if_dev, 3581 "initialization failed: no memory for Rx buffers\n"); 3582 msk_stop(sc_if); 3583 return; 3584 } 3585 3586 /* Configure interrupt handling. 
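Interrupt sources are partitioned per port: Y2_IS_PORT_A/B select the normal
sources and Y2_HWE_L1_MASK/Y2_HWE_L2_MASK the hardware error sources, so one
port of a dual-MAC chip can be brought up or down without disturbing the
other; msk_stop() clears the same bits.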
*/ 3587 if (sc_if->msk_port == MSK_PORT_A) { 3588 sc->msk_intrmask |= Y2_IS_PORT_A; 3589 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3590 } else { 3591 sc->msk_intrmask |= Y2_IS_PORT_B; 3592 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3593 } 3594 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3595 CSR_READ_4(sc, B0_HWE_IMSK); 3596 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3597 CSR_READ_4(sc, B0_IMSK); 3598 3599 sc_if->msk_link = 0; 3600 mii_mediachg(mii); 3601 3602 mskc_set_imtimer(sc); 3603 3604 ifp->if_flags |= IFF_RUNNING; 3605 ifp->if_flags &= ~IFF_OACTIVE; 3606 3607 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3608 } 3609 3610 static void 3611 msk_set_rambuffer(struct msk_if_softc *sc_if) 3612 { 3613 struct msk_softc *sc; 3614 int ltpp, utpp; 3615 3616 sc = sc_if->msk_softc; 3617 3618 /* Setup Rx Queue. */ 3619 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3620 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3621 sc->msk_rxqstart[sc_if->msk_port] / 8); 3622 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3623 sc->msk_rxqend[sc_if->msk_port] / 8); 3624 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3625 sc->msk_rxqstart[sc_if->msk_port] / 8); 3626 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3627 sc->msk_rxqstart[sc_if->msk_port] / 8); 3628 3629 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3630 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3631 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3632 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3633 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3634 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3635 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3636 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3637 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3638 3639 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3640 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3641 3642 /* Setup Tx Queue. */ 3643 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3644 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3645 sc->msk_txqstart[sc_if->msk_port] / 8); 3646 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3647 sc->msk_txqend[sc_if->msk_port] / 8); 3648 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3649 sc->msk_txqstart[sc_if->msk_port] / 8); 3650 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3651 sc->msk_txqstart[sc_if->msk_port] / 8); 3652 /* Enable Store & Forward for Tx side. */ 3653 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3654 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3655 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3656 } 3657 3658 static void 3659 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3660 uint32_t count) 3661 { 3662 3663 /* Reset the prefetch unit. */ 3664 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3665 PREF_UNIT_RST_SET); 3666 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3667 PREF_UNIT_RST_CLR); 3668 /* Set LE base address. */ 3669 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3670 MSK_ADDR_LO(addr)); 3671 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3672 MSK_ADDR_HI(addr)); 3673 /* Set the list last index. */ 3674 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3675 count); 3676 /* Turn on prefetch unit. */ 3677 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3678 PREF_UNIT_OP_ON); 3679 /* Dummy read to ensure write. 
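The read back forces the posted writes above out to the chip before the
caller continues; the value itself is discarded.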

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
#endif
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);
	ifp->if_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			/* Refresh the status for the next iteration. */
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work on Yukon-2 unless the BMU is
	 * at the end of a packet, and since we cannot be sure that no more
	 * data is incoming, the BMU must only be reset while no DMA
	 * transfer is in progress.  Because the Rx path may still be
	 * active, the Rx RAM buffer is stopped first, so that incoming
	 * data can no longer trigger a DMA.  Once the RAM buffer is
	 * stopped, the BMU is polled until any DMA in progress has
	 * finished, and only then is it reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
#ifdef MSK_JUMBO
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
#endif
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc_if->msk_link = 0;
}
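
/*
 * Editor's note: sysctl_int_range() below is the usual clamped-integer
 * handler pattern: let sysctl_handle_int() process the request against
 * a local copy and commit the new value only when it lies inside
 * [low, high].  Wrappers such as mskc_sysctl_proc_limit() bake the
 * bounds in.  A hypothetical registration of such a node (the names
 * here are illustrative; the real registration lives in the attach
 * path) might read:
 *
 *	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
 *	    SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
 *	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->msk_process_limit, 0, mskc_sysctl_proc_limit, "I",
 *	    "max number of Rx events to process per interrupt");
 */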

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}

static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error = 0, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}
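
/*
 * Editor's note: msk_dmamem_create() below follows the standard
 * bus_dma(9) pattern for loading wired kernel memory: the map load is
 * effectively synchronous, so the callback only needs to copy the
 * resulting segment array out through the msk_dmamap_arg cookie.  A
 * minimal sketch of such a callback (the real msk_dmamap_cb() is
 * defined earlier in this file and may differ in detail):
 *
 *	static void
 *	example_dmamap_cb(void *xctx, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct msk_dmamap_arg *ctx = xctx;
 *
 *		if (error != 0)
 *			return;
 *		KASSERT(nseg <= ctx->nseg, ("too many DMA segments"));
 *		bcopy(segs, ctx->segs, nseg * sizeof(*segs));
 *		ctx->nseg = nseg;
 *	}
 */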
3832 */ 3833 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3834 sc_if->msk_link = 0; 3835 } 3836 3837 static int 3838 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3839 { 3840 int error, value; 3841 3842 if (!arg1) 3843 return (EINVAL); 3844 value = *(int *)arg1; 3845 error = sysctl_handle_int(oidp, &value, 0, req); 3846 if (error || !req->newptr) 3847 return (error); 3848 if (value < low || value > high) 3849 return (EINVAL); 3850 *(int *)arg1 = value; 3851 3852 return (0); 3853 } 3854 3855 static int 3856 mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS) 3857 { 3858 return sysctl_int_range(oidp, arg1, arg2, req, 3859 MSK_PROC_MIN, MSK_PROC_MAX); 3860 } 3861 3862 static int 3863 mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3864 { 3865 struct msk_softc *sc = arg1; 3866 struct lwkt_serialize *serializer = &sc->msk_serializer; 3867 int error = 0, v; 3868 3869 lwkt_serialize_enter(serializer); 3870 3871 v = sc->msk_intr_rate; 3872 error = sysctl_handle_int(oidp, &v, 0, req); 3873 if (error || req->newptr == NULL) 3874 goto back; 3875 if (v < 0) { 3876 error = EINVAL; 3877 goto back; 3878 } 3879 3880 if (sc->msk_intr_rate != v) { 3881 int flag = 0, i; 3882 3883 sc->msk_intr_rate = v; 3884 for (i = 0; i < 2; ++i) { 3885 if (sc->msk_if[i] != NULL) { 3886 flag |= sc->msk_if[i]-> 3887 arpcom.ac_if.if_flags & IFF_RUNNING; 3888 } 3889 } 3890 if (flag) 3891 mskc_set_imtimer(sc); 3892 } 3893 back: 3894 lwkt_serialize_exit(serializer); 3895 return error; 3896 } 3897 3898 static int 3899 msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag, 3900 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap) 3901 { 3902 struct msk_if_softc *sc_if = device_get_softc(dev); 3903 struct msk_dmamap_arg ctx; 3904 bus_dma_segment_t seg; 3905 int error; 3906 3907 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag, 3908 MSK_RING_ALIGN, 0, 3909 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3910 NULL, NULL, 3911 size, 1, BUS_SPACE_MAXSIZE_32BIT, 3912 0, dtag); 3913 if (error) { 3914 device_printf(dev, "can't create DMA tag\n"); 3915 return error; 3916 } 3917 3918 error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO, 3919 dmap); 3920 if (error) { 3921 device_printf(dev, "can't allocate DMA mem\n"); 3922 bus_dma_tag_destroy(*dtag); 3923 *dtag = NULL; 3924 return error; 3925 } 3926 3927 bzero(&ctx, sizeof(ctx)); 3928 ctx.nseg = 1; 3929 ctx.segs = &seg; 3930 error = bus_dmamap_load(*dtag, *dmap, *addr, size, 3931 msk_dmamap_cb, &ctx, BUS_DMA_WAITOK); 3932 if (error) { 3933 device_printf(dev, "can't load DMA mem\n"); 3934 bus_dmamem_free(*dtag, *addr, *dmap); 3935 bus_dma_tag_destroy(*dtag); 3936 *dtag = NULL; 3937 return error; 3938 } 3939 *paddr = seg.ds_addr; 3940 return 0; 3941 } 3942 3943 static void 3944 msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 3945 { 3946 if (dtag != NULL) { 3947 bus_dmamap_unload(dtag, dmap); 3948 bus_dmamem_free(dtag, addr, dmap); 3949 bus_dma_tag_destroy(dtag); 3950 } 3951 } 3952 3953 static void 3954 mskc_set_imtimer(struct msk_softc *sc) 3955 { 3956 if (sc->msk_intr_rate > 0) { 3957 /* 3958 * XXX myk(4) seems to use 125MHz for EC/FE/XL 3959 * and 78.125MHz for rest of chip types 3960 */ 3961 CSR_WRITE_4(sc, B2_IRQM_INI, 3962 MSK_USECS(sc, 1000000 / sc->msk_intr_rate)); 3963 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask); 3964 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START); 3965 } else { 3966 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP); 3967 } 3968 } 3969