/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima"
};

static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *, struct ifaltq_subque *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_rxfilter(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_set_tx_stfwd(struct msk_if_softc *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
		    void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);

static int	mskc_msi_enable = 0;
static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
TUNABLE_INT("hw.mskc.msi.enable", &mskc_msi_enable);
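
/*
 * The tunables above can be seeded from loader.conf(5) before the module
 * is loaded; a minimal example with illustrative (not recommended) values:
 *
 *	hw.mskc.msi.enable="1"		# prefer MSI over legacy INTx
 *	hw.mskc.intr_rate="8000"	# interrupt moderation, interrupts/s
 *	hw.mskc.process_limit="100"	# Rx events handled per interrupt
 *
 * intr_rate and process_limit also remain adjustable at runtime through
 * the per-device sysctl nodes registered in mskc_attach().
 */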

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);

	sc_if->msk_link = 0;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_link = 1;
			break;
		}
	}

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * status change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_OFF;
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
		    ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
			gmac = GMC_PAUSE_ON;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
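
/*
 * Worked example for the link-up path above: a 1000baseT full-duplex
 * link with both pause directions negotiated (IFM_FLAG0 and IFM_FLAG1
 * set) results in
 *
 *	gmac = GM_GPCR_AU_ALL_DIS | GM_GPCR_SPEED_1000 |
 *	    GM_GPCR_DUP_FULL | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
 *
 * i.e. neither GM_GPCR_FC_RX_DIS nor GM_GPCR_FC_TX_DIS is set, and
 * GMAC_CTRL is subsequently programmed with GMC_PAUSE_ON.
 */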

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
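
/*
 * Hash arithmetic for the filter above: the GMAC keeps a 64-bit
 * multicast hash keyed by the low 6 bits of the big-endian CRC32 of
 * each station address.  For a CRC whose low 6 bits are, for example,
 * 0x2b (0b101011):
 *
 *	crc & 0x3f = 0x2b  ->  word 0x2b >> 5 = 1, bit 0x2b & 0x1f = 11
 *
 * so mchash[1] |= 1 << 11, which lands in the GM_MC_ADDR_H3/H4 register
 * pair written after the loop.
 */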

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod, 1) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
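
/*
 * Note on the put index above: the prefetch unit consumes list elements
 * up to and including the index in PREF_UNIT_PUT_IDX_REG, so parking the
 * index at MSK_RX_RING_CNT - 1 hands the entire freshly initialized ring
 * to the hardware in one step.  During normal operation the same register
 * is advanced incrementally (see msk_rxput()).
 */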

#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nseg;

	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap,
	    m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}

	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
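
/*
 * The map juggling in msk_newbuf() above is deliberate: the fresh mbuf
 * is loaded into the preallocated spare map first, and the descriptor's
 * map and the spare are swapped only after the load succeeds.  If
 * bus_dmamap_load_mbuf_segment() fails, the ring slot keeps its
 * previously loaded buffer, so the receiver is never left with an empty
 * descriptor.
 */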

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, sc_if, 0, EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFMTU:
#ifdef MSK_JUMBO
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
#else
		error = EOPNOTSUPP;
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					msk_rxfilter(sc_if);
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_rxfilter(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
#ifdef notyet
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
#endif

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * On Yukon EC Ultra, TSO & checksum offload are not
			 * supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}
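
/*
 * Since mskc_probe() matches purely on PCI vendor/device ID, support for
 * an additional Yukon II board is usually just one more msk_products[]
 * entry, e.g. (hypothetical device ID and name):
 *
 *	{ VENDORID_MARVELL, 0x4382,
 *	    "Marvell Yukon 88E80xx Gigabit Ethernet" },
 */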

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	if (sc->msk_ramsize == 0)
		return (0);
	sc->msk_pflags |= MSK_FLAG_RAMBUF;

	/*
	 * Give the receiver 2/3 of the memory and round down to a
	 * multiple of 1024.  The Tx/Rx RAM buffer sizes of the Yukon II
	 * should be multiples of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
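
/*
 * Worked example for the split above: a single port chip reporting 48KB
 * of SRAM (B2_E_0 = 12) gives
 *
 *	msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768
 *	msk_txqsize = 48 * 1024 - 32768                  = 16384
 *
 * so port 0 ends up with Rx queue 0x00000000-0x00007fff and Tx queue
 * 0x00008000-0x0000bfff.
 */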

static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else {
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		}
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
			val |= PCI_CLS_OPT;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
		}
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE) {
		/* Configure timeout values. */
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_SET);
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_CLR);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
			    MSK_RI_TO_53);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On a dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_pciecap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pcie_get_max_readrq(sc->msk_dev) ==
		    PCIEM_DEVCTL_MAX_READRQ_512) {
			pcie_set_max_readrq(sc->msk_dev,
			    PCIEM_DEVCTL_MAX_READRQ_2048);
		}
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
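
/*
 * The Tx status timer programmed at the end of mskc_reset() counts in
 * core clock ticks; assuming the conventional MSK_USECS() definition
 * (microseconds scaled by sc->msk_clock), the 1000us initial value
 * corresponds to 125,000 ticks on a 125MHz Yukon EC.
 */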

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}
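
/*
 * model_name[] is indexed as msk_hw_id - CHIP_ID_YUKON_XL above, which
 * relies on the CHIP_ID_YUKON_* constants in if_mskreg.h being
 * consecutive; with the conventional numbering, a Yukon EC resolves to
 * index 3 and is reported as "Yukon EC".
 */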

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware.  It seems there is a workaround to make it work
	 * sometimes.  However, the workaround also has to check OP code
	 * sequences to verify whether the OP code is correct.  Sometimes it
	 * should compute the IP/TCP/UDP checksum in the driver in order to
	 * verify the correctness of the checksum computed by hardware.  If
	 * you have to compute the checksum with software to verify the
	 * hardware's checksum, why have hardware compute the checksum?  I
	 * think there is no reason to spend time making Rx checksum offload
	 * work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into
	 * ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
#if 0
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif

	return 0;
fail:
	msk_detach(dev);
	sc->msk_if[port] = NULL;
	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	int error, *port, cpuid;
	u_int irq_flags;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
	    &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
		    &sc->msk_res_rid, RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_type = pci_alloc_1intr(dev, mskc_msi_enable,
	    &sc->msk_irq_rid, &irq_flags);

	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->msk_irq_rid, irq_flags);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
	    "I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, mskc_sysctl_intr_rate,
	    "I", "max number of interrupts per second");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
	    0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
	    0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
	    0, "# of trailing copies on TX path");

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
	} else if (pci_is_pcix(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
	} else {
		sc->msk_bustype = MSK_PCI_BUS;
	}

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		/* DESCV2 */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has a status LE writeback bug, so msk(4)
			 * does not rely on the status word of received
			 * frames in msk_rxeof(), which in turn disables
			 * all hardware assistance bits reported by the
			 * status word as well as validity of the received
			 * frame.  Just pass received frames to the upper
			 * stack with minimal test and let the upper stack
			 * handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NORXCHK;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	cpuid = rman_get_cpuid(sc->msk_irq);
	if (sc->msk_if[0] != NULL)
		ifq_set_cpuid(&sc->msk_if[0]->msk_ifp->if_snd, cpuid);
	if (sc->msk_if[1] != NULL)
		ifq_set_cpuid(&sc->msk_if[1]->msk_ifp->if_snd, cpuid);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
	    mskc_intr, sc, &sc->msk_intrhand,
	    &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	return 0;
fail:
	mskc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
1747 */ 1748 static int 1749 msk_detach(device_t dev) 1750 { 1751 struct msk_if_softc *sc_if = device_get_softc(dev); 1752 1753 if (device_is_attached(dev)) { 1754 struct msk_softc *sc = sc_if->msk_softc; 1755 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1756 1757 lwkt_serialize_enter(ifp->if_serializer); 1758 1759 if (sc->msk_intrhand != NULL) { 1760 if (sc->msk_if[MSK_PORT_A] != NULL) 1761 msk_stop(sc->msk_if[MSK_PORT_A]); 1762 if (sc->msk_if[MSK_PORT_B] != NULL) 1763 msk_stop(sc->msk_if[MSK_PORT_B]); 1764 1765 bus_teardown_intr(sc->msk_dev, sc->msk_irq, 1766 sc->msk_intrhand); 1767 sc->msk_intrhand = NULL; 1768 } 1769 1770 lwkt_serialize_exit(ifp->if_serializer); 1771 1772 ether_ifdetach(ifp); 1773 } 1774 1775 if (sc_if->msk_miibus != NULL) 1776 device_delete_child(dev, sc_if->msk_miibus); 1777 1778 msk_txrx_dma_free(sc_if); 1779 return (0); 1780 } 1781 1782 static int 1783 mskc_detach(device_t dev) 1784 { 1785 struct msk_softc *sc = device_get_softc(dev); 1786 int *port, i; 1787 1788 #ifdef INVARIANTS 1789 if (device_is_attached(dev)) { 1790 KASSERT(sc->msk_intrhand == NULL, 1791 ("intr is not torn down yet")); 1792 } 1793 #endif 1794 1795 for (i = 0; i < sc->msk_num_port; ++i) { 1796 if (sc->msk_devs[i] != NULL) { 1797 port = device_get_ivars(sc->msk_devs[i]); 1798 if (port != NULL) { 1799 kfree(port, M_DEVBUF); 1800 device_set_ivars(sc->msk_devs[i], NULL); 1801 } 1802 device_delete_child(dev, sc->msk_devs[i]); 1803 } 1804 } 1805 1806 /* Disable all interrupts. */ 1807 CSR_WRITE_4(sc, B0_IMSK, 0); 1808 CSR_READ_4(sc, B0_IMSK); 1809 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1810 CSR_READ_4(sc, B0_HWE_IMSK); 1811 1812 /* LED Off. */ 1813 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1814 1815 /* Put hardware reset. */ 1816 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1817 1818 mskc_status_dma_free(sc); 1819 1820 if (sc->msk_irq != NULL) { 1821 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid, 1822 sc->msk_irq); 1823 } 1824 if (sc->msk_irq_type == PCI_INTR_TYPE_MSI) 1825 pci_release_msi(dev); 1826 1827 if (sc->msk_res != NULL) { 1828 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid, 1829 sc->msk_res); 1830 } 1831 1832 return (0); 1833 } 1834 1835 /* Create status DMA region. */ 1836 static int 1837 mskc_status_dma_alloc(struct msk_softc *sc) 1838 { 1839 bus_dmamem_t dmem; 1840 int error; 1841 1842 error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0, 1843 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1844 MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1845 if (error) { 1846 device_printf(sc->msk_dev, 1847 "failed to create status coherent DMA memory\n"); 1848 return error; 1849 } 1850 sc->msk_stat_tag = dmem.dmem_tag; 1851 sc->msk_stat_map = dmem.dmem_map; 1852 sc->msk_stat_ring = dmem.dmem_addr; 1853 sc->msk_stat_ring_paddr = dmem.dmem_busaddr; 1854 1855 return (0); 1856 } 1857 1858 static void 1859 mskc_status_dma_free(struct msk_softc *sc) 1860 { 1861 /* Destroy status block. */ 1862 if (sc->msk_stat_tag) { 1863 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1864 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1865 sc->msk_stat_map); 1866 bus_dma_tag_destroy(sc->msk_stat_tag); 1867 sc->msk_stat_tag = NULL; 1868 } 1869 } 1870 1871 static int 1872 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1873 { 1874 int error, i, j; 1875 #ifdef MSK_JUMBO 1876 struct msk_rxdesc *jrxd; 1877 struct msk_jpool_entry *entry; 1878 uint8_t *ptr; 1879 #endif 1880 bus_size_t rxalign; 1881 1882 /* Create parent DMA tag. 
	 */
	/*
	 * XXX
	 * It seems that the Yukon II supports full 64 bit DMA operations,
	 * but it needs two descriptors (list elements) for a 64 bit DMA
	 * transfer. Since we don't know in advance which DMA address
	 * mapping (32 bit or 64 bit) will be used for each mbuf, we limit
	 * its DMA space to the 32 bit address range. Otherwise, we would
	 * have to check which DMA address is used and chain another
	 * descriptor for the 64 bit DMA operation, which also makes the
	 * descriptor ring size variable. Limiting DMA addresses to the
	 * 32 bit address space greatly simplifies descriptor handling
	 * and possibly increases performance a bit due to more efficient
	 * handling of descriptors.
	 * Apart from harassing the checksum offloading mechanisms, it
	 * seems like a really bad idea to use a separate descriptor for
	 * a 64 bit DMA operation just to save a little descriptor memory.
	 * Anyway, I've never seen such an exotic scheme on ethernet
	 * interface hardware.
	 */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc_if->msk_cdata.msk_parent_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create parent DMA tag\n");
		return error;
	}

	/* Create DMA resources for the Tx ring. */
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
	    &sc_if->msk_cdata.msk_tx_ring_tag,
	    (void *)&sc_if->msk_rdata.msk_tx_ring,
	    &sc_if->msk_rdata.msk_tx_ring_paddr,
	    &sc_if->msk_cdata.msk_tx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create TX ring DMA resources\n");
		return error;
	}

	/* Create DMA resources for the Rx ring. */
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
	    &sc_if->msk_cdata.msk_rx_ring_tag,
	    (void *)&sc_if->msk_rdata.msk_rx_ring,
	    &sc_if->msk_rdata.msk_rx_ring_paddr,
	    &sc_if->msk_cdata.msk_rx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create RX ring DMA resources\n");
		return error;
	}

	/* Create a tag for Tx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JUMBO_FRAMELEN,		/* maxsize */
	    MSK_MAXTXSEGS,		/* nsegments */
	    MSK_MAXSGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
	    BUS_DMA_ONEBPAGE,		/* flags */
	    &sc_if->msk_cdata.msk_tx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx DMA tag\n");
		return error;
	}

	/* Create DMA maps for Tx buffers.
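	 *
	 * One map per ring slot. Note the unwind idiom on failure in the
	 * loop below: the maps created so far are torn down in reverse
	 * before the tag itself is destroyed, roughly
	 *
	 *	for (j = 0; j < i; ++j)
	 *		bus_dmamap_destroy(tag, txdesc[j].tx_dmamap);
	 *	bus_dma_tag_destroy(tag);
	 *
	 * so a partially constructed ring never leaks maps.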
	 */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];

		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create %dth Tx dmamap\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc_if->msk_cdata.msk_txdesc[j];
				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
			sc_if->msk_cdata.msk_tx_tag = NULL;

			return error;
		}
	}

	/*
	 * Work around a hardware hang which seems to happen when an Rx
	 * buffer is not aligned on a multiple of the FIFO word size
	 * (8 bytes).
	 */
	if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
		rxalign = MSK_RX_BUF_ALIGN;
	else
		rxalign = 1;

	/* Create a tag for Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    rxalign, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
	    BUS_DMA_WAITOK,		/* flags */
	    &sc_if->msk_cdata.msk_rx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx DMA tag\n");
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
	    &sc_if->msk_cdata.msk_rx_sparemap);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare Rx dmamap\n");
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];

		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
		    BUS_DMA_WAITOK, &rxd->rx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create %dth Rx dmamap\n", i);

			for (j = 0; j < i; ++j) {
				rxd = &sc_if->msk_cdata.msk_rxdesc[j];
				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
			    sc_if->msk_cdata.msk_rx_sparemap);
			bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
			sc_if->msk_cdata.msk_rx_tag = NULL;

			return error;
		}
	}

#ifdef MSK_JUMBO
	SLIST_INIT(&sc_if->msk_jfree_listhead);
	SLIST_INIT(&sc_if->msk_jinuse_listhead);

	/* Create a tag for the jumbo Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    MSK_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
	    1,				/* nsegments */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx ring DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for the jumbo Rx ring.
*/ 2065 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2066 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2067 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2068 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2069 if (error != 0) { 2070 device_printf(sc_if->msk_if_dev, 2071 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2072 goto fail; 2073 } 2074 2075 ctx.msk_busaddr = 0; 2076 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2077 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2078 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2079 msk_dmamap_cb, &ctx, 0); 2080 if (error != 0) { 2081 device_printf(sc_if->msk_if_dev, 2082 "failed to load DMA'able memory for jumbo Rx ring\n"); 2083 goto fail; 2084 } 2085 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2086 2087 /* Create tag for jumbo buffer blocks. */ 2088 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2089 PAGE_SIZE, 0, /* alignment, boundary */ 2090 BUS_SPACE_MAXADDR, /* lowaddr */ 2091 BUS_SPACE_MAXADDR, /* highaddr */ 2092 NULL, NULL, /* filter, filterarg */ 2093 MSK_JMEM, /* maxsize */ 2094 1, /* nsegments */ 2095 MSK_JMEM, /* maxsegsize */ 2096 0, /* flags */ 2097 NULL, NULL, /* lockfunc, lockarg */ 2098 &sc_if->msk_cdata.msk_jumbo_tag); 2099 if (error != 0) { 2100 device_printf(sc_if->msk_if_dev, 2101 "failed to create jumbo Rx buffer block DMA tag\n"); 2102 goto fail; 2103 } 2104 2105 /* Create tag for jumbo Rx buffers. */ 2106 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2107 PAGE_SIZE, 0, /* alignment, boundary */ 2108 BUS_SPACE_MAXADDR, /* lowaddr */ 2109 BUS_SPACE_MAXADDR, /* highaddr */ 2110 NULL, NULL, /* filter, filterarg */ 2111 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2112 MSK_MAXRXSEGS, /* nsegments */ 2113 MSK_JLEN, /* maxsegsize */ 2114 0, /* flags */ 2115 NULL, NULL, /* lockfunc, lockarg */ 2116 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2117 if (error != 0) { 2118 device_printf(sc_if->msk_if_dev, 2119 "failed to create jumbo Rx DMA tag\n"); 2120 goto fail; 2121 } 2122 2123 /* Create DMA maps for jumbo Rx buffers. */ 2124 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2125 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2126 device_printf(sc_if->msk_if_dev, 2127 "failed to create spare jumbo Rx dmamap\n"); 2128 goto fail; 2129 } 2130 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2131 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2132 jrxd->rx_m = NULL; 2133 jrxd->rx_dmamap = NULL; 2134 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2135 &jrxd->rx_dmamap); 2136 if (error != 0) { 2137 device_printf(sc_if->msk_if_dev, 2138 "failed to create jumbo Rx dmamap\n"); 2139 goto fail; 2140 } 2141 } 2142 2143 /* Allocate DMA'able memory and load the DMA map for jumbo buf. 
*/ 2144 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2145 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2146 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2147 &sc_if->msk_cdata.msk_jumbo_map); 2148 if (error != 0) { 2149 device_printf(sc_if->msk_if_dev, 2150 "failed to allocate DMA'able memory for jumbo buf\n"); 2151 goto fail; 2152 } 2153 2154 ctx.msk_busaddr = 0; 2155 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2156 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2157 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2158 if (error != 0) { 2159 device_printf(sc_if->msk_if_dev, 2160 "failed to load DMA'able memory for jumbobuf\n"); 2161 goto fail; 2162 } 2163 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2164 2165 /* 2166 * Now divide it up into 9K pieces and save the addresses 2167 * in an array. 2168 */ 2169 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2170 for (i = 0; i < MSK_JSLOTS; i++) { 2171 sc_if->msk_cdata.msk_jslots[i] = ptr; 2172 ptr += MSK_JLEN; 2173 entry = malloc(sizeof(struct msk_jpool_entry), 2174 M_DEVBUF, M_WAITOK); 2175 if (entry == NULL) { 2176 device_printf(sc_if->msk_if_dev, 2177 "no memory for jumbo buffers!\n"); 2178 error = ENOMEM; 2179 goto fail; 2180 } 2181 entry->slot = i; 2182 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2183 jpool_entries); 2184 } 2185 #endif 2186 return 0; 2187 } 2188 2189 static void 2190 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2191 { 2192 struct msk_txdesc *txd; 2193 struct msk_rxdesc *rxd; 2194 #ifdef MSK_JUMBO 2195 struct msk_rxdesc *jrxd; 2196 struct msk_jpool_entry *entry; 2197 #endif 2198 int i; 2199 2200 #ifdef MSK_JUMBO 2201 MSK_JLIST_LOCK(sc_if); 2202 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2203 device_printf(sc_if->msk_if_dev, 2204 "asked to free buffer that is in use!\n"); 2205 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2206 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2207 jpool_entries); 2208 } 2209 2210 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2211 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2212 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2213 free(entry, M_DEVBUF); 2214 } 2215 MSK_JLIST_UNLOCK(sc_if); 2216 2217 /* Destroy jumbo buffer block. */ 2218 if (sc_if->msk_cdata.msk_jumbo_map) 2219 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2220 sc_if->msk_cdata.msk_jumbo_map); 2221 2222 if (sc_if->msk_rdata.msk_jumbo_buf) { 2223 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2224 sc_if->msk_rdata.msk_jumbo_buf, 2225 sc_if->msk_cdata.msk_jumbo_map); 2226 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2227 sc_if->msk_cdata.msk_jumbo_map = NULL; 2228 } 2229 2230 /* Jumbo Rx ring. */ 2231 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2232 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2233 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2234 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2235 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2236 sc_if->msk_rdata.msk_jumbo_rx_ring) 2237 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2238 sc_if->msk_rdata.msk_jumbo_rx_ring, 2239 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2240 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2241 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2242 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2243 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2244 } 2245 2246 /* Jumbo Rx buffers. 
*/ 2247 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2248 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2249 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2250 if (jrxd->rx_dmamap) { 2251 bus_dmamap_destroy( 2252 sc_if->msk_cdata.msk_jumbo_rx_tag, 2253 jrxd->rx_dmamap); 2254 jrxd->rx_dmamap = NULL; 2255 } 2256 } 2257 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2258 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2259 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2260 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2261 } 2262 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2263 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2264 } 2265 #endif 2266 2267 /* Tx ring. */ 2268 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag, 2269 sc_if->msk_rdata.msk_tx_ring, 2270 sc_if->msk_cdata.msk_tx_ring_map); 2271 2272 /* Rx ring. */ 2273 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag, 2274 sc_if->msk_rdata.msk_rx_ring, 2275 sc_if->msk_cdata.msk_rx_ring_map); 2276 2277 /* Tx buffers. */ 2278 if (sc_if->msk_cdata.msk_tx_tag) { 2279 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2280 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2281 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2282 txd->tx_dmamap); 2283 } 2284 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2285 sc_if->msk_cdata.msk_tx_tag = NULL; 2286 } 2287 2288 /* Rx buffers. */ 2289 if (sc_if->msk_cdata.msk_rx_tag) { 2290 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2291 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2292 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2293 rxd->rx_dmamap); 2294 } 2295 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2296 sc_if->msk_cdata.msk_rx_sparemap); 2297 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2298 sc_if->msk_cdata.msk_rx_tag = NULL; 2299 } 2300 2301 if (sc_if->msk_cdata.msk_parent_tag) { 2302 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2303 sc_if->msk_cdata.msk_parent_tag = NULL; 2304 } 2305 } 2306 2307 #ifdef MSK_JUMBO 2308 /* 2309 * Allocate a jumbo buffer. 2310 */ 2311 static void * 2312 msk_jalloc(struct msk_if_softc *sc_if) 2313 { 2314 struct msk_jpool_entry *entry; 2315 2316 MSK_JLIST_LOCK(sc_if); 2317 2318 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2319 2320 if (entry == NULL) { 2321 MSK_JLIST_UNLOCK(sc_if); 2322 return (NULL); 2323 } 2324 2325 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2326 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2327 2328 MSK_JLIST_UNLOCK(sc_if); 2329 2330 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2331 } 2332 2333 /* 2334 * Release a jumbo buffer. 2335 */ 2336 static void 2337 msk_jfree(void *buf, void *args) 2338 { 2339 struct msk_if_softc *sc_if; 2340 struct msk_jpool_entry *entry; 2341 int i; 2342 2343 /* Extract the softc struct pointer. */ 2344 sc_if = (struct msk_if_softc *)args; 2345 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2346 2347 MSK_JLIST_LOCK(sc_if); 2348 /* Calculate the slot this buffer belongs to. 
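	 *
	 * The jumbo pool is one contiguous allocation carved into
	 * MSK_JLEN-sized pieces, so the slot index falls out of plain
	 * pointer arithmetic. As a worked example: a buffer that sits
	 * MSK_JLEN * 3 bytes past msk_jumbo_buf yields slot 3 from the
	 * division below, matching the msk_jslots[] entry assigned in
	 * msk_txrx_dma_alloc().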
	 */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
	KASSERT(i >= 0 && i < MSK_JSLOTS,
	    ("%s: asked to free buffer that we don't manage!", __func__));

	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
		wakeup(sc_if);

	MSK_JLIST_UNLOCK(sc_if);
}
#endif

static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, prod, si;
	uint16_t offset, tcp_offset;
	int error, i, nsegs, maxsegs, defrag;

	maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
	    MSK_RESERVED_TX_DESC_CNT;
	KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
	    ("not enough spare TX desc"));
	if (maxsegs > MSK_MAXTXSEGS)
		maxsegs = MSK_MAXTXSEGS;

	/*
	 * Align the TX buffer on a 64 byte boundary. This greatly
	 * improves bulk data TX performance on my 88E8053 (+100Mbps),
	 * at least. Try to avoid m_defrag() if the mbuf is not chained
	 * together by m_next (i.e. m->m_len == m->m_pkthdr.len).
	 */

#define MSK_TXBUF_ALIGN	64
#define MSK_TXBUF_MASK	(MSK_TXBUF_ALIGN - 1)

	defrag = 1;
	m = *m_head;
	if (m->m_len == m->m_pkthdr.len) {
		int space;

		space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
		if (space) {
			if (M_WRITABLE(m)) {
				if (M_TRAILINGSPACE(m) >= space) {
					/* e.g. TCP ACKs */
					bcopy(m->m_data, m->m_data + space,
					    m->m_len);
					m->m_data += space;
					defrag = 0;
					sc_if->msk_softc->msk_trailing_copied++;
				} else {
					space = MSK_TXBUF_ALIGN - space;
					if (M_LEADINGSPACE(m) >= space) {
						/* e.g. small UDP datagrams */
						bcopy(m->m_data,
						    m->m_data - space,
						    m->m_len);
						m->m_data -= space;
						defrag = 0;
						sc_if->msk_softc->
						msk_leading_copied++;
					}
				}
			}
		} else {
			/* e.g. on the forwarding path */
			defrag = 0;
		}
	}
	if (defrag) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return ENOBUFS;
		}
		*m_head = m;
	} else {
		sc_if->msk_softc->msk_defrag_avoided++;
	}

#undef MSK_TXBUF_MASK
#undef MSK_TXBUF_ALIGN

	tcp_offset = offset = 0;
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		/*
		 * Since the mbuf carries no protocol specific structure
		 * information, we have to inspect the protocol headers
		 * here to set up TSO and checksum offload. I don't know
		 * why Marvell made such a decision in the chip design;
		 * other GigE hardware normally takes care of all these
		 * chores in hardware. However, the TSO performance of the
		 * Yukon II is good enough that it's worth implementing.
		 */
		struct ether_header *eh;
		struct ip *ip;

		/* TODO: check for M_WRITABLE(m) */

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off.
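		 *
		 * If it is, the VLAN tag was prepended in software and is
		 * still part of the mbuf data, so every protocol header
		 * sits EVL_ENCAPLEN (4) bytes further in. The pullup below
		 * is therefore reissued with sizeof(struct
		 * ether_vlan_header) (18 bytes) instead of sizeof(struct
		 * ether_header) (14 bytes) before the IP header offset is
		 * computed.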
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		/*
		 * It seems that the Yukon II has a Tx checksum offload bug
		 * for small TCP packets that are less than 60 bytes in size
		 * (e.g. TCP window probe packets, pure ACK packets).
		 * Common workarounds such as padding with zeros to reach
		 * the minimum ethernet frame size didn't work at all.
		 * Instead of disabling checksum offload completely, we
		 * resort to a software checksum routine when we encounter
		 * short TCP frames. Short UDP packets appear to be handled
		 * correctly by the Yukon II.
		 */
		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			uint16_t csum;

			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
			    (ip->ip_hl << 2), offset);
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = csum;
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}

	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
	    m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return error;
	}
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	control = 0;
	tx_le = NULL;

#ifdef notyet
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(0);
		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
		    htons(m->m_pkthdr.ether_vtag));
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
		control |= INS_VLAN;
	}
#endif
	/* Check if we have to handle checksum offload. */
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
		    & 0xffff) | ((uint32_t)tcp_offset << 16));
		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			control |= UDPTCP;
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}

	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
	    OP_PACKET);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nsegs; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
	/* Update the producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor.
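	 *
	 * At this point the list elements (LEs) built for one frame look
	 * roughly like:
	 *
	 *	[csum LE]  OP_TCPLISW | HW_OWNER	(optional)
	 *	[first LE] OP_PACKET | seg len		(HW_OWNER set last)
	 *	[mid LEs]  OP_BUFFER | HW_OWNER | seg len
	 *	[last LE]  gets EOP below
	 *
	 * Ownership of the first LE is only handed to the chip after the
	 * whole chain is complete (see below), so the hardware never sees
	 * a half-built frame.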
*/ 2572 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2573 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2574 tx_le->msk_control |= htole32(EOP); 2575 2576 /* Turn the first descriptor ownership to hardware. */ 2577 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2578 tx_le->msk_control |= htole32(HW_OWNER); 2579 2580 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2581 map = txd_last->tx_dmamap; 2582 txd_last->tx_dmamap = txd->tx_dmamap; 2583 txd->tx_dmamap = map; 2584 txd->tx_m = m; 2585 2586 return (0); 2587 } 2588 2589 static void 2590 msk_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 2591 { 2592 struct msk_if_softc *sc_if; 2593 struct mbuf *m_head; 2594 int enq; 2595 2596 sc_if = ifp->if_softc; 2597 2598 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 2599 ASSERT_SERIALIZED(ifp->if_serializer); 2600 2601 if (!sc_if->msk_link) { 2602 ifq_purge(&ifp->if_snd); 2603 return; 2604 } 2605 2606 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 2607 return; 2608 2609 enq = 0; 2610 while (!ifq_is_empty(&ifp->if_snd)) { 2611 if (MSK_IS_OACTIVE(sc_if)) { 2612 ifq_set_oactive(&ifp->if_snd); 2613 break; 2614 } 2615 2616 m_head = ifq_dequeue(&ifp->if_snd); 2617 if (m_head == NULL) 2618 break; 2619 2620 /* 2621 * Pack the data into the transmit ring. If we 2622 * don't have room, set the OACTIVE flag and wait 2623 * for the NIC to drain the ring. 2624 */ 2625 if (msk_encap(sc_if, &m_head) != 0) { 2626 IFNET_STAT_INC(ifp, oerrors, 1); 2627 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2628 continue; 2629 } else { 2630 ifq_set_oactive(&ifp->if_snd); 2631 break; 2632 } 2633 } 2634 enq = 1; 2635 2636 /* 2637 * If there's a BPF listener, bounce a copy of this frame 2638 * to him. 2639 */ 2640 BPF_MTAP(ifp, m_head); 2641 } 2642 2643 if (enq) { 2644 /* Transmit */ 2645 CSR_WRITE_2(sc_if->msk_softc, 2646 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2647 sc_if->msk_cdata.msk_tx_prod); 2648 2649 /* Set a timeout in case the chip goes out to lunch. */ 2650 ifp->if_timer = MSK_TX_TIMEOUT; 2651 } 2652 } 2653 2654 static void 2655 msk_watchdog(struct ifnet *ifp) 2656 { 2657 struct msk_if_softc *sc_if = ifp->if_softc; 2658 uint32_t ridx; 2659 int idx; 2660 2661 ASSERT_SERIALIZED(ifp->if_serializer); 2662 2663 if (sc_if->msk_link == 0) { 2664 if (bootverbose) 2665 if_printf(sc_if->msk_ifp, "watchdog timeout " 2666 "(missed link)\n"); 2667 IFNET_STAT_INC(ifp, oerrors, 1); 2668 msk_init(sc_if); 2669 return; 2670 } 2671 2672 /* 2673 * Reclaim first as there is a possibility of losing Tx completion 2674 * interrupts. 2675 */ 2676 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2677 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2678 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2679 msk_txeof(sc_if, idx); 2680 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2681 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2682 "-- recovering\n"); 2683 if (!ifq_is_empty(&ifp->if_snd)) 2684 if_devstart(ifp); 2685 return; 2686 } 2687 } 2688 2689 if_printf(ifp, "watchdog timeout\n"); 2690 IFNET_STAT_INC(ifp, oerrors, 1); 2691 msk_init(sc_if); 2692 if (!ifq_is_empty(&ifp->if_snd)) 2693 if_devstart(ifp); 2694 } 2695 2696 static int 2697 mskc_shutdown(device_t dev) 2698 { 2699 struct msk_softc *sc = device_get_softc(dev); 2700 int i; 2701 2702 lwkt_serialize_enter(&sc->msk_serializer); 2703 2704 for (i = 0; i < sc->msk_num_port; i++) { 2705 if (sc->msk_if[i] != NULL) 2706 msk_stop(sc->msk_if[i]); 2707 } 2708 2709 /* Put hardware reset. 
*/ 2710 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2711 2712 lwkt_serialize_exit(&sc->msk_serializer); 2713 return (0); 2714 } 2715 2716 static int 2717 mskc_suspend(device_t dev) 2718 { 2719 struct msk_softc *sc = device_get_softc(dev); 2720 int i; 2721 2722 lwkt_serialize_enter(&sc->msk_serializer); 2723 2724 for (i = 0; i < sc->msk_num_port; i++) { 2725 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2726 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0)) 2727 msk_stop(sc->msk_if[i]); 2728 } 2729 2730 /* Disable all interrupts. */ 2731 CSR_WRITE_4(sc, B0_IMSK, 0); 2732 CSR_READ_4(sc, B0_IMSK); 2733 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2734 CSR_READ_4(sc, B0_HWE_IMSK); 2735 2736 mskc_phy_power(sc, MSK_PHY_POWERDOWN); 2737 2738 /* Put hardware reset. */ 2739 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2740 sc->msk_suspended = 1; 2741 2742 lwkt_serialize_exit(&sc->msk_serializer); 2743 2744 return (0); 2745 } 2746 2747 static int 2748 mskc_resume(device_t dev) 2749 { 2750 struct msk_softc *sc = device_get_softc(dev); 2751 int i; 2752 2753 lwkt_serialize_enter(&sc->msk_serializer); 2754 2755 /* Enable all clocks before accessing any registers. */ 2756 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); 2757 mskc_reset(sc); 2758 for (i = 0; i < sc->msk_num_port; i++) { 2759 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2760 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2761 msk_init(sc->msk_if[i]); 2762 } 2763 sc->msk_suspended = 0; 2764 2765 lwkt_serialize_exit(&sc->msk_serializer); 2766 2767 return (0); 2768 } 2769 2770 static void 2771 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2772 { 2773 struct mbuf *m; 2774 struct ifnet *ifp; 2775 struct msk_rxdesc *rxd; 2776 int cons, rxlen; 2777 2778 ifp = sc_if->msk_ifp; 2779 2780 cons = sc_if->msk_cdata.msk_rx_cons; 2781 do { 2782 rxlen = status >> 16; 2783 if ((status & GMR_FS_VLAN) != 0 && 2784 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2785 rxlen -= EVL_ENCAPLEN; 2786 if (sc_if->msk_flags & MSK_FLAG_NORXCHK) { 2787 /* 2788 * For controllers that returns bogus status code 2789 * just do minimal check and let upper stack 2790 * handle this frame. 2791 */ 2792 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { 2793 IFNET_STAT_INC(ifp, ierrors, 1); 2794 msk_discard_rxbuf(sc_if, cons); 2795 break; 2796 } 2797 } else if (len > sc_if->msk_framesize || 2798 ((status & GMR_FS_ANY_ERR) != 0) || 2799 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2800 /* Don't count flow-control packet as errors. */ 2801 if ((status & GMR_FS_GOOD_FC) == 0) 2802 IFNET_STAT_INC(ifp, ierrors, 1); 2803 msk_discard_rxbuf(sc_if, cons); 2804 break; 2805 } 2806 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2807 m = rxd->rx_m; 2808 if (msk_newbuf(sc_if, cons, 0) != 0) { 2809 IFNET_STAT_INC(ifp, iqdrops, 1); 2810 /* Reuse old buffer. */ 2811 msk_discard_rxbuf(sc_if, cons); 2812 break; 2813 } 2814 m->m_pkthdr.rcvif = ifp; 2815 m->m_pkthdr.len = m->m_len = len; 2816 IFNET_STAT_INC(ifp, ipackets, 1); 2817 #ifdef notyet 2818 /* Check for VLAN tagged packets. 
*/ 2819 if ((status & GMR_FS_VLAN) != 0 && 2820 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2821 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2822 m->m_flags |= M_VLANTAG; 2823 } 2824 #endif 2825 2826 ifp->if_input(ifp, m, NULL, -1); 2827 } while (0); 2828 2829 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2830 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2831 } 2832 2833 #ifdef MSK_JUMBO 2834 static void 2835 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2836 { 2837 struct mbuf *m; 2838 struct ifnet *ifp; 2839 struct msk_rxdesc *jrxd; 2840 int cons, rxlen; 2841 2842 ifp = sc_if->msk_ifp; 2843 2844 MSK_IF_LOCK_ASSERT(sc_if); 2845 2846 cons = sc_if->msk_cdata.msk_rx_cons; 2847 do { 2848 rxlen = status >> 16; 2849 if ((status & GMR_FS_VLAN) != 0 && 2850 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2851 rxlen -= ETHER_VLAN_ENCAP_LEN; 2852 if (len > sc_if->msk_framesize || 2853 ((status & GMR_FS_ANY_ERR) != 0) || 2854 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2855 /* Don't count flow-control packet as errors. */ 2856 if ((status & GMR_FS_GOOD_FC) == 0) 2857 ifp->if_ierrors++; 2858 msk_discard_jumbo_rxbuf(sc_if, cons); 2859 break; 2860 } 2861 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2862 m = jrxd->rx_m; 2863 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2864 ifp->if_iqdrops++; 2865 /* Reuse old buffer. */ 2866 msk_discard_jumbo_rxbuf(sc_if, cons); 2867 break; 2868 } 2869 m->m_pkthdr.rcvif = ifp; 2870 m->m_pkthdr.len = m->m_len = len; 2871 ifp->if_ipackets++; 2872 /* Check for VLAN tagged packets. */ 2873 if ((status & GMR_FS_VLAN) != 0 && 2874 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2875 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2876 m->m_flags |= M_VLANTAG; 2877 } 2878 MSK_IF_UNLOCK(sc_if); 2879 ifp->if_input(ifp, m, NULL, -1); 2880 MSK_IF_LOCK(sc_if); 2881 } while (0); 2882 2883 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2884 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2885 } 2886 #endif 2887 2888 static void 2889 msk_txeof(struct msk_if_softc *sc_if, int idx) 2890 { 2891 struct msk_txdesc *txd; 2892 struct msk_tx_desc *cur_tx; 2893 struct ifnet *ifp; 2894 uint32_t control; 2895 int cons, prog; 2896 2897 ifp = sc_if->msk_ifp; 2898 2899 /* 2900 * Go through our tx ring and free mbufs for those 2901 * frames that have been sent. 2902 */ 2903 cons = sc_if->msk_cdata.msk_tx_cons; 2904 prog = 0; 2905 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2906 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2907 break; 2908 prog++; 2909 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2910 control = le32toh(cur_tx->msk_control); 2911 sc_if->msk_cdata.msk_tx_cnt--; 2912 if ((control & EOP) == 0) 2913 continue; 2914 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2915 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2916 2917 IFNET_STAT_INC(ifp, opackets, 1); 2918 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2919 __func__)); 2920 m_freem(txd->tx_m); 2921 txd->tx_m = NULL; 2922 } 2923 2924 if (prog > 0) { 2925 sc_if->msk_cdata.msk_tx_cons = cons; 2926 if (!MSK_IS_OACTIVE(sc_if)) 2927 ifq_clr_oactive(&ifp->if_snd); 2928 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2929 ifp->if_timer = 0; 2930 /* No need to sync LEs as we didn't update LEs. 
		 */
	}
}

static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if = xsc_if;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);

	mii_tick(mii);
	if (!sc_if->msk_link)
		msk_miibus_statchg(sc_if->msk_if_dev);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	/* Handle FIFO underrun/overflow? */
	if (status & PHY_M_IS_FIFO_ERROR) {
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
	}
}

static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
	}
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of a Tx underrun, we may need to flush/reset
		 * the Tx MAC, but that would also require
		 * resynchronization with the status LEs. Reinitializing
		 * the status LEs would affect the other port in dual MAC
		 * configurations, so it should be avoided as much as
		 * possible. Due to the lack of documentation this is all
		 * vague guesswork; it needs more investigation.
		 */
	}
}

static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

static void
mskc_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow.
	 */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec.
		 * This error is also mapped either to the Master Abort
		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
		 * and can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express, bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex
		 * too, which requests the system to handle the problem.
		 * After the error occurrence it may be that no access to
		 * the adapter is possible any longer.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request errors. */
			if (bootverbose) {
				device_printf(sc->msk_dev,
				    "Uncorrectable PCI Express error\n");
			}
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get the TLP header from the Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for a vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt.
		 */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}

static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
#ifdef MSK_JUMBO
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	}
#endif
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}

static int
mskc_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	int rxput[2];
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, idx, len, port, rxprog;

	idx = CSR_READ_2(sc, STAT_PUT_IDX);
	if (idx == sc->msk_stat_cons)
		return (0);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;

	rxprog = 0;
	for (cons = sc->msk_stat_cons; cons != idx;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		/*
		 * Marvell's FreeBSD driver updates the status LE after
		 * clearing HW_OWNER. However, we don't have a way to sync
		 * a single LE with the bus_dma(9) API; bus_dma(9) only
		 * provides a way to sync an entire DMA map. So don't sync
		 * LEs until we have a better way to do it.
		 */
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			continue;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXSTAT:
			if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
				break;
#ifdef MSK_JUMBO
			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
				msk_jumbo_rxeof(sc_if, status, len);
			else
#endif
				msk_rxeof(sc_if, status, len);
			rxprog++;
			/*
			 * Because there is no way to sync a single Rx LE,
			 * put off the DMA sync operation until the end of
			 * event processing.
			 */
			rxput[port]++;
			/* Update the prefetch unit if we've passed the watermark.
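			 *
			 * rxput[] counts Rx buffers requeued per port
			 * during this pass. Pushing the put index as soon
			 * as a port crosses msk_rx_putwm, instead of
			 * waiting for the end of event processing, keeps
			 * the prefetch unit fed during long Rx bursts.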
*/ 3206 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3207 msk_rxput(sc_if); 3208 rxput[port] = 0; 3209 } 3210 break; 3211 case OP_TXINDEXLE: 3212 if (sc->msk_if[MSK_PORT_A] != NULL) { 3213 msk_txeof(sc->msk_if[MSK_PORT_A], 3214 status & STLE_TXA1_MSKL); 3215 } 3216 if (sc->msk_if[MSK_PORT_B] != NULL) { 3217 msk_txeof(sc->msk_if[MSK_PORT_B], 3218 ((status & STLE_TXA2_MSKL) >> 3219 STLE_TXA2_SHIFTL) | 3220 ((len & STLE_TXA2_MSKH) << 3221 STLE_TXA2_SHIFTH)); 3222 } 3223 break; 3224 default: 3225 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3226 control & STLE_OP_MASK); 3227 break; 3228 } 3229 MSK_INC(cons, MSK_STAT_RING_CNT); 3230 if (rxprog > sc->msk_process_limit) 3231 break; 3232 } 3233 3234 sc->msk_stat_cons = cons; 3235 /* XXX We should sync status LEs here. See above notes. */ 3236 3237 if (rxput[MSK_PORT_A] > 0) 3238 msk_rxput(sc->msk_if[MSK_PORT_A]); 3239 if (rxput[MSK_PORT_B] > 0) 3240 msk_rxput(sc->msk_if[MSK_PORT_B]); 3241 3242 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3243 } 3244 3245 /* Legacy interrupt handler for shared interrupt. */ 3246 static void 3247 mskc_intr(void *xsc) 3248 { 3249 struct msk_softc *sc; 3250 struct msk_if_softc *sc_if0, *sc_if1; 3251 struct ifnet *ifp0, *ifp1; 3252 uint32_t status; 3253 3254 sc = xsc; 3255 ASSERT_SERIALIZED(&sc->msk_serializer); 3256 3257 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3258 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3259 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3260 (status & sc->msk_intrmask) == 0) { 3261 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3262 return; 3263 } 3264 3265 sc_if0 = sc->msk_if[MSK_PORT_A]; 3266 sc_if1 = sc->msk_if[MSK_PORT_B]; 3267 ifp0 = ifp1 = NULL; 3268 if (sc_if0 != NULL) 3269 ifp0 = sc_if0->msk_ifp; 3270 if (sc_if1 != NULL) 3271 ifp1 = sc_if1->msk_ifp; 3272 3273 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3274 msk_intr_phy(sc_if0); 3275 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3276 msk_intr_phy(sc_if1); 3277 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3278 msk_intr_gmac(sc_if0); 3279 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3280 msk_intr_gmac(sc_if1); 3281 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3282 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3283 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3284 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3285 CSR_READ_4(sc, B0_IMSK); 3286 } 3287 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3288 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3289 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3290 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3291 CSR_READ_4(sc, B0_IMSK); 3292 } 3293 if ((status & Y2_IS_HW_ERR) != 0) 3294 mskc_intr_hwerr(sc); 3295 3296 while (mskc_handle_events(sc) != 0) 3297 ; 3298 if ((status & Y2_IS_STAT_BMU) != 0) 3299 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3300 3301 /* Reenable interrupts. 
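	 *
	 * Reading B0_Y2_SP_ISRC2 at the top of this handler masked the
	 * chip's interrupt; the write of 2 to B0_Y2_SP_ICR below is what
	 * re-arms it. Note that the early-return path at the top performs
	 * the same write, so every exit from this handler re-enables the
	 * interrupt exactly once.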
*/ 3302 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3303 3304 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 && 3305 !ifq_is_empty(&ifp0->if_snd)) 3306 if_devstart(ifp0); 3307 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 && 3308 !ifq_is_empty(&ifp1->if_snd)) 3309 if_devstart(ifp1); 3310 } 3311 3312 static void 3313 msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3314 { 3315 struct msk_softc *sc = sc_if->msk_softc; 3316 struct ifnet *ifp = sc_if->msk_ifp; 3317 3318 if ((sc->msk_hw_id == CHIP_ID_YUKON_EX && 3319 sc->msk_hw_rev != CHIP_REV_YU_EX_A0) || 3320 sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) { 3321 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3322 TX_STFW_ENA); 3323 } else { 3324 if (ifp->if_mtu > ETHERMTU) { 3325 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3326 CSR_WRITE_4(sc, 3327 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3328 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3329 /* Disable Store & Forward mode for Tx. */ 3330 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3331 TX_STFW_DIS); 3332 } else { 3333 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3334 TX_STFW_ENA); 3335 } 3336 } 3337 } 3338 3339 static void 3340 msk_init(void *xsc) 3341 { 3342 struct msk_if_softc *sc_if = xsc; 3343 struct msk_softc *sc = sc_if->msk_softc; 3344 struct ifnet *ifp = sc_if->msk_ifp; 3345 struct mii_data *mii; 3346 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3347 uint16_t gmac; 3348 uint32_t reg; 3349 int error, i; 3350 3351 ASSERT_SERIALIZED(ifp->if_serializer); 3352 3353 mii = device_get_softc(sc_if->msk_miibus); 3354 3355 error = 0; 3356 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3357 msk_stop(sc_if); 3358 3359 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3360 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 3361 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3362 /* 3363 * In Yukon EC Ultra, TSO & checksum offload is not 3364 * supported for jumbo frame. 3365 */ 3366 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 3367 ifp->if_capenable &= ~IFCAP_TXCSUM; 3368 } 3369 3370 /* GMAC Control reset. */ 3371 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3372 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3373 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3374 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 3375 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 3376 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3377 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3378 GMC_BYP_RETR_ON); 3379 } 3380 3381 /* 3382 * Initialize GMAC first such that speed/duplex/flow-control 3383 * parameters are renegotiated when interface is brought up. 3384 */ 3385 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3386 3387 /* Dummy read the Interrupt Source Register. */ 3388 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3389 3390 /* Set MIB Clear Counter Mode. */ 3391 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3392 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3393 /* Read all MIB Counters with Clear Mode set. */ 3394 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3395 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3396 /* Clear MIB Clear Counter Mode. */ 3397 gmac &= ~GM_PAR_MIB_CLR; 3398 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3399 3400 /* Disable FCS. */ 3401 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3402 3403 /* Setup Transmit Control Register. 
	 */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

	/* Set the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
		    eaddr[i]);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
		    eaddr[i]);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);

	/* Set the receive filter. */
	msk_rxfilter(sc_if);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
		/* Clear the flush mask - HW bug. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
		    GMR_FS_ANY_ERR);
	}

	/*
	 * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word,
	 * due to a hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic value for Yukon FE+, taken from Linux. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
		reg = 0x178;
	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
		/* Set the Rx pause thresholds. */
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		/* Configure store-and-forward for Tx. */
		msk_set_tx_stfwd(sc_if);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
		/* Disable the dynamic watermark - from Linux.
*/ 3489 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); 3490 reg &= ~0x03; 3491 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); 3492 } 3493 3494 /* 3495 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3496 * arbiter as we don't use Sync Tx queue. 3497 */ 3498 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3499 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3500 /* Enable the RAM Interface Arbiter. */ 3501 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3502 3503 /* Setup RAM buffer. */ 3504 msk_set_rambuffer(sc_if); 3505 3506 /* Disable Tx sync Queue. */ 3507 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3508 3509 /* Setup Tx Queue Bus Memory Interface. */ 3510 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3511 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3512 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3513 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3514 switch (sc->msk_hw_id) { 3515 case CHIP_ID_YUKON_EC_U: 3516 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3517 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3518 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), 3519 MSK_ECU_TXFF_LEV); 3520 } 3521 break; 3522 case CHIP_ID_YUKON_EX: 3523 /* 3524 * Yukon Extreme seems to have silicon bug for 3525 * automatic Tx checksum calculation capability. 3526 */ 3527 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) { 3528 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), 3529 F_TX_CHK_AUTO_OFF); 3530 } 3531 break; 3532 } 3533 3534 /* Setup Rx Queue Bus Memory Interface. */ 3535 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3536 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3537 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3538 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3539 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3540 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3541 /* MAC Rx RAM Read is controlled by hardware. */ 3542 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3543 } 3544 3545 msk_set_prefetch(sc, sc_if->msk_txq, 3546 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3547 msk_init_tx_ring(sc_if); 3548 3549 /* Disable Rx checksum offload and RSS hash. */ 3550 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3551 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3552 #ifdef MSK_JUMBO 3553 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3554 msk_set_prefetch(sc, sc_if->msk_rxq, 3555 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3556 MSK_JUMBO_RX_RING_CNT - 1); 3557 error = msk_init_jumbo_rx_ring(sc_if); 3558 } else 3559 #endif 3560 { 3561 msk_set_prefetch(sc, sc_if->msk_rxq, 3562 sc_if->msk_rdata.msk_rx_ring_paddr, 3563 MSK_RX_RING_CNT - 1); 3564 error = msk_init_rx_ring(sc_if); 3565 } 3566 if (error != 0) { 3567 device_printf(sc_if->msk_if_dev, 3568 "initialization failed: no memory for Rx buffers\n"); 3569 msk_stop(sc_if); 3570 return; 3571 } 3572 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 3573 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 3574 /* Disable flushing of non-ASF packets. */ 3575 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3576 GMF_RX_MACSEC_FLUSH_OFF); 3577 } 3578 3579 /* Configure interrupt handling. 

static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
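
/*
 * Worked example for the pause-threshold arithmetic above (a sketch
 * under assumed numbers, not values read from real hardware): the RAM
 * buffer registers count 64-bit qwords, hence the divisions by 8.  If
 * the Rx queue were a 64KB region starting at offset 0 and MSK_RB_ULPP
 * amounted to 8KB, the upper threshold would come out as
 *
 *	utpp = (65536 - 8192) / 8 = 7168 qwords,
 *
 * i.e. the MAC would start sending pause frames once less than 8KB of
 * RAM buffer space remains free.
 */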

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
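
/*
 * Usage sketch for msk_set_prefetch(): callers pass the bus address of
 * a list-element (descriptor) ring plus the index of the last element,
 * mirroring the Rx/Tx calls in msk_init().  MSK_ADDR_LO()/MSK_ADDR_HI()
 * split a potentially 64-bit bus address into the two 32-bit register
 * halves the prefetch unit expects.
 */
#if 0
	/* e.g. point the Tx prefetch unit at the Tx descriptor ring: */
	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
#endif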

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
#endif
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);
	ifp->if_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read again to ensure writing. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet and since we can't make sure that we have
	 * incoming data, we must reset the BMU while it is not doing a DMA
	 * transfer. Since it is possible that the Rx path is still active,
	 * the Rx RAM buffer will be stopped first, so any possible incoming
	 * data will not trigger a DMA. After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress is ended and only then
	 * will it be reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
#ifdef MSK_JUMBO
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
#endif
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Mark the interface down. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc_if->msk_link = 0;
}
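
/*
 * The sysctl handlers below back the driver's runtime tunables.  As a
 * usage sketch (the exact sysctl node name is an assumption here, not
 * something this file establishes), the interrupt moderation rate could
 * be changed at runtime with something like:
 *
 *	sysctl hw.mskc0.intr_rate=6000
 *
 * mskc_sysctl_intr_rate() validates the new value while holding the
 * driver serializer and reprograms the moderation timer only if at
 * least one interface is currently running.
 */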

static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}

static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error = 0, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}

static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "can't create coherent DMA memory\n");
		return error;
	}

	*dtag = dmem.dmem_tag;
	*dmap = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}

static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
mskc_set_imtimer(struct msk_softc *sc)
{
	if (sc->msk_intr_rate > 0) {
		/*
		 * XXX myk(4) seems to use 125MHz for EC/FE/XL
		 * and 78.125MHz for the rest of the chip types.
		 */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
	} else {
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
	}
}
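
/*
 * Worked example for mskc_set_imtimer() (illustrative arithmetic only):
 * msk_intr_rate is expressed in interrupts per second, so the initial
 * count written to B2_IRQM_INI corresponds to 1000000 / msk_intr_rate
 * microseconds, which MSK_USECS() converts into core-clock ticks.
 * Assuming a 125MHz core clock and an intr_rate of 6000, that works out
 * to 1000000 / 6000 = 166 usecs, or about 166 * 125 = 20750 clock ticks
 * between moderated interrupts.
 */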