/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima"
};

static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int,
			  struct mbuf_chain *);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void	msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_rxfilter(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_set_tx_stfwd(struct msk_if_softc *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);

static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
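
/*
 * Added note (not in the original driver): these tunables are fetched
 * from the kernel environment once at module load, so they would
 * normally be set in /boot/loader.conf before boot, for example
 * (values are illustrative only):
 *
 *	hw.mskc.intr_rate="4000"
 *	hw.mskc.process_limit="100"
 *
 * The per-controller sysctl handlers registered in mskc_attach() allow
 * the same knobs to be adjusted at runtime.
 */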

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

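	/*
	 * Added note: this is a bounded busy-wait.  With DELAY(1) per
	 * iteration the poll gives the PHY roughly MSK_TIMEOUT
	 * microseconds (plus register access latency) to latch the read
	 * before the driver gives up.  msk_phy_writereg() below applies
	 * the same pattern to the GM_SMI_CT_BUSY bit.
	 */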
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		     GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;

	sc_if->msk_link = 0;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_link = 1;
			break;
		}
	}

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_OFF;
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
		    ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
			gmac = GMC_PAUSE_ON;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read back to ensure the write completed. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
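
/*
 * Added note: the multicast filter below hashes each address with
 * ether_crc32_be() and keeps the 6 least significant bits of the CRC,
 * giving a bit index 0..63 into the 64-bit hash table held in mchash[].
 * For example, a hash value of 37 sets bit 5 (37 & 0x1f) of mchash[1]
 * (37 >> 5).  The two 32-bit words are then split across the four
 * 16-bit GM_MC_ADDR_H1..H4 GMAC registers.
 */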
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod, 1) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}
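
	/*
	 * Added note (not in the original): the put index is written as
	 * MSK_RX_RING_CNT - 1 rather than 0, presumably so that one
	 * descriptor is always held back from the prefetch unit and a
	 * completely full ring remains distinguishable from an empty one.
	 */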
	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nseg;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap,
	    m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}
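
	/*
	 * Added note: the new mbuf was loaded into the spare DMA map
	 * above, so a load failure leaves the old buffer untouched.
	 * Only after the load has succeeded is the old mbuf unloaded
	 * and the descriptor's map swapped with the spare.
	 */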
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}

	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
#ifdef MSK_JUMBO
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
#else
		error = EOPNOTSUPP;
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					msk_rxfilter(sc_if);
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_rxfilter(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
#ifdef notyet
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
#endif

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	if (sc->msk_ramsize == 0)
		return (0);
	sc->msk_pflags |= MSK_FLAG_RAMBUF;

	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024.  The Tx/Rx RAM buffer size of Yukon II should be a
	 * multiple of 1024.
	 */
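	/*
	 * Added worked example: with 48KB of SRAM, msk_ramsize is 48, so
	 * the receive queue gets rounddown(48 * 1024 * 2 / 3, 1024) =
	 * 32768 bytes, leaving 16384 bytes for the transmit queue.
	 */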
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else {
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		}
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
			val |= PCI_CLS_OPT;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
		}
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE) {
		/* Configure timeout values. */
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_SET);
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_CLR);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
			    MSK_RI_TO_53);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual-port PCI-X cards there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_pciecap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pcie_get_max_readrq(sc->msk_dev) ==
		    PCIEM_DEVCTL_MAX_READRQ_512) {
			pcie_set_max_readrq(sc->msk_dev,
			    PCIEM_DEVCTL_MAX_READRQ_2048);
		}
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
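
	/*
	 * Added note: MSK_USECS() presumably converts microseconds into
	 * core-clock ticks using sc->msk_clock (in MHz), which is chosen
	 * per chip in mskc_attach(); the Tx timer below is therefore
	 * armed for roughly 1ms regardless of the chip's clock rate.
	 */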
	/*
	 * Use default values for STAT_ISR_TIMER_INI and STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware.  It seems there is a workaround to make it work sometimes.
	 * However, the workaround also has to check OP code sequences to
	 * verify whether the OP code is correct.  Sometimes it would have to
	 * compute the IP/TCP/UDP checksum in the driver to verify the
	 * correctness of the checksum computed by the hardware.  If you have
	 * to compute the checksum in software to verify the hardware's
	 * checksum, why have the hardware compute the checksum at all?
	 * So there is little reason to spend time making Rx checksum offload
	 * work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
			      msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
#if 0
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif

	return 0;
fail:
	msk_detach(dev);
	sc->msk_if[port] = NULL;
	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port, cpuid;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource
	 */
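	/*
	 * Added note: the control registers are reachable both
	 * memory-mapped (BAR0) and through I/O space (BAR1).  The
	 * compile-time preference below is tried first; if that
	 * allocation fails, the code falls back to the other resource
	 * type before giving up.
	 */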
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
					     &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
						     &sc->msk_res_rid,
						     RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					     &sc->msk_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(dev),
					      CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
			&sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
			"I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, mskc_sysctl_intr_rate,
			"I", "max number of interrupts per second");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
		       0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
		       0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
		       0, "# of trailing copies on TX path");
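
	/*
	 * Added usage note: since the tree node is named after the
	 * device unit, the knobs above appear as e.g.
	 * hw.mskc0.process_limit and hw.mskc0.intr_rate and can be
	 * adjusted at runtime with sysctl(8).
	 */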

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
	} else if (pci_is_pcix(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
	} else {
		sc->msk_bustype = MSK_PCI_BUS;
	}

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		/* DESCV2 */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has a status LE writeback bug, so msk(4)
			 * does not rely on the status word of received
			 * frames in msk_rxeof(), which in turn disables
			 * all hardware assistance bits reported by the
			 * status word as well as validity checks on the
			 * received frame.  Just pass received frames to
			 * the upper stack with minimal testing and let
			 * the upper stack handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NORXCHK;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
			      Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
			       mskc_intr, sc, &sc->msk_intrhand,
			       &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

	cpuid = rman_get_cpuid(sc->msk_irq);
	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (sc->msk_if[0] != NULL)
		sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
	if (sc->msk_if[1] != NULL)
		sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;
	return 0;
fail:
	mskc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
1762 */ 1763 static int 1764 msk_detach(device_t dev) 1765 { 1766 struct msk_if_softc *sc_if = device_get_softc(dev); 1767 1768 if (device_is_attached(dev)) { 1769 struct msk_softc *sc = sc_if->msk_softc; 1770 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1771 1772 lwkt_serialize_enter(ifp->if_serializer); 1773 1774 if (sc->msk_intrhand != NULL) { 1775 if (sc->msk_if[MSK_PORT_A] != NULL) 1776 msk_stop(sc->msk_if[MSK_PORT_A]); 1777 if (sc->msk_if[MSK_PORT_B] != NULL) 1778 msk_stop(sc->msk_if[MSK_PORT_B]); 1779 1780 bus_teardown_intr(sc->msk_dev, sc->msk_irq, 1781 sc->msk_intrhand); 1782 sc->msk_intrhand = NULL; 1783 } 1784 1785 lwkt_serialize_exit(ifp->if_serializer); 1786 1787 ether_ifdetach(ifp); 1788 } 1789 1790 if (sc_if->msk_miibus != NULL) 1791 device_delete_child(dev, sc_if->msk_miibus); 1792 1793 msk_txrx_dma_free(sc_if); 1794 return (0); 1795 } 1796 1797 static int 1798 mskc_detach(device_t dev) 1799 { 1800 struct msk_softc *sc = device_get_softc(dev); 1801 int *port, i; 1802 1803 #ifdef INVARIANTS 1804 if (device_is_attached(dev)) { 1805 KASSERT(sc->msk_intrhand == NULL, 1806 ("intr is not torn down yet\n")); 1807 } 1808 #endif 1809 1810 for (i = 0; i < sc->msk_num_port; ++i) { 1811 if (sc->msk_devs[i] != NULL) { 1812 port = device_get_ivars(sc->msk_devs[i]); 1813 if (port != NULL) { 1814 kfree(port, M_DEVBUF); 1815 device_set_ivars(sc->msk_devs[i], NULL); 1816 } 1817 device_delete_child(dev, sc->msk_devs[i]); 1818 } 1819 } 1820 1821 /* Disable all interrupts. */ 1822 CSR_WRITE_4(sc, B0_IMSK, 0); 1823 CSR_READ_4(sc, B0_IMSK); 1824 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1825 CSR_READ_4(sc, B0_HWE_IMSK); 1826 1827 /* LED Off. */ 1828 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1829 1830 /* Put hardware reset. */ 1831 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1832 1833 mskc_status_dma_free(sc); 1834 1835 if (sc->msk_irq != NULL) { 1836 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid, 1837 sc->msk_irq); 1838 } 1839 if (sc->msk_res != NULL) { 1840 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid, 1841 sc->msk_res); 1842 } 1843 1844 if (sc->msk_sysctl_tree != NULL) 1845 sysctl_ctx_free(&sc->msk_sysctl_ctx); 1846 1847 return (0); 1848 } 1849 1850 /* Create status DMA region. */ 1851 static int 1852 mskc_status_dma_alloc(struct msk_softc *sc) 1853 { 1854 bus_dmamem_t dmem; 1855 int error; 1856 1857 error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0, 1858 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1859 MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1860 if (error) { 1861 device_printf(sc->msk_dev, 1862 "failed to create status coherent DMA memory\n"); 1863 return error; 1864 } 1865 sc->msk_stat_tag = dmem.dmem_tag; 1866 sc->msk_stat_map = dmem.dmem_map; 1867 sc->msk_stat_ring = dmem.dmem_addr; 1868 sc->msk_stat_ring_paddr = dmem.dmem_busaddr; 1869 1870 return (0); 1871 } 1872 1873 static void 1874 mskc_status_dma_free(struct msk_softc *sc) 1875 { 1876 /* Destroy status block. */ 1877 if (sc->msk_stat_tag) { 1878 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1879 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1880 sc->msk_stat_map); 1881 bus_dma_tag_destroy(sc->msk_stat_tag); 1882 sc->msk_stat_tag = NULL; 1883 } 1884 } 1885 1886 static int 1887 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1888 { 1889 int error, i, j; 1890 #ifdef MSK_JUMBO 1891 struct msk_rxdesc *jrxd; 1892 struct msk_jpool_entry *entry; 1893 uint8_t *ptr; 1894 #endif 1895 bus_size_t rxalign; 1896 1897 /* Create parent DMA tag. 
*/
1898	/*
1899	 * XXX
1900	 * It seems that the Yukon II supports full 64-bit DMA operations,
1901	 * but it needs two descriptors (list elements) per 64-bit DMA
1902	 * operation. Since we don't know in advance which DMA address
1903	 * mapping (32-bit or 64-bit) will be used for each mbuf, we limit
1904	 * its DMA space to the 32-bit address range. Otherwise, we would
1905	 * have to check which DMA address is in use and chain another
1906	 * descriptor for the 64-bit operation, which also means the
1907	 * descriptor ring size would be variable. Limiting DMA addresses
1908	 * to 32 bits greatly simplifies descriptor handling and possibly
1909	 * increases performance a bit due to more efficient handling of
1910	 * descriptors. Apart from harassing the checksum offloading
1911	 * mechanisms, it seems a really bad idea to use a separate
1912	 * descriptor per 64-bit DMA operation just to save a little memory;
1913	 * I've never seen this exotic scheme on other Ethernet hardware.
1914	 */
1915	error = bus_dma_tag_create(
1916	    NULL,			/* parent */
1917	    1, 0,			/* alignment, boundary */
1918	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1919	    BUS_SPACE_MAXADDR,		/* highaddr */
1920	    NULL, NULL,			/* filter, filterarg */
1921	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1922	    0,				/* nsegments */
1923	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1924	    0,				/* flags */
1925	    &sc_if->msk_cdata.msk_parent_tag);
1926	if (error) {
1927		device_printf(sc_if->msk_if_dev,
1928		    "failed to create parent DMA tag\n");
1929		return error;
1930	}
1931
1932	/* Create DMA resources for the Tx ring. */
1933	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1934	    &sc_if->msk_cdata.msk_tx_ring_tag,
1935	    (void *)&sc_if->msk_rdata.msk_tx_ring,
1936	    &sc_if->msk_rdata.msk_tx_ring_paddr,
1937	    &sc_if->msk_cdata.msk_tx_ring_map);
1938	if (error) {
1939		device_printf(sc_if->msk_if_dev,
1940		    "failed to create TX ring DMA resources\n");
1941		return error;
1942	}
1943
1944	/* Create DMA resources for the Rx ring. */
1945	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1946	    &sc_if->msk_cdata.msk_rx_ring_tag,
1947	    (void *)&sc_if->msk_rdata.msk_rx_ring,
1948	    &sc_if->msk_rdata.msk_rx_ring_paddr,
1949	    &sc_if->msk_cdata.msk_rx_ring_map);
1950	if (error) {
1951		device_printf(sc_if->msk_if_dev,
1952		    "failed to create RX ring DMA resources\n");
1953		return error;
1954	}
1955
1956	/* Create tag for Tx buffers. */
1957	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1958	    1, 0,			/* alignment, boundary */
1959	    BUS_SPACE_MAXADDR,		/* lowaddr */
1960	    BUS_SPACE_MAXADDR,		/* highaddr */
1961	    NULL, NULL,			/* filter, filterarg */
1962	    MSK_JUMBO_FRAMELEN,		/* maxsize */
1963	    MSK_MAXTXSEGS,		/* nsegments */
1964	    MSK_MAXSGSIZE,		/* maxsegsize */
1965	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
1966	    BUS_DMA_ONEBPAGE,		/* flags */
1967	    &sc_if->msk_cdata.msk_tx_tag);
1968	if (error) {
1969		device_printf(sc_if->msk_if_dev,
1970		    "failed to create Tx DMA tag\n");
1971		return error;
1972	}
1973
1974	/* Create DMA maps for Tx buffers.
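	 * One map is created per ring slot (MSK_TX_RING_CNT in total) so
	 * that an arbitrary mbuf chain can be loaded at transmit time.
	 * If creating the i-th map fails, the loop below destroys maps
	 * 0..i-1 and the tag itself before returning, so detach never
	 * sees a half-initialized Tx map array.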
*/
1975	for (i = 0; i < MSK_TX_RING_CNT; i++) {
1976		struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
1977
1978		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
1979		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1980		    &txd->tx_dmamap);
1981		if (error) {
1982			device_printf(sc_if->msk_if_dev,
1983			    "failed to create %dth Tx dmamap\n", i);
1984
1985			for (j = 0; j < i; ++j) {
1986				txd = &sc_if->msk_cdata.msk_txdesc[j];
1987				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
1988				    txd->tx_dmamap);
1989			}
1990			bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1991			sc_if->msk_cdata.msk_tx_tag = NULL;
1992
1993			return error;
1994		}
1995	}
1996
1997	/*
1998	 * Work around a hardware hang which seems to happen when the Rx
1999	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2000	 */
2001	if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
2002		rxalign = MSK_RX_BUF_ALIGN;
2003	else
2004		rxalign = 1;
2005
2006	/* Create tag for Rx buffers. */
2007	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2008	    rxalign, 0,			/* alignment, boundary */
2009	    BUS_SPACE_MAXADDR,		/* lowaddr */
2010	    BUS_SPACE_MAXADDR,		/* highaddr */
2011	    NULL, NULL,			/* filter, filterarg */
2012	    MCLBYTES,			/* maxsize */
2013	    1,				/* nsegments */
2014	    MCLBYTES,			/* maxsegsize */
2015	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
2016	    BUS_DMA_WAITOK,		/* flags */
2017	    &sc_if->msk_cdata.msk_rx_tag);
2018	if (error) {
2019		device_printf(sc_if->msk_if_dev,
2020		    "failed to create Rx DMA tag\n");
2021		return error;
2022	}
2023
2024	/* Create DMA maps for Rx buffers. */
2025	error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
2026	    &sc_if->msk_cdata.msk_rx_sparemap);
2027	if (error) {
2028		device_printf(sc_if->msk_if_dev,
2029		    "failed to create spare Rx dmamap\n");
2030		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2031		sc_if->msk_cdata.msk_rx_tag = NULL;
2032		return error;
2033	}
2034	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2035		struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2036
2037		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
2038		    BUS_DMA_WAITOK, &rxd->rx_dmamap);
2039		if (error) {
2040			device_printf(sc_if->msk_if_dev,
2041			    "failed to create %dth Rx dmamap\n", i);
2042
2043			for (j = 0; j < i; ++j) {
2044				rxd = &sc_if->msk_cdata.msk_rxdesc[j];
2045				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2046				    rxd->rx_dmamap);
2047			}
2048			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2049			    sc_if->msk_cdata.msk_rx_sparemap);
2050			bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2051			sc_if->msk_cdata.msk_rx_tag = NULL;
2052
2053			return error;
2054		}
2055	}
2056
2057 #ifdef MSK_JUMBO
2058	SLIST_INIT(&sc_if->msk_jfree_listhead);
2059	SLIST_INIT(&sc_if->msk_jinuse_listhead);
2060
2061	/* Create tag for jumbo Rx ring. */
2062	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2063	    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2064	    BUS_SPACE_MAXADDR,		/* lowaddr */
2065	    BUS_SPACE_MAXADDR,		/* highaddr */
2066	    NULL, NULL,			/* filter, filterarg */
2067	    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2068	    1,				/* nsegments */
2069	    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2070	    0,				/* flags */
2071	    NULL, NULL,			/* lockfunc, lockarg */
2072	    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2073	if (error != 0) {
2074		device_printf(sc_if->msk_if_dev,
2075		    "failed to create jumbo Rx ring DMA tag\n");
2076		goto fail;
2077	}
2078
2079	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring.
*/ 2080 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2081 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2082 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2083 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2084 if (error != 0) { 2085 device_printf(sc_if->msk_if_dev, 2086 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2087 goto fail; 2088 } 2089 2090 ctx.msk_busaddr = 0; 2091 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2092 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2093 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2094 msk_dmamap_cb, &ctx, 0); 2095 if (error != 0) { 2096 device_printf(sc_if->msk_if_dev, 2097 "failed to load DMA'able memory for jumbo Rx ring\n"); 2098 goto fail; 2099 } 2100 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2101 2102 /* Create tag for jumbo buffer blocks. */ 2103 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2104 PAGE_SIZE, 0, /* alignment, boundary */ 2105 BUS_SPACE_MAXADDR, /* lowaddr */ 2106 BUS_SPACE_MAXADDR, /* highaddr */ 2107 NULL, NULL, /* filter, filterarg */ 2108 MSK_JMEM, /* maxsize */ 2109 1, /* nsegments */ 2110 MSK_JMEM, /* maxsegsize */ 2111 0, /* flags */ 2112 NULL, NULL, /* lockfunc, lockarg */ 2113 &sc_if->msk_cdata.msk_jumbo_tag); 2114 if (error != 0) { 2115 device_printf(sc_if->msk_if_dev, 2116 "failed to create jumbo Rx buffer block DMA tag\n"); 2117 goto fail; 2118 } 2119 2120 /* Create tag for jumbo Rx buffers. */ 2121 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2122 PAGE_SIZE, 0, /* alignment, boundary */ 2123 BUS_SPACE_MAXADDR, /* lowaddr */ 2124 BUS_SPACE_MAXADDR, /* highaddr */ 2125 NULL, NULL, /* filter, filterarg */ 2126 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2127 MSK_MAXRXSEGS, /* nsegments */ 2128 MSK_JLEN, /* maxsegsize */ 2129 0, /* flags */ 2130 NULL, NULL, /* lockfunc, lockarg */ 2131 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2132 if (error != 0) { 2133 device_printf(sc_if->msk_if_dev, 2134 "failed to create jumbo Rx DMA tag\n"); 2135 goto fail; 2136 } 2137 2138 /* Create DMA maps for jumbo Rx buffers. */ 2139 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2140 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2141 device_printf(sc_if->msk_if_dev, 2142 "failed to create spare jumbo Rx dmamap\n"); 2143 goto fail; 2144 } 2145 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2146 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2147 jrxd->rx_m = NULL; 2148 jrxd->rx_dmamap = NULL; 2149 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2150 &jrxd->rx_dmamap); 2151 if (error != 0) { 2152 device_printf(sc_if->msk_if_dev, 2153 "failed to create jumbo Rx dmamap\n"); 2154 goto fail; 2155 } 2156 } 2157 2158 /* Allocate DMA'able memory and load the DMA map for jumbo buf. 
*/ 2159 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2160 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2161 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2162 &sc_if->msk_cdata.msk_jumbo_map); 2163 if (error != 0) { 2164 device_printf(sc_if->msk_if_dev, 2165 "failed to allocate DMA'able memory for jumbo buf\n"); 2166 goto fail; 2167 } 2168 2169 ctx.msk_busaddr = 0; 2170 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2171 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2172 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2173 if (error != 0) { 2174 device_printf(sc_if->msk_if_dev, 2175 "failed to load DMA'able memory for jumbobuf\n"); 2176 goto fail; 2177 } 2178 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2179 2180 /* 2181 * Now divide it up into 9K pieces and save the addresses 2182 * in an array. 2183 */ 2184 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2185 for (i = 0; i < MSK_JSLOTS; i++) { 2186 sc_if->msk_cdata.msk_jslots[i] = ptr; 2187 ptr += MSK_JLEN; 2188 entry = malloc(sizeof(struct msk_jpool_entry), 2189 M_DEVBUF, M_WAITOK); 2190 if (entry == NULL) { 2191 device_printf(sc_if->msk_if_dev, 2192 "no memory for jumbo buffers!\n"); 2193 error = ENOMEM; 2194 goto fail; 2195 } 2196 entry->slot = i; 2197 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2198 jpool_entries); 2199 } 2200 #endif 2201 return 0; 2202 } 2203 2204 static void 2205 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2206 { 2207 struct msk_txdesc *txd; 2208 struct msk_rxdesc *rxd; 2209 #ifdef MSK_JUMBO 2210 struct msk_rxdesc *jrxd; 2211 struct msk_jpool_entry *entry; 2212 #endif 2213 int i; 2214 2215 #ifdef MSK_JUMBO 2216 MSK_JLIST_LOCK(sc_if); 2217 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2218 device_printf(sc_if->msk_if_dev, 2219 "asked to free buffer that is in use!\n"); 2220 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2221 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2222 jpool_entries); 2223 } 2224 2225 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2226 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2227 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2228 free(entry, M_DEVBUF); 2229 } 2230 MSK_JLIST_UNLOCK(sc_if); 2231 2232 /* Destroy jumbo buffer block. */ 2233 if (sc_if->msk_cdata.msk_jumbo_map) 2234 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2235 sc_if->msk_cdata.msk_jumbo_map); 2236 2237 if (sc_if->msk_rdata.msk_jumbo_buf) { 2238 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2239 sc_if->msk_rdata.msk_jumbo_buf, 2240 sc_if->msk_cdata.msk_jumbo_map); 2241 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2242 sc_if->msk_cdata.msk_jumbo_map = NULL; 2243 } 2244 2245 /* Jumbo Rx ring. */ 2246 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2247 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2248 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2249 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2250 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2251 sc_if->msk_rdata.msk_jumbo_rx_ring) 2252 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2253 sc_if->msk_rdata.msk_jumbo_rx_ring, 2254 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2255 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2256 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2257 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2258 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2259 } 2260 2261 /* Jumbo Rx buffers. 
*/ 2262 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2263 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2264 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2265 if (jrxd->rx_dmamap) { 2266 bus_dmamap_destroy( 2267 sc_if->msk_cdata.msk_jumbo_rx_tag, 2268 jrxd->rx_dmamap); 2269 jrxd->rx_dmamap = NULL; 2270 } 2271 } 2272 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2273 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2274 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2275 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2276 } 2277 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2278 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2279 } 2280 #endif 2281 2282 /* Tx ring. */ 2283 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag, 2284 sc_if->msk_rdata.msk_tx_ring, 2285 sc_if->msk_cdata.msk_tx_ring_map); 2286 2287 /* Rx ring. */ 2288 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag, 2289 sc_if->msk_rdata.msk_rx_ring, 2290 sc_if->msk_cdata.msk_rx_ring_map); 2291 2292 /* Tx buffers. */ 2293 if (sc_if->msk_cdata.msk_tx_tag) { 2294 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2295 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2296 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2297 txd->tx_dmamap); 2298 } 2299 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2300 sc_if->msk_cdata.msk_tx_tag = NULL; 2301 } 2302 2303 /* Rx buffers. */ 2304 if (sc_if->msk_cdata.msk_rx_tag) { 2305 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2306 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2307 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2308 rxd->rx_dmamap); 2309 } 2310 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2311 sc_if->msk_cdata.msk_rx_sparemap); 2312 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2313 sc_if->msk_cdata.msk_rx_tag = NULL; 2314 } 2315 2316 if (sc_if->msk_cdata.msk_parent_tag) { 2317 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2318 sc_if->msk_cdata.msk_parent_tag = NULL; 2319 } 2320 } 2321 2322 #ifdef MSK_JUMBO 2323 /* 2324 * Allocate a jumbo buffer. 2325 */ 2326 static void * 2327 msk_jalloc(struct msk_if_softc *sc_if) 2328 { 2329 struct msk_jpool_entry *entry; 2330 2331 MSK_JLIST_LOCK(sc_if); 2332 2333 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2334 2335 if (entry == NULL) { 2336 MSK_JLIST_UNLOCK(sc_if); 2337 return (NULL); 2338 } 2339 2340 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2341 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2342 2343 MSK_JLIST_UNLOCK(sc_if); 2344 2345 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2346 } 2347 2348 /* 2349 * Release a jumbo buffer. 2350 */ 2351 static void 2352 msk_jfree(void *buf, void *args) 2353 { 2354 struct msk_if_softc *sc_if; 2355 struct msk_jpool_entry *entry; 2356 int i; 2357 2358 /* Extract the softc struct pointer. */ 2359 sc_if = (struct msk_if_softc *)args; 2360 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2361 2362 MSK_JLIST_LOCK(sc_if); 2363 /* Calculate the slot this buffer belongs to. 
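	 * The slot index is recovered by pointer arithmetic over the
	 * contiguous jumbo region: i = (buf - msk_jumbo_buf) / MSK_JLEN.
	 * For example, a buffer starting 2 * MSK_JLEN bytes past the base
	 * of msk_jumbo_buf belongs to slot 2; the KASSERT below rejects
	 * anything outside [0, MSK_JSLOTS).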
*/
2364	i = ((vm_offset_t)buf
2365	    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2366	KASSERT(i >= 0 && i < MSK_JSLOTS,
2367	    ("%s: asked to free buffer that we don't manage!", __func__));
2368
2369	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2370	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2371	entry->slot = i;
2372	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2373	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2374	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2375		wakeup(sc_if);
2376
2377	MSK_JLIST_UNLOCK(sc_if);
2378 }
2379 #endif
2380
2381 static int
2382 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2383 {
2384	struct msk_txdesc *txd, *txd_last;
2385	struct msk_tx_desc *tx_le;
2386	struct mbuf *m;
2387	bus_dmamap_t map;
2388	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2389	uint32_t control, prod, si;
2390	uint16_t offset, tcp_offset;
2391	int error, i, nsegs, maxsegs, defrag;
2392
2393	maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
2394	    MSK_RESERVED_TX_DESC_CNT;
2395	KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
2396	    ("not enough spare TX desc\n"));
2397	if (maxsegs > MSK_MAXTXSEGS)
2398		maxsegs = MSK_MAXTXSEGS;
2399
2400	/*
2401	 * Align the TX buffer to a 64-byte boundary. This greatly improves
2402	 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
2403	 * Try to avoid m_defrag() if the mbufs are not chained together
2404	 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
2405	 */
2406
2407 #define MSK_TXBUF_ALIGN	64
2408 #define MSK_TXBUF_MASK	(MSK_TXBUF_ALIGN - 1)
2409
2410	defrag = 1;
2411	m = *m_head;
2412	if (m->m_len == m->m_pkthdr.len) {
2413		int space;
2414
2415		space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
2416		if (space) {
2417			if (M_WRITABLE(m)) {
2418				if (M_TRAILINGSPACE(m) >= space) {
2419					/* e.g. TCP ACKs */
2420					bcopy(m->m_data, m->m_data + space,
2421					    m->m_len);
2422					m->m_data += space;
2423					defrag = 0;
2424					sc_if->msk_softc->msk_trailing_copied++;
2425				} else {
2426					space = MSK_TXBUF_ALIGN - space;
2427					if (M_LEADINGSPACE(m) >= space) {
2428						/* e.g. small UDP datagrams */
2429						bcopy(m->m_data,
2430						    m->m_data - space,
2431						    m->m_len);
2432						m->m_data -= space;
2433						defrag = 0;
2434						sc_if->msk_softc->
2435						msk_leading_copied++;
2436					}
2437				}
2438			}
2439		} else {
2440			/* e.g. on forwarding path */
2441			defrag = 0;
2442		}
2443	}
2444	if (defrag) {
2445		m = m_defrag(*m_head, MB_DONTWAIT);
2446		if (m == NULL) {
2447			m_freem(*m_head);
2448			*m_head = NULL;
2449			return ENOBUFS;
2450		}
2451		*m_head = m;
2452	} else {
2453		sc_if->msk_softc->msk_defrag_avoided++;
2454	}
2455
2456 #undef MSK_TXBUF_MASK
2457 #undef MSK_TXBUF_ALIGN
2458
2459	tcp_offset = offset = 0;
2460	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2461		/*
2462		 * Since the mbuf has no protocol-specific structure
2463		 * information in it, we have to inspect the protocol headers
2464		 * here to set up TSO and checksum offload. I don't know why
2465		 * Marvell made such a decision in the chip design, because
2466		 * other GigE hardware normally takes care of all these
2467		 * chores in hardware. However, TSO performance of the
2468		 * Yukon II is good enough that it's worth implementing.
2469		 */
2470		struct ether_header *eh;
2471		struct ip *ip;
2472
2473		/* TODO check for M_WRITABLE(m) */
2474
2475		offset = sizeof(struct ether_header);
2476		m = m_pullup(m, offset);
2477		if (m == NULL) {
2478			*m_head = NULL;
2479			return (ENOBUFS);
2480		}
2481		eh = mtod(m, struct ether_header *);
2482		/* Check if hardware VLAN insertion is off.
*/
2483		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2484			offset = sizeof(struct ether_vlan_header);
2485			m = m_pullup(m, offset);
2486			if (m == NULL) {
2487				*m_head = NULL;
2488				return (ENOBUFS);
2489			}
2490		}
2491		m = m_pullup(m, offset + sizeof(struct ip));
2492		if (m == NULL) {
2493			*m_head = NULL;
2494			return (ENOBUFS);
2495		}
2496		ip = (struct ip *)(mtod(m, char *) + offset);
2497		offset += (ip->ip_hl << 2);
2498		tcp_offset = offset;
2499		/*
2500		 * It seems that the Yukon II has a Tx checksum offload bug
2501		 * for small TCP packets of less than 60 bytes in size
2502		 * (e.g. TCP window probe packets, pure ACK packets).
2503		 * The common workaround of padding with zeros to reach the
2504		 * minimum Ethernet frame size didn't work at all.
2505		 * Instead of disabling checksum offload completely, we
2506		 * resort to a software checksum routine when we encounter
2507		 * short TCP frames.
2508		 * Short UDP packets appear to be handled correctly by the
2509		 * Yukon II.
2510		 */
2511		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2512		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2513			uint16_t csum;
2514
2515			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2516			    (ip->ip_hl << 2), offset);
2517			*(uint16_t *)(m->m_data + offset +
2518			    m->m_pkthdr.csum_data) = csum;
2519			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2520		}
2521		*m_head = m;
2522	}
2523
2524	prod = sc_if->msk_cdata.msk_tx_prod;
2525	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2526	txd_last = txd;
2527	map = txd->tx_dmamap;
2528
2529	error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
2530	    m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2531	if (error) {
2532		m_freem(*m_head);
2533		*m_head = NULL;
2534		return error;
2535	}
2536	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2537
2538	m = *m_head;
2539	control = 0;
2540	tx_le = NULL;
2541
2542 #ifdef notyet
2543	/* Check if we have a VLAN tag to insert. */
2544	if ((m->m_flags & M_VLANTAG) != 0) {
2545		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2546		tx_le->msk_addr = htole32(0);
2547		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2548		    htons(m->m_pkthdr.ether_vtag));
2549		sc_if->msk_cdata.msk_tx_cnt++;
2550		MSK_INC(prod, MSK_TX_RING_CNT);
2551		control |= INS_VLAN;
2552	}
2553 #endif
2554	/* Check if we have to handle checksum offload. */
2555	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2556		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2557		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2558		    & 0xffff) | ((uint32_t)tcp_offset << 16));
2559		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2560		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2561		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2562			control |= UDPTCP;
2563		sc_if->msk_cdata.msk_tx_cnt++;
2564		MSK_INC(prod, MSK_TX_RING_CNT);
2565	}
2566
2567	si = prod;
2568	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2569	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2570	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2571	    OP_PACKET);
2572	sc_if->msk_cdata.msk_tx_cnt++;
2573	MSK_INC(prod, MSK_TX_RING_CNT);
2574
2575	for (i = 1; i < nsegs; i++) {
2576		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2577		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2578		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2579		    OP_BUFFER | HW_OWNER);
2580		sc_if->msk_cdata.msk_tx_cnt++;
2581		MSK_INC(prod, MSK_TX_RING_CNT);
2582	}
2583	/* Update producer index. */
2584	sc_if->msk_cdata.msk_tx_prod = prod;
2585
2586	/* Set EOP on the last descriptor.
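	 * At this point prod has already been advanced one entry past the
	 * final fragment, so step back one LE, modulo the ring size:
	 * (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT. For example, if
	 * the ring just wrapped and prod is 0, the last LE written is
	 * MSK_TX_RING_CNT - 1.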
*/ 2587 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2588 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2589 tx_le->msk_control |= htole32(EOP); 2590 2591 /* Turn the first descriptor ownership to hardware. */ 2592 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2593 tx_le->msk_control |= htole32(HW_OWNER); 2594 2595 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2596 map = txd_last->tx_dmamap; 2597 txd_last->tx_dmamap = txd->tx_dmamap; 2598 txd->tx_dmamap = map; 2599 txd->tx_m = m; 2600 2601 return (0); 2602 } 2603 2604 static void 2605 msk_start(struct ifnet *ifp) 2606 { 2607 struct msk_if_softc *sc_if; 2608 struct mbuf *m_head; 2609 int enq; 2610 2611 sc_if = ifp->if_softc; 2612 2613 ASSERT_SERIALIZED(ifp->if_serializer); 2614 2615 if (!sc_if->msk_link) { 2616 ifq_purge(&ifp->if_snd); 2617 return; 2618 } 2619 2620 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2621 return; 2622 2623 enq = 0; 2624 while (!ifq_is_empty(&ifp->if_snd)) { 2625 if (MSK_IS_OACTIVE(sc_if)) { 2626 ifp->if_flags |= IFF_OACTIVE; 2627 break; 2628 } 2629 2630 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2631 if (m_head == NULL) 2632 break; 2633 2634 /* 2635 * Pack the data into the transmit ring. If we 2636 * don't have room, set the OACTIVE flag and wait 2637 * for the NIC to drain the ring. 2638 */ 2639 if (msk_encap(sc_if, &m_head) != 0) { 2640 ifp->if_oerrors++; 2641 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2642 continue; 2643 } else { 2644 ifp->if_flags |= IFF_OACTIVE; 2645 break; 2646 } 2647 } 2648 enq = 1; 2649 2650 /* 2651 * If there's a BPF listener, bounce a copy of this frame 2652 * to him. 2653 */ 2654 BPF_MTAP(ifp, m_head); 2655 } 2656 2657 if (enq) { 2658 /* Transmit */ 2659 CSR_WRITE_2(sc_if->msk_softc, 2660 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2661 sc_if->msk_cdata.msk_tx_prod); 2662 2663 /* Set a timeout in case the chip goes out to lunch. */ 2664 ifp->if_timer = MSK_TX_TIMEOUT; 2665 } 2666 } 2667 2668 static void 2669 msk_watchdog(struct ifnet *ifp) 2670 { 2671 struct msk_if_softc *sc_if = ifp->if_softc; 2672 uint32_t ridx; 2673 int idx; 2674 2675 ASSERT_SERIALIZED(ifp->if_serializer); 2676 2677 if (sc_if->msk_link == 0) { 2678 if (bootverbose) 2679 if_printf(sc_if->msk_ifp, "watchdog timeout " 2680 "(missed link)\n"); 2681 ifp->if_oerrors++; 2682 msk_init(sc_if); 2683 return; 2684 } 2685 2686 /* 2687 * Reclaim first as there is a possibility of losing Tx completion 2688 * interrupts. 2689 */ 2690 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2691 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2692 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2693 msk_txeof(sc_if, idx); 2694 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2695 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2696 "-- recovering\n"); 2697 if (!ifq_is_empty(&ifp->if_snd)) 2698 if_devstart(ifp); 2699 return; 2700 } 2701 } 2702 2703 if_printf(ifp, "watchdog timeout\n"); 2704 ifp->if_oerrors++; 2705 msk_init(sc_if); 2706 if (!ifq_is_empty(&ifp->if_snd)) 2707 if_devstart(ifp); 2708 } 2709 2710 static int 2711 mskc_shutdown(device_t dev) 2712 { 2713 struct msk_softc *sc = device_get_softc(dev); 2714 int i; 2715 2716 lwkt_serialize_enter(&sc->msk_serializer); 2717 2718 for (i = 0; i < sc->msk_num_port; i++) { 2719 if (sc->msk_if[i] != NULL) 2720 msk_stop(sc->msk_if[i]); 2721 } 2722 2723 /* Put hardware reset. 
*/
2724	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2725
2726	lwkt_serialize_exit(&sc->msk_serializer);
2727	return (0);
2728 }
2729
2730 static int
2731 mskc_suspend(device_t dev)
2732 {
2733	struct msk_softc *sc = device_get_softc(dev);
2734	int i;
2735
2736	lwkt_serialize_enter(&sc->msk_serializer);
2737
2738	for (i = 0; i < sc->msk_num_port; i++) {
2739		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2740		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
2741			msk_stop(sc->msk_if[i]);
2742	}
2743
2744	/* Disable all interrupts. */
2745	CSR_WRITE_4(sc, B0_IMSK, 0);
2746	CSR_READ_4(sc, B0_IMSK);
2747	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2748	CSR_READ_4(sc, B0_HWE_IMSK);
2749
2750	mskc_phy_power(sc, MSK_PHY_POWERDOWN);
2751
2752	/* Put hardware reset. */
2753	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2754	sc->msk_suspended = 1;
2755
2756	lwkt_serialize_exit(&sc->msk_serializer);
2757
2758	return (0);
2759 }
2760
2761 static int
2762 mskc_resume(device_t dev)
2763 {
2764	struct msk_softc *sc = device_get_softc(dev);
2765	int i;
2766
2767	lwkt_serialize_enter(&sc->msk_serializer);
2768
2769	/* Enable all clocks before accessing any registers. */
2770	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
2771	mskc_reset(sc);
2772	for (i = 0; i < sc->msk_num_port; i++) {
2773		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2774		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2775			msk_init(sc->msk_if[i]);
2776	}
2777	sc->msk_suspended = 0;
2778
2779	lwkt_serialize_exit(&sc->msk_serializer);
2780
2781	return (0);
2782 }
2783
2784 static void
2785 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len,
2786	  struct mbuf_chain *chain)
2787 {
2788	struct mbuf *m;
2789	struct ifnet *ifp;
2790	struct msk_rxdesc *rxd;
2791	int cons, rxlen;
2792
2793	ifp = sc_if->msk_ifp;
2794
2795	cons = sc_if->msk_cdata.msk_rx_cons;
2796	do {
2797		rxlen = status >> 16;
2798		if ((status & GMR_FS_VLAN) != 0 &&
2799		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2800			rxlen -= EVL_ENCAPLEN;
2801		if (sc_if->msk_flags & MSK_FLAG_NORXCHK) {
2802			/*
2803			 * For controllers that return a bogus status word,
2804			 * just do a minimal check and let the upper stack
2805			 * handle the frame.
2806			 */
2807			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2808				ifp->if_ierrors++;
2809				msk_discard_rxbuf(sc_if, cons);
2810				break;
2811			}
2812		} else if (len > sc_if->msk_framesize ||
2813		    ((status & GMR_FS_ANY_ERR) != 0) ||
2814		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2815			/* Don't count flow-control packets as errors. */
2816			if ((status & GMR_FS_GOOD_FC) == 0)
2817				ifp->if_ierrors++;
2818			msk_discard_rxbuf(sc_if, cons);
2819			break;
2820		}
2821		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2822		m = rxd->rx_m;
2823		if (msk_newbuf(sc_if, cons, 0) != 0) {
2824			ifp->if_iqdrops++;
2825			/* Reuse old buffer. */
2826			msk_discard_rxbuf(sc_if, cons);
2827			break;
2828		}
2829		m->m_pkthdr.rcvif = ifp;
2830		m->m_pkthdr.len = m->m_len = len;
2831		ifp->if_ipackets++;
2832 #ifdef notyet
2833		/* Check for VLAN tagged packets.
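		 * (Currently compiled out.) The tag value itself is not
		 * carried in the Rx status word here; it was latched into
		 * sc_if->msk_vtag by a preceding OP_RXVLAN/OP_RXCHKSVLAN
		 * status LE in mskc_handle_events().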
*/ 2834 if ((status & GMR_FS_VLAN) != 0 && 2835 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2836 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2837 m->m_flags |= M_VLANTAG; 2838 } 2839 #endif 2840 2841 ether_input_chain(ifp, m, NULL, chain); 2842 } while (0); 2843 2844 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2845 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2846 } 2847 2848 #ifdef MSK_JUMBO 2849 static void 2850 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2851 { 2852 struct mbuf *m; 2853 struct ifnet *ifp; 2854 struct msk_rxdesc *jrxd; 2855 int cons, rxlen; 2856 2857 ifp = sc_if->msk_ifp; 2858 2859 MSK_IF_LOCK_ASSERT(sc_if); 2860 2861 cons = sc_if->msk_cdata.msk_rx_cons; 2862 do { 2863 rxlen = status >> 16; 2864 if ((status & GMR_FS_VLAN) != 0 && 2865 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2866 rxlen -= ETHER_VLAN_ENCAP_LEN; 2867 if (len > sc_if->msk_framesize || 2868 ((status & GMR_FS_ANY_ERR) != 0) || 2869 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2870 /* Don't count flow-control packet as errors. */ 2871 if ((status & GMR_FS_GOOD_FC) == 0) 2872 ifp->if_ierrors++; 2873 msk_discard_jumbo_rxbuf(sc_if, cons); 2874 break; 2875 } 2876 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2877 m = jrxd->rx_m; 2878 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2879 ifp->if_iqdrops++; 2880 /* Reuse old buffer. */ 2881 msk_discard_jumbo_rxbuf(sc_if, cons); 2882 break; 2883 } 2884 m->m_pkthdr.rcvif = ifp; 2885 m->m_pkthdr.len = m->m_len = len; 2886 ifp->if_ipackets++; 2887 /* Check for VLAN tagged packets. */ 2888 if ((status & GMR_FS_VLAN) != 0 && 2889 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2890 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2891 m->m_flags |= M_VLANTAG; 2892 } 2893 MSK_IF_UNLOCK(sc_if); 2894 (*ifp->if_input)(ifp, m); 2895 MSK_IF_LOCK(sc_if); 2896 } while (0); 2897 2898 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2899 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2900 } 2901 #endif 2902 2903 static void 2904 msk_txeof(struct msk_if_softc *sc_if, int idx) 2905 { 2906 struct msk_txdesc *txd; 2907 struct msk_tx_desc *cur_tx; 2908 struct ifnet *ifp; 2909 uint32_t control; 2910 int cons, prog; 2911 2912 ifp = sc_if->msk_ifp; 2913 2914 /* 2915 * Go through our tx ring and free mbufs for those 2916 * frames that have been sent. 2917 */ 2918 cons = sc_if->msk_cdata.msk_tx_cons; 2919 prog = 0; 2920 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2921 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2922 break; 2923 prog++; 2924 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2925 control = le32toh(cur_tx->msk_control); 2926 sc_if->msk_cdata.msk_tx_cnt--; 2927 if ((control & EOP) == 0) 2928 continue; 2929 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2930 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2931 2932 ifp->if_opackets++; 2933 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2934 __func__)); 2935 m_freem(txd->tx_m); 2936 txd->tx_m = NULL; 2937 } 2938 2939 if (prog > 0) { 2940 sc_if->msk_cdata.msk_tx_cons = cons; 2941 if (!MSK_IS_OACTIVE(sc_if)) 2942 ifp->if_flags &= ~IFF_OACTIVE; 2943 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2944 ifp->if_timer = 0; 2945 /* No need to sync LEs as we didn't update LEs. 
*/
2946	}
2947 }
2948
2949 static void
2950 msk_tick(void *xsc_if)
2951 {
2952	struct msk_if_softc *sc_if = xsc_if;
2953	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2954	struct mii_data *mii;
2955
2956	lwkt_serialize_enter(ifp->if_serializer);
2957
2958	mii = device_get_softc(sc_if->msk_miibus);
2959
2960	mii_tick(mii);
2961	if (!sc_if->msk_link)
2962		msk_miibus_statchg(sc_if->msk_if_dev);
2963	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
2964
2965	lwkt_serialize_exit(ifp->if_serializer);
2966 }
2967
2968 static void
2969 msk_intr_phy(struct msk_if_softc *sc_if)
2970 {
2971	uint16_t status;
2972
2973	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
2974	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
2975	/* Handle FIFO Underrun/Overflow? */
2976	if (status & PHY_M_IS_FIFO_ERROR) {
2977		device_printf(sc_if->msk_if_dev,
2978		    "PHY FIFO underrun/overflow.\n");
2979	}
2980 }
2981
2982 static void
2983 msk_intr_gmac(struct msk_if_softc *sc_if)
2984 {
2985	struct msk_softc *sc;
2986	uint8_t status;
2987
2988	sc = sc_if->msk_softc;
2989	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
2990
2991	/* GMAC Rx FIFO overrun. */
2992	if ((status & GM_IS_RX_FF_OR) != 0) {
2993		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
2994		    GMF_CLI_RX_FO);
2995	}
2996	/* GMAC Tx FIFO underrun. */
2997	if ((status & GM_IS_TX_FF_UR) != 0) {
2998		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
2999		    GMF_CLI_TX_FU);
3000		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3001		/*
3002		 * XXX
3003		 * In case of a Tx underrun, we may need to flush/reset the
3004		 * Tx MAC, but that would also require resynchronization with
3005		 * the status LEs. Reinitializing status LEs would affect the
3006		 * other port in a dual-MAC configuration, so it should be
3007		 * avoided as much as possible.
3008		 * Due to the lack of documentation it's all vague guesswork,
3009		 * and it needs more investigation.
3010		 */
3011	}
3012 }
3013
3014 static void
3015 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3016 {
3017	struct msk_softc *sc;
3018
3019	sc = sc_if->msk_softc;
3020	if ((status & Y2_IS_PAR_RD1) != 0) {
3021		device_printf(sc_if->msk_if_dev,
3022		    "RAM buffer read parity error\n");
3023		/* Clear IRQ. */
3024		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3025		    RI_CLR_RD_PERR);
3026	}
3027	if ((status & Y2_IS_PAR_WR1) != 0) {
3028		device_printf(sc_if->msk_if_dev,
3029		    "RAM buffer write parity error\n");
3030		/* Clear IRQ. */
3031		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3032		    RI_CLR_WR_PERR);
3033	}
3034	if ((status & Y2_IS_PAR_MAC1) != 0) {
3035		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3036		/* Clear IRQ. */
3037		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3038		    GMF_CLI_TX_PE);
3039	}
3040	if ((status & Y2_IS_PAR_RX1) != 0) {
3041		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3042		/* Clear IRQ. */
3043		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3044	}
3045	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3046		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3047		/* Clear IRQ. */
3048		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3049	}
3050 }
3051
3052 static void
3053 mskc_intr_hwerr(struct msk_softc *sc)
3054 {
3055	uint32_t status;
3056	uint32_t tlphead[4];
3057
3058	status = CSR_READ_4(sc, B0_HWE_ISRC);
3059	/* Time Stamp timer overflow.
*/
3060	if ((status & Y2_IS_TIST_OV) != 0)
3061		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3062	if ((status & Y2_IS_PCI_NEXP) != 0) {
3063		/*
3064		 * A PCI Express error occurred which is not described in
3065		 * the PEX spec.
3066		 * This error is also mapped to either the Master Abort
3067		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3068		 * can only be cleared there.
3069		 */
3070		device_printf(sc->msk_dev,
3071		    "PCI Express protocol violation error\n");
3072	}
3073
3074	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3075		uint16_t v16;
3076
3077		if ((status & Y2_IS_MST_ERR) != 0)
3078			device_printf(sc->msk_dev,
3079			    "unexpected IRQ Master error\n");
3080		else
3081			device_printf(sc->msk_dev,
3082			    "unexpected IRQ Status error\n");
3083		/* Reset all bits in the PCI status register. */
3084		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3085		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3086		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3087		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3088		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3089		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3090	}
3091
3092	/* Check for PCI Express Uncorrectable Error. */
3093	if ((status & Y2_IS_PCI_EXP) != 0) {
3094		uint32_t v32;
3095
3096		/*
3097		 * On a PCI Express bus, bridges are called root complexes
3098		 * (RC). PCI Express errors are recognized by the root
3099		 * complex too, which requests the system to handle the
3100		 * problem. After the error occurs, it may no longer be
3101		 * possible to access the adapter.
3102		 */
3103
3104		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3105		if ((v32 & PEX_UNSUP_REQ) != 0) {
3106			/* Ignore unsupported request error. */
3107			if (bootverbose) {
3108				device_printf(sc->msk_dev,
3109				    "Uncorrectable PCI Express error\n");
3110			}
3111		}
3112		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3113			int i;
3114
3115			/* Get TLP header from Log Registers. */
3116			for (i = 0; i < 4; i++)
3117				tlphead[i] = CSR_PCI_READ_4(sc,
3118				    PEX_HEADER_LOG + i * 4);
3119			/* Check for vendor defined broadcast message. */
3120			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3121				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3122				CSR_WRITE_4(sc, B0_HWE_IMSK,
3123				    sc->msk_intrhwemask);
3124				CSR_READ_4(sc, B0_HWE_IMSK);
3125			}
3126		}
3127		/* Clear the interrupt.
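		 * PEX_UNC_ERR_STAT appears to be write-1-to-clear, hence
		 * writing all ones below; the TST_CFG_WRITE_ON/OFF pair
		 * around it temporarily lifts the config-write protection,
		 * just as done for PCIR_STATUS above.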
*/
3128		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3129		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3130		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3131	}
3132
3133	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3134		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3135	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3136		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3137 }
3138
3139 static __inline void
3140 msk_rxput(struct msk_if_softc *sc_if)
3141 {
3142	struct msk_softc *sc;
3143
3144	sc = sc_if->msk_softc;
3145 #ifdef MSK_JUMBO
3146	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3147		bus_dmamap_sync(
3148		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3149		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3150		    BUS_DMASYNC_PREWRITE);
3151	}
3152 #endif
3153	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3154	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3155 }
3156
3157 static int
3158 mskc_handle_events(struct msk_softc *sc)
3159 {
3160	struct msk_if_softc *sc_if;
3161	int rxput[2];
3162	struct msk_stat_desc *sd;
3163	uint32_t control, status;
3164	int cons, idx, len, port, rxprog;
3165	struct mbuf_chain chain[MAXCPU];
3166
3167	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3168	if (idx == sc->msk_stat_cons)
3169		return (0);
3170
3171	ether_input_chain_init(chain);
3172
3173	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3174
3175	rxprog = 0;
3176	for (cons = sc->msk_stat_cons; cons != idx;) {
3177		sd = &sc->msk_stat_ring[cons];
3178		control = le32toh(sd->msk_control);
3179		if ((control & HW_OWNER) == 0)
3180			break;
3181		/*
3182		 * Marvell's FreeBSD driver updates the status LE after
3183		 * clearing HW_OWNER. However, we have no way to sync a
3184		 * single LE with the bus_dma(9) API; bus_dma(9) only
3185		 * provides a way to sync an entire DMA map. So don't sync
3186		 * the LE until we have a better way to sync LEs.
3187		 */
3188		control &= ~HW_OWNER;
3189		sd->msk_control = htole32(control);
3190		status = le32toh(sd->msk_status);
3191		len = control & STLE_LEN_MASK;
3192		port = (control >> 16) & 0x01;
3193		sc_if = sc->msk_if[port];
3194		if (sc_if == NULL) {
3195			device_printf(sc->msk_dev, "invalid port opcode "
3196			    "0x%08x\n", control & STLE_OP_MASK);
3197			continue;
3198		}
3199
3200		switch (control & STLE_OP_MASK) {
3201		case OP_RXVLAN:
3202			sc_if->msk_vtag = ntohs(len);
3203			break;
3204		case OP_RXCHKSVLAN:
3205			sc_if->msk_vtag = ntohs(len);
3206			break;
3207		case OP_RXSTAT:
3208			if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
3209				break;
3210 #ifdef MSK_JUMBO
3211			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3212				msk_jumbo_rxeof(sc_if, status, len);
3213			else
3214 #endif
3215				msk_rxeof(sc_if, status, len, chain);
3216			rxprog++;
3217			/*
3218			 * Because there is no way to sync a single Rx LE,
3219			 * put the DMA sync operation off until the end of
3220			 * event processing.
3221			 */
3222			rxput[port]++;
3223			/* Update the prefetch unit if we've passed the watermark.
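			 * Rather than writing PREF_UNIT_PUT_IDX_REG for every
			 * received frame, rxput[port] counts frames since the
			 * last update, and msk_rxput() flushes the producer
			 * index once the watermark (msk_rx_putwm) is reached;
			 * any remainder is flushed after the event loop.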
*/ 3224 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3225 msk_rxput(sc_if); 3226 rxput[port] = 0; 3227 } 3228 break; 3229 case OP_TXINDEXLE: 3230 if (sc->msk_if[MSK_PORT_A] != NULL) { 3231 msk_txeof(sc->msk_if[MSK_PORT_A], 3232 status & STLE_TXA1_MSKL); 3233 } 3234 if (sc->msk_if[MSK_PORT_B] != NULL) { 3235 msk_txeof(sc->msk_if[MSK_PORT_B], 3236 ((status & STLE_TXA2_MSKL) >> 3237 STLE_TXA2_SHIFTL) | 3238 ((len & STLE_TXA2_MSKH) << 3239 STLE_TXA2_SHIFTH)); 3240 } 3241 break; 3242 default: 3243 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3244 control & STLE_OP_MASK); 3245 break; 3246 } 3247 MSK_INC(cons, MSK_STAT_RING_CNT); 3248 if (rxprog > sc->msk_process_limit) 3249 break; 3250 } 3251 3252 if (rxprog > 0) 3253 ether_input_dispatch(chain); 3254 3255 sc->msk_stat_cons = cons; 3256 /* XXX We should sync status LEs here. See above notes. */ 3257 3258 if (rxput[MSK_PORT_A] > 0) 3259 msk_rxput(sc->msk_if[MSK_PORT_A]); 3260 if (rxput[MSK_PORT_B] > 0) 3261 msk_rxput(sc->msk_if[MSK_PORT_B]); 3262 3263 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3264 } 3265 3266 /* Legacy interrupt handler for shared interrupt. */ 3267 static void 3268 mskc_intr(void *xsc) 3269 { 3270 struct msk_softc *sc; 3271 struct msk_if_softc *sc_if0, *sc_if1; 3272 struct ifnet *ifp0, *ifp1; 3273 uint32_t status; 3274 3275 sc = xsc; 3276 ASSERT_SERIALIZED(&sc->msk_serializer); 3277 3278 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3279 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3280 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3281 (status & sc->msk_intrmask) == 0) { 3282 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3283 return; 3284 } 3285 3286 sc_if0 = sc->msk_if[MSK_PORT_A]; 3287 sc_if1 = sc->msk_if[MSK_PORT_B]; 3288 ifp0 = ifp1 = NULL; 3289 if (sc_if0 != NULL) 3290 ifp0 = sc_if0->msk_ifp; 3291 if (sc_if1 != NULL) 3292 ifp1 = sc_if1->msk_ifp; 3293 3294 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3295 msk_intr_phy(sc_if0); 3296 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3297 msk_intr_phy(sc_if1); 3298 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3299 msk_intr_gmac(sc_if0); 3300 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3301 msk_intr_gmac(sc_if1); 3302 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3303 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3304 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3305 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3306 CSR_READ_4(sc, B0_IMSK); 3307 } 3308 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3309 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3310 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3311 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3312 CSR_READ_4(sc, B0_IMSK); 3313 } 3314 if ((status & Y2_IS_HW_ERR) != 0) 3315 mskc_intr_hwerr(sc); 3316 3317 while (mskc_handle_events(sc) != 0) 3318 ; 3319 if ((status & Y2_IS_STAT_BMU) != 0) 3320 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3321 3322 /* Reenable interrupts. 
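	 * Writing 2 to B0_Y2_SP_ICR undoes the auto-masking performed by
	 * the B0_Y2_SP_ISRC2 read at the top of the handler; the early-out
	 * path above issues the same write.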
*/ 3323 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3324 3325 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 && 3326 !ifq_is_empty(&ifp0->if_snd)) 3327 if_devstart(ifp0); 3328 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 && 3329 !ifq_is_empty(&ifp1->if_snd)) 3330 if_devstart(ifp1); 3331 } 3332 3333 static void 3334 msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3335 { 3336 struct msk_softc *sc = sc_if->msk_softc; 3337 struct ifnet *ifp = sc_if->msk_ifp; 3338 3339 if ((sc->msk_hw_id == CHIP_ID_YUKON_EX && 3340 sc->msk_hw_rev != CHIP_REV_YU_EX_A0) || 3341 sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) { 3342 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3343 TX_STFW_ENA); 3344 } else { 3345 if (ifp->if_mtu > ETHERMTU) { 3346 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3347 CSR_WRITE_4(sc, 3348 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3349 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3350 /* Disable Store & Forward mode for Tx. */ 3351 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3352 TX_STFW_DIS); 3353 } else { 3354 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3355 TX_STFW_ENA); 3356 } 3357 } 3358 } 3359 3360 static void 3361 msk_init(void *xsc) 3362 { 3363 struct msk_if_softc *sc_if = xsc; 3364 struct msk_softc *sc = sc_if->msk_softc; 3365 struct ifnet *ifp = sc_if->msk_ifp; 3366 struct mii_data *mii; 3367 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3368 uint16_t gmac; 3369 uint32_t reg; 3370 int error, i; 3371 3372 ASSERT_SERIALIZED(ifp->if_serializer); 3373 3374 mii = device_get_softc(sc_if->msk_miibus); 3375 3376 error = 0; 3377 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3378 msk_stop(sc_if); 3379 3380 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3381 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 3382 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3383 /* 3384 * In Yukon EC Ultra, TSO & checksum offload is not 3385 * supported for jumbo frame. 3386 */ 3387 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 3388 ifp->if_capenable &= ~IFCAP_TXCSUM; 3389 } 3390 3391 /* GMAC Control reset. */ 3392 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3393 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3394 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3395 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 3396 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 3397 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3398 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3399 GMC_BYP_RETR_ON); 3400 } 3401 3402 /* 3403 * Initialize GMAC first such that speed/duplex/flow-control 3404 * parameters are renegotiated when interface is brought up. 3405 */ 3406 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3407 3408 /* Dummy read the Interrupt Source Register. */ 3409 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3410 3411 /* Set MIB Clear Counter Mode. */ 3412 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3413 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3414 /* Read all MIB Counters with Clear Mode set. */ 3415 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3416 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3417 /* Clear MIB Clear Counter Mode. */ 3418 gmac &= ~GM_PAR_MIB_CLR; 3419 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3420 3421 /* Disable FCS. */ 3422 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3423 3424 /* Setup Transmit Control Register. 
*/
3425	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3426
3427	/* Setup Transmit Flow Control Register. */
3428	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3429
3430	/* Setup Transmit Parameter Register. */
3431	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3432	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3433	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3434
3435	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3436	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3437
3438	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3439		gmac |= GM_SMOD_JUMBO_ENA;
3440	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3441
3442	/* Set station address. */
3443	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3444	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3445		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3446		    eaddr[i]);
3447	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3448		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3449		    eaddr[i]);
3450
3451	/* Disable interrupts for counter overflows. */
3452	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3453	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3454	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3455
3456	/* Configure Rx MAC FIFO. */
3457	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3458	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3459	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3460	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3461	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
3462		reg |= GMF_RX_OVER_ON;
3463	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3464
3465	/* Set receive filter. */
3466	msk_rxfilter(sc_if);
3467
3468	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3469		/* Clear flush mask - HW bug. */
3470		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3471	} else {
3472		/* Flush Rx MAC FIFO on any flow control or error. */
3473		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3474		    GMR_FS_ANY_ERR);
3475	}
3476
3477	/*
3478	 * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3479	 * due to a hardware hang on receipt of pause frames.
3480	 */
3481	reg = RX_GMF_FL_THR_DEF + 1;
3482	/* Another bit of magic for the Yukon FE+ - from Linux. */
3483	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3484	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3485		reg = 0x178;
3486	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3487
3488
3489	/* Configure Tx MAC FIFO. */
3490	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3491	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3492	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3493
3494	/* Configure hardware VLAN tag insertion/stripping. */
3495	msk_setvlan(sc_if, ifp);
3496
3497	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3498		/* Set Rx Pause threshold. */
3499		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3500		    MSK_ECU_LLPP);
3501		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3502		    MSK_ECU_ULPP);
3503		/* Configure store-and-forward for Tx. */
3504		msk_set_tx_stfwd(sc_if);
3505	}
3506
3507	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3508	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3509		/* Disable dynamic watermark - from Linux.
*/
3510		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3511		reg &= ~0x03;
3512		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3513	}
3514
3515	/*
3516	 * Disable the Force Sync bit and Alloc bit in the Tx RAM interface
3517	 * arbiter, as we don't use the Sync Tx queue.
3518	 */
3519	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3520	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3521	/* Enable the RAM Interface Arbiter. */
3522	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3523
3524	/* Setup RAM buffer. */
3525	msk_set_rambuffer(sc_if);
3526
3527	/* Disable Tx sync Queue. */
3528	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3529
3530	/* Setup Tx Queue Bus Memory Interface. */
3531	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3532	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3533	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3534	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3535	switch (sc->msk_hw_id) {
3536	case CHIP_ID_YUKON_EC_U:
3537		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3538			/* Fix for Yukon-EC Ultra: set BMU FIFO level. */
3539			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3540			    MSK_ECU_TXFF_LEV);
3541		}
3542		break;
3543	case CHIP_ID_YUKON_EX:
3544		/*
3545		 * The Yukon Extreme seems to have a silicon bug in its
3546		 * automatic Tx checksum calculation capability.
3547		 */
3548		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
3549			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3550			    F_TX_CHK_AUTO_OFF);
3551		}
3552		break;
3553	}
3554
3555	/* Setup Rx Queue Bus Memory Interface. */
3556	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3557	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3558	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3559	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3560	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3561	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3562		/* MAC Rx RAM Read is controlled by hardware. */
3563		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3564	}
3565
3566	msk_set_prefetch(sc, sc_if->msk_txq,
3567	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3568	msk_init_tx_ring(sc_if);
3569
3570	/* Disable Rx checksum offload and RSS hash. */
3571	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3572	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3573 #ifdef MSK_JUMBO
3574	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3575		msk_set_prefetch(sc, sc_if->msk_rxq,
3576		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3577		    MSK_JUMBO_RX_RING_CNT - 1);
3578		error = msk_init_jumbo_rx_ring(sc_if);
3579	} else
3580 #endif
3581	{
3582		msk_set_prefetch(sc, sc_if->msk_rxq,
3583		    sc_if->msk_rdata.msk_rx_ring_paddr,
3584		    MSK_RX_RING_CNT - 1);
3585		error = msk_init_rx_ring(sc_if);
3586	}
3587	if (error != 0) {
3588		device_printf(sc_if->msk_if_dev,
3589		    "initialization failed: no memory for Rx buffers\n");
3590		msk_stop(sc_if);
3591		return;
3592	}
3593	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3594	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
3595		/* Disable flushing of non-ASF packets. */
3596		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3597		    GMF_RX_MACSEC_FLUSH_OFF);
3598	}
3599
3600	/* Configure interrupt handling.
static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	/* Upper and lower Rx pause thresholds for flow control. */
	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* XXX also set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable store-and-forward for the Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set list-element (LE) base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the last index of the list. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to flush the preceding write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

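/*
 * Usage note: msk_set_prefetch() points a queue's list-element prefetch
 * unit at a descriptor ring in host memory; for example, msk_init()
 * programs the Tx ring with
 *
 *	msk_set_prefetch(sc, sc_if->msk_txq,
 *	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 *
 * where the 64-bit bus address is split via MSK_ADDR_LO()/MSK_ADDR_HI()
 * and the last argument is the index of the final ring entry, not the
 * ring size.
 */
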
static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
#endif
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);
	ifp->if_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			/* Neither stopped nor idle; re-issue the stop. */
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU has not
	 * reached the end of a packet, and since we cannot be sure whether
	 * data is still incoming, the BMU may only be reset while it is not
	 * in the middle of a DMA transfer.  Because the Rx path may still
	 * be active, the Rx RAM buffer is stopped first, so any possible
	 * incoming data will not trigger a DMA.  After the RAM buffer is
	 * stopped, the BMU is polled until any DMA in progress has ended,
	 * and only then is it reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	/* Poll until any DMA in progress has ended. */
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
#ifdef MSK_JUMBO
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
#endif
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Mark the interface down. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc_if->msk_link = 0;
}

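/*
 * Sysctl handlers.  mskc_sysctl_proc_limit() clamps writes to the range
 * [MSK_PROC_MIN, MSK_PROC_MAX].  mskc_sysctl_intr_rate() validates the
 * requested interrupt moderation rate (negative values are rejected with
 * EINVAL) and reprograms the moderation timer only while at least one
 * port is up and running.
 */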
static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}

static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error = 0, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}

static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "can't create coherent DMA memory\n");
		return error;
	}

	*dtag = dmem.dmem_tag;
	*dmap = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}

static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
mskc_set_imtimer(struct msk_softc *sc)
{
	if (sc->msk_intr_rate > 0) {
		/*
		 * Convert the requested rate (interrupts/second) into an
		 * initial countdown of 1000000 / rate microseconds; e.g.
		 * an msk_intr_rate of 10000 yields a 100us moderation
		 * interval.
		 *
		 * XXX myk(4) seems to use 125 MHz for EC/FE/XL and
		 * 78.125 MHz for the rest of the chip types.
		 */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
	} else {
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
	}
}