1 /* $OpenBSD: if_nep.c,v 1.34 2022/03/11 18:00:48 mpi Exp $ */ 2 /* 3 * Copyright (c) 2014, 2015 Mark Kettenis 4 * 5 * Permission to use, copy, modify, and distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include "bpfilter.h" 19 20 #include <sys/param.h> 21 #include <sys/systm.h> 22 #include <sys/device.h> 23 #include <sys/ioctl.h> 24 #include <sys/malloc.h> 25 #include <sys/mbuf.h> 26 #include <sys/pool.h> 27 #include <sys/socket.h> 28 29 #include <net/if.h> 30 #include <net/if_media.h> 31 32 #include <netinet/in.h> 33 #include <netinet/if_ether.h> 34 35 #if NBPFILTER > 0 36 #include <net/bpf.h> 37 #endif 38 39 #include <dev/mii/mii.h> 40 #include <dev/mii/miivar.h> 41 42 #include <dev/pci/pcireg.h> 43 #include <dev/pci/pcivar.h> 44 #include <dev/pci/pcidevs.h> 45 46 #ifdef __sparc64__ 47 #include <dev/ofw/openfirm.h> 48 #endif 49 50 /* 51 * The virtualization features make this a really complex device. For 52 * now we try to keep things simple and use one logical device per 53 * port, using port numbers as logical device numbers. 
54 */ 55 56 #define PIO 0x000000 57 #define FZC_PIO 0x080000 58 #define FZC_MAC 0x180000 59 #define FZC_IPP 0x280000 60 #define FFLP 0x300000 61 #define FZC_FFLP 0x380000 62 #define ZCP 0x500000 63 #define FZC_ZCP 0x580000 64 #define DMC 0x600000 65 #define FZC_DMC 0x680000 66 #define TXC 0x700000 67 #define FZC_TXC 0x780000 68 #define PIO_LDSV 0x800000 69 #define PIO_IMASK0 0xa00000 70 #define PIO_IMASK1 0xb00000 71 72 #define RST_CTL (FZC_PIO + 0x00038) 73 #define SYS_ERR_MASK (FZC_PIO + 0x00090) 74 #define SYS_ERR_STAT (FZC_PIO + 0x00098) 75 76 #define LDN_RXDMA(chan) (0 + (chan)) 77 #define LDN_TXDMA(chan) (32 + (chan)) 78 #define LDN_MIF 63 79 #define LDN_MAC(port) (64 + (port)) 80 #define LDN_SYSERR 68 81 82 #define LDSV0(ldg) (PIO_LDSV + 0x00000 + (ldg) * 0x02000) 83 #define LDSV1(ldg) (PIO_LDSV + 0x00008 + (ldg) * 0x02000) 84 #define LDSV2(ldg) (PIO_LDSV + 0x00010 + (ldg) * 0x02000) 85 #define LDGIMGN(ldg) (PIO_LDSV + 0x00018 + (ldg) * 0x02000) 86 #define LDGIMGN_ARM (1ULL << 31) 87 #define LDGIMGN_TIMER (63ULL << 0) 88 89 #define LD_IM0(idx) (PIO_IMASK0 + 0x00000 + (idx) * 0x02000) 90 #define LD_IM0_LDF_MASK (3ULL << 0) 91 #define LD_IM1(idx) (PIO_IMASK1 + 0x00000 + (idx - 64) * 0x02000) 92 #define LD_IM1_LDF_MASK (3ULL << 0) 93 94 #define SID(ldg) (FZC_PIO + 0x10200 + (ldg) * 0x00008) 95 #define LDG_NUM(ldn) (FZC_PIO + 0x20000 + (ldn) * 0x00008) 96 97 #define ipp_port(port) (((port & 0x1) << 1) | (port & 0x2) >> 1) 98 #define IPP_CFIG(port) (FZC_IPP + 0x00000 + ipp_port(port) * 0x04000) 99 #define IPP_CFIG_SOFT_RST (1ULL << 31) 100 #define IPP_CFIG_DFIFO_PIO_W (1ULL << 5) 101 #define IPP_CFIG_IPP_ENABLE (1ULL << 0) 102 #define IPP_INT_STAT(port) (FZC_IPP + 0x00040 + ipp_port(port) * 0x04000) 103 #define IPP_MSK(port) (FZC_IPP + 0x00048 + ipp_port(port) * 0x04000) 104 #define IPP_DFIFO_RD1(port) (FZC_IPP + 0x000c0 + ipp_port(port) * 0x04000) 105 #define IPP_DFIFO_RD2(port) (FZC_IPP + 0x000c8 + ipp_port(port) * 0x04000) 106 #define IPP_DFIFO_RD3(port) 
(FZC_IPP + 0x000d0 + ipp_port(port) * 0x04000) 107 #define IPP_DFIFO_RD4(port) (FZC_IPP + 0x000d8 + ipp_port(port) * 0x04000) 108 #define IPP_DFIFO_RD5(port) (FZC_IPP + 0x000e0 + ipp_port(port) * 0x04000) 109 #define IPP_DFIFO_WR1(port) (FZC_IPP + 0x000e8 + ipp_port(port) * 0x04000) 110 #define IPP_DFIFO_WR2(port) (FZC_IPP + 0x000f0 + ipp_port(port) * 0x04000) 111 #define IPP_DFIFO_WR3(port) (FZC_IPP + 0x000f8 + ipp_port(port) * 0x04000) 112 #define IPP_DFIFO_WR4(port) (FZC_IPP + 0x00100 + ipp_port(port) * 0x04000) 113 #define IPP_DFIFO_WR5(port) (FZC_IPP + 0x00108 + ipp_port(port) * 0x04000) 114 #define IPP_DFIFO_RD_PTR(port) (FZC_IPP + 0x00110 + ipp_port(port) * 0x04000) 115 #define IPP_DFIFO_WR_PTR(port) (FZC_IPP + 0x00118 + ipp_port(port) * 0x04000) 116 117 #define IPP_NIU_DFIFO_ENTRIES 1024 118 #define IPP_P0_P1_DFIFO_ENTRIES 2048 119 #define IPP_P2_P3_DFIFO_ENTRIES 1024 120 121 #define ZCP_CFIG (FZC_ZCP + 0x00000) 122 #define ZCP_INT_STAT (FZC_ZCP + 0x00008) 123 #define ZCP_INT_MASK (FZC_ZCP + 0x00010) 124 125 #define TXC_DMA_MAX(chan) (FZC_TXC + 0x00000 + (chan) * 0x01000) 126 #define TXC_CONTROL (FZC_TXC + 0x20000) 127 #define TXC_CONTROL_TXC_ENABLED (1ULL << 4) 128 #define TXC_PORT_DMA(port) (FZC_TXC + 0x20028 + (port) * 0x00100) 129 #define TXC_PKT_STUFFED(port) (FZC_TXC + 0x20030 + (port) * 0x00100) 130 #define TXC_PKT_XMIT(port) (FZC_TXC + 0x20038 + (port) * 0x00100) 131 #define TXC_INT_STAT_DBG (FZC_TXC + 0x20420) 132 #define TXC_INT_STAT (FZC_TXC + 0x20428) 133 #define TXC_INT_MASK (FZC_TXC + 0x20430) 134 #define TXC_INT_MASK_PORT_INT_MASK(port) (0x3fULL << ((port) * 8)) 135 136 #define XTXMAC_SW_RST(port) (FZC_MAC + 0x00000 + (port) * 0x06000) 137 #define XTXMAC_SW_RST_REG_RST (1ULL << 1) 138 #define XTXMAC_SW_RST_SOFT_RST (1ULL << 0) 139 #define XRXMAC_SW_RST(port) (FZC_MAC + 0x00008 + (port) * 0x06000) 140 #define XRXMAC_SW_RST_REG_RST (1ULL << 1) 141 #define XRXMAC_SW_RST_SOFT_RST (1ULL << 0) 142 #define XTXMAC_STATUS(port) (FZC_MAC + 0x00020 + 
(port) * 0x06000) 143 #define XRXMAC_STATUS(port) (FZC_MAC + 0x00028 + (port) * 0x06000) 144 #define XTXMAC_STAT_MSK(port) (FZC_MAC + 0x00040 + (port) * 0x06000) 145 #define XRXMAC_STAT_MSK(port) (FZC_MAC + 0x00048 + (port) * 0x06000) 146 #define XMAC_CONFIG(port) (FZC_MAC + 0x00060 + (port) * 0x06000) 147 #define XMAC_CONFIG_SEL_CLK_25MHZ (1ULL << 31) 148 #define XMAC_CONFIG_1G_PCS_BYPASS (1ULL << 30) 149 #define XMAC_CONFIG_MODE_MASK (3ULL << 27) 150 #define XMAC_CONFIG_MODE_XGMII (0ULL << 27) 151 #define XMAC_CONFIG_MODE_GMII (1ULL << 27) 152 #define XMAC_CONFIG_MODE_MII (2ULL << 27) 153 #define XMAC_CONFIG_LFS_DISABLE (1ULL << 26) 154 #define XMAC_CONFIG_LOOPBACK (1ULL << 25) 155 #define XMAC_CONFIG_TX_OUTPUT_EN (1ULL << 24) 156 #define XMAC_CONFIG_SEL_POR_CLK_SRC (1ULL << 23) 157 #define XMAC_CONFIG_HASH_FILTER_EN (1ULL << 15) 158 #define XMAC_CONFIG_PROMISCUOUS_GROUP (1ULL << 10) 159 #define XMAC_CONFIG_PROMISCUOUS (1ULL << 9) 160 #define XMAC_CONFIG_RX_MAC_ENABLE (1ULL << 8) 161 #define XMAC_CONFIG_ALWAYS_NO_CRC (1ULL << 3) 162 #define XMAC_CONFIG_VAR_MIN_IPG_EN (1ULL << 2) 163 #define XMAC_CONFIG_STRETCH_MODE (1ULL << 1) 164 #define XMAC_CONFIG_TX_ENABLE (1ULL << 0) 165 166 #define XMAC_IPG(port) (FZC_MAC + 0x00080 + (port) * 0x06000) 167 #define XMAC_IPG_IPG_VALUE1_MASK (0xffULL << 8) 168 #define XMAC_IPG_IPG_VALUE1_12 (10ULL << 8) 169 #define XMAC_IPG_IPG_VALUE_MASK (0x07ULL << 0) 170 #define XMAC_IPG_IPG_VALUE_12_15 (3ULL << 0) 171 172 #define XMAC_MIN(port) (FZC_MAC + 0x00088 + (port) * 0x06000) 173 #define XMAC_MIN_RX_MIN_PKT_SIZE_MASK (0x3ffULL << 20) 174 #define XMAC_MIN_RX_MIN_PKT_SIZE_SHIFT 20 175 #define XMAC_MIN_TX_MIN_PKT_SIZE_MASK (0x3ffULL << 0) 176 #define XMAC_MIN_TX_MIN_PKT_SIZE_SHIFT 0 177 #define XMAC_MAX(port) (FZC_MAC + 0x00090 + (port) * 0x06000) 178 179 #define XMAC_ADDR0(port) (FZC_MAC + 0x000a0 + (port) * 0x06000) 180 #define XMAC_ADDR1(port) (FZC_MAC + 0x000a8 + (port) * 0x06000) 181 #define XMAC_ADDR2(port) (FZC_MAC + 0x000b0 + 
(port) * 0x06000) 182 183 #define XMAC_ADDR_CMPEN(port) (FZC_MAC + 0x00208 + (port) * 0x06000) 184 185 #define XMAC_ADD_FILT0(port) (FZC_MAC + 0x00818 + (port) * 0x06000) 186 #define XMAC_ADD_FILT1(port) (FZC_MAC + 0x00820 + (port) * 0x06000) 187 #define XMAC_ADD_FILT2(port) (FZC_MAC + 0x00828 + (port) * 0x06000) 188 #define XMAC_ADD_FILT12_MASK(port) (FZC_MAC + 0x00830 + (port) * 0x06000) 189 #define XMAC_ADD_FILT00_MASK(port) (FZC_MAC + 0x00838 + (port) * 0x06000) 190 191 #define XMAC_HASH_TBL0(port) (FZC_MAC + 0x00840 + (port) * 0x06000) 192 #define XMAC_HASH_TBL(port, i) (XMAC_HASH_TBL0(port) + (i) * 0x00008) 193 194 #define XMAC_HOST_INFO0(port) (FZC_MAC + 0x00900 + (port) * 0x06000) 195 #define XMAC_HOST_INFO(port, i) (XMAC_HOST_INFO0(port) + (i) * 0x00008) 196 197 #define RXMAC_BT_CNT(port) (FZC_MAC + 0x00100 + (port) * 0x06000) 198 199 #define TXMAC_FRM_CNT(port) (FZC_MAC + 0x00170 + (port) * 0x06000) 200 #define TXMAC_BYTE_CNT(port) (FZC_MAC + 0x00178 + (port) * 0x06000) 201 202 #define LINK_FAULT_CNT(port) (FZC_MAC + 0x00180 + (port) * 0x06000) 203 #define XMAC_SM_REG(port) (FZC_MAC + 0x001a8 + (port) * 0x06000) 204 205 #define TXMAC_SW_RST(port) (FZC_MAC + 0x0c000 + ((port) - 2) * 0x04000) 206 #define TXMAC_SW_RST_SW_RST (1ULL << 0) 207 #define RXMAC_SW_RST(port) (FZC_MAC + 0x0c008 + ((port) - 2) * 0x04000) 208 #define RXMAC_SW_RST_SW_RST (1ULL << 0) 209 #define TXMAC_CONFIG(port) (FZC_MAC + 0x0c060 + ((port) - 2) * 0x04000) 210 #define TXMAC_CONFIG_TX_ENABLE (1ULL << 0) 211 #define RXMAC_CONFIG(port) (FZC_MAC + 0x0c068 + ((port) - 2) * 0x04000) 212 #define RXMAC_CONFIG_ERROR_CHK_DIS (1ULL << 7) 213 #define RXMAC_CONFIG_ADDR_FILTER_EN (1ULL << 6) 214 #define RXMAC_CONFIG_HASH_FILTER_EN (1ULL << 5) 215 #define RXMAC_CONFIG_PROMISCUOUS_GROUP (1ULL << 4) 216 #define RXMAC_CONFIG_PROMISCUOUS (1ULL << 3) 217 #define RXMAC_CONFIG_STRIP_FCS (1ULL << 2) 218 #define RXMAC_CONFIG_STRIP_PAD (1ULL << 1) 219 #define RXMAC_CONFIG_RX_ENABLE (1ULL << 0) 220 #define 
MAC_XIF_CONFIG(port) (FZC_MAC + 0x0c078 + ((port) - 2) * 0x04000) 221 #define MAC_XIF_CONFIG_SEL_CLK_25MHZ (1ULL << 7) 222 #define MAC_XIF_CONFIG_GMII_MODE (1ULL << 3) 223 #define MAC_XIF_CONFIG_LOOPBACK (1ULL << 1) 224 #define MAC_XIF_CONFIG_TX_OUTPUT_EN (1ULL << 0) 225 #define BMAC_MIN(port) (FZC_MAC + 0x0c0a0 + ((port) - 2) * 0x04000) 226 #define BMAC_MAX(port) (FZC_MAC + 0x0c0a8 + ((port) - 2) * 0x04000) 227 #define BMAC_MAX_BURST_SHIFT 16 228 #define MAC_PA_SIZE(port) (FZC_MAC + 0x0c0b0 + ((port) - 2) * 0x04000) 229 #define MAC_CTRL_TYPE(port) (FZC_MAC + 0x0c0b8 + ((port) - 2) * 0x04000) 230 #define BMAC_ADDR0(port) (FZC_MAC + 0x0c100 + ((port) - 2) * 0x04000) 231 #define BMAC_ADDR1(port) (FZC_MAC + 0x0c108 + ((port) - 2) * 0x04000) 232 #define BMAC_ADDR2(port) (FZC_MAC + 0x0c110 + ((port) - 2) * 0x04000) 233 234 #define MAC_ADDR_FILT0(port) (FZC_MAC + 0x0c298 + ((port) - 2) * 0x04000) 235 #define MAC_ADDR_FILT1(port) (FZC_MAC + 0x0c2a0 + ((port) - 2) * 0x04000) 236 #define MAC_ADDR_FILT2(port) (FZC_MAC + 0x0c2a8 + ((port) - 2) * 0x04000) 237 #define MAC_ADDR_FILT12_MASK(port) (FZC_MAC + 0x0c2b0 + ((port) - 2) * 0x04000) 238 #define MAC_ADDR_FILT00_MASK(port) (FZC_MAC + 0x0c2b8 + ((port) - 2) * 0x04000) 239 240 #define MAC_HASH_TBL0(port) (FZC_MAC + 0x0c2c0 + ((port) - 2) * 0x04000) 241 #define MAC_HASH_TBL(port, i) (MAC_HASH_TBL0(port) + (i) * 0x00008) 242 243 #define RXMAC_FRM_CNT(port) (FZC_MAC + 0x0c370 + ((port) - 2) * 0x04000) 244 #define BMAC_ALTAD_CMPEN(port) (FZC_MAC + 0x0c3f8 + ((port) - 2) * 0x04000) 245 246 #define BMAC_HOST_INFO0(port) (FZC_MAC + 0x0c400 + ((port) - 2) * 0x04000) 247 #define BMAC_HOST_INFO(port, i) (BMAC_HOST_INFO0(port) + (i) * 0x00008) 248 249 #define PCS_PORT_OFFSET(port) ((port < 2) ? 
((port) * 0x06000) : \ 250 (0x02000 + (port) * 0x4000)) 251 #define PCS_MII_CTL(port) (FZC_MAC + 0x04000 + PCS_PORT_OFFSET(port)) 252 #define PCS_MII_CTL_RESET (1ULL << 15) 253 #define PCS_DPATH_MODE(port) (FZC_MAC + 0x040a0 + PCS_PORT_OFFSET(port)) 254 #define PCS_DPATH_MODE_MII (1ULL << 1) 255 256 #define MIF_FRAME_OUTPUT (FZC_MAC + 0x16018) 257 #define MIF_FRAME_DATA 0xffff 258 #define MIF_FRAME_TA0 (1ULL << 16) 259 #define MIF_FRAME_TA1 (1ULL << 17) 260 #define MIF_FRAME_REG_SHIFT 18 261 #define MIF_FRAME_PHY_SHIFT 23 262 #define MIF_FRAME_READ 0x60020000 263 #define MIF_FRAME_WRITE 0x50020000 264 #define MIF_CONFIG (FZC_MAC + 0x16020) 265 #define MIF_CONFIG_INDIRECT_MODE (1ULL << 15) 266 267 #define DEF_PT0_RDC (FZC_DMC + 0x00008) 268 #define DEF_PT_RDC(port) (DEF_PT0_RDC + (port) * 0x00008) 269 #define RDC_TBL(tbl, i) (FZC_ZCP + 0x10000 + (tbl * 16 + i) * 0x00008) 270 271 #define RX_LOG_PAGE_VLD(chan) (FZC_DMC + 0x20000 + (chan) * 0x00040) 272 #define RX_LOG_PAGE_VLD_PAGE0 (1ULL << 0) 273 #define RX_LOG_PAGE_VLD_PAGE1 (1ULL << 1) 274 #define RX_LOG_PAGE_VLD_FUNC_SHIFT 2 275 #define RX_LOG_MASK1(chan) (FZC_DMC + 0x20008 + (chan) * 0x00040) 276 #define RX_LOG_VALUE1(chan) (FZC_DMC + 0x20010 + (chan) * 0x00040) 277 #define RX_LOG_MASK2(chan) (FZC_DMC + 0x20018 + (chan) * 0x00040) 278 #define RX_LOG_VALUE2(chan) (FZC_DMC + 0x20020 + (chan) * 0x00040) 279 #define RX_LOG_PAGE_RELO1(chan) (FZC_DMC + 0x20028 + (chan) * 0x00040) 280 #define RX_LOG_PAGE_RELO2(chan) (FZC_DMC + 0x20030 + (chan) * 0x00040) 281 #define RX_LOG_PAGE_HDL(chan) (FZC_DMC + 0x20038 + (chan) * 0x00040) 282 283 #define RXDMA_CFIG1(chan) (DMC + 0x00000 + (chan) * 0x00200) 284 #define RXDMA_CFIG1_EN (1ULL << 31) 285 #define RXDMA_CFIG1_RST (1ULL << 30) 286 #define RXDMA_CFIG1_QST (1ULL << 29) 287 #define RXDMA_CFIG2(chan) (DMC + 0x00008 + (chan) * 0x00200) 288 #define RXDMA_CFIG2_OFFSET_MASK (3ULL << 2) 289 #define RXDMA_CFIG2_OFFSET_0 (0ULL << 2) 290 #define RXDMA_CFIG2_OFFSET_64 (1ULL << 2) 291 
#define RXDMA_CFIG2_OFFSET_128 (2ULL << 2) 292 #define RXDMA_CFIG2_FULL_HDR (1ULL << 0) 293 294 #define RBR_CFIG_A(chan) (DMC + 0x00010 + (chan) * 0x00200) 295 #define RBR_CFIG_A_LEN_SHIFT 48 296 #define RBR_CFIG_B(chan) (DMC + 0x00018 + (chan) * 0x00200) 297 #define RBR_CFIG_B_BLKSIZE_MASK (3ULL << 24) 298 #define RBR_CFIG_B_BLKSIZE_4K (0ULL << 24) 299 #define RBR_CFIG_B_BLKSIZE_8K (1ULL << 24) 300 #define RBR_CFIG_B_BLKSIZE_16K (2ULL << 24) 301 #define RBR_CFIG_B_BLKSIZE_32K (3ULL << 24) 302 #define RBR_CFIG_B_VLD2 (1ULL << 23) 303 #define RBR_CFIG_B_BUFSZ2_MASK (3ULL << 16) 304 #define RBR_CFIG_B_BUFSZ2_2K (0ULL << 16) 305 #define RBR_CFIG_B_BUFSZ2_4K (1ULL << 16) 306 #define RBR_CFIG_B_BUFSZ2_8K (2ULL << 16) 307 #define RBR_CFIG_B_BUFSZ2_16K (3ULL << 16) 308 #define RBR_CFIG_B_VLD1 (1ULL << 15) 309 #define RBR_CFIG_B_BUFSZ1_MASK (3ULL << 8) 310 #define RBR_CFIG_B_BUFSZ1_1K (0ULL << 8) 311 #define RBR_CFIG_B_BUFSZ1_2K (1ULL << 8) 312 #define RBR_CFIG_B_BUFSZ1_4K (2ULL << 8) 313 #define RBR_CFIG_B_BUFSZ1_8K (3ULL << 8) 314 #define RBR_CFIG_B_VLD0 (1ULL << 7) 315 #define RBR_CFIG_B_BUFSZ0_MASK (3ULL << 0) 316 #define RBR_CFIG_B_BUFSZ0_256 (0ULL << 0) 317 #define RBR_CFIG_B_BUFSZ0_512 (1ULL << 0) 318 #define RBR_CFIG_B_BUFSZ0_1K (2ULL << 0) 319 #define RBR_CFIG_B_BUFSZ0_2K (3ULL << 0) 320 #define RBR_KICK(chan) (DMC + 0x00020 + (chan) * 0x00200) 321 #define RBR_STAT(chan) (DMC + 0x00028 + (chan) * 0x00200) 322 #define RBR_HDH(chan) (DMC + 0x00030 + (chan) * 0x00200) 323 #define RBR_HDL(chan) (DMC + 0x00038 + (chan) * 0x00200) 324 #define RCRCFIG_A(chan) (DMC + 0x00040 + (chan) * 0x00200) 325 #define RCRCFIG_A_LEN_SHIFT 48 326 #define RCRCFIG_B(chan) (DMC + 0x00048 + (chan) * 0x00200) 327 #define RCRCFIG_B_PTHRES_SHIFT 16 328 #define RCRCFIG_B_ENTOUT (1ULL << 15) 329 #define RCRSTAT_A(chan) (DMC + 0x00050 + (chan) * 0x00200) 330 #define RCRSTAT_B(chan) (DMC + 0x00058 + (chan) * 0x00200) 331 #define RCRSTAT_C(chan) (DMC + 0x00060 + (chan) * 0x00200) 332 333 #define 
RX_DMA_ENT_MSK(chan) (DMC + 0x00068 + (chan) * 0x00200) 334 #define RX_DMA_ENT_MSK_RBR_EMPTY (1ULL << 3) 335 #define RX_DMA_CTL_STAT(chan) (DMC + 0x00070 + (chan) * 0x00200) 336 #define RX_DMA_CTL_STAT_MEX (1ULL << 47) 337 #define RX_DMA_CTL_STAT_RCRTHRES (1ULL << 46) 338 #define RX_DMA_CTL_STAT_RCRTO (1ULL << 45) 339 #define RX_DMA_CTL_STAT_RBR_EMPTY (1ULL << 35) 340 #define RX_DMA_CTL_STAT_PTRREAD_SHIFT 16 341 #define RX_DMA_CTL_STAT_DBG(chan) (DMC + 0x00098 + (chan) * 0x00200) 342 343 #define TX_LOG_PAGE_VLD(chan) (FZC_DMC + 0x40000 + (chan) * 0x00200) 344 #define TX_LOG_PAGE_VLD_PAGE0 (1ULL << 0) 345 #define TX_LOG_PAGE_VLD_PAGE1 (1ULL << 1) 346 #define TX_LOG_PAGE_VLD_FUNC_SHIFT 2 347 #define TX_LOG_MASK1(chan) (FZC_DMC + 0x40008 + (chan) * 0x00200) 348 #define TX_LOG_VALUE1(chan) (FZC_DMC + 0x40010 + (chan) * 0x00200) 349 #define TX_LOG_MASK2(chan) (FZC_DMC + 0x40018 + (chan) * 0x00200) 350 #define TX_LOG_VALUE2(chan) (FZC_DMC + 0x40020 + (chan) * 0x00200) 351 #define TX_LOG_PAGE_RELO1(chan) (FZC_DMC + 0x40028 + (chan) * 0x00200) 352 #define TX_LOG_PAGE_RELO2(chan) (FZC_DMC + 0x40030 + (chan) * 0x00200) 353 #define TX_LOG_PAGE_HDL(chan) (FZC_DMC + 0x40038 + (chan) * 0x00200) 354 355 #define TX_RNG_CFIG(chan) (DMC + 0x40000 + (chan) * 0x00200) 356 #define TX_RNG_CFIG_LEN_SHIFT 48 357 #define TX_RING_HDL(chan) (DMC + 0x40010 + (chan) * 0x00200) 358 #define TX_RING_KICK(chan) (DMC + 0x40018 + (chan) * 0x00200) 359 #define TX_RING_KICK_WRAP (1ULL << 19) 360 #define TX_ENT_MSK(chan) (DMC + 0x40020 + (chan) * 0x00200) 361 #define TX_CS(chan) (DMC + 0x40028 + (chan) * 0x00200) 362 #define TX_CS_PKT_CNT_MASK (0xfffULL << 48) 363 #define TX_CS_PKT_CNT_SHIFT 48 364 #define TX_CS_RST (1ULL << 31) 365 #define TX_CS_STOP_N_GO (1ULL << 28) 366 #define TX_CS_SNG_STATE (1ULL << 27) 367 #define TDMC_INTR_DBG(chan) (DMC + 0x40060 + (chan) * 0x00200) 368 #define TXDMA_MBH(chan) (DMC + 0x40030 + (chan) * 0x00200) 369 #define TXDMA_MBL(chan) (DMC + 0x40038 + (chan) * 0x00200) 370 
#define TX_RNG_ERR_LOGH(chan) (DMC + 0x40048 + (chan) * 0x00200) 371 #define TX_RNG_ERR_LOGL(chan) (DMC + 0x40050 + (chan) * 0x00200) 372 373 #define RXD_MULTI (1ULL << 63) 374 #define RXD_L2_LEN_MASK (0x3fffULL << 40) 375 #define RXD_L2_LEN_SHIFT 40 376 #define RXD_PKT_BUF_ADDR_MASK 0x3fffffffffULL 377 #define RXD_PKT_BUF_ADDR_SHIFT 6 378 379 struct nep_block { 380 bus_dmamap_t nb_map; 381 void *nb_block; 382 }; 383 384 #define NEP_NRBDESC 256 385 #define NEP_NRCDESC 512 386 387 #define TXD_SOP (1ULL << 63) 388 #define TXD_MARK (1ULL << 62) 389 #define TXD_NUM_PTR_SHIFT 58 390 #define TXD_TR_LEN_SHIFT 44 391 392 struct nep_txbuf_hdr { 393 uint64_t nh_flags; 394 uint64_t nh_reserved; 395 }; 396 397 struct nep_buf { 398 bus_dmamap_t nb_map; 399 struct mbuf *nb_m; 400 }; 401 402 #define NEP_NTXDESC 256 403 #define NEP_NTXSEGS 15 404 405 struct nep_dmamem { 406 bus_dmamap_t ndm_map; 407 bus_dma_segment_t ndm_seg; 408 size_t ndm_size; 409 caddr_t ndm_kva; 410 }; 411 #define NEP_DMA_MAP(_ndm) ((_ndm)->ndm_map) 412 #define NEP_DMA_LEN(_ndm) ((_ndm)->ndm_size) 413 #define NEP_DMA_DVA(_ndm) ((_ndm)->ndm_map->dm_segs[0].ds_addr) 414 #define NEP_DMA_KVA(_ndm) ((void *)(_ndm)->ndm_kva); 415 416 struct pool *nep_block_pool; 417 418 struct nep_softc { 419 struct device sc_dev; 420 struct arpcom sc_ac; 421 #define sc_lladdr sc_ac.ac_enaddr 422 struct mii_data sc_mii; 423 #define sc_media sc_mii.mii_media 424 425 bus_dma_tag_t sc_dmat; 426 bus_space_tag_t sc_memt; 427 bus_space_handle_t sc_memh; 428 bus_size_t sc_mems; 429 void *sc_ih; 430 431 int sc_port; 432 433 struct nep_dmamem *sc_txring; 434 struct nep_buf *sc_txbuf; 435 uint64_t *sc_txdesc; 436 int sc_tx_prod; 437 int sc_tx_cnt; 438 int sc_tx_cons; 439 440 uint64_t sc_wrap; 441 uint16_t sc_pkt_cnt; 442 443 struct nep_dmamem *sc_rbring; 444 struct nep_block *sc_rb; 445 uint32_t *sc_rbdesc; 446 struct if_rxring sc_rx_ring; 447 int sc_rx_prod; 448 struct nep_dmamem *sc_rcring; 449 uint64_t *sc_rcdesc; 450 int sc_rx_cons; 451 
452 struct nep_dmamem *sc_rxmbox; 453 454 struct timeout sc_tick; 455 }; 456 457 int nep_match(struct device *, void *, void *); 458 void nep_attach(struct device *, struct device *, void *); 459 460 const struct cfattach nep_ca = { 461 sizeof(struct nep_softc), nep_match, nep_attach 462 }; 463 464 struct cfdriver nep_cd = { 465 NULL, "nep", DV_DULL 466 }; 467 468 static u_int nep_mextfree_idx; 469 470 int nep_pci_enaddr(struct nep_softc *, struct pci_attach_args *); 471 472 uint64_t nep_read(struct nep_softc *, uint32_t); 473 void nep_write(struct nep_softc *, uint32_t, uint64_t); 474 int nep_mii_readreg(struct device *, int, int); 475 void nep_mii_writereg(struct device *, int, int, int); 476 void nep_mii_statchg(struct device *); 477 void nep_xmac_mii_statchg(struct nep_softc *); 478 void nep_bmac_mii_statchg(struct nep_softc *); 479 int nep_media_change(struct ifnet *); 480 void nep_media_status(struct ifnet *, struct ifmediareq *); 481 int nep_intr(void *); 482 483 void nep_rx_proc(struct nep_softc *); 484 void nep_extfree(caddr_t, u_int, void *); 485 void nep_tx_proc(struct nep_softc *); 486 487 void nep_init_ipp(struct nep_softc *); 488 void nep_ipp_clear_dfifo(struct nep_softc *, uint64_t); 489 void nep_init_rx_mac(struct nep_softc *); 490 void nep_init_rx_xmac(struct nep_softc *); 491 void nep_init_rx_bmac(struct nep_softc *); 492 void nep_init_rx_channel(struct nep_softc *, int); 493 void nep_init_tx_mac(struct nep_softc *); 494 void nep_init_tx_xmac(struct nep_softc *); 495 void nep_init_tx_bmac(struct nep_softc *); 496 void nep_init_tx_channel(struct nep_softc *, int); 497 void nep_enable_rx_mac(struct nep_softc *); 498 void nep_disable_rx_mac(struct nep_softc *); 499 void nep_stop_dma(struct nep_softc *); 500 501 void nep_fill_rx_ring(struct nep_softc *); 502 503 void nep_up(struct nep_softc *); 504 void nep_down(struct nep_softc *); 505 void nep_iff(struct nep_softc *); 506 int nep_encap(struct nep_softc *, struct mbuf **, int *); 507 508 void 
nep_start(struct ifnet *); 509 void nep_watchdog(struct ifnet *); 510 void nep_tick(void *); 511 int nep_ioctl(struct ifnet *, u_long, caddr_t); 512 513 struct nep_dmamem *nep_dmamem_alloc(struct nep_softc *, size_t); 514 void nep_dmamem_free(struct nep_softc *, struct nep_dmamem *); 515 516 /* 517 * SUNW,pcie-neptune: 4x1G onboard on T5140/T5240 518 * SUNW,pcie-qgc: 4x1G, "Sun Quad GbE UTP x8 PCI Express Card" 519 * SUNW,pcie-qgc-pem: 4x1G, "Sun Quad GbE UTP x8 PCIe ExpressModule" 520 * SUNW,pcie-2xgf: 2x10G, "Sun Dual 10GbE XFP PCI Express Card" 521 * SUNW,pcie-2xgf-pem: 2x10G, "Sun Dual 10GbE XFP PCIe ExpressModule" 522 */ 523 int 524 nep_match(struct device *parent, void *match, void *aux) 525 { 526 struct pci_attach_args *pa = aux; 527 528 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN && 529 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_NEPTUNE) 530 return (1); 531 return (0); 532 } 533 534 void 535 nep_attach(struct device *parent, struct device *self, void *aux) 536 { 537 struct nep_softc *sc = (struct nep_softc *)self; 538 struct pci_attach_args *pa = aux; 539 pci_intr_handle_t ih; 540 const char *intrstr = NULL; 541 struct ifnet *ifp = &sc->sc_ac.ac_if; 542 struct mii_data *mii = &sc->sc_mii; 543 pcireg_t memtype; 544 uint64_t val; 545 546 if (nep_mextfree_idx == 0) 547 nep_mextfree_idx = mextfree_register(nep_extfree); 548 549 sc->sc_dmat = pa->pa_dmat; 550 551 memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT; 552 if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0, 553 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) { 554 printf(": can't map registers\n"); 555 return; 556 } 557 558 if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) { 559 printf(": can't map interrupt\n"); 560 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 561 return; 562 } 563 564 intrstr = pci_intr_string(pa->pa_pc, ih); 565 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, 566 nep_intr, sc, self->dv_xname); 567 if (sc->sc_ih == NULL) { 568 printf(": can't 
establish interrupt"); 569 if (intrstr != NULL) 570 printf(" at %s", intrstr); 571 printf("\n"); 572 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 573 return; 574 } 575 576 printf(": %s", intrstr); 577 578 sc->sc_port = pa->pa_function; 579 580 nep_write(sc, SID(sc->sc_port), pa->pa_function << 5); 581 nep_write(sc, LDG_NUM(LDN_RXDMA(sc->sc_port)), sc->sc_port); 582 nep_write(sc, LDG_NUM(LDN_TXDMA(sc->sc_port)), sc->sc_port); 583 nep_write(sc, LDG_NUM(LDN_MAC(sc->sc_port)), sc->sc_port); 584 585 /* Port 0 gets the MIF and error interrupts. */ 586 if (sc->sc_port == 0) { 587 nep_write(sc, LDG_NUM(LDN_MIF), sc->sc_port); 588 nep_write(sc, LDG_NUM(LDN_SYSERR), sc->sc_port); 589 nep_write(sc, ZCP_INT_MASK, 0); 590 } 591 592 #ifdef __sparc64__ 593 if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address", 594 sc->sc_lladdr, ETHER_ADDR_LEN) <= 0) 595 #endif 596 nep_pci_enaddr(sc, pa); 597 598 printf(", address %s\n", ether_sprintf(sc->sc_lladdr)); 599 600 if (nep_block_pool == NULL) { 601 nep_block_pool = malloc(sizeof(*nep_block_pool), 602 M_DEVBUF, M_WAITOK); 603 if (nep_block_pool == NULL) { 604 printf("%s: unable to allocate block pool\n", 605 sc->sc_dev.dv_xname); 606 return; 607 } 608 pool_init(nep_block_pool, PAGE_SIZE, 0, IPL_NET, 0, 609 "nepblk", NULL); 610 } 611 612 val = nep_read(sc, MIF_CONFIG); 613 val &= ~MIF_CONFIG_INDIRECT_MODE; 614 nep_write(sc, MIF_CONFIG, val); 615 616 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname)); 617 ifp->if_softc = sc; 618 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 619 ifp->if_ioctl = nep_ioctl; 620 ifp->if_start = nep_start; 621 ifp->if_watchdog = nep_watchdog; 622 623 mii->mii_ifp = ifp; 624 mii->mii_readreg = nep_mii_readreg; 625 mii->mii_writereg = nep_mii_writereg; 626 mii->mii_statchg = nep_mii_statchg; 627 628 ifmedia_init(&sc->sc_media, 0, nep_media_change, nep_media_status); 629 630 /* 631 * The PHYs are wired up in reverse order on the 4x1G (RGMII) 632 * configuration. 
633 */ 634 mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 635 sc->sc_port ^ 0x3, 0); 636 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 637 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 638 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 639 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); 640 } else 641 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO); 642 643 if_attach(ifp); 644 ether_ifattach(ifp); 645 646 timeout_set(&sc->sc_tick, nep_tick, sc); 647 648 /* Enable the MIF and error interrupts. */ 649 if (sc->sc_port == 0) { 650 nep_write(sc, LD_IM0(LDN_MIF), 0); 651 nep_write(sc, LD_IM1(LDN_SYSERR), 0); 652 } 653 } 654 655 #define PROMHDR_PTR_DATA 0x18 656 #define PROMDATA_PTR_VPD 0x08 657 #define PROMDATA_LEN 0x10 658 #define PROMDATA_TYPE 0x14 659 660 static const uint8_t nep_promhdr[] = { 0x55, 0xaa }; 661 static const uint8_t nep_promdat[] = { 662 'P', 'C', 'I', 'R', 663 PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8, 664 PCI_PRODUCT_SUN_NEPTUNE & 0xff, PCI_PRODUCT_SUN_NEPTUNE >> 8 665 }; 666 667 int 668 nep_pci_enaddr(struct nep_softc *sc, struct pci_attach_args *pa) 669 { 670 struct pci_vpd_largeres *res; 671 struct pci_vpd *vpd; 672 bus_space_handle_t romh; 673 bus_space_tag_t romt; 674 bus_size_t romsize = 0; 675 u_int8_t buf[32], *desc; 676 pcireg_t address; 677 int dataoff, vpdoff, len; 678 int off = 0; 679 int rv = -1; 680 681 if (pci_mapreg_map(pa, PCI_ROM_REG, PCI_MAPREG_TYPE_MEM, 0, 682 &romt, &romh, 0, &romsize, 0)) 683 return (-1); 684 685 address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG); 686 address |= PCI_ROM_ENABLE; 687 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address); 688 689 while (off < romsize) { 690 bus_space_read_region_1(romt, romh, off, buf, sizeof(buf)); 691 if (memcmp(buf, nep_promhdr, sizeof(nep_promhdr))) 692 goto fail; 693 694 dataoff = 695 buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8); 696 if (dataoff < 0x1c) 697 goto fail; 698 dataoff += off; 699 700 
bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf)); 701 if (memcmp(buf, nep_promdat, sizeof(nep_promdat))) 702 goto fail; 703 704 if (buf[PROMDATA_TYPE] == 1) 705 break; 706 707 len = buf[PROMDATA_LEN] | (buf[PROMDATA_LEN + 1] << 8); 708 off += len * 512; 709 } 710 711 vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8); 712 if (vpdoff < 0x1c) 713 goto fail; 714 vpdoff += off; 715 716 next: 717 bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf)); 718 if (!PCI_VPDRES_ISLARGE(buf[0])) 719 goto fail; 720 721 res = (struct pci_vpd_largeres *)buf; 722 vpdoff += sizeof(*res); 723 724 len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb); 725 switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) { 726 case PCI_VPDRES_TYPE_IDENTIFIER_STRING: 727 /* Skip identifier string. */ 728 vpdoff += len; 729 goto next; 730 731 case PCI_VPDRES_TYPE_VPD: 732 while (len > 0) { 733 bus_space_read_region_1(romt, romh, vpdoff, 734 buf, sizeof(buf)); 735 736 vpd = (struct pci_vpd *)buf; 737 vpdoff += sizeof(*vpd) + vpd->vpd_len; 738 len -= sizeof(*vpd) + vpd->vpd_len; 739 740 /* 741 * We're looking for an "Enhanced" VPD... 742 */ 743 if (vpd->vpd_key0 != 'Z') 744 continue; 745 746 desc = buf + sizeof(*vpd); 747 748 /* 749 * ...which is an instance property... 750 */ 751 if (desc[0] != 'I') 752 continue; 753 desc += 3; 754 755 /* 756 * ...that's a byte array with the proper 757 * length for a MAC address... 758 */ 759 if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN) 760 continue; 761 desc += 2; 762 763 /* 764 * ...named "local-mac-address". 
765 */ 766 if (strcmp(desc, "local-mac-address") != 0) 767 continue; 768 desc += strlen("local-mac-address") + 1; 769 770 memcpy(sc->sc_ac.ac_enaddr, desc, ETHER_ADDR_LEN); 771 sc->sc_ac.ac_enaddr[5] += pa->pa_function; 772 rv = 0; 773 } 774 break; 775 776 default: 777 goto fail; 778 } 779 780 fail: 781 if (romsize != 0) 782 bus_space_unmap(romt, romh, romsize); 783 784 address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG); 785 address &= ~PCI_ROM_ENABLE; 786 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address); 787 788 return (rv); 789 } 790 791 uint64_t 792 nep_read(struct nep_softc *sc, uint32_t reg) 793 { 794 return (bus_space_read_8(sc->sc_memt, sc->sc_memh, reg)); 795 } 796 797 void 798 nep_write(struct nep_softc *sc, uint32_t reg, uint64_t value) 799 { 800 bus_space_write_8(sc->sc_memt, sc->sc_memh, reg, value); 801 } 802 803 int 804 nep_mii_readreg(struct device *self, int phy, int reg) 805 { 806 struct nep_softc *sc = (struct nep_softc *)self; 807 uint64_t frame; 808 int n; 809 810 frame = MIF_FRAME_READ; 811 frame |= (reg << MIF_FRAME_REG_SHIFT) | (phy << MIF_FRAME_PHY_SHIFT); 812 nep_write(sc, MIF_FRAME_OUTPUT, frame); 813 for (n = 0; n < 1000; n++) { 814 delay(10); 815 frame = nep_read(sc, MIF_FRAME_OUTPUT); 816 if (frame & MIF_FRAME_TA0) 817 return (frame & MIF_FRAME_DATA); 818 } 819 820 printf("%s: %s timeout\n", sc->sc_dev.dv_xname, __func__); 821 return (0); 822 } 823 824 void 825 nep_mii_writereg(struct device *self, int phy, int reg, int val) 826 { 827 struct nep_softc *sc = (struct nep_softc *)self; 828 uint64_t frame; 829 int n; 830 831 frame = MIF_FRAME_WRITE; 832 frame |= (reg << MIF_FRAME_REG_SHIFT) | (phy << MIF_FRAME_PHY_SHIFT); 833 frame |= (val & MIF_FRAME_DATA); 834 nep_write(sc, MIF_FRAME_OUTPUT, frame); 835 for (n = 0; n < 1000; n++) { 836 delay(10); 837 frame = nep_read(sc, MIF_FRAME_OUTPUT); 838 if (frame & MIF_FRAME_TA0) 839 return; 840 } 841 842 printf("%s: %s timeout\n", sc->sc_dev.dv_xname, __func__); 843 return; 844 
} 845 846 void 847 nep_mii_statchg(struct device *dev) 848 { 849 struct nep_softc *sc = (struct nep_softc *)dev; 850 851 if (sc->sc_port < 2) 852 nep_xmac_mii_statchg(sc); 853 else 854 nep_bmac_mii_statchg(sc); 855 } 856 857 void 858 nep_xmac_mii_statchg(struct nep_softc *sc) 859 { 860 struct mii_data *mii = &sc->sc_mii; 861 uint64_t val; 862 863 val = nep_read(sc, XMAC_CONFIG(sc->sc_port)); 864 865 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 866 val |= XMAC_CONFIG_SEL_CLK_25MHZ; 867 else 868 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; 869 870 val |= XMAC_CONFIG_1G_PCS_BYPASS; 871 872 val &= ~XMAC_CONFIG_MODE_MASK; 873 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 874 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 875 val |= XMAC_CONFIG_MODE_GMII; 876 else 877 val |= XMAC_CONFIG_MODE_MII; 878 879 val |= XMAC_CONFIG_LFS_DISABLE; 880 881 if (mii->mii_media_active & IFM_LOOP) 882 val |= XMAC_CONFIG_LOOPBACK; 883 else 884 val &= ~XMAC_CONFIG_LOOPBACK; 885 886 val |= XMAC_CONFIG_TX_OUTPUT_EN; 887 888 nep_write(sc, XMAC_CONFIG(sc->sc_port), val); 889 } 890 891 void 892 nep_bmac_mii_statchg(struct nep_softc *sc) 893 { 894 struct mii_data *mii = &sc->sc_mii; 895 uint64_t val; 896 897 val = nep_read(sc, MAC_XIF_CONFIG(sc->sc_port)); 898 899 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 900 val |= MAC_XIF_CONFIG_SEL_CLK_25MHZ; 901 else 902 val &= MAC_XIF_CONFIG_SEL_CLK_25MHZ; 903 904 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 905 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 906 val |= MAC_XIF_CONFIG_GMII_MODE; 907 else 908 val &= ~MAC_XIF_CONFIG_GMII_MODE; 909 910 if (mii->mii_media_active & IFM_LOOP) 911 val |= MAC_XIF_CONFIG_LOOPBACK; 912 else 913 val &= ~MAC_XIF_CONFIG_LOOPBACK; 914 915 val |= MAC_XIF_CONFIG_TX_OUTPUT_EN; 916 917 nep_write(sc, MAC_XIF_CONFIG(sc->sc_port), val); 918 } 919 920 int 921 nep_media_change(struct ifnet *ifp) 922 { 923 struct nep_softc *sc = ifp->if_softc; 924 925 if (LIST_FIRST(&sc->sc_mii.mii_phys)) 926 
		mii_mediachg(&sc->sc_mii);

	return (0);
}

/* ifmedia status callback: report the PHY's current media and status. */
void
nep_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nep_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

/*
 * Interrupt handler.  One logical device group per port is used, so
 * sc_port doubles as the LDG index here.  Returns 0 when the interrupt
 * was not ours, 1 otherwise.
 */
int
nep_intr(void *arg)
{
	struct nep_softc *sc = arg;
	uint64_t sv0, sv1, sv2;
	int rearm = 0;

	/* Read all three logical-device state vectors. */
	sv0 = nep_read(sc, LDSV0(sc->sc_port));
	sv1 = nep_read(sc, LDSV1(sc->sc_port));
	sv2 = nep_read(sc, LDSV2(sc->sc_port));

	/* Nothing pending: not our interrupt. */
	if ((sv0 | sv1 | sv2) == 0)
		return (0);

	if (sv0 & (1ULL << LDN_TXDMA(sc->sc_port))) {
		nep_tx_proc(sc);
		rearm = 1;
	}

	if (sv0 & (1ULL << LDN_RXDMA(sc->sc_port))) {
		nep_rx_proc(sc);
		rearm = 1;
	}

	if (rearm)
		/* Re-arm the group with a small timer value. */
		nep_write(sc, LDGIMGN(sc->sc_port), LDGIMGN_ARM | 2);
	else
		/* Unexpected source; log the raw state vectors. */
		printf("%s: %s %llx %llx %llx\n", sc->sc_dev.dv_xname,
		    __func__, sv0, sv1, sv2);

	return (1);
}

/*
 * Process the Rx completion ring: match each completed buffer address
 * back to its backing page, wrap it in an external-storage mbuf and
 * hand the chain to the network stack.
 */
void
nep_rx_proc(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint64_t val;
	uint16_t count;
	uint16_t pktread = 0, ptrread = 0;
	uint64_t rxd;
	uint64_t addr;
	bus_addr_t page;
	bus_size_t off;
	char *block;
	struct mbuf *m;
	int idx, len, i;

	/* Ack the threshold/timeout conditions that raised the interrupt. */
	val = nep_read(sc, RX_DMA_CTL_STAT(sc->sc_port));
	nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port),
	    RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO);

	bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_rcring), 0,
	    NEP_DMA_LEN(sc->sc_rcring), BUS_DMASYNC_POSTREAD);

	/* Number of packets currently queued in the completion ring. */
	count = nep_read(sc, RCRSTAT_A(sc->sc_port));
	while (count > 0) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < NEP_NRCDESC);

		rxd = letoh64(sc->sc_rcdesc[idx]);

		addr = (rxd & RXD_PKT_BUF_ADDR_MASK) << RXD_PKT_BUF_ADDR_SHIFT;
		len = (rxd &
RXD_L2_LEN_MASK) >> RXD_L2_LEN_SHIFT; 1008 page = addr & ~PAGE_MASK; 1009 off = addr & PAGE_MASK; 1010 block = NULL; 1011 for (i = 0; i < NEP_NRBDESC; i++) { 1012 if (sc->sc_rb[i].nb_block && 1013 sc->sc_rb[i].nb_map->dm_segs[0].ds_addr == page) { 1014 block = sc->sc_rb[i].nb_block; 1015 break; 1016 } 1017 } 1018 if (block == NULL) { 1019 m = NULL; 1020 } else { 1021 bus_dmamap_unload(sc->sc_dmat, sc->sc_rb[i].nb_map); 1022 sc->sc_rb[i].nb_block = NULL; 1023 1024 MGETHDR(m, M_DONTWAIT, MT_DATA); 1025 } 1026 1027 if (m == NULL) { 1028 ifp->if_ierrors++; 1029 } else { 1030 MEXTADD(m, block + off, PAGE_SIZE, M_EXTWR, 1031 nep_mextfree_idx, block); 1032 m->m_pkthdr.len = m->m_len = len; 1033 m->m_data += ETHER_ALIGN; 1034 1035 ml_enqueue(&ml, m); 1036 } 1037 1038 if_rxr_put(&sc->sc_rx_ring, 1); 1039 if ((rxd & RXD_MULTI) == 0) { 1040 count--; 1041 pktread++; 1042 } 1043 ptrread++; 1044 sc->sc_rx_cons++; 1045 if (sc->sc_rx_cons >= NEP_NRCDESC) 1046 sc->sc_rx_cons = 0; 1047 } 1048 1049 bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_rcring), 0, 1050 NEP_DMA_LEN(sc->sc_rcring), BUS_DMASYNC_PREREAD); 1051 1052 if (ifiq_input(&ifp->if_rcv, &ml)) 1053 if_rxr_livelocked(&sc->sc_rx_ring); 1054 1055 nep_fill_rx_ring(sc); 1056 1057 val = pktread | (ptrread << RX_DMA_CTL_STAT_PTRREAD_SHIFT); 1058 val |= RX_DMA_CTL_STAT_MEX; 1059 nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port), val); 1060 } 1061 1062 void 1063 nep_extfree(caddr_t buf, u_int size, void *arg) 1064 { 1065 pool_put(nep_block_pool, arg); 1066 } 1067 1068 void 1069 nep_tx_proc(struct nep_softc *sc) 1070 { 1071 struct ifnet *ifp = &sc->sc_ac.ac_if; 1072 struct nep_buf *txb; 1073 uint64_t val; 1074 uint16_t pkt_cnt, count; 1075 int idx; 1076 1077 val = nep_read(sc, TX_CS(sc->sc_port)); 1078 pkt_cnt = (val & TX_CS_PKT_CNT_MASK) >> TX_CS_PKT_CNT_SHIFT; 1079 count = (pkt_cnt - sc->sc_pkt_cnt); 1080 count &= (TX_CS_PKT_CNT_MASK >> TX_CS_PKT_CNT_SHIFT); 1081 sc->sc_pkt_cnt = pkt_cnt; 1082 1083 while (count > 0) { 1084 idx = 
sc->sc_tx_cons;
		KASSERT(idx < NEP_NTXDESC);

		txb = &sc->sc_txbuf[idx];
		/* Only descriptors that carried an mbuf count as packets. */
		if (txb->nb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->nb_map, 0,
			    txb->nb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->nb_map);

			m_freem(txb->nb_m);
			txb->nb_m = NULL;
			count--;
		}

		/* Descriptors were freed, so transmit may proceed again. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;
		sc->sc_tx_cons++;
		if (sc->sc_tx_cons >= NEP_NTXDESC)
			sc->sc_tx_cons = 0;
	}

	/* All descriptors reclaimed: cancel the watchdog. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}

/*
 * Initialize the Input Packet Processor: clear its DFIFO, soft-reset
 * it, then enable it with all interrupts unmasked.
 */
void
nep_init_ipp(struct nep_softc *sc)
{
	uint64_t val;
	int num_entries;
	int n, i;

	/* DFIFO depth differs between the XMAC and BMAC port pairs. */
	if (sc->sc_port < 2)
		num_entries = IPP_P0_P1_DFIFO_ENTRIES;
	else
		num_entries = IPP_P2_P3_DFIFO_ENTRIES;

	for (i = 0; i < num_entries; i++)
		nep_ipp_clear_dfifo(sc, i);

	/* Read twice to clear any latched error status. */
	(void)nep_read(sc, IPP_INT_STAT(sc->sc_port));
	(void)nep_read(sc, IPP_INT_STAT(sc->sc_port));

	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_SOFT_RST;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);
	/* Wait for the self-clearing reset bit. */
	n = 1000;
	while (--n) {
		val = nep_read(sc, IPP_CFIG(sc->sc_port));
		if ((val & IPP_CFIG_SOFT_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting IPP\n");

	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_IPP_ENABLE;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_write(sc, IPP_MSK(sc->sc_port), 0);
}

/*
 * Zero one DFIFO entry by writing it through the PIO window, then read
 * it back to flush.
 */
void
nep_ipp_clear_dfifo(struct nep_softc *sc, uint64_t addr)
{
	uint64_t val;

	/* Open the PIO write window into the DFIFO. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_DFIFO_PIO_W;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_write(sc, IPP_DFIFO_WR_PTR(sc->sc_port), addr);
	nep_write(sc, IPP_DFIFO_WR1(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR2(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR3(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR4(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR5(sc->sc_port), 0);

	/* Close the window again. */
	val &= ~IPP_CFIG_DFIFO_PIO_W;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_write(sc, IPP_DFIFO_RD_PTR(sc->sc_port), addr);
	(void)nep_read(sc, IPP_DFIFO_RD1(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD2(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD3(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD4(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD5(sc->sc_port));
}

/* Dispatch Rx MAC init to the XMAC (ports 0-1) or BMAC (ports 2-3). */
void
nep_init_rx_mac(struct nep_softc *sc)
{
	if (sc->sc_port < 2)
		nep_init_rx_xmac(sc);
	else
		nep_init_rx_bmac(sc);
}

/*
 * Reset the XMAC receiver and program the station address and an empty
 * filter/hash configuration.
 */
void
nep_init_rx_xmac(struct nep_softc *sc)
{
	uint64_t addr0, addr1, addr2;
	uint64_t val;
	int n, i;

	nep_write(sc, XRXMAC_SW_RST(sc->sc_port),
	    XRXMAC_SW_RST_REG_RST | XRXMAC_SW_RST_SOFT_RST);
	/* Both reset bits self-clear when done. */
	n = 1000;
	while (--n) {
		val = nep_read(sc, XRXMAC_SW_RST(sc->sc_port));
		if ((val & (XRXMAC_SW_RST_REG_RST |
		    XRXMAC_SW_RST_SOFT_RST)) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx MAC\n");

	/* Station address is split over three 16-bit registers. */
	addr0 = (sc->sc_lladdr[4] << 8) | sc->sc_lladdr[5];
	addr1 = (sc->sc_lladdr[2] << 8) | sc->sc_lladdr[3];
	addr2 = (sc->sc_lladdr[0] << 8) | sc->sc_lladdr[1];
	nep_write(sc, XMAC_ADDR0(sc->sc_port), addr0);
	nep_write(sc, XMAC_ADDR1(sc->sc_port), addr1);
	nep_write(sc, XMAC_ADDR2(sc->sc_port), addr2);

	nep_write(sc, XMAC_ADDR_CMPEN(sc->sc_port), 0);

	/* Clear the alternate-address filters. */
	nep_write(sc, XMAC_ADD_FILT0(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT1(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT2(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT12_MASK(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT00_MASK(sc->sc_port), 0);

	for (i = 0; i < 16; i++)
		nep_write(sc, XMAC_HASH_TBL(sc->sc_port, i), 0);

	/* Steer all host-info entries to this port's RDC group. */
	for (i = 0; i < 20; i++)
		nep_write(sc, XMAC_HOST_INFO(sc->sc_port, i), sc->sc_port);
}

/*
 * BMAC counterpart of nep_init_rx_xmac() for ports 2 and 3.
 */
void
nep_init_rx_bmac(struct nep_softc *sc)
{
	uint64_t addr0, addr1, addr2;
	uint64_t val;
	int n, i;

	nep_write(sc, RXMAC_SW_RST(sc->sc_port), RXMAC_SW_RST_SW_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, RXMAC_SW_RST(sc->sc_port));
		if ((val & RXMAC_SW_RST_SW_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx MAC\n");

	/* Start from a conservative configuration: everything off. */
	val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
	val &= ~RXMAC_CONFIG_ERROR_CHK_DIS;
	val &= ~RXMAC_CONFIG_PROMISCUOUS;
	val &= ~RXMAC_CONFIG_PROMISCUOUS_GROUP;
	val &= ~RXMAC_CONFIG_ADDR_FILTER_EN;
	val &= ~RXMAC_CONFIG_HASH_FILTER_EN;
	val &= ~RXMAC_CONFIG_STRIP_FCS;
	val &= ~RXMAC_CONFIG_STRIP_PAD;
	val &= ~RXMAC_CONFIG_RX_ENABLE;
	nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);

	addr0 = (sc->sc_lladdr[4] << 8) | sc->sc_lladdr[5];
	addr1 = (sc->sc_lladdr[2] << 8) | sc->sc_lladdr[3];
	addr2 = (sc->sc_lladdr[0] << 8) | sc->sc_lladdr[1];
	nep_write(sc, BMAC_ADDR0(sc->sc_port), addr0);
	nep_write(sc, BMAC_ADDR1(sc->sc_port), addr1);
	nep_write(sc, BMAC_ADDR2(sc->sc_port), addr2);

	nep_write(sc, BMAC_ALTAD_CMPEN(sc->sc_port), 1);

	nep_write(sc, MAC_ADDR_FILT0(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT1(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT2(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT12_MASK(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT00_MASK(sc->sc_port), 0);

	for (i = 0; i < 16; i++)
		nep_write(sc, MAC_HASH_TBL(sc->sc_port, i), 0);

	for (i = 0; i < 9; i++)
		nep_write(sc, BMAC_HOST_INFO(sc->sc_port, i), sc->sc_port);
}

/*
 * Reset one Rx DMA channel and program its logical pages, mailbox,
 * buffer ring and completion ring.
 */
void
nep_init_rx_channel(struct nep_softc *sc, int chan)
{
	uint64_t val;
	int i, n;

	val = nep_read(sc, RXDMA_CFIG1(chan));
	val &= ~RXDMA_CFIG1_EN;
	val |= RXDMA_CFIG1_RST;
	/*
	 * NOTE(review): 'val' is computed above but the literal reset
	 * bit is written instead, discarding the read-modify-write --
	 * looks like a dead store; confirm against the Neptune PRM
	 * whether writing only RXDMA_CFIG1_RST is intended.
	 */
	nep_write(sc, RXDMA_CFIG1(chan),
	    RXDMA_CFIG1_RST);

	/* Wait for the self-clearing reset bit. */
	n = 1000;
	while (--n) {
		val = nep_read(sc, RXDMA_CFIG1(chan));
		if ((val & RXDMA_CFIG1_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx DMA\n");

	/* Disable logical-page translation; map pages 0/1 to this port. */
	nep_write(sc, RX_LOG_MASK1(chan), 0);
	nep_write(sc, RX_LOG_VALUE1(chan), 0);
	nep_write(sc, RX_LOG_MASK2(chan), 0);
	nep_write(sc, RX_LOG_VALUE2(chan), 0);
	nep_write(sc, RX_LOG_PAGE_RELO1(chan), 0);
	nep_write(sc, RX_LOG_PAGE_RELO2(chan), 0);
	nep_write(sc, RX_LOG_PAGE_HDL(chan), 0);
	nep_write(sc, RX_LOG_PAGE_VLD(chan),
	    (sc->sc_port << RX_LOG_PAGE_VLD_FUNC_SHIFT) |
	    RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);

	nep_write(sc, RX_DMA_ENT_MSK(chan), RX_DMA_ENT_MSK_RBR_EMPTY);
	nep_write(sc, RX_DMA_CTL_STAT(chan), RX_DMA_CTL_STAT_MEX);

	/* Mailbox address: high 32 bits in CFIG1, low bits in CFIG2. */
	val = NEP_DMA_DVA(sc->sc_rxmbox) >> 32;
	nep_write(sc, RXDMA_CFIG1(chan), val);

	val = NEP_DMA_DVA(sc->sc_rxmbox) & 0xffffffc0;
	nep_write(sc, RXDMA_CFIG2(chan), val);

	/* Rx block ring: base address plus entry count. */
	val = NEP_DMA_DVA(sc->sc_rbring);
	val |= (uint64_t)NEP_NRBDESC << RBR_CFIG_A_LEN_SHIFT;
	nep_write(sc, RBR_CFIG_A(chan), val);

	/* 8KB blocks, single 8KB buffer size. */
	val = RBR_CFIG_B_BLKSIZE_8K;
	val |= RBR_CFIG_B_BUFSZ1_8K | RBR_CFIG_B_VLD1;
	nep_write(sc, RBR_CFIG_B(chan), val);

	nep_write(sc, RBR_KICK(chan), 0);

	/* Rx completion ring: base address plus entry count. */
	val = NEP_DMA_DVA(sc->sc_rcring);
	val |= (uint64_t)NEP_NRCDESC << RCRCFIG_A_LEN_SHIFT;
	nep_write(sc, RCRCFIG_A(chan), val);

	/* Interrupt after 8 ticks or 16 packets, whichever first. */
	val = 8 | RCRCFIG_B_ENTOUT;
	val |= (16 << RCRCFIG_B_PTHRES_SHIFT);
	nep_write(sc, RCRCFIG_B(chan), val);

	/* Point the whole RDC table at this one channel. */
	nep_write(sc, DEF_PT_RDC(sc->sc_port), chan);
	for (i = 0; i < 16; i++)
		nep_write(sc, RDC_TBL(sc->sc_port, i), chan);
}

/* Dispatch Tx MAC init to the XMAC (ports 0-1) or BMAC (ports 2-3). */
void
nep_init_tx_mac(struct nep_softc *sc)
{
	if (sc->sc_port < 2)
		nep_init_tx_xmac(sc);
	else
		nep_init_tx_bmac(sc);
}

/*
 * Reset the XMAC transmitter and program inter-packet gap and
 * packet-size limits; leave transmit disabled until nep_up().
 */
void
nep_init_tx_xmac(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	nep_write(sc, XTXMAC_SW_RST(sc->sc_port),
	    XTXMAC_SW_RST_REG_RST | XTXMAC_SW_RST_SOFT_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, XTXMAC_SW_RST(sc->sc_port));
		if ((val & (XTXMAC_SW_RST_REG_RST |
		    XTXMAC_SW_RST_SOFT_RST)) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx MAC\n");

	val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
	val &= ~XMAC_CONFIG_ALWAYS_NO_CRC;
	val &= ~XMAC_CONFIG_VAR_MIN_IPG_EN;
	val &= ~XMAC_CONFIG_STRETCH_MODE;
	val &= ~XMAC_CONFIG_TX_ENABLE;
	nep_write(sc, XMAC_CONFIG(sc->sc_port), val);

	val = nep_read(sc, XMAC_IPG(sc->sc_port));
	val &= ~XMAC_IPG_IPG_VALUE1_MASK;	/* MII/GMII mode */
	val |= XMAC_IPG_IPG_VALUE1_12;
	val &= ~XMAC_IPG_IPG_VALUE_MASK;	/* XGMII mode */
	val |= XMAC_IPG_IPG_VALUE_12_15;
	nep_write(sc, XMAC_IPG(sc->sc_port), val);

	/* Minimum frame size of 64 bytes in both directions. */
	val = nep_read(sc, XMAC_MIN(sc->sc_port));
	val &= ~XMAC_MIN_RX_MIN_PKT_SIZE_MASK;
	val &= ~XMAC_MIN_TX_MIN_PKT_SIZE_MASK;
	val |= (64 << XMAC_MIN_RX_MIN_PKT_SIZE_SHIFT);
	val |= (64 << XMAC_MIN_TX_MIN_PKT_SIZE_SHIFT);
	nep_write(sc, XMAC_MIN(sc->sc_port), val);
	nep_write(sc, XMAC_MAX(sc->sc_port), ETHER_MAX_LEN);

	/* Clear the transmit statistics counters. */
	nep_write(sc, TXMAC_FRM_CNT(sc->sc_port), 0);
	nep_write(sc, TXMAC_BYTE_CNT(sc->sc_port), 0);
}

/*
 * BMAC counterpart of nep_init_tx_xmac() for ports 2 and 3.
 */
void
nep_init_tx_bmac(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	nep_write(sc, TXMAC_SW_RST(sc->sc_port), TXMAC_SW_RST_SW_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, TXMAC_SW_RST(sc->sc_port));
		if ((val & TXMAC_SW_RST_SW_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx MAC\n");

	nep_write(sc, BMAC_MIN(sc->sc_port), 0x40);
	nep_write(sc, BMAC_MAX(sc->sc_port), ETHER_MAX_LEN |
	    (ETHER_MAX_LEN << BMAC_MAX_BURST_SHIFT));
	/* 0x8808 is the MAC control (PAUSE) ethertype. */
	nep_write(sc, MAC_CTRL_TYPE(sc->sc_port), 0x8808);
	nep_write(sc, MAC_PA_SIZE(sc->sc_port), 0x7);
}

/*
 * Reset one Tx DMA channel, bind it to this port in the transmit
 * controller and program its descriptor ring.
 */
void
nep_init_tx_channel(struct nep_softc *sc, int chan)
{
	uint64_t val;
	int n;

	/* Enable the transmit controller for this port. */
	val = nep_read(sc, TXC_CONTROL);
	val |= TXC_CONTROL_TXC_ENABLED;
	val |= (1ULL << sc->sc_port);
	nep_write(sc, TXC_CONTROL, val);

	nep_write(sc, TXC_PORT_DMA(sc->sc_port), 1ULL << chan);

	val = nep_read(sc, TXC_INT_MASK);
	val &= ~TXC_INT_MASK_PORT_INT_MASK(sc->sc_port);
	nep_write(sc, TXC_INT_MASK, val);

	val = nep_read(sc, TX_CS(chan));
	val |= TX_CS_RST;
	nep_write(sc, TX_CS(chan), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, TX_CS(chan));
		if ((val & TX_CS_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx DMA\n");

	/* Disable logical-page translation; map pages 0/1 to this port. */
	nep_write(sc, TX_LOG_MASK1(chan), 0);
	nep_write(sc, TX_LOG_VALUE1(chan), 0);
	nep_write(sc, TX_LOG_MASK2(chan), 0);
	nep_write(sc, TX_LOG_VALUE2(chan), 0);
	nep_write(sc, TX_LOG_PAGE_RELO1(chan), 0);
	nep_write(sc, TX_LOG_PAGE_RELO2(chan), 0);
	nep_write(sc, TX_LOG_PAGE_HDL(chan), 0);
	nep_write(sc, TX_LOG_PAGE_VLD(chan),
	    (sc->sc_port << TX_LOG_PAGE_VLD_FUNC_SHIFT) |
	    TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);

	nep_write(sc, TX_RING_KICK(chan), 0);

	nep_write(sc, TXC_DMA_MAX(chan), ETHER_MAX_LEN + 64);
	nep_write(sc, TX_ENT_MSK(chan), 0);

	/* Descriptor ring: base address plus length in 64-byte units. */
	val = NEP_DMA_DVA(sc->sc_txring);
	val |= (NEP_DMA_LEN(sc->sc_txring) / 64) << TX_RNG_CFIG_LEN_SHIFT;
	nep_write(sc, TX_RNG_CFIG(chan), val);

	nep_write(sc, TX_CS(chan), 0);
}

/*
 * Enable the receive MAC, applying the current promiscuous/multicast
 * interface flags.
 */
void
nep_enable_rx_mac(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t val;

	if (sc->sc_port < 2) {
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val &= ~XMAC_CONFIG_PROMISCUOUS;
		val &= ~XMAC_CONFIG_PROMISCUOUS_GROUP;
		val &= ~XMAC_CONFIG_HASH_FILTER_EN;
		if (ifp->if_flags & IFF_PROMISC)
			val |= XMAC_CONFIG_PROMISCUOUS;
		if (ifp->if_flags & IFF_ALLMULTI)
			val |= XMAC_CONFIG_PROMISCUOUS_GROUP;
		else
			val |= XMAC_CONFIG_HASH_FILTER_EN;
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	} else {
		val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
		val &= ~RXMAC_CONFIG_PROMISCUOUS;
		val &= ~RXMAC_CONFIG_PROMISCUOUS_GROUP;
		val &= ~RXMAC_CONFIG_HASH_FILTER_EN;
		if (ifp->if_flags & IFF_PROMISC)
			val |= RXMAC_CONFIG_PROMISCUOUS;
		if (ifp->if_flags & IFF_ALLMULTI)
			val |= RXMAC_CONFIG_PROMISCUOUS_GROUP;
		else
			val |= RXMAC_CONFIG_HASH_FILTER_EN;
		val |= RXMAC_CONFIG_RX_ENABLE;
		nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);
	}
}

/* Clear the receive-enable bit in the port's Rx MAC. */
void
nep_disable_rx_mac(struct nep_softc *sc)
{
	uint64_t val;

	if (sc->sc_port < 2) {
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	} else {
		val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
		val &= ~RXMAC_CONFIG_RX_ENABLE;
		nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);
	}
}

/*
 * Quiesce the Tx and Rx DMA channels for this port (channel number
 * equals port number in this driver).
 */
void
nep_stop_dma(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	val = nep_read(sc, TX_CS(sc->sc_port));
	val |= TX_CS_STOP_N_GO;
	nep_write(sc, TX_CS(sc->sc_port), val);

	/* Wait until the channel reports it has stopped. */
	n = 1000;
	while (--n) {
		val = nep_read(sc, TX_CS(sc->sc_port));
		if (val & TX_CS_SNG_STATE)
			break;
	}
	if (n == 0)
		printf("timeout stopping Tx DMA\n");

	val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
	val &= ~RXDMA_CFIG1_EN;
	nep_write(sc, RXDMA_CFIG1(sc->sc_port), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
		if (val & RXDMA_CFIG1_QST)
			break;
	}
	if (n == 0)
		printf("timeout stopping Rx DMA\n");
}

/*
 * Bring the interface up: allocate all DMA rings, initialize the PCS,
 * MACs, IPP and DMA channels, then enable interrupts.
 */
void
nep_up(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_block *rb;
	struct nep_buf *txb;
	uint64_t val;
	int i, n;

	/* Allocate Rx block descriptor ring. */
	sc->sc_rbring = nep_dmamem_alloc(sc, NEP_NRBDESC * sizeof(uint32_t));
	if (sc->sc_rbring == NULL)
		return;
	sc->sc_rbdesc = NEP_DMA_KVA(sc->sc_rbring);

	/* One page-sized DMA map per Rx block, filled lazily. */
	sc->sc_rb = malloc(sizeof(struct nep_block) * NEP_NRBDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < NEP_NRBDESC; i++) {
		rb = &sc->sc_rb[i];
		bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
		    BUS_DMA_WAITOK, &rb->nb_map);
		rb->nb_block = NULL;
	}

	sc->sc_rx_prod = 0;
	if_rxr_init(&sc->sc_rx_ring, 16, NEP_NRBDESC);

	/* Allocate Rx completion descriptor ring. */
	sc->sc_rcring = nep_dmamem_alloc(sc, NEP_NRCDESC * sizeof(uint64_t));
	if (sc->sc_rcring == NULL)
		goto free_rbring;
	sc->sc_rcdesc = NEP_DMA_KVA(sc->sc_rcring);

	sc->sc_rx_cons = 0;

	/* Allocate Rx mailbox. */
	sc->sc_rxmbox = nep_dmamem_alloc(sc, 64);
	if (sc->sc_rxmbox == NULL)
		goto free_rcring;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = nep_dmamem_alloc(sc, NEP_NTXDESC * sizeof(uint64_t));
	if (sc->sc_txring == NULL)
		goto free_rxmbox;
	sc->sc_txdesc = NEP_DMA_KVA(sc->sc_txring);

	/*
	 * NOTE(review): on the goto error paths below, sc_rb (and the
	 * maps created above) are not released -- looks like a leak on
	 * a rarely-taken path; verify before changing.
	 */
	sc->sc_txbuf = malloc(sizeof(struct nep_buf) * NEP_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < NEP_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, NEP_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->nb_map);
		txb->nb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;
	sc->sc_wrap = 0;
	sc->sc_pkt_cnt = 0;

	if (sc->sc_port < 2) {
		/* Disable the POR loopback clock source. */
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	}

	/* Put the PCS in MII mode and reset it. */
	nep_write(sc, PCS_DPATH_MODE(sc->sc_port), PCS_DPATH_MODE_MII);
	val = nep_read(sc, PCS_MII_CTL(sc->sc_port));
	val |= PCS_MII_CTL_RESET;
	nep_write(sc, PCS_MII_CTL(sc->sc_port), val);
	n = 1000;
	while (--n) {
		val = nep_read(sc, PCS_MII_CTL(sc->sc_port));
		if ((val & PCS_MII_CTL_RESET) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting PCS\n");

	nep_init_rx_mac(sc);
	nep_init_rx_channel(sc, sc->sc_port);
	nep_init_ipp(sc);

	nep_init_tx_mac(sc);
	nep_init_tx_channel(sc, sc->sc_port);

	nep_fill_rx_ring(sc);

	nep_enable_rx_mac(sc);
	if (sc->sc_port < 2) {
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val |= XMAC_CONFIG_TX_ENABLE;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	} else {
		val = nep_read(sc, TXMAC_CONFIG(sc->sc_port));
		val |= TXMAC_CONFIG_TX_ENABLE;
		nep_write(sc, TXMAC_CONFIG(sc->sc_port), val);
	}

	val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
	val |= RXDMA_CFIG1_EN;
	nep_write(sc, RXDMA_CFIG1(sc->sc_port), val);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Enable interrupts. */
	nep_write(sc, LD_IM1(LDN_MAC(sc->sc_port)), 0);
	nep_write(sc, LD_IM0(LDN_RXDMA(sc->sc_port)), 0);
	nep_write(sc, LD_IM0(LDN_TXDMA(sc->sc_port)), 0);
	nep_write(sc, LDGIMGN(sc->sc_port), LDGIMGN_ARM | 2);

	timeout_add_sec(&sc->sc_tick, 1);

	return;

free_rxmbox:
	nep_dmamem_free(sc, sc->sc_rxmbox);
free_rcring:
	nep_dmamem_free(sc, sc->sc_rcring);
free_rbring:
	nep_dmamem_free(sc, sc->sc_rbring);
}

/*
 * Bring the interface down: mask interrupts, stop the MACs, IPP and
 * DMA engines, then release every buffer and ring allocated in
 * nep_up().
 */
void
nep_down(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_buf *txb;
	struct nep_block *rb;
	uint64_t val;
	int i;

	timeout_del(&sc->sc_tick);

	/* Disable interrupts. */
	nep_write(sc, LD_IM1(LDN_MAC(sc->sc_port)), 1);
	nep_write(sc, LD_IM0(LDN_RXDMA(sc->sc_port)), 1);
	nep_write(sc, LD_IM0(LDN_TXDMA(sc->sc_port)), 1);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	nep_disable_rx_mac(sc);

	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val &= ~IPP_CFIG_IPP_ENABLE;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_stop_dma(sc);

	/* Free any mbufs still pending transmission. */
	for (i = 0; i < NEP_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->nb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->nb_map, 0,
			    txb->nb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->nb_map);
			m_freem(txb->nb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->nb_map);
	}

	nep_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, sizeof(struct nep_buf) * NEP_NTXDESC);

	nep_dmamem_free(sc, sc->sc_rxmbox);
	nep_dmamem_free(sc, sc->sc_rcring);

	/* Return all Rx blocks to the pool. */
	for (i = 0; i < NEP_NRBDESC; i++) {
		rb = &sc->sc_rb[i];
		if (rb->nb_block) {
			bus_dmamap_sync(sc->sc_dmat, rb->nb_map, 0,
			    rb->nb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rb->nb_map);
			pool_put(nep_block_pool, rb->nb_block);
		}
		bus_dmamap_destroy(sc->sc_dmat, rb->nb_map);
	}

	nep_dmamem_free(sc, sc->sc_rbring);
	free(sc->sc_rb, M_DEVBUF, sizeof(struct nep_block) * NEP_NRBDESC);
}

/*
 * Program the multicast hash filter from the interface's multicast
 * list, falling back to ALLMULTI for ranges or promiscuous mode.
 */
void
nep_iff(struct nep_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[16];
	int i;

	nep_disable_rx_mac(sc);

	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(hash, 0, sizeof(hash));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Top 8 CRC bits select one of 256 hash bits. */
			crc >>= 24;
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	for (i = 0; i < nitems(hash); i++) {
		if (sc->sc_port < 2)
			nep_write(sc, XMAC_HASH_TBL(sc->sc_port, i), hash[i]);
		else
			nep_write(sc, MAC_HASH_TBL(sc->sc_port, i), hash[i]);
	}

	nep_enable_rx_mac(sc);
}

/*
 * Map one mbuf chain onto the Tx descriptor ring, prepending the
 * internal transmit header, and kick the channel.  Returns 0 on
 * success or ENOBUFS (the mbuf is freed on failure).  *idx is
 * advanced past the descriptors consumed.
 */
int
nep_encap(struct nep_softc *sc, struct mbuf **m0, int *idx)
{
	struct mbuf *m = *m0;
	struct nep_txbuf_hdr *nh;
	uint64_t txd;
	bus_dmamap_t map;
	int cur, frag, i;
	int len, pad;
	int err;

	/*
	 * MAC does not support padding of transmit packets that are
	 * fewer than 60 bytes.
	 */
	if (m->m_pkthdr.len < (ETHER_MIN_LEN - ETHER_CRC_LEN)) {
		struct mbuf *n;
		int padlen;

		/* Append a zero-filled mbuf to reach the minimum size. */
		padlen = (ETHER_MIN_LEN - ETHER_CRC_LEN) - m->m_pkthdr.len;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		memset(mtod(n, caddr_t), 0, padlen);
		n->m_len = padlen;
		m_cat(m, n);
		m->m_pkthdr.len += padlen;
	}

	/* Pad so the payload keeps its 16-byte alignment phase. */
	if (m_leadingspace(m) < 16)
		pad = 0;
	else
		pad = mtod(m, u_long) % 16;
	len = m->m_pkthdr.len + pad;
	M_PREPEND(m, sizeof(*nh) + pad, M_DONTWAIT);
	if (m == NULL)
		return (ENOBUFS);
	nh = mtod(m, struct nep_txbuf_hdr *);
	nh->nh_flags = htole64((len << 16) | (pad / 2));
	nh->nh_reserved = 0;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].nb_map;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX defrag */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* First descriptor carries SOP/MARK and the segment count. */
	txd = TXD_SOP | TXD_MARK;
	txd |= ((uint64_t)map->dm_nsegs << TXD_NUM_PTR_SHIFT);
	for (i = 0; i < map->dm_nsegs; i++) {
		txd |= ((uint64_t)map->dm_segs[i].ds_len << TXD_TR_LEN_SHIFT);
		txd |= map->dm_segs[i].ds_addr;
		sc->sc_txdesc[frag] = htole64(txd);
		txd = 0;

		bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_txring),
		    frag * sizeof(txd), sizeof(txd), BUS_DMASYNC_PREWRITE);

		cur = frag++;
		if (frag >= NEP_NTXDESC)
			frag = 0;
		KASSERT(frag != sc->sc_tx_cons);
	}

	/* Park the mbuf on the last descriptor; swap maps so the slot
	 * at *idx gets a free map back. */
	KASSERT(sc->sc_txbuf[cur].nb_m == NULL);
	sc->sc_txbuf[*idx].nb_map = sc->sc_txbuf[cur].nb_map;
	sc->sc_txbuf[cur].nb_map = map;
	sc->sc_txbuf[cur].nb_m = m;

	/* The kick register wants the wrap state toggled on each pass. */
	if (frag < *idx)
		sc->sc_wrap ^= TX_RING_KICK_WRAP;
	nep_write(sc, TX_RING_KICK(sc->sc_port), sc->sc_wrap | (frag << 3));

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	/* Strip the header again for the benefit of bpf(4). */
	m_adj(m, sizeof(*nh) + pad);
	*m0 = m;

	return (0);
}

/*
 * Transmit start routine: dequeue packets and encapsulate them until
 * the ring fills up.
 */
void
nep_start(struct ifnet *ifp)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	struct mbuf *m;
	int idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	for (;;) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/* Leave room for a maximally-fragmented packet. */
		if (sc->sc_tx_cnt >= (NEP_NTXDESC - NEP_NTXSEGS)) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* Now we are committed to transmit the packet.
		 */
		ifq_deq_commit(&ifp->if_snd, m);

		if (nep_encap(sc, &m, &idx))
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

/* Watchdog: the Tx timeout fired; currently diagnostic only. */
void
nep_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

/* Once-a-second tick: poll the PHY and reschedule. */
void
nep_tick(void *arg)
{
	struct nep_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

/*
 * ioctl handler: interface up/down, media and the usual Ethernet
 * requests; multicast changes funnel into nep_iff() via ENETRESET.
 */
int
nep_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				nep_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				nep_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			nep_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Replenish the Rx block ring with pages from nep_block_pool and tell
 * the chip how many new blocks are available.
 */
void
nep_fill_rx_ring(struct nep_softc *sc)
{
	struct nep_block *rb;
	void *block;
	uint64_t val;
	u_int slots;
	int desc, err;
	int count = 0;

	desc = sc->sc_rx_prod;
	slots = if_rxr_get(&sc->sc_rx_ring, NEP_NRBDESC);
	while (slots > 0) {
		rb = &sc->sc_rb[desc];

		block = pool_get(nep_block_pool, PR_NOWAIT);
		if (block == NULL)
			break;
		err = bus_dmamap_load(sc->sc_dmat, rb->nb_map, block,
		    PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (err) {
			pool_put(nep_block_pool, block);
			break;
		}
		rb->nb_block = block;
		/* RBR entries hold the DMA address shifted down 12 bits. */
		sc->sc_rbdesc[desc++] =
		    htole32(rb->nb_map->dm_segs[0].ds_addr >> 12);
		count++;
		slots--;
		if (desc >= NEP_NRBDESC)
			desc = 0;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
	if (count > 0) {
		nep_write(sc, RBR_KICK(sc->sc_port), count);
		val = nep_read(sc, RX_DMA_CTL_STAT(sc->sc_port));
		val |= RX_DMA_CTL_STAT_RBR_EMPTY;
		nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port), val);
		sc->sc_rx_prod = desc;
	}
}

/*
 * Allocate a physically contiguous, zeroed DMA memory chunk and map
 * it into kernel virtual space.  Returns NULL on failure.
 */
struct nep_dmamem *
nep_dmamem_alloc(struct nep_softc *sc, size_t size)
{
	struct nep_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->ndm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->ndm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->ndm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->ndm_seg, nsegs, size, &m->ndm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->ndm_map, m->ndm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->ndm_kva, m->ndm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->ndm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->ndm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}

/* Release a chunk obtained from nep_dmamem_alloc(). */
void
nep_dmamem_free(struct nep_softc *sc, struct nep_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->ndm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->ndm_kva, m->ndm_size);
	bus_dmamem_free(sc->sc_dmat, &m->ndm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->ndm_map);
	free(m, M_DEVBUF, sizeof(*m));
}