/* $OpenBSD: if_tht.c,v 1.124 2010/05/19 15:27:35 oga Exp $ */

/*
 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Tehuti TN30xx multi port 10Gb Ethernet chipsets,
 * see http://www.tehutinetworks.net/.
 *
 * This driver was made possible because Tehuti networks provided
 * hardware and documentation. Thanks!
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/time.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#ifdef THT_DEBUG
#define THT_D_FIFO		(1<<0)
#define THT_D_TX		(1<<1)
#define THT_D_RX		(1<<2)
#define THT_D_INTR		(1<<3)

int thtdebug = THT_D_TX | THT_D_RX | THT_D_INTR;

#define DPRINTF(l, f...)	do { if (thtdebug & (l)) printf(f); } while (0)
#else
#define DPRINTF(l, f...)
#endif
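
/*
 * Debug note: the THT_D_* categories above can be combined in thtdebug to
 * select which DPRINTF output is wanted (e.g. just THT_D_FIFO to trace only
 * the FIFO pointer handling).  When THT_DEBUG is not defined, DPRINTF
 * expands to nothing and the debug statements cost nothing at runtime.
 */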

/* registers */

#define THT_PCI_BAR		0x10

#define _Q(_q)			((_q) * 4)

/* General Configuration */
#define THT_REG_END_SEL		0x5448 /* PCI Endian Select */
#define THT_REG_CLKPLL		0x5000
#define THT_REG_CLKPLL_PLLLK		(1<<9) /* PLL is locked */
#define THT_REG_CLKPLL_RSTEND		(1<<8) /* Reset ended */
#define THT_REG_CLKPLL_TXF_DIS		(1<<3) /* TX Free disabled */
#define THT_REG_CLKPLL_VNT_STOP		(1<<2) /* VENETO Stop */
#define THT_REG_CLKPLL_PLLRST		(1<<1) /* PLL Reset */
#define THT_REG_CLKPLL_SFTRST		(1<<0) /* Software Reset */
/* Descriptors and FIFO Registers */
#define THT_REG_TXT_CFG0(_q)	(0x4040 + _Q(_q)) /* CFG0 TX Task queues */
#define THT_REG_RXF_CFG0(_q)	(0x4050 + _Q(_q)) /* CFG0 RX Free queues */
#define THT_REG_RXD_CFG0(_q)	(0x4060 + _Q(_q)) /* CFG0 RX DSC queues */
#define THT_REG_TXF_CFG0(_q)	(0x4070 + _Q(_q)) /* CFG0 TX Free queues */
#define THT_REG_TXT_CFG1(_q)	(0x4000 + _Q(_q)) /* CFG1 TX Task queues */
#define THT_REG_RXF_CFG1(_q)	(0x4010 + _Q(_q)) /* CFG1 RX Free queues */
#define THT_REG_RXD_CFG1(_q)	(0x4020 + _Q(_q)) /* CFG1 RX DSC queues */
#define THT_REG_TXF_CFG1(_q)	(0x4030 + _Q(_q)) /* CFG1 TX Free queues */
#define THT_REG_TXT_RPTR(_q)	(0x40c0 + _Q(_q)) /* TX Task read ptr */
#define THT_REG_RXF_RPTR(_q)	(0x40d0 + _Q(_q)) /* RX Free read ptr */
#define THT_REG_RXD_RPTR(_q)	(0x40e0 + _Q(_q)) /* RX DSC read ptr */
#define THT_REG_TXF_RPTR(_q)	(0x40f0 + _Q(_q)) /* TX Free read ptr */
#define THT_REG_TXT_WPTR(_q)	(0x4080 + _Q(_q)) /* TX Task write ptr */
#define THT_REG_RXF_WPTR(_q)	(0x4090 + _Q(_q)) /* RX Free write ptr */
#define THT_REG_RXD_WPTR(_q)	(0x40a0 + _Q(_q)) /* RX DSC write ptr */
#define THT_REG_TXF_WPTR(_q)	(0x40b0 + _Q(_q)) /* TX Free write ptr */
#define THT_REG_HTB_ADDR	0x4100 /* HTB Addressing Mechanism enable */
#define THT_REG_HTB_ADDR_HI	0x4110 /* High HTB Address */
#define THT_REG_HTB_ST_TMR	0x3290 /* HTB Timer */
#define THT_REG_RDINTCM(_q)	(0x5120 + _Q(_q)) /* RX DSC Intr Coalescing */
#define THT_REG_RDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
#define THT_REG_RDINTCM_RXF_TH(_c)	((_c)<<16) /* rxf intr req thresh */
#define THT_REG_RDINTCM_COAL_RC		(1<<15) /* coalescing timer recharge */
#define THT_REG_RDINTCM_COAL(_c)	(_c) /* coalescing timer */
#define THT_REG_TDINTCM(_q)	(0x5130 + _Q(_q)) /* TX DSC Intr Coalescing */
#define THT_REG_TDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
#define THT_REG_TDINTCM_COAL_RC		(1<<15) /* coalescing timer recharge */
#define THT_REG_TDINTCM_COAL(_c)	(_c) /* coalescing timer */
/* 10G Ethernet MAC */
#define THT_REG_10G_REV		0x6000 /* Revision */
#define THT_REG_10G_SCR		0x6004 /* Scratch */
#define THT_REG_10G_CTL		0x6008 /* Control/Status */
#define THT_REG_10G_CTL_CMD_FRAME_EN	(1<<13) /* cmd frame enable */
#define THT_REG_10G_CTL_SW_RESET	(1<<12) /* sw reset */
#define THT_REG_10G_CTL_STATS_AUTO_CLR	(1<<11) /* auto clear statistics */
#define THT_REG_10G_CTL_LOOPBACK	(1<<10) /* enable loopback */
#define THT_REG_10G_CTL_TX_ADDR_INS	(1<<9) /* set mac on tx */
#define THT_REG_10G_CTL_PAUSE_IGNORE	(1<<8) /* ignore pause */
#define THT_REG_10G_CTL_PAUSE_FWD	(1<<7) /* forward pause */
#define THT_REG_10G_CTL_CRC_FWD		(1<<6) /* crc forward */
#define THT_REG_10G_CTL_PAD		(1<<5) /* frame padding */
#define THT_REG_10G_CTL_PROMISC		(1<<4) /* promiscuous mode */
#define THT_REG_10G_CTL_WAN_MODE	(1<<3) /* WAN mode */
#define THT_REG_10G_CTL_RX_EN		(1<<1) /* RX enable */
#define THT_REG_10G_CTL_TX_EN		(1<<0) /* TX enable */
#define THT_REG_10G_FRM_LEN	0x6014 /* Frame Length */
#define THT_REG_10G_PAUSE	0x6018 /* Pause Quanta */
#define THT_REG_10G_RX_SEC	0x601c /* RX Section */
#define THT_REG_10G_TX_SEC	0x6020 /* TX Section */
#define THT_REG_10G_SEC_AVAIL(_t)	(_t) /* section available thresh */
#define THT_REG_10G_SEC_EMPTY(_t)	((_t)<<16) /* section empty avail */
#define THT_REG_10G_RFIFO_AEF	0x6024 /* RX FIFO Almost Empty/Full */
#define THT_REG_10G_TFIFO_AEF	0x6028 /* TX FIFO Almost Empty/Full */
#define THT_REG_10G_FIFO_AE(_t)	(_t) /* almost empty */
#define THT_REG_10G_FIFO_AF(_t)	((_t)<<16) /* almost full */
#define THT_REG_10G_SM_STAT	0x6030 /* MDIO Status */
#define THT_REG_10G_SM_CMD	0x6034 /* MDIO Command */
#define THT_REG_10G_SM_DAT	0x6038 /* MDIO Data */
#define THT_REG_10G_SM_ADD	0x603c /* MDIO Address */
#define THT_REG_10G_STAT	0x6040 /* Status */
/* Statistic Counters */
/* XXX todo */
/* Status Registers */
#define THT_REG_MAC_LNK_STAT	0x0200 /* Link Status */
#define THT_REG_MAC_LNK_STAT_DIS	(1<<4) /* Mac Stats read disable */
#define THT_REG_MAC_LNK_STAT_LINK	(1<<2) /* Link State */
#define THT_REG_MAC_LNK_STAT_REM_FAULT	(1<<1) /* Remote Fault */
#define THT_REG_MAC_LNK_STAT_LOC_FAULT	(1<<0) /* Local Fault */
/* Interrupt Registers */
#define THT_REG_ISR		0x5100 /* Interrupt Status */
#define THT_REG_ISR_LINKCHG(_p)	(1<<(27+(_p))) /* link changed */
#define THT_REG_ISR_GPIO	(1<<26) /* GPIO */
#define THT_REG_ISR_RFRSH	(1<<25) /* DDR Refresh */
#define THT_REG_ISR_SWI		(1<<23) /* software interrupt */
#define THT_REG_ISR_RXF(_q)	(1<<(19+(_q))) /* rx free fifo */
#define THT_REG_ISR_TXF(_q)	(1<<(15+(_q))) /* tx free fifo */
#define THT_REG_ISR_RXD(_q)	(1<<(11+(_q))) /* rx desc fifo */
#define THT_REG_ISR_TMR(_t)	(1<<(6+(_t))) /* timer */
#define THT_REG_ISR_VNT		(1<<5) /* optistrata */
#define THT_REG_ISR_RxFL	(1<<4) /* RX Full */
#define THT_REG_ISR_TR		(1<<2) /* table read */
#define THT_REG_ISR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
#define THT_REG_ISR_GPLE_CLR	(1<<0) /* pcie timeout */
#define THT_FMT_ISR	"\020" "\035LINKCHG1" "\034LINKCHG0" \
			    "\033GPIO" "\032RFRSH" "\030SWI" \
			    "\027RXF3" "\026RXF2" "\025RXF1" \
			    "\024RXF0" "\023TXF3" "\022TXF2" \
			    "\021TXF1" "\020TXF0" "\017RXD3" \
			    "\016RXD2" "\015RXD1" "\014RXD0" \
			    "\012TMR3" "\011TMR2" "\010TMR1" \
			    "\007TMR0" "\006VNT" "\005RxFL" \
			    "\003TR" "\002PCI_LNK_INT" \
			    "\001GPLE_CLR"
#define THT_REG_ISR_GTI		0x5080 /* GTI Interrupt Status */
#define THT_REG_IMR		0x5110 /* Interrupt Mask */
#define THT_REG_IMR_LINKCHG(_p)	(1<<(27+(_p))) /* link changed */
#define THT_REG_IMR_GPIO	(1<<26) /* GPIO */
#define THT_REG_IMR_RFRSH	(1<<25) /* DDR Refresh */
#define THT_REG_IMR_SWI		(1<<23) /* software interrupt */
#define THT_REG_IMR_RXF(_q)	(1<<(19+(_q))) /* rx free fifo */
#define THT_REG_IMR_TXF(_q)	(1<<(15+(_q))) /* tx free fifo */
#define THT_REG_IMR_RXD(_q)	(1<<(11+(_q))) /* rx desc fifo */
#define THT_REG_IMR_TMR(_t)	(1<<(6+(_t))) /* timer */
#define THT_REG_IMR_VNT		(1<<5) /* optistrata */
#define THT_REG_IMR_RxFL	(1<<4) /* RX Full */
#define THT_REG_IMR_TR		(1<<2) /* table read */
#define THT_REG_IMR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
#define THT_REG_IMR_GPLE_CLR	(1<<0) /* pcie timeout */
#define THT_REG_IMR_GTI		0x5090 /* GTI Interrupt Mask */
#define THT_REG_ISR_MSK		0x5140 /* ISR Masked */
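
/*
 * Interrupt handling sketch: tht_intr() below reads THT_REG_ISR to learn
 * which sources fired and re-arms the interrupt by rewriting THT_REG_IMR
 * with the current mask (sc_imr); THT_IMR_DOWN()/THT_IMR_UP() further down
 * pick that mask depending on whether the interface is running.  THT_FMT_ISR
 * is a printf("%b") style format string used by the THT_D_INTR debug output
 * to decode the ISR bits.
 */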
/* Global Counters */
/* XXX todo */
/* DDR2 SDRAM Controller Registers */
/* XXX TBD */
/* EEPROM Registers */
/* XXX todo */
/* Init arbitration and status registers */
#define THT_REG_INIT_SEMAPHORE	0x5170 /* Init Semaphore */
#define THT_REG_INIT_STATUS	0x5180 /* Init Status */
/* PCI Credits Registers */
/* XXX todo */
/* TX Arbitration Registers */
#define THT_REG_TXTSK_PR(_q)	(0x41b0 + _Q(_q)) /* TX Queue Priority */
/* RX Part Registers */
#define THT_REG_RX_FLT		0x1240 /* RX Filter Configuration */
#define THT_REG_RX_FLT_ATXER	(1<<15) /* accept with xfer err */
#define THT_REG_RX_FLT_ATRM	(1<<14) /* accept with term err */
#define THT_REG_RX_FLT_AFTSQ	(1<<13) /* accept with fault seq */
#define THT_REG_RX_FLT_OSEN	(1<<12) /* enable pkts */
#define THT_REG_RX_FLT_APHER	(1<<11) /* accept with phy err */
#define THT_REG_RX_FLT_TXFC	(1<<10) /* TX flow control */
#define THT_REG_RX_FLT_FDA	(1<<8) /* filter direct address */
#define THT_REG_RX_FLT_AOF	(1<<7) /* accept overflow frame */
#define THT_REG_RX_FLT_ACF	(1<<6) /* accept control frame */
#define THT_REG_RX_FLT_ARUNT	(1<<5) /* accept runt */
#define THT_REG_RX_FLT_ACRC	(1<<4) /* accept crc error */
#define THT_REG_RX_FLT_AM	(1<<3) /* accept multicast */
#define THT_REG_RX_FLT_AB	(1<<2) /* accept broadcast */
#define THT_REG_RX_FLT_PRM_MASK	0x3 /* promiscuous mode */
#define THT_REG_RX_FLT_PRM_NORMAL	0x0 /* normal mode */
#define THT_REG_RX_FLT_PRM_ALL	0x1 /* pass all incoming frames */
#define THT_REG_RX_MAX_FRAME	0x12c0 /* Max Frame Size */
#define THT_REG_RX_UNC_MAC0	0x1250 /* MAC Address low word */
#define THT_REG_RX_UNC_MAC1	0x1260 /* MAC Address mid word */
#define THT_REG_RX_UNC_MAC2	0x1270 /* MAC Address high word */
#define THT_REG_RX_MAC_MCST0(_m)	(0x1a80 + (_m)*8)
#define THT_REG_RX_MAC_MCST1(_m)	(0x1a84 + (_m)*8)
#define THT_REG_RX_MAC_MCST_CNT	15
#define THT_REG_RX_MCST_HASH	0x1a00 /* imperfect multicast filter hash */
#define THT_REG_RX_MCST_HASH_SIZE	(256 / NBBY)
/* OptiStrata Debug Registers */
#define THT_REG_VPC		0x2300 /* Program Counter */
#define THT_REG_VLI		0x2310 /* Last Interrupt */
#define THT_REG_VIC		0x2320 /* Interrupts Count */
#define THT_REG_VTMR		0x2330 /* Timer */
#define THT_REG_VGLB		0x2340 /* Global */
/* SW Reset Registers */
#define THT_REG_RST_PRT		0x7000 /* Reset Port */
#define THT_REG_RST_PRT_ACTIVE	0x1 /* port reset is active */
#define THT_REG_DIS_PRT		0x7010 /* Disable Port */
#define THT_REG_RST_QU_0	0x7020 /* Reset Queue 0 */
#define THT_REG_RST_QU_1	0x7028 /* Reset Queue 1 */
#define THT_REG_DIS_QU_0	0x7030 /* Disable Queue 0 */
#define THT_REG_DIS_QU_1	0x7038 /* Disable Queue 1 */

#define THT_PORT_SIZE		0x8000
#define THT_PORT_REGION(_p)	((_p) * THT_PORT_SIZE)
#define THT_NQUEUES		4

#define THT_FIFO_ALIGN		4096
#define THT_FIFO_SIZE_4k	0x0
#define THT_FIFO_SIZE_8k	0x1
#define THT_FIFO_SIZE_16k	0x2
#define THT_FIFO_SIZE_32k	0x3
#define THT_FIFO_SIZE(_r)	(4096 * (1<<(_r)))
#define THT_FIFO_GAP		8 /* keep 8 bytes between ptrs */
#define THT_FIFO_PTR_MASK	0x00007ff8 /* rptr/wptr mask */

#define THT_FIFO_DESC_LEN	208 /* a descriptor can't be bigger than this */
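
/*
 * FIFO geometry, as used by the tht_fifo_* helpers below: each queue is a
 * ring of THT_FIFO_SIZE(_r) bytes (4k to 32k) aligned to THT_FIFO_ALIGN.
 * The chip exports read and write pointers through the RPTR/WPTR registers;
 * only the bits in THT_FIFO_PTR_MASK are used, which keeps the pointers
 * 8 byte aligned.  Free or pending space is the distance between the two
 * pointers modulo the ring size, e.g. in a 16k ring with wptr 0x3f00 and
 * rptr 0x0100 a writer has 0x0100 - 0x3f00 + 0x4000 = 0x0200 bytes left.
 * THT_FIFO_GAP keeps the pointers from ever meeting, and no single
 * descriptor may be larger than THT_FIFO_DESC_LEN bytes.
 */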

#define THT_IMR_DOWN(_p)	(THT_REG_IMR_LINKCHG(_p))
#define THT_IMR_UP(_p)		(THT_REG_IMR_LINKCHG(_p) | \
				    THT_REG_IMR_RXF(0) | THT_REG_IMR_TXF(0) | \
				    THT_REG_IMR_RXD(0))

/* hardware structures (we're using the 64 bit variants) */

/* physical buffer descriptor */
struct tht_pbd {
	u_int32_t		addr_lo;
	u_int32_t		addr_hi;
	u_int32_t		len;
} __packed;
#define THT_PBD_PKTLEN		(64 * 1024)

/* rx free fifo */
struct tht_rx_free {
	u_int16_t		bc; /* buffer count (0:4) */
	u_int16_t		type;

	u_int64_t		uid;

	/* followed by a pbd list */
} __packed;
#define THT_RXF_TYPE		1
#define THT_RXF_1ST_PDB_LEN	128
#define THT_RXF_SGL_LEN		((THT_FIFO_DESC_LEN - \
				    sizeof(struct tht_rx_free)) / \
				    sizeof(struct tht_pbd))
#define THT_RXF_PKT_NUM		128

/* rx descriptor */
struct tht_rx_desc {
	u_int32_t		flags;
#define THT_RXD_FLAGS_BC(_f)		((_f) & 0x1f) /* buffer count */
#define THT_RXD_FLAGS_RXFQ(_f)		(((_f)>>8) & 0x3) /* rxf queue id */
#define THT_RXD_FLAGS_TO		(1<<15)
#define THT_RXD_FLAGS_TYPE(_f)		(((_f)>>16) & 0xf) /* desc type */
#define THT_RXD_FLAGS_OVF		(1<<21) /* overflow error */
#define THT_RXD_FLAGS_RUNT		(1<<22) /* runt error */
#define THT_RXD_FLAGS_CRC		(1<<23) /* crc error */
#define THT_RXD_FLAGS_UDPCS		(1<<24) /* udp checksum error */
#define THT_RXD_FLAGS_TCPCS		(1<<25) /* tcp checksum error */
#define THT_RXD_FLAGS_IPCS		(1<<26) /* ip checksum error */
#define THT_RXD_FLAGS_PKT_ID		0x70000000
#define THT_RXD_FLAGS_PKT_ID_NONIP	0x00000000
#define THT_RXD_FLAGS_PKT_ID_TCP4	0x10000000
#define THT_RXD_FLAGS_PKT_ID_UDP4	0x20000000
#define THT_RXD_FLAGS_PKT_ID_IPV4	0x30000000
#define THT_RXD_FLAGS_PKT_ID_TCP6	0x50000000
#define THT_RXD_FLAGS_PKT_ID_UDP6	0x60000000
#define THT_RXD_FLAGS_PKT_ID_IPV6	0x70000000
#define THT_RXD_FLAGS_VTAG		(1<<31)
	u_int16_t		len;
	u_int16_t		vlan;
#define THT_RXD_VLAN_ID(_v)		((_v) & 0xfff)
#define THT_RXD_VLAN_CFI		(1<<12)
#define THT_RXD_VLAN_PRI(_v)		(((_v) >> 13) & 0x7)

	u_int64_t		uid;
} __packed;
#define THT_RXD_TYPE		2

/* rx descriptor type 3: data chain instruction */
struct tht_rx_desc_dc {
	/* preceded by tht_rx_desc */

	u_int16_t		cd_offset;
	u_int16_t		flags;

	u_int8_t		data[4];
} __packed;
#define THT_RXD_TYPE_DC		3

/* rx descriptor type 4: rss (recv side scaling) information */
struct tht_rx_desc_rss {
	/* preceded by tht_rx_desc */

	u_int8_t		rss_hft;
	u_int8_t		rss_type;
	u_int8_t		rss_tcpu;
	u_int8_t		reserved;

	u_int32_t		rss_hash;
} __packed;
#define THT_RXD_TYPE_RSS	4

/* tx task fifo */
struct tht_tx_task {
	u_int32_t		flags;
#define THT_TXT_FLAGS_BC(_f)	(_f) /* buffer count */
#define THT_TXT_FLAGS_UDPCS	(1<<5) /* udp checksum */
#define THT_TXT_FLAGS_TCPCS	(1<<6) /* tcp checksum */
#define THT_TXT_FLAGS_IPCS	(1<<7) /* ip checksum */
#define THT_TXT_FLAGS_VTAG	(1<<8) /* insert vlan tag */
#define THT_TXT_FLAGS_LGSND	(1<<9) /* tcp large send enabled */
#define THT_TXT_FLAGS_FRAG	(1<<10) /* ip fragmentation enabled */
#define THT_TXT_FLAGS_CFI	(1<<12) /* canonical format indicator */
#define THT_TXT_FLAGS_PRIO(_f)	((_f)<<13) /* vlan priority */
#define THT_TXT_FLAGS_VLAN(_f)	((_f)<<20) /* vlan id */
	u_int16_t		mss_mtu;
	u_int16_t		len;

	u_int64_t		uid;

	/* followed by a pbd list */
} __packed;
#define THT_TXT_TYPE		(3<<16)
#define THT_TXT_SGL_LEN		((THT_FIFO_DESC_LEN - \
				    sizeof(struct tht_tx_task)) / \
				    sizeof(struct tht_pbd))
#define THT_TXT_PKT_NUM		128
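
/*
 * Layout note: a descriptor written to the TXT or RXF fifo is the fixed
 * header above followed by one tht_pbd per DMA segment, padded to a
 * multiple of 8 bytes; its size is carried as a count of 8 byte words
 * (see LWORDS() and its use in tht_start()/tht_rxf_fill()).  THT_TXT_SGL_LEN
 * and THT_RXF_SGL_LEN are the largest scatter/gather lists that still fit
 * within THT_FIFO_DESC_LEN.
 */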

/* tx free fifo */
struct tht_tx_free {
	u_int32_t		status;

	u_int64_t		uid;

	u_int32_t		pad;
} __packed;

/* pci controller autoconf glue */

struct thtc_softc {
	struct device		sc_dev;

	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_memt;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;
};

int			thtc_match(struct device *, void *, void *);
void			thtc_attach(struct device *, struct device *, void *);
int			thtc_print(void *, const char *);

struct cfattach thtc_ca = {
	sizeof(struct thtc_softc), thtc_match, thtc_attach
};

struct cfdriver thtc_cd = {
	NULL, "thtc", DV_DULL
};

/* glue between the controller and the port */

struct tht_attach_args {
	int			taa_port;

	struct pci_attach_args	*taa_pa;
	pci_intr_handle_t	taa_ih;
};

/* tht itself */

struct tht_dmamem {
	bus_dmamap_t		tdm_map;
	bus_dma_segment_t	tdm_seg;
	size_t			tdm_size;
	caddr_t			tdm_kva;
};
#define THT_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define THT_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define THT_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)

struct tht_fifo_desc {
	bus_size_t		tfd_cfg0;
	bus_size_t		tfd_cfg1;
	bus_size_t		tfd_rptr;
	bus_size_t		tfd_wptr;
	u_int32_t		tfd_size;
	int			tfd_write;
};
#define THT_FIFO_PRE_SYNC(_d)	((_d)->tfd_write ? \
				    BUS_DMASYNC_PREWRITE : \
				    BUS_DMASYNC_PREREAD)
#define THT_FIFO_POST_SYNC(_d)	((_d)->tfd_write ? \
				    BUS_DMASYNC_POSTWRITE : \
				    BUS_DMASYNC_POSTREAD)

struct tht_fifo {
	struct tht_fifo_desc	*tf_desc;
	struct tht_dmamem	*tf_mem;
	int			tf_len;
	int			tf_rptr;
	int			tf_wptr;
	int			tf_ready;
};

struct tht_pkt {
	u_int64_t		tp_id;

	bus_dmamap_t		tp_dmap;
	struct mbuf		*tp_m;

	TAILQ_ENTRY(tht_pkt)	tp_link;
};

struct tht_pkt_list {
	struct tht_pkt		*tpl_pkts;
	TAILQ_HEAD(, tht_pkt)	tpl_free;
	TAILQ_HEAD(, tht_pkt)	tpl_used;
};

struct tht_softc {
	struct device		sc_dev;
	struct thtc_softc	*sc_thtc;
	int			sc_port;

	void			*sc_ih;

	bus_space_handle_t	sc_memh;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	struct timeval		sc_mediacheck;

	u_int16_t		sc_lladdr[3];

	struct tht_pkt_list	sc_tx_list;
	struct tht_pkt_list	sc_rx_list;

	struct tht_fifo		sc_txt;
	struct tht_fifo		sc_rxf;
	struct tht_fifo		sc_rxd;
	struct tht_fifo		sc_txf;

	u_int32_t		sc_imr;

	struct rwlock		sc_lock;
};

int			tht_match(struct device *, void *, void *);
void			tht_attach(struct device *, struct device *, void *);
void			tht_mountroot(void *);
int			tht_intr(void *);

struct cfattach tht_ca = {
	sizeof(struct tht_softc), tht_match, tht_attach
};

struct cfdriver tht_cd = {
	NULL, "tht", DV_IFNET
};

/* pkts */
int			tht_pkt_alloc(struct tht_softc *,
			    struct tht_pkt_list *, int, int);
void			tht_pkt_free(struct tht_softc *,
			    struct tht_pkt_list *);
void			tht_pkt_put(struct tht_pkt_list *, struct tht_pkt *);
struct tht_pkt		*tht_pkt_get(struct tht_pkt_list *);
struct tht_pkt		*tht_pkt_used(struct tht_pkt_list *);

/* fifos */
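
/*
 * The hardware provides THT_NQUEUES (4) sets of TXT/RXF/RXD/TXF queues per
 * port, but this driver only programs and services queue 0; the fifo
 * descriptors below and THT_IMR_UP() reflect that.
 */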

struct tht_fifo_desc tht_txt_desc = {
	THT_REG_TXT_CFG0(0),
	THT_REG_TXT_CFG1(0),
	THT_REG_TXT_RPTR(0),
	THT_REG_TXT_WPTR(0),
	THT_FIFO_SIZE_16k,
	1
};

struct tht_fifo_desc tht_rxf_desc = {
	THT_REG_RXF_CFG0(0),
	THT_REG_RXF_CFG1(0),
	THT_REG_RXF_RPTR(0),
	THT_REG_RXF_WPTR(0),
	THT_FIFO_SIZE_16k,
	1
};

struct tht_fifo_desc tht_rxd_desc = {
	THT_REG_RXD_CFG0(0),
	THT_REG_RXD_CFG1(0),
	THT_REG_RXD_RPTR(0),
	THT_REG_RXD_WPTR(0),
	THT_FIFO_SIZE_16k,
	0
};

struct tht_fifo_desc tht_txf_desc = {
	THT_REG_TXF_CFG0(0),
	THT_REG_TXF_CFG1(0),
	THT_REG_TXF_RPTR(0),
	THT_REG_TXF_WPTR(0),
	THT_FIFO_SIZE_4k,
	0
};

int			tht_fifo_alloc(struct tht_softc *, struct tht_fifo *,
			    struct tht_fifo_desc *);
void			tht_fifo_free(struct tht_softc *, struct tht_fifo *);

size_t			tht_fifo_readable(struct tht_softc *,
			    struct tht_fifo *);
size_t			tht_fifo_writable(struct tht_softc *,
			    struct tht_fifo *);
void			tht_fifo_pre(struct tht_softc *,
			    struct tht_fifo *);
void			tht_fifo_read(struct tht_softc *, struct tht_fifo *,
			    void *, size_t);
void			tht_fifo_write(struct tht_softc *, struct tht_fifo *,
			    void *, size_t);
void			tht_fifo_write_dmap(struct tht_softc *,
			    struct tht_fifo *, bus_dmamap_t);
void			tht_fifo_write_pad(struct tht_softc *,
			    struct tht_fifo *, int);
void			tht_fifo_post(struct tht_softc *,
			    struct tht_fifo *);

/* port operations */
void			tht_lladdr_read(struct tht_softc *);
void			tht_lladdr_write(struct tht_softc *);
int			tht_sw_reset(struct tht_softc *);
int			tht_fw_load(struct tht_softc *);
void			tht_fw_tick(void *arg);
void			tht_link_state(struct tht_softc *);

/* interface operations */
int			tht_ioctl(struct ifnet *, u_long, caddr_t);
void			tht_watchdog(struct ifnet *);
void			tht_start(struct ifnet *);
int			tht_load_pkt(struct tht_softc *, struct tht_pkt *,
			    struct mbuf *);
void			tht_txf(struct tht_softc *sc);

void			tht_rxf_fill(struct tht_softc *, int);
void			tht_rxf_drain(struct tht_softc *);
void			tht_rxd(struct tht_softc *);

void			tht_up(struct tht_softc *);
void			tht_iff(struct tht_softc *);
void			tht_down(struct tht_softc *);

/* ifmedia operations */
int			tht_media_change(struct ifnet *);
void			tht_media_status(struct ifnet *, struct ifmediareq *);

/* wrapper around dma memory */
struct tht_dmamem	*tht_dmamem_alloc(struct tht_softc *, bus_size_t,
			    bus_size_t);
void			tht_dmamem_free(struct tht_softc *,
			    struct tht_dmamem *);

/* bus space operations */
u_int32_t		tht_read(struct tht_softc *, bus_size_t);
void			tht_write(struct tht_softc *, bus_size_t, u_int32_t);
void			tht_write_region(struct tht_softc *, bus_size_t,
			    void *, size_t);
int			tht_wait_eq(struct tht_softc *, bus_size_t, u_int32_t,
			    u_int32_t, int);
int			tht_wait_ne(struct tht_softc *, bus_size_t, u_int32_t,
			    u_int32_t, int);

#define tht_set(_s, _r, _b)		tht_write((_s), (_r), \
					    tht_read((_s), (_r)) | (_b))
#define tht_clr(_s, _r, _b)		tht_write((_s), (_r), \
					    tht_read((_s), (_r)) & ~(_b))
#define tht_wait_set(_s, _r, _b, _t)	tht_wait_eq((_s), (_r), \
					    (_b), (_b), (_t))


/* misc */
#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
#define LWORDS(_b)	(((_b) + 7) >> 3)


struct thtc_device {
	pci_vendor_id_t		td_vendor;
	pci_vendor_id_t		td_product;
	u_int			td_nports;
};

const struct thtc_device *thtc_lookup(struct pci_attach_args *);

static const struct thtc_device thtc_devices[] = {
	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3009, 1 },
	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3010, 1 },
	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3014, 2 }
};

const struct thtc_device *
thtc_lookup(struct pci_attach_args *pa)
{
	int				i;
	const struct thtc_device	*td;

	for (i = 0; i < nitems(thtc_devices); i++) {
		td = &thtc_devices[i];
		if (td->td_vendor == PCI_VENDOR(pa->pa_id) &&
		    td->td_product == PCI_PRODUCT(pa->pa_id))
			return (td);
	}

	return (NULL);
}

int
thtc_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args		*pa = aux;

	if (thtc_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
thtc_attach(struct device *parent, struct device *self, void *aux)
{
	struct thtc_softc		*sc = (struct thtc_softc *)self;
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	const struct thtc_device	*td;
	struct tht_attach_args		taa;
	int				i;

	bzero(&taa, sizeof(taa));
	td = thtc_lookup(pa);

	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, THT_PCI_BAR);
	if (pci_mapreg_map(pa, THT_PCI_BAR, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
		printf(": unable to map host registers\n");
		return;
	}

	if (pci_intr_map(pa, &taa.taa_ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}
	printf(": %s\n", pci_intr_string(pa->pa_pc, taa.taa_ih));

	taa.taa_pa = pa;
	for (i = 0; i < td->td_nports; i++) {
		taa.taa_port = i;

		config_found(self, &taa, thtc_print);
	}

	return;

unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

int
thtc_print(void *aux, const char *pnp)
{
	struct tht_attach_args		*taa = aux;

	if (pnp != NULL)
		printf("\"%s\" at %s", tht_cd.cd_name, pnp);

	printf(" port %d", taa->taa_port);

	return (UNCONF);
}

int
tht_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

void
tht_attach(struct device *parent, struct device *self, void *aux)
{
	struct thtc_softc		*csc = (struct thtc_softc *)parent;
	struct tht_softc		*sc = (struct tht_softc *)self;
	struct tht_attach_args		*taa = aux;
	struct ifnet			*ifp;

	sc->sc_thtc = csc;
	sc->sc_port = taa->taa_port;
	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
	rw_init(&sc->sc_lock, "thtioc");

	if (bus_space_subregion(csc->sc_memt, csc->sc_memh,
	    THT_PORT_REGION(sc->sc_port), THT_PORT_SIZE,
	    &sc->sc_memh) != 0) {
		printf(": unable to map port registers\n");
		return;
	}

	if (tht_sw_reset(sc) != 0) {
		printf(": unable to reset port\n");
		/* bus_space(9) says we don't have to free subregions */
		return;
	}

	sc->sc_ih = pci_intr_establish(taa->taa_pa->pa_pc, taa->taa_ih,
	    IPL_NET, tht_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		/* bus_space(9) says we don't have to free subregions */
		return;
	}

	tht_lladdr_read(sc);
	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_ioctl = tht_ioctl;
	ifp->if_start = tht_start;
	ifp->if_watchdog = tht_watchdog;
	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 400);
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->sc_media, 0, tht_media_change, tht_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	mountroothook_establish(tht_mountroot, sc);
}

void
tht_mountroot(void *arg)
{
	struct tht_softc		*sc = arg;

	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
		return;

	if (tht_fw_load(sc) != 0)
		printf("%s: firmware load failed\n", DEVNAME(sc));

	tht_sw_reset(sc);

	tht_fifo_free(sc, &sc->sc_txt);

	tht_link_state(sc);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);
}

int
tht_intr(void *arg)
{
	struct tht_softc		*sc = arg;
	struct ifnet			*ifp;
	u_int32_t			isr;

	isr = tht_read(sc, THT_REG_ISR);
	if (isr == 0x0) {
		tht_write(sc, THT_REG_IMR, sc->sc_imr);
		return (0);
	}

	DPRINTF(THT_D_INTR, "%s: isr: 0x%b\n", DEVNAME(sc), isr, THT_FMT_ISR);

	if (ISSET(isr, THT_REG_ISR_LINKCHG(0) | THT_REG_ISR_LINKCHG(1)))
		tht_link_state(sc);

	ifp = &sc->sc_ac.ac_if;
	if (ifp->if_flags & IFF_RUNNING) {
		if (ISSET(isr, THT_REG_ISR_RXD(0)))
			tht_rxd(sc);

		if (ISSET(isr, THT_REG_ISR_RXF(0)))
			tht_rxf_fill(sc, 0);

		if (ISSET(isr, THT_REG_ISR_TXF(0)))
			tht_txf(sc);

		tht_start(ifp);
	}

	tht_write(sc, THT_REG_IMR, sc->sc_imr);
	return (1);
}

int
tht_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct tht_softc		*sc = ifp->if_softc;
	struct ifaddr			*ifa = (struct ifaddr *)addr;
	struct ifreq			*ifr = (struct ifreq *)addr;
	int				s, error = 0;

	rw_enter_write(&sc->sc_lock);
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				tht_iff(sc);
			else
				tht_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				tht_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			tht_iff(sc);
		error = 0;
	}

	splx(s);
	rw_exit_write(&sc->sc_lock);

	return (error);
}

void
tht_up(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		return;
	}

	if (tht_pkt_alloc(sc, &sc->sc_tx_list, THT_TXT_PKT_NUM,
	    THT_TXT_SGL_LEN) != 0)
		return;
	if (tht_pkt_alloc(sc, &sc->sc_rx_list, THT_RXF_PKT_NUM,
	    THT_RXF_SGL_LEN) != 0)
		goto free_tx_list;

	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
		goto free_rx_list;
	if (tht_fifo_alloc(sc, &sc->sc_rxf, &tht_rxf_desc) != 0)
		goto free_txt;
	if (tht_fifo_alloc(sc, &sc->sc_rxd, &tht_rxd_desc) != 0)
		goto free_rxf;
	if (tht_fifo_alloc(sc, &sc->sc_txf, &tht_txf_desc) != 0)
		goto free_rxd;

	tht_write(sc, THT_REG_10G_FRM_LEN, MCLBYTES - ETHER_ALIGN);
	tht_write(sc, THT_REG_10G_PAUSE, 0x96);
	tht_write(sc, THT_REG_10G_RX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
	    THT_REG_10G_SEC_EMPTY(0x80));
	tht_write(sc, THT_REG_10G_TX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
	    THT_REG_10G_SEC_EMPTY(0xe0));
	tht_write(sc, THT_REG_10G_RFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
	    THT_REG_10G_FIFO_AF(0x0));
	tht_write(sc, THT_REG_10G_TFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
	    THT_REG_10G_FIFO_AF(0x0));
	tht_write(sc, THT_REG_10G_CTL, THT_REG_10G_CTL_TX_EN |
	    THT_REG_10G_CTL_RX_EN | THT_REG_10G_CTL_PAD |
	    THT_REG_10G_CTL_PROMISC);

	tht_write(sc, THT_REG_VGLB, 0);

	tht_write(sc, THT_REG_RX_MAX_FRAME, MCLBYTES - ETHER_ALIGN);

	tht_write(sc, THT_REG_RDINTCM(0), THT_REG_RDINTCM_PKT_TH(12) |
	    THT_REG_RDINTCM_RXF_TH(4) | THT_REG_RDINTCM_COAL_RC |
	    THT_REG_RDINTCM_COAL(0x20));
	tht_write(sc, THT_REG_TDINTCM(0), THT_REG_TDINTCM_PKT_TH(12) |
	    THT_REG_TDINTCM_COAL_RC | THT_REG_TDINTCM_COAL(0x20));

	bcopy(sc->sc_ac.ac_enaddr, sc->sc_lladdr, ETHER_ADDR_LEN);
	tht_lladdr_write(sc);

	/* populate rxf fifo */
	tht_rxf_fill(sc, 1);

	tht_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* enable interrupts */
	sc->sc_imr = THT_IMR_UP(sc->sc_port);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);

	return;

free_rxd:
	tht_fifo_free(sc, &sc->sc_rxd);
free_rxf:
	tht_fifo_free(sc, &sc->sc_rxf);
free_txt:
	tht_fifo_free(sc, &sc->sc_txt);

	tht_sw_reset(sc);

free_rx_list:
	tht_pkt_free(sc, &sc->sc_rx_list);
free_tx_list:
	tht_pkt_free(sc, &sc->sc_tx_list);
}

void
tht_iff(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct ether_multi		*enm;
	struct ether_multistep		step;
	u_int32_t			rxf;
	u_int8_t			imf[THT_REG_RX_MCST_HASH_SIZE];
	u_int8_t			hash;
	int				i;

	ifp->if_flags &= ~IFF_ALLMULTI;

	rxf = THT_REG_RX_FLT_OSEN | THT_REG_RX_FLT_AM | THT_REG_RX_FLT_AB;
	for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
		tht_write(sc, THT_REG_RX_MAC_MCST0(i), 0);
		tht_write(sc, THT_REG_RX_MAC_MCST1(i), 0);
	}
	memset(imf, 0x00, sizeof(imf));

	if (ifp->if_flags & IFF_PROMISC)
		rxf |= THT_REG_RX_FLT_PRM_ALL;
	else if (sc->sc_ac.ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		memset(imf, 0xff, sizeof(imf));
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);

#if 0
		/* fill the perfect multicast filters */
		for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
			if (enm == NULL)
				break;

			tht_write(sc, THT_REG_RX_MAC_MCST0(i),
			    (enm->enm_addrlo[0] << 0) |
			    (enm->enm_addrlo[1] << 8) |
			    (enm->enm_addrlo[2] << 16) |
			    (enm->enm_addrlo[3] << 24));
			tht_write(sc, THT_REG_RX_MAC_MCST1(i),
			    (enm->enm_addrlo[4] << 0) |
			    (enm->enm_addrlo[5] << 8));

			ETHER_NEXT_MULTI(step, enm);
		}
#endif

		/* fill the imperfect multicast filter with what's left */
		while (enm != NULL) {
			hash = 0x00;
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				hash ^= enm->enm_addrlo[i];
			setbit(imf, hash);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	tht_write_region(sc, THT_REG_RX_MCST_HASH, imf, sizeof(imf));
	tht_write(sc, THT_REG_RX_FLT, rxf);
}
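
/*
 * tht_iff() above programs the 256 bit imperfect multicast filter: each
 * multicast address is reduced to an 8 bit hash by XORing its six bytes
 * together, and the matching bit in the table is set.  For example,
 * 01:00:5e:00:00:01 gives 0x01 ^ 0x00 ^ 0x5e ^ 0x00 ^ 0x00 ^ 0x01 = 0x5e,
 * so bit 0x5e is set.  The perfect filter slots are cleared and currently
 * left unused (see the #if 0 block).
 */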

void
tht_down(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		return;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE | IFF_ALLMULTI);

	while (tht_fifo_writable(sc, &sc->sc_txt) < sc->sc_txt.tf_len &&
	    tht_fifo_readable(sc, &sc->sc_txf) > 0)
		tsleep(sc, 0, "thtdown", hz);

	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);

	tht_sw_reset(sc);

	tht_fifo_free(sc, &sc->sc_txf);
	tht_fifo_free(sc, &sc->sc_rxd);
	tht_fifo_free(sc, &sc->sc_rxf);
	tht_fifo_free(sc, &sc->sc_txt);

	/* free mbufs that were on the rxf fifo */
	tht_rxf_drain(sc);

	tht_pkt_free(sc, &sc->sc_rx_list);
	tht_pkt_free(sc, &sc->sc_tx_list);
}

void
tht_start(struct ifnet *ifp)
{
	struct tht_softc		*sc = ifp->if_softc;
	struct tht_pkt			*pkt;
	struct tht_tx_task		txt;
	u_int32_t			flags;
	struct mbuf			*m;
	int				bc;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifp->if_flags & IFF_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	if (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_DESC_LEN)
		return;

	bzero(&txt, sizeof(txt));

	tht_fifo_pre(sc, &sc->sc_txt);

	do {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		pkt = tht_pkt_get(&sc->sc_tx_list);
		if (pkt == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (tht_load_pkt(sc, pkt, m) != 0) {
			m_freem(m);
			tht_pkt_put(&sc->sc_tx_list, pkt);
			ifp->if_oerrors++;
			break;
		}
		/* thou shalt not use m after this point, only pkt->tp_m */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, pkt->tp_m, BPF_DIRECTION_OUT);
#endif

		bc = sizeof(txt) +
		    sizeof(struct tht_pbd) * pkt->tp_dmap->dm_nsegs;

		flags = THT_TXT_TYPE | LWORDS(bc);
		txt.flags = htole32(flags);
		txt.len = htole16(pkt->tp_m->m_pkthdr.len);
		txt.uid = pkt->tp_id;

		DPRINTF(THT_D_TX, "%s: txt uid 0x%llx flags 0x%08x len %d\n",
		    DEVNAME(sc), pkt->tp_id, flags, pkt->tp_m->m_pkthdr.len);

		tht_fifo_write(sc, &sc->sc_txt, &txt, sizeof(txt));
		tht_fifo_write_dmap(sc, &sc->sc_txt, pkt->tp_dmap);
		tht_fifo_write_pad(sc, &sc->sc_txt, bc);

		bus_dmamap_sync(sc->sc_thtc->sc_dmat, pkt->tp_dmap, 0,
		    pkt->tp_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		ifp->if_opackets++;

	} while (sc->sc_txt.tf_ready > THT_FIFO_DESC_LEN);

	tht_fifo_post(sc, &sc->sc_txt);
}

int
tht_load_pkt(struct tht_softc *sc, struct tht_pkt *pkt, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap = pkt->tp_dmap;
	struct mbuf			*m0 = NULL;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) {
	case 0:
		pkt->tp_m = m;
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL)
			return (ENOBUFS);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m0, M_DONTWAIT);
			if (!(m0->m_flags & M_EXT)) {
				m_freem(m0);
				return (ENOBUFS);
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
		if (bus_dmamap_load_mbuf(dmat, dmap, m0, BUS_DMA_NOWAIT)) {
			m_freem(m0);
			return (ENOBUFS);
		}

		m_freem(m);
		pkt->tp_m = m0;
		break;

	default:
		return (ENOBUFS);
	}

	return (0);
}
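
/*
 * Note on tht_load_pkt() above: when the mbuf chain is too fragmented for
 * the DMA map (EFBIG), the chain is copied into a single mbuf (or cluster)
 * and the load is retried once; any other failure is reported as ENOBUFS
 * and tht_start() counts the packet as an output error.
 */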

void
tht_txf(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_tx_free		txf;
	struct tht_pkt			*pkt;

	if (tht_fifo_readable(sc, &sc->sc_txf) < sizeof(txf))
		return;

	tht_fifo_pre(sc, &sc->sc_txf);

	do {
		tht_fifo_read(sc, &sc->sc_txf, &txf, sizeof(txf));

		DPRINTF(THT_D_TX, "%s: txf uid 0x%llx\n", DEVNAME(sc), txf.uid);

		pkt = &sc->sc_tx_list.tpl_pkts[txf.uid];
		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmap);

		m_freem(pkt->tp_m);

		tht_pkt_put(&sc->sc_tx_list, pkt);

	} while (sc->sc_txf.tf_ready >= sizeof(txf));

	ifp->if_flags &= ~IFF_OACTIVE;

	tht_fifo_post(sc, &sc->sc_txf);
}

void
tht_rxf_fill(struct tht_softc *sc, int wait)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_rx_free		rxf;
	struct tht_pkt			*pkt;
	struct mbuf			*m;
	int				bc;

	if (tht_fifo_writable(sc, &sc->sc_rxf) <= THT_FIFO_DESC_LEN)
		return;

	tht_fifo_pre(sc, &sc->sc_rxf);

	for (;;) {
		if ((pkt = tht_pkt_get(&sc->sc_rx_list)) == NULL)
			goto done;

		MGETHDR(m, wait ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto put_pkt;

		MCLGET(m, wait ? M_WAIT : M_DONTWAIT);
		if (!ISSET(m->m_flags, M_EXT))
			goto free_m;

		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;

		dmap = pkt->tp_dmap;
		if (bus_dmamap_load_mbuf(dmat, dmap, m,
		    wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0)
			goto free_m;

		pkt->tp_m = m;

		bc = sizeof(rxf) + sizeof(struct tht_pbd) * dmap->dm_nsegs;

		rxf.bc = htole16(LWORDS(bc));
		rxf.type = htole16(THT_RXF_TYPE);
		rxf.uid = pkt->tp_id;

		tht_fifo_write(sc, &sc->sc_rxf, &rxf, sizeof(rxf));
		tht_fifo_write_dmap(sc, &sc->sc_rxf, dmap);
		tht_fifo_write_pad(sc, &sc->sc_rxf, bc);

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		if (sc->sc_rxf.tf_ready <= THT_FIFO_DESC_LEN)
			goto done;
	}

free_m:
	m_freem(m);
put_pkt:
	tht_pkt_put(&sc->sc_rx_list, pkt);
done:
	tht_fifo_post(sc, &sc->sc_rxf);
}

void
tht_rxf_drain(struct tht_softc *sc)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_pkt			*pkt;

	while ((pkt = tht_pkt_used(&sc->sc_rx_list)) != NULL) {
		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(dmat, dmap);

		m_freem(pkt->tp_m);

		tht_pkt_put(&sc->sc_rx_list, pkt);
	}
}

void
tht_rxd(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_rx_desc		rxd;
	struct tht_pkt			*pkt;
	struct mbuf			*m;
	int				bc;
	u_int32_t			flags;

	if (tht_fifo_readable(sc, &sc->sc_rxd) < sizeof(rxd))
		return;

	tht_fifo_pre(sc, &sc->sc_rxd);

	do {
		tht_fifo_read(sc, &sc->sc_rxd, &rxd, sizeof(rxd));

		flags = letoh32(rxd.flags);
		bc = THT_RXD_FLAGS_BC(flags) * 8;
		bc -= sizeof(rxd);
		pkt = &sc->sc_rx_list.tpl_pkts[rxd.uid];

		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(dmat, dmap);

		m = pkt->tp_m;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = letoh16(rxd.len);

		/* XXX process type 3 rx descriptors */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

		tht_pkt_put(&sc->sc_rx_list, pkt);

		while (bc > 0) {
			static u_int32_t pad;

			tht_fifo_read(sc, &sc->sc_rxd, &pad, sizeof(pad));
			bc -= sizeof(pad);
		}

		ifp->if_ipackets++;

	} while (sc->sc_rxd.tf_ready >= sizeof(rxd));

	tht_fifo_post(sc, &sc->sc_rxd);

	/* put more pkts on the fifo */
	tht_rxf_fill(sc, 0);
}

void
tht_watchdog(struct ifnet *ifp)
{
	/* do nothing */
}

int
tht_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}

void
tht_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct tht_softc		*sc = ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	tht_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state))
		imr->ifm_status |= IFM_ACTIVE;
}

int
tht_fifo_alloc(struct tht_softc *sc, struct tht_fifo *tf,
    struct tht_fifo_desc *tfd)
{
	u_int64_t			dva;

	tf->tf_len = THT_FIFO_SIZE(tfd->tfd_size);
	tf->tf_mem = tht_dmamem_alloc(sc, tf->tf_len, THT_FIFO_ALIGN);
	if (tf->tf_mem == NULL)
		return (1);

	tf->tf_desc = tfd;
	tf->tf_rptr = tf->tf_wptr = 0;

	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tfd));

	dva = THT_DMA_DVA(tf->tf_mem);
	tht_write(sc, tfd->tfd_cfg0, (u_int32_t)dva | tfd->tfd_size);
	tht_write(sc, tfd->tfd_cfg1, (u_int32_t)(dva >> 32));

	return (0);
}
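
/*
 * tht_fifo_alloc() above hands the ring to the chip by writing the low
 * 32 bits of its DMA address, or'ed with the THT_FIFO_SIZE_* code, into
 * the CFG0 register and the high 32 bits into CFG1.  The or works because
 * the ring is allocated with THT_FIFO_ALIGN (4k) alignment, so the size
 * code never overlaps address bits.
 */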

void
tht_fifo_free(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
	tht_dmamem_free(sc, tf->tf_mem);
}

size_t
tht_fifo_readable(struct tht_softc *sc, struct tht_fifo *tf)
{
	tf->tf_wptr = tht_read(sc, tf->tf_desc->tfd_wptr);
	tf->tf_wptr &= THT_FIFO_PTR_MASK;
	tf->tf_ready = tf->tf_wptr - tf->tf_rptr;
	if (tf->tf_ready < 0)
		tf->tf_ready += tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo rdable wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);

	return (tf->tf_ready);
}

size_t
tht_fifo_writable(struct tht_softc *sc, struct tht_fifo *tf)
{
	tf->tf_rptr = tht_read(sc, tf->tf_desc->tfd_rptr);
	tf->tf_rptr &= THT_FIFO_PTR_MASK;
	tf->tf_ready = tf->tf_rptr - tf->tf_wptr;
	if (tf->tf_ready <= 0)
		tf->tf_ready += tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo wrable wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);

	return (tf->tf_ready);
}

void
tht_fifo_pre(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
}

void
tht_fifo_read(struct tht_softc *sc, struct tht_fifo *tf,
    void *buf, size_t buflen)
{
	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
	u_int8_t			*desc = buf;
	size_t				len;

	tf->tf_ready -= buflen;

	len = tf->tf_len - tf->tf_rptr;

	if (len < buflen) {
		memcpy(desc, fifo + tf->tf_rptr, len);

		buflen -= len;
		desc += len;

		tf->tf_rptr = 0;
	}

	memcpy(desc, fifo + tf->tf_rptr, buflen);
	tf->tf_rptr += buflen;

	DPRINTF(THT_D_FIFO, "%s: fifo rd wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
}

void
tht_fifo_write(struct tht_softc *sc, struct tht_fifo *tf,
    void *buf, size_t buflen)
{
	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
	u_int8_t			*desc = buf;
	size_t				len;

	tf->tf_ready -= buflen;

	len = tf->tf_len - tf->tf_wptr;

	if (len < buflen) {
		memcpy(fifo + tf->tf_wptr, desc, len);

		buflen -= len;
		desc += len;

		tf->tf_wptr = 0;
	}

	memcpy(fifo + tf->tf_wptr, desc, buflen);
	tf->tf_wptr += buflen;
	tf->tf_wptr %= tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo wr wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
}

void
tht_fifo_write_dmap(struct tht_softc *sc, struct tht_fifo *tf,
    bus_dmamap_t dmap)
{
	struct tht_pbd			pbd;
	u_int64_t			dva;
	int				i;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		dva = dmap->dm_segs[i].ds_addr;

		pbd.addr_lo = htole32(dva);
		pbd.addr_hi = htole32(dva >> 32);
		pbd.len = htole32(dmap->dm_segs[i].ds_len);

		tht_fifo_write(sc, tf, &pbd, sizeof(pbd));
	}
}
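
/*
 * Descriptor writers use the helpers above in a fixed order: tht_fifo_pre()
 * syncs the ring, tht_fifo_write() copies the fixed header,
 * tht_fifo_write_dmap() appends one pbd per DMA segment,
 * tht_fifo_write_pad() (below) rounds the descriptor up to 8 bytes, and
 * tht_fifo_post() publishes the new write pointer (or read pointer for
 * fifos the driver consumes) to the chip.
 */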

void
tht_fifo_write_pad(struct tht_softc *sc, struct tht_fifo *tf, int bc)
{
	const static u_int32_t pad = 0x0;

	/* this assumes you'll only ever be writing multiples of 4 bytes */
	if (bc % 8)
		tht_fifo_write(sc, tf, (void *)&pad, sizeof(pad));
}

void
tht_fifo_post(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tf->tf_desc));
	if (tf->tf_desc->tfd_write)
		tht_write(sc, tf->tf_desc->tfd_wptr, tf->tf_wptr);
	else
		tht_write(sc, tf->tf_desc->tfd_rptr, tf->tf_rptr);

	DPRINTF(THT_D_FIFO, "%s: fifo post wptr: %d rptr: %d\n", DEVNAME(sc),
	    tf->tf_wptr, tf->tf_rptr);
}

const static bus_size_t tht_mac_regs[3] = {
	THT_REG_RX_UNC_MAC2, THT_REG_RX_UNC_MAC1, THT_REG_RX_UNC_MAC0
};

void
tht_lladdr_read(struct tht_softc *sc)
{
	int				i;

	for (i = 0; i < nitems(tht_mac_regs); i++)
		sc->sc_lladdr[i] = betoh16(tht_read(sc, tht_mac_regs[i]));
}

void
tht_lladdr_write(struct tht_softc *sc)
{
	int				i;

	for (i = 0; i < nitems(tht_mac_regs); i++)
		tht_write(sc, tht_mac_regs[i], htobe16(sc->sc_lladdr[i]));
}

#define tht_swrst_set(_s, _r)	tht_write((_s), (_r), 0x1)
#define tht_swrst_clr(_s, _r)	tht_write((_s), (_r), 0x0)
int
tht_sw_reset(struct tht_softc *sc)
{
	int				i;

	/* this follows SW Reset process in 8.8 of the doco */

	/* 1. disable rx */
	tht_clr(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);

	/* 2. initiate port disable */
	tht_swrst_set(sc, THT_REG_DIS_PRT);

	/* 3. initiate queue disable */
	tht_swrst_set(sc, THT_REG_DIS_QU_0);
	tht_swrst_set(sc, THT_REG_DIS_QU_1);

	/* 4. wait for successful finish of previous tasks */
	if (!tht_wait_set(sc, THT_REG_RST_PRT, THT_REG_RST_PRT_ACTIVE, 1000))
		return (1);

	/* 5. Reset interrupt registers */
	tht_write(sc, THT_REG_IMR, 0x0); /* 5.a */
	tht_read(sc, THT_REG_ISR); /* 5.b */
	for (i = 0; i < THT_NQUEUES; i++) {
		tht_write(sc, THT_REG_RDINTCM(i), 0x0); /* 5.c/5.d */
		tht_write(sc, THT_REG_TDINTCM(i), 0x0); /* 5.e */
	}

	/* 6. initiate queue reset */
	tht_swrst_set(sc, THT_REG_RST_QU_0);
	tht_swrst_set(sc, THT_REG_RST_QU_1);

	/* 7. initiate port reset */
	tht_swrst_set(sc, THT_REG_RST_PRT);

	/* 8. clear txt/rxf/rxd/txf read and write ptrs */
	for (i = 0; i < THT_NQUEUES; i++) {
		tht_write(sc, THT_REG_TXT_RPTR(i), 0);
		tht_write(sc, THT_REG_RXF_RPTR(i), 0);
		tht_write(sc, THT_REG_RXD_RPTR(i), 0);
		tht_write(sc, THT_REG_TXF_RPTR(i), 0);

		tht_write(sc, THT_REG_TXT_WPTR(i), 0);
		tht_write(sc, THT_REG_RXF_WPTR(i), 0);
		tht_write(sc, THT_REG_RXD_WPTR(i), 0);
		tht_write(sc, THT_REG_TXF_WPTR(i), 0);
	}

	/* 9. unset port disable */
	tht_swrst_clr(sc, THT_REG_DIS_PRT);

	/* 10. unset queue disable */
	tht_swrst_clr(sc, THT_REG_DIS_QU_0);
	tht_swrst_clr(sc, THT_REG_DIS_QU_1);

	/* 11. unset queue reset */
	tht_swrst_clr(sc, THT_REG_RST_QU_0);
	tht_swrst_clr(sc, THT_REG_RST_QU_1);

	/* 12. unset port reset */
	tht_swrst_clr(sc, THT_REG_RST_PRT);

	/* 13. enable rx */
	tht_set(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);

	return (0);
}
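
/*
 * Firmware note: tht_fw_load() below streams the image returned by
 * loadfirmware("tht") into the TXT fifo in chunks and then polls
 * THT_REG_INIT_STATUS, bounded by a roughly two second timeout, before
 * writing THT_REG_INIT_SEMAPHORE.  Reading the semaphore write as
 * "releasing the init semaphore" is an assumption based on the register
 * name rather than documented behaviour.
 */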

int
tht_fw_load(struct tht_softc *sc)
{
	struct timeout			ticker;
	volatile int			ok = 1;
	u_int8_t			*fw, *buf;
	size_t				fwlen, wrlen;
	int				error = 1;

	if (loadfirmware("tht", &fw, &fwlen) != 0)
		return (1);

	if ((fwlen % 8) != 0)
		goto err;

	buf = fw;
	while (fwlen > 0) {
		while (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_GAP) {
			if (tsleep(sc, PCATCH, "thtfw", 1) == EINTR)
				goto err;
		}

		wrlen = MIN(sc->sc_txt.tf_ready - THT_FIFO_GAP, fwlen);
		tht_fifo_pre(sc, &sc->sc_txt);
		tht_fifo_write(sc, &sc->sc_txt, buf, wrlen);
		tht_fifo_post(sc, &sc->sc_txt);

		fwlen -= wrlen;
		buf += wrlen;
	}

	timeout_set(&ticker, tht_fw_tick, (void *)&ok);
	timeout_add_sec(&ticker, 2);
	while (ok) {
		if (tht_read(sc, THT_REG_INIT_STATUS) != 0) {
			error = 0;
			break;
		}

		if (tsleep(sc, PCATCH, "thtinit", 1) == EINTR)
			goto err;
	}
	timeout_del(&ticker);

	tht_write(sc, THT_REG_INIT_SEMAPHORE, 0x1);

err:
	free(fw, M_DEVBUF);
	return (error);
}

void
tht_fw_tick(void *arg)
{
	volatile int			*ok = arg;

	*ok = 0;
}

void
tht_link_state(struct tht_softc *sc)
{
	static const struct timeval	interval = { 0, 10000 };
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	int				link_state = LINK_STATE_DOWN;

	if (!ratecheck(&sc->sc_mediacheck, &interval))
		return;

	if (tht_read(sc, THT_REG_MAC_LNK_STAT) & THT_REG_MAC_LNK_STAT_LINK)
		link_state = LINK_STATE_FULL_DUPLEX;

	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	if (LINK_STATE_IS_UP(ifp->if_link_state))
		ifp->if_baudrate = IF_Gbps(10);
	else
		ifp->if_baudrate = 0;
}

u_int32_t
tht_read(struct tht_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_thtc->sc_memt, sc->sc_memh, r));
}

void
tht_write(struct tht_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_thtc->sc_memt, sc->sc_memh, r, v);
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

void
tht_write_region(struct tht_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_raw_region_4(sc->sc_thtc->sc_memt, sc->sc_memh, r,
	    buf, len);
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}

int
tht_wait_eq(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
    int timeout)
{
	while ((tht_read(sc, r) & m) != v) {
		if (timeout == 0)
			return (0);

		delay(1000);
		timeout--;
	}

	return (1);
}

int
tht_wait_ne(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
    int timeout)
{
	while ((tht_read(sc, r) & m) == v) {
		if (timeout == 0)
			return (0);

		delay(1000);
		timeout--;
	}

	return (1);
}
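
/*
 * tht_wait_eq() and tht_wait_ne() above poll a register in 1ms steps for at
 * most `timeout' iterations and return non-zero on success; for example the
 * tht_wait_set() call in tht_sw_reset() waits up to about a second for
 * THT_REG_RST_PRT_ACTIVE to come up.
 */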

struct tht_dmamem *
tht_dmamem_alloc(struct tht_softc *sc, bus_size_t size, bus_size_t align)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_dmamem		*tdm;
	int				nsegs;

	tdm = malloc(sizeof(struct tht_dmamem), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(dmat, size, align, 0, &tdm->tdm_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &tdm->tdm_seg, nsegs, size, &tdm->tdm_kva,
	    BUS_DMA_WAITOK) != 0)
		goto free;

	if (bus_dmamap_load(dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (tdm);

unmap:
	bus_dmamem_unmap(dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF);

	return (NULL);
}

void
tht_dmamem_free(struct tht_softc *sc, struct tht_dmamem *tdm)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;

	bus_dmamap_unload(dmat, tdm->tdm_map);
	bus_dmamem_unmap(dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF);
}

int
tht_pkt_alloc(struct tht_softc *sc, struct tht_pkt_list *tpl, int npkts,
    int nsegs)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_pkt			*pkt;
	int				i;

	tpl->tpl_pkts = malloc(sizeof(struct tht_pkt) * npkts, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	TAILQ_INIT(&tpl->tpl_free);
	TAILQ_INIT(&tpl->tpl_used);
	for (i = 0; i < npkts; i++) {
		pkt = &tpl->tpl_pkts[i];

		pkt->tp_id = i;
		if (bus_dmamap_create(dmat, THT_PBD_PKTLEN, nsegs,
		    THT_PBD_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->tp_dmap) != 0) {
			tht_pkt_free(sc, tpl);
			return (1);
		}

		TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
	}

	return (0);
}

void
tht_pkt_free(struct tht_softc *sc, struct tht_pkt_list *tpl)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_pkt			*pkt;

	while ((pkt = tht_pkt_get(tpl)) != NULL)
		bus_dmamap_destroy(dmat, pkt->tp_dmap);
	free(tpl->tpl_pkts, M_DEVBUF);
	tpl->tpl_pkts = NULL;
}

void
tht_pkt_put(struct tht_pkt_list *tpl, struct tht_pkt *pkt)
{
	TAILQ_REMOVE(&tpl->tpl_used, pkt, tp_link);
	TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
}

struct tht_pkt *
tht_pkt_get(struct tht_pkt_list *tpl)
{
	struct tht_pkt			*pkt;

	pkt = TAILQ_FIRST(&tpl->tpl_free);
	if (pkt != NULL) {
		TAILQ_REMOVE(&tpl->tpl_free, pkt, tp_link);
		TAILQ_INSERT_TAIL(&tpl->tpl_used, pkt, tp_link);
	}

	return (pkt);
}

struct tht_pkt *
tht_pkt_used(struct tht_pkt_list *tpl)
{
	return (TAILQ_FIRST(&tpl->tpl_used));
}