1 /* $OpenBSD: if_tht.c,v 1.149 2024/09/04 07:54:52 mglocker Exp $ */
2
3 /*
4 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /*
20 * Driver for the Tehuti TN30xx multi port 10Gb Ethernet chipsets,
21 * see http://www.tehutinetworks.net/.
22 *
23 * This driver was made possible because Tehuti networks provided
24 * hardware and documentation. Thanks!
25 */
26
27 #include "bpfilter.h"
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/sockio.h>
32 #include <sys/mbuf.h>
33 #include <sys/socket.h>
34 #include <sys/malloc.h>
35 #include <sys/device.h>
36 #include <sys/queue.h>
37 #include <sys/rwlock.h>
38 #include <sys/time.h>
39
40 #include <machine/bus.h>
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <net/if.h>
47 #include <net/if_media.h>
48
49 #if NBPFILTER > 0
50 #include <net/bpf.h>
51 #endif
52
53 #include <netinet/in.h>
54 #include <netinet/if_ether.h>
55
56 #ifdef THT_DEBUG
57 #define THT_D_FIFO (1<<0)
58 #define THT_D_TX (1<<1)
59 #define THT_D_RX (1<<2)
60 #define THT_D_INTR (1<<3)
61
62 int thtdebug = THT_D_TX | THT_D_RX | THT_D_INTR;
63
64 #define DPRINTF(l, f...) do { if (thtdebug & (l)) printf(f); } while (0)
65 #else
66 #define DPRINTF(l, f...)
67 #endif
68
69 /* registers */
70
71 #define THT_PCI_BAR 0x10
72
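/* per-queue registers are laid out at 4 byte intervals; _Q() turns a
 * queue index into that register offset */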
73 #define _Q(_q) ((_q) * 4)
74
75 /* General Configuration */
76 #define THT_REG_END_SEL 0x5448 /* PCI Endian Select */
77 #define THT_REG_CLKPLL 0x5000
78 #define THT_REG_CLKPLL_PLLLK (1<<9) /* PLL is locked */
79 #define THT_REG_CLKPLL_RSTEND (1<<8) /* Reset ended */
80 #define THT_REG_CLKPLL_TXF_DIS (1<<3) /* TX Free disabled */
81 #define THT_REG_CLKPLL_VNT_STOP (1<<2) /* VENETO Stop */
82 #define THT_REG_CLKPLL_PLLRST (1<<1) /* PLL Reset */
83 #define THT_REG_CLKPLL_SFTRST (1<<0) /* Software Reset */
84 /* Descriptors and FIFO Registers */
85 #define THT_REG_TXT_CFG0(_q) (0x4040 + _Q(_q)) /* CFG0 TX Task queues */
86 #define THT_REG_RXF_CFG0(_q) (0x4050 + _Q(_q)) /* CFG0 RX Free queues */
87 #define THT_REG_RXD_CFG0(_q) (0x4060 + _Q(_q)) /* CFG0 RX DSC queues */
88 #define THT_REG_TXF_CFG0(_q) (0x4070 + _Q(_q)) /* CFG0 TX Free queues */
89 #define THT_REG_TXT_CFG1(_q) (0x4000 + _Q(_q)) /* CFG1 TX Task queues */
90 #define THT_REG_RXF_CFG1(_q) (0x4010 + _Q(_q)) /* CFG1 RX Free queues */
91 #define THT_REG_RXD_CFG1(_q) (0x4020 + _Q(_q)) /* CFG1 RX DSC queues */
92 #define THT_REG_TXF_CFG1(_q) (0x4030 + _Q(_q)) /* CFG1 TX Free queues */
93 #define THT_REG_TXT_RPTR(_q) (0x40c0 + _Q(_q)) /* TX Task read ptr */
94 #define THT_REG_RXF_RPTR(_q) (0x40d0 + _Q(_q)) /* RX Free read ptr */
95 #define THT_REG_RXD_RPTR(_q) (0x40e0 + _Q(_q)) /* RX DSC read ptr */
96 #define THT_REG_TXF_RPTR(_q) (0x40f0 + _Q(_q)) /* TX Free read ptr */
97 #define THT_REG_TXT_WPTR(_q) (0x4080 + _Q(_q)) /* TX Task write ptr */
98 #define THT_REG_RXF_WPTR(_q) (0x4090 + _Q(_q)) /* RX Free write ptr */
99 #define THT_REG_RXD_WPTR(_q) (0x40a0 + _Q(_q)) /* RX DSC write ptr */
100 #define THT_REG_TXF_WPTR(_q) (0x40b0 + _Q(_q)) /* TX Free write ptr */
101 #define THT_REG_HTB_ADDR 0x4100 /* HTB Addressing Mechanism enable */
102 #define THT_REG_HTB_ADDR_HI 0x4110 /* High HTB Address */
103 #define THT_REG_HTB_ST_TMR 0x3290 /* HTB Timer */
104 #define THT_REG_RDINTCM(_q) (0x5120 + _Q(_q)) /* RX DSC Intr Coalescing */
105 #define THT_REG_RDINTCM_PKT_TH(_c) ((_c)<<20) /* pkt count threshold */
106 #define THT_REG_RDINTCM_RXF_TH(_c) ((_c)<<16) /* rxf intr req thresh */
107 #define THT_REG_RDINTCM_COAL_RC (1<<15) /* coalescing timer recharge */
108 #define THT_REG_RDINTCM_COAL(_c) (_c) /* coalescing timer */
109 #define THT_REG_TDINTCM(_q) (0x5130 + _Q(_q)) /* TX DSC Intr Coalescing */
110 #define THT_REG_TDINTCM_PKT_TH(_c) ((_c)<<20) /* pkt count threshold */
111 #define THT_REG_TDINTCM_COAL_RC (1<<15) /* coalescing timer recharge */
112 #define THT_REG_TDINTCM_COAL(_c) (_c) /* coalescing timer */
113 /* 10G Ethernet MAC */
114 #define THT_REG_10G_REV 0x6000 /* Revision */
115 #define THT_REG_10G_SCR 0x6004 /* Scratch */
116 #define THT_REG_10G_CTL 0x6008 /* Control/Status */
117 #define THT_REG_10G_CTL_CMD_FRAME_EN (1<<13) /* cmd frame enable */
118 #define THT_REG_10G_CTL_SW_RESET (1<<12) /* sw reset */
119 #define THT_REG_10G_CTL_STATS_AUTO_CLR (1<<11) /* auto clear statistics */
120 #define THT_REG_10G_CTL_LOOPBACK (1<<10) /* enable loopback */
121 #define THT_REG_10G_CTL_TX_ADDR_INS (1<<9) /* set mac on tx */
122 #define THT_REG_10G_CTL_PAUSE_IGNORE (1<<8) /* ignore pause */
123 #define THT_REG_10G_CTL_PAUSE_FWD (1<<7) /* forward pause */
124 #define THT_REG_10G_CTL_CRC_FWD (1<<6) /* crc forward */
125 #define THT_REG_10G_CTL_PAD (1<<5) /* frame padding */
126 #define THT_REG_10G_CTL_PROMISC (1<<4) /* promiscuous mode */
127 #define THT_REG_10G_CTL_WAN_MODE (1<<3) /* WAN mode */
128 #define THT_REG_10G_CTL_RX_EN (1<<1) /* RX enable */
129 #define THT_REG_10G_CTL_TX_EN (1<<0) /* TX enable */
130 #define THT_REG_10G_FRM_LEN 0x6014 /* Frame Length */
131 #define THT_REG_10G_PAUSE 0x6018 /* Pause Quanta */
132 #define THT_REG_10G_RX_SEC 0x601c /* RX Section */
133 #define THT_REG_10G_TX_SEC 0x6020 /* TX Section */
134 #define THT_REG_10G_SEC_AVAIL(_t) (_t) /* section available thresh*/
135 #define THT_REG_10G_SEC_EMPTY(_t) ((_t)<<16) /* section empty avail */
136 #define THT_REG_10G_RFIFO_AEF 0x6024 /* RX FIFO Almost Empty/Full */
137 #define THT_REG_10G_TFIFO_AEF 0x6028 /* TX FIFO Almost Empty/Full */
138 #define THT_REG_10G_FIFO_AE(_t) (_t) /* almost empty */
139 #define THT_REG_10G_FIFO_AF(_t) ((_t)<<16) /* almost full */
140 #define THT_REG_10G_SM_STAT 0x6030 /* MDIO Status */
141 #define THT_REG_10G_SM_CMD 0x6034 /* MDIO Command */
142 #define THT_REG_10G_SM_DAT 0x6038 /* MDIO Data */
143 #define THT_REG_10G_SM_ADD 0x603c /* MDIO Address */
144 #define THT_REG_10G_STAT 0x6040 /* Status */
145 /* Statistic Counters */
146 /* XXX todo */
147 /* Status Registers */
148 #define THT_REG_MAC_LNK_STAT 0x0200 /* Link Status */
149 #define THT_REG_MAC_LNK_STAT_DIS (1<<4) /* Mac Stats read disable */
150 #define THT_REG_MAC_LNK_STAT_LINK (1<<2) /* Link State */
151 #define THT_REG_MAC_LNK_STAT_REM_FAULT (1<<1) /* Remote Fault */
152 #define THT_REG_MAC_LNK_STAT_LOC_FAULT (1<<0) /* Local Fault */
153 /* Interrupt Registers */
154 #define THT_REG_ISR 0x5100 /* Interrupt Status */
155 #define THT_REG_ISR_LINKCHG(_p) (1<<(27+(_p))) /* link changed */
156 #define THT_REG_ISR_GPIO (1<<26) /* GPIO */
157 #define THT_REG_ISR_RFRSH (1<<25) /* DDR Refresh */
158 #define THT_REG_ISR_SWI (1<<23) /* software interrupt */
159 #define THT_REG_ISR_RXF(_q) (1<<(19+(_q))) /* rx free fifo */
160 #define THT_REG_ISR_TXF(_q) (1<<(15+(_q))) /* tx free fifo */
161 #define THT_REG_ISR_RXD(_q) (1<<(11+(_q))) /* rx desc fifo */
162 #define THT_REG_ISR_TMR(_t) (1<<(6+(_t))) /* timer */
163 #define THT_REG_ISR_VNT (1<<5) /* optistrata */
164 #define THT_REG_ISR_RxFL (1<<4) /* RX Full */
165 #define THT_REG_ISR_TR (1<<2) /* table read */
166 #define THT_REG_ISR_PCIE_LNK_INT (1<<1) /* pcie link fail */
167 #define THT_REG_ISR_GPLE_CLR (1<<0) /* pcie timeout */
168 #define THT_FMT_ISR "\020" "\035LINKCHG1" "\034LINKCHG0" \
169 "\033GPIO" "\032RFRSH" "\030SWI" \
170 "\027RXF3" "\026RXF2" "\025RXF1" \
171 "\024RXF0" "\023TXF3" "\022TXF2" \
172 "\021TXF1" "\020TXF0" "\017RXD3" \
173 "\016RXD2" "\015RXD1" "\014RXD0" \
174 "\012TMR3" "\011TMR2" "\010TMR1" \
175 "\007TMR0" "\006VNT" "\005RxFL" \
176 "\003TR" "\002PCI_LNK_INT" \
177 "\001GPLE_CLR"
178 #define THT_REG_ISR_GTI 0x5080 /* GTI Interrupt Status */
179 #define THT_REG_IMR 0x5110 /* Interrupt Mask */
180 #define THT_REG_IMR_LINKCHG(_p) (1<<(27+(_p))) /* link changed */
181 #define THT_REG_IMR_GPIO (1<<26) /* GPIO */
182 #define THT_REG_IMR_RFRSH (1<<25) /* DDR Refresh */
183 #define THT_REG_IMR_SWI (1<<23) /* software interrupt */
184 #define THT_REG_IMR_RXF(_q) (1<<(19+(_q))) /* rx free fifo */
185 #define THT_REG_IMR_TXF(_q) (1<<(15+(_q))) /* tx free fifo */
186 #define THT_REG_IMR_RXD(_q) (1<<(11+(_q))) /* rx desc fifo */
187 #define THT_REG_IMR_TMR(_t) (1<<(6+(_t))) /* timer */
188 #define THT_REG_IMR_VNT (1<<5) /* optistrata */
189 #define THT_REG_IMR_RxFL (1<<4) /* RX Full */
190 #define THT_REG_IMR_TR (1<<2) /* table read */
191 #define THT_REG_IMR_PCIE_LNK_INT (1<<1) /* pcie link fail */
192 #define THT_REG_IMR_GPLE_CLR (1<<0) /* pcie timeout */
193 #define THT_REG_IMR_GTI 0x5090 /* GTI Interrupt Mask */
194 #define THT_REG_ISR_MSK 0x5140 /* ISR Masked */
195 /* Global Counters */
196 /* XXX todo */
197 /* DDR2 SDRAM Controller Registers */
198 /* XXX TBD */
199 /* EEPROM Registers */
200 /* XXX todo */
201 /* Init arbitration and status registers */
202 #define THT_REG_INIT_SEMAPHORE 0x5170 /* Init Semaphore */
203 #define THT_REG_INIT_STATUS 0x5180 /* Init Status */
204 /* PCI Credits Registers */
205 /* XXX todo */
206 /* TX Arbitration Registers */
207 #define THT_REG_TXTSK_PR(_q) (0x41b0 + _Q(_q)) /* TX Queue Priority */
208 /* RX Part Registers */
209 #define THT_REG_RX_FLT 0x1240 /* RX Filter Configuration */
210 #define THT_REG_RX_FLT_ATXER (1<<15) /* accept with xfer err */
211 #define THT_REG_RX_FLT_ATRM (1<<14) /* accept with term err */
212 #define THT_REG_RX_FLT_AFTSQ (1<<13) /* accept with fault seq */
213 #define THT_REG_RX_FLT_OSEN (1<<12) /* enable pkts */
214 #define THT_REG_RX_FLT_APHER (1<<11) /* accept with phy err */
215 #define THT_REG_RX_FLT_TXFC (1<<10) /* TX flow control */
216 #define THT_REG_RX_FLT_FDA (1<<8) /* filter direct address */
217 #define THT_REG_RX_FLT_AOF (1<<7) /* accept overflow frame */
218 #define THT_REG_RX_FLT_ACF (1<<6) /* accept control frame */
219 #define THT_REG_RX_FLT_ARUNT (1<<5) /* accept runt */
220 #define THT_REG_RX_FLT_ACRC (1<<4) /* accept crc error */
221 #define THT_REG_RX_FLT_AM (1<<3) /* accept multicast */
222 #define THT_REG_RX_FLT_AB (1<<2) /* accept broadcast */
223 #define THT_REG_RX_FLT_PRM_MASK 0x3 /* promiscuous mode */
224 #define THT_REG_RX_FLT_PRM_NORMAL 0x0 /* normal mode */
225 #define THT_REG_RX_FLT_PRM_ALL 0x1 /* pass all incoming frames */
226 #define THT_REG_RX_MAX_FRAME 0x12c0 /* Max Frame Size */
227 #define THT_REG_RX_UNC_MAC0 0x1250 /* MAC Address low word */
228 #define THT_REG_RX_UNC_MAC1 0x1260 /* MAC Address mid word */
229 #define THT_REG_RX_UNC_MAC2 0x1270 /* MAC Address high word */
230 #define THT_REG_RX_MAC_MCST0(_m) (0x1a80 + (_m)*8)
231 #define THT_REG_RX_MAC_MCST1(_m) (0x1a84 + (_m)*8)
232 #define THT_REG_RX_MAC_MCST_CNT 15
233 #define THT_REG_RX_MCST_HASH 0x1a00 /* imperfect multicast filter hash */
234 #define THT_REG_RX_MCST_HASH_SIZE (256 / NBBY)
235 /* OptiStrata Debug Registers */
236 #define THT_REG_VPC 0x2300 /* Program Counter */
237 #define THT_REG_VLI 0x2310 /* Last Interrupt */
238 #define THT_REG_VIC 0x2320 /* Interrupts Count */
239 #define THT_REG_VTMR 0x2330 /* Timer */
240 #define THT_REG_VGLB 0x2340 /* Global */
241 /* SW Reset Registers */
242 #define THT_REG_RST_PRT 0x7000 /* Reset Port */
243 #define THT_REG_RST_PRT_ACTIVE 0x1 /* port reset is active */
244 #define THT_REG_DIS_PRT 0x7010 /* Disable Port */
245 #define THT_REG_RST_QU_0 0x7020 /* Reset Queue 0 */
246 #define THT_REG_RST_QU_1 0x7028 /* Reset Queue 1 */
247 #define THT_REG_DIS_QU_0 0x7030 /* Disable Queue 0 */
248 #define THT_REG_DIS_QU_1 0x7038 /* Disable Queue 1 */
249
250 #define THT_PORT_SIZE 0x8000
251 #define THT_PORT_REGION(_p) ((_p) * THT_PORT_SIZE)
252 #define THT_NQUEUES 4
253
254 #define THT_FIFO_ALIGN 4096
255 #define THT_FIFO_SIZE_4k 0x0
256 #define THT_FIFO_SIZE_8k 0x1
257 #define THT_FIFO_SIZE_16k 0x2
258 #define THT_FIFO_SIZE_32k 0x3
259 #define THT_FIFO_SIZE(_r) (4096 * (1<<(_r)))
260 #define THT_FIFO_GAP 8 /* keep 8 bytes between ptrs */
261 #define THT_FIFO_PTR_MASK 0x00007ff8 /* rptr/wptr mask */
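/* the read/write pointers are byte offsets into a fifo; the mask drops
 * the low 3 bits to match the 8 byte lword granularity of descriptors */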
262
263 #define THT_FIFO_DESC_LEN 208 /* a descriptor can't be bigger than this */
264
265 #define THT_IMR_DOWN(_p) (THT_REG_IMR_LINKCHG(_p))
266 #define THT_IMR_UP(_p) (THT_REG_IMR_LINKCHG(_p) | \
267 THT_REG_IMR_RXF(0) | THT_REG_IMR_TXF(0) | \
268 THT_REG_IMR_RXD(0))
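/* with the interface down only link change interrupts are unmasked;
 * bringing it up adds the rx free, rx descriptor and tx free events
 * for queue 0, the only queue this driver uses */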
269
270 /* hardware structures (we're using the 64 bit variants) */
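/* descriptor fields are read and written through the fifos in
 * little-endian byte order (see the htole/letoh conversions below) */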
271
272 /* physical buffer descriptor */
273 struct tht_pbd {
274 u_int32_t addr_lo;
275 u_int32_t addr_hi;
276 u_int32_t len;
277 } __packed;
278 #define THT_PBD_PKTLEN (64 * 1024)
279
280 /* rx free fifo */
281 struct tht_rx_free {
282 u_int16_t bc; /* buffer count (0:4) */
283 u_int16_t type;
284
285 u_int64_t uid;
286
287 /* followed by a pdb list */
288 } __packed;
289 #define THT_RXF_TYPE 1
290 #define THT_RXF_1ST_PDB_LEN 128
291 #define THT_RXF_SGL_LEN ((THT_FIFO_DESC_LEN - \
292 sizeof(struct tht_rx_free)) / \
293 sizeof(struct tht_pbd))
294 #define THT_RXF_PKT_NUM 128
295
296 /* rx descriptor */
297 struct tht_rx_desc {
298 u_int32_t flags;
299 #define THT_RXD_FLAGS_BC(_f) ((_f) & 0x1f) /* buffer count */
300 #define THT_RXD_FLAGS_RXFQ(_f) (((_f)>>8) & 0x3) /* rxf queue id */
301 #define THT_RXD_FLAGS_TO (1<<15)
302 #define THT_RXD_FLAGS_TYPE(_f) (((_f)>>16) & 0xf) /* desc type */
303 #define THT_RXD_FLAGS_OVF (1<<21) /* overflow error */
304 #define THT_RXD_FLAGS_RUNT (1<<22) /* runt error */
305 #define THT_RXD_FLAGS_CRC (1<<23) /* crc error */
306 #define THT_RXD_FLAGS_UDPCS (1<<24) /* udp checksum error */
307 #define THT_RXD_FLAGS_TCPCS (1<<25) /* tcp checksum error */
308 #define THT_RXD_FLAGS_IPCS (1<<26) /* ip checksum error */
309 #define THT_RXD_FLAGS_PKT_ID 0x70000000
310 #define THT_RXD_FLAGS_PKT_ID_NONIP 0x00000000
311 #define THT_RXD_FLAGS_PKT_ID_TCP4 0x10000000
312 #define THT_RXD_FLAGS_PKT_ID_UDP4 0x20000000
313 #define THT_RXD_FLAGS_PKT_ID_IPV4 0x30000000
314 #define THT_RXD_FLAGS_PKT_ID_TCP6 0x50000000
315 #define THT_RXD_FLAGS_PKT_ID_UDP6 0x60000000
316 #define THT_RXD_FLAGS_PKT_ID_IPV6 0x70000000
317 #define THT_RXD_FLAGS_VTAG (1<<31)
318 u_int16_t len;
319 u_int16_t vlan;
320 #define THT_RXD_VLAN_ID(_v) ((_v) & 0xfff)
321 #define THT_RXD_VLAN_CFI (1<<12)
322 #define THT_RXD_VLAN_PRI(_v) (((_v) >> 13) & 0x7)
323
324 u_int64_t uid;
325 } __packed;
326 #define THT_RXD_TYPE 2
327
328 /* rx descriptor type 3: data chain instruction */
329 struct tht_rx_desc_dc {
330 /* preceded by tht_rx_desc */
331
332 u_int16_t cd_offset;
333 u_int16_t flags;
334
335 u_int8_t data[4];
336 } __packed;
337 #define THT_RXD_TYPE_DC 3
338
339 /* rx descriptor type 4: rss (recv side scaling) information */
340 struct tht_rx_desc_rss {
341 /* preceded by tht_rx_desc */
342
343 u_int8_t rss_hft;
344 u_int8_t rss_type;
345 u_int8_t rss_tcpu;
346 u_int8_t reserved;
347
348 u_int32_t rss_hash;
349 } __packed;
350 #define THT_RXD_TYPE_RSS 4
351
352 /* tx task fifo */
353 struct tht_tx_task {
354 u_int32_t flags;
355 #define THT_TXT_FLAGS_BC(_f) (_f) /* buffer count */
356 #define THT_TXT_FLAGS_UDPCS (1<<5) /* udp checksum */
357 #define THT_TXT_FLAGS_TCPCS (1<<6) /* tcp checksum */
358 #define THT_TXT_FLAGS_IPCS (1<<7) /* ip checksum */
359 #define THT_TXT_FLAGS_VTAG (1<<8) /* insert vlan tag */
360 #define THT_TXT_FLAGS_LGSND (1<<9) /* tcp large send enabled */
361 #define THT_TXT_FLAGS_FRAG (1<<10) /* ip fragmentation enabled */
362 #define THT_TXT_FLAGS_CFI (1<<12) /* canonical format indicator */
363 #define THT_TXT_FLAGS_PRIO(_f) ((_f)<<13) /* vlan priority */
364 #define THT_TXT_FLAGS_VLAN(_f) ((_f)<<20) /* vlan id */
365 u_int16_t mss_mtu;
366 u_int16_t len;
367
368 u_int64_t uid;
369
370 /* followed by a pbd list */
371 } __packed;
372 #define THT_TXT_TYPE (3<<16)
373 #define THT_TXT_SGL_LEN ((THT_FIFO_DESC_LEN - \
374 sizeof(struct tht_tx_task)) / \
375 sizeof(struct tht_pbd))
376 #define THT_TXT_PKT_NUM 128
377
378 /* tx free fifo */
379 struct tht_tx_free {
380 u_int32_t status;
381
382 u_int64_t uid;
383
384 u_int32_t pad;
385 } __packed;
386
387 /* pci controller autoconf glue */
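/*
 * thtc attaches to the pci device itself, maps the BAR and interrupt
 * once, and then attaches one tht port instance per td_nports.
 */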
388
389 struct thtc_softc {
390 struct device sc_dev;
391
392 bus_dma_tag_t sc_dmat;
393
394 bus_space_tag_t sc_memt;
395 bus_space_handle_t sc_memh;
396 bus_size_t sc_mems;
397 void *sc_ih;
398 };
399
400 int thtc_match(struct device *, void *, void *);
401 void thtc_attach(struct device *, struct device *, void *);
402 int thtc_print(void *, const char *);
403
404 const struct cfattach thtc_ca = {
405 sizeof(struct thtc_softc), thtc_match, thtc_attach
406 };
407
408 struct cfdriver thtc_cd = {
409 NULL, "thtc", DV_DULL
410 };
411
412 /* glue between the controller and the port */
413
414 struct tht_attach_args {
415 int taa_port;
416
417 struct pci_attach_args *taa_pa;
418 };
419
420 /* tht itself */
421
422 struct tht_dmamem {
423 bus_dmamap_t tdm_map;
424 bus_dma_segment_t tdm_seg;
425 size_t tdm_size;
426 caddr_t tdm_kva;
427 };
428 #define THT_DMA_MAP(_tdm) ((_tdm)->tdm_map)
429 #define THT_DMA_DVA(_tdm) ((_tdm)->tdm_map->dm_segs[0].ds_addr)
430 #define THT_DMA_KVA(_tdm) ((void *)(_tdm)->tdm_kva)
431
432 struct tht_fifo_desc {
433 bus_size_t tfd_cfg0;
434 bus_size_t tfd_cfg1;
435 bus_size_t tfd_rptr;
436 bus_size_t tfd_wptr;
437 u_int32_t tfd_size;
438 int tfd_write;
439 };
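/* fifos the driver fills (tx task, rx free) are synced for writing;
 * fifos the chip fills (rx descriptor, tx free) are synced for reading */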
440 #define THT_FIFO_PRE_SYNC(_d) ((_d)->tfd_write ? \
441 BUS_DMASYNC_PREWRITE : \
442 BUS_DMASYNC_PREREAD)
443 #define THT_FIFO_POST_SYNC(_d) ((_d)->tfd_write ? \
444 BUS_DMASYNC_POSTWRITE : \
445 BUS_DMASYNC_POSTREAD)
446
447 struct tht_fifo {
448 struct tht_fifo_desc *tf_desc;
449 struct tht_dmamem *tf_mem;
450 int tf_len;
451 int tf_rptr;
452 int tf_wptr;
453 int tf_ready;
454 };
455
456 struct tht_pkt {
457 u_int64_t tp_id;
458
459 bus_dmamap_t tp_dmap;
460 struct mbuf *tp_m;
461
462 TAILQ_ENTRY(tht_pkt) tp_link;
463 };
464
465 struct tht_pkt_list {
466 struct tht_pkt *tpl_pkts;
467 TAILQ_HEAD(, tht_pkt) tpl_free;
468 TAILQ_HEAD(, tht_pkt) tpl_used;
469 };
470
471 struct tht_softc {
472 struct device sc_dev;
473 struct thtc_softc *sc_thtc;
474 int sc_port;
475
476 bus_space_handle_t sc_memh;
477
478 struct arpcom sc_ac;
479 struct ifmedia sc_media;
480 struct timeval sc_mediacheck;
481
482 u_int16_t sc_lladdr[3];
483
484 struct tht_pkt_list sc_tx_list;
485 struct tht_pkt_list sc_rx_list;
486
487 struct tht_fifo sc_txt;
488 struct tht_fifo sc_rxf;
489 struct tht_fifo sc_rxd;
490 struct tht_fifo sc_txf;
491
492 u_int32_t sc_imr;
493
494 struct rwlock sc_lock;
495 };
496
497 int tht_match(struct device *, void *, void *);
498 void tht_attach(struct device *, struct device *, void *);
499 void tht_mountroot(struct device *);
500 int tht_intr(void *);
501
502 const struct cfattach tht_ca = {
503 sizeof(struct tht_softc), tht_match, tht_attach
504 };
505
506 struct cfdriver tht_cd = {
507 NULL, "tht", DV_IFNET
508 };
509
510 /* pkts */
511 int tht_pkt_alloc(struct tht_softc *,
512 struct tht_pkt_list *, int, int);
513 void tht_pkt_free(struct tht_softc *,
514 struct tht_pkt_list *);
515 void tht_pkt_put(struct tht_pkt_list *, struct tht_pkt *);
516 struct tht_pkt *tht_pkt_get(struct tht_pkt_list *);
517 struct tht_pkt *tht_pkt_used(struct tht_pkt_list *);
518
519 /* fifos */
520
521 struct tht_fifo_desc tht_txt_desc = {
522 THT_REG_TXT_CFG0(0),
523 THT_REG_TXT_CFG1(0),
524 THT_REG_TXT_RPTR(0),
525 THT_REG_TXT_WPTR(0),
526 THT_FIFO_SIZE_16k,
527 1
528 };
529
530 struct tht_fifo_desc tht_rxf_desc = {
531 THT_REG_RXF_CFG0(0),
532 THT_REG_RXF_CFG1(0),
533 THT_REG_RXF_RPTR(0),
534 THT_REG_RXF_WPTR(0),
535 THT_FIFO_SIZE_16k,
536 1
537 };
538
539 struct tht_fifo_desc tht_rxd_desc = {
540 THT_REG_RXD_CFG0(0),
541 THT_REG_RXD_CFG1(0),
542 THT_REG_RXD_RPTR(0),
543 THT_REG_RXD_WPTR(0),
544 THT_FIFO_SIZE_16k,
545 0
546 };
547
548 struct tht_fifo_desc tht_txf_desc = {
549 THT_REG_TXF_CFG0(0),
550 THT_REG_TXF_CFG1(0),
551 THT_REG_TXF_RPTR(0),
552 THT_REG_TXF_WPTR(0),
553 THT_FIFO_SIZE_4k,
554 0
555 };
556
557 int tht_fifo_alloc(struct tht_softc *, struct tht_fifo *,
558 struct tht_fifo_desc *);
559 void tht_fifo_free(struct tht_softc *, struct tht_fifo *);
560
561 size_t tht_fifo_readable(struct tht_softc *,
562 struct tht_fifo *);
563 size_t tht_fifo_writable(struct tht_softc *,
564 struct tht_fifo *);
565 void tht_fifo_pre(struct tht_softc *,
566 struct tht_fifo *);
567 void tht_fifo_read(struct tht_softc *, struct tht_fifo *,
568 void *, size_t);
569 void tht_fifo_write(struct tht_softc *, struct tht_fifo *,
570 void *, size_t);
571 void tht_fifo_write_dmap(struct tht_softc *,
572 struct tht_fifo *, bus_dmamap_t);
573 void tht_fifo_write_pad(struct tht_softc *,
574 struct tht_fifo *, int);
575 void tht_fifo_post(struct tht_softc *,
576 struct tht_fifo *);
577
578 /* port operations */
579 void tht_lladdr_read(struct tht_softc *);
580 void tht_lladdr_write(struct tht_softc *);
581 int tht_sw_reset(struct tht_softc *);
582 int tht_fw_load(struct tht_softc *);
583 void tht_link_state(struct tht_softc *);
584
585 /* interface operations */
586 int tht_ioctl(struct ifnet *, u_long, caddr_t);
587 void tht_watchdog(struct ifnet *);
588 void tht_start(struct ifnet *);
589 int tht_load_pkt(struct tht_softc *, struct tht_pkt *,
590 struct mbuf *);
591 void tht_txf(struct tht_softc *sc);
592
593 void tht_rxf_fill(struct tht_softc *, int);
594 void tht_rxf_drain(struct tht_softc *);
595 void tht_rxd(struct tht_softc *);
596
597 void tht_up(struct tht_softc *);
598 void tht_iff(struct tht_softc *);
599 void tht_down(struct tht_softc *);
600
601 /* ifmedia operations */
602 int tht_media_change(struct ifnet *);
603 void tht_media_status(struct ifnet *, struct ifmediareq *);
604
605 /* wrapper around dma memory */
606 struct tht_dmamem *tht_dmamem_alloc(struct tht_softc *, bus_size_t,
607 bus_size_t);
608 void tht_dmamem_free(struct tht_softc *,
609 struct tht_dmamem *);
610
611 /* bus space operations */
612 u_int32_t tht_read(struct tht_softc *, bus_size_t);
613 void tht_write(struct tht_softc *, bus_size_t, u_int32_t);
614 void tht_write_region(struct tht_softc *, bus_size_t,
615 void *, size_t);
616 int tht_wait_eq(struct tht_softc *, bus_size_t, u_int32_t,
617 u_int32_t, int);
618 int tht_wait_ne(struct tht_softc *, bus_size_t, u_int32_t,
619 u_int32_t, int);
620
621 #define tht_set(_s, _r, _b) tht_write((_s), (_r), \
622 tht_read((_s), (_r)) | (_b))
623 #define tht_clr(_s, _r, _b) tht_write((_s), (_r), \
624 tht_read((_s), (_r)) & ~(_b))
625 #define tht_wait_set(_s, _r, _b, _t) tht_wait_eq((_s), (_r), \
626 (_b), (_b), (_t))
627
628
629 /* misc */
630 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
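/* LWORDS() rounds a byte count up to a count of 8 byte lwords, which is
 * how descriptor lengths are expressed to the chip */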
631 #define LWORDS(_b) (((_b) + 7) >> 3)
632
633
634 struct thtc_device {
635 pci_vendor_id_t td_vendor;
636 pci_vendor_id_t td_product;
637 u_int td_nports;
638 };
639
640 const struct thtc_device *thtc_lookup(struct pci_attach_args *);
641
642 static const struct thtc_device thtc_devices[] = {
643 { PCI_VENDOR_TEHUTI, PCI_PRODUCT_TEHUTI_TN3009, 1 },
644 { PCI_VENDOR_TEHUTI, PCI_PRODUCT_TEHUTI_TN3010, 1 },
645 { PCI_VENDOR_TEHUTI, PCI_PRODUCT_TEHUTI_TN3014, 2 }
646 };
647
648 const struct thtc_device *
649 thtc_lookup(struct pci_attach_args *pa)
650 {
651 int i;
652 const struct thtc_device *td;
653
654 for (i = 0; i < nitems(thtc_devices); i++) {
655 td = &thtc_devices[i];
656 if (td->td_vendor == PCI_VENDOR(pa->pa_id) &&
657 td->td_product == PCI_PRODUCT(pa->pa_id))
658 return (td);
659 }
660
661 return (NULL);
662 }
663
664 int
665 thtc_match(struct device *parent, void *match, void *aux)
666 {
667 struct pci_attach_args *pa = aux;
668
669 if (thtc_lookup(pa) != NULL)
670 return (1);
671
672 return (0);
673 }
674
675 void
676 thtc_attach(struct device *parent, struct device *self, void *aux)
677 {
678 struct thtc_softc *sc = (struct thtc_softc *)self;
679 struct pci_attach_args *pa = aux;
680 pcireg_t memtype;
681 const struct thtc_device *td;
682 struct tht_attach_args taa;
683 pci_intr_handle_t ih;
684 int i;
685
686 bzero(&taa, sizeof(taa));
687 td = thtc_lookup(pa);
688
689 sc->sc_dmat = pa->pa_dmat;
690
691 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, THT_PCI_BAR);
692 if (pci_mapreg_map(pa, THT_PCI_BAR, memtype, 0, &sc->sc_memt,
693 &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
694 printf(": unable to map host registers\n");
695 return;
696 }
697
698 if (pci_intr_map(pa, &ih) != 0) {
699 printf(": unable to map interrupt\n");
700 goto unmap;
701 }
702
703 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
704 IPL_NET, tht_intr, sc, DEVNAME(sc));
705 if (sc->sc_ih == NULL) {
706 printf(": unable to establish interrupt\n");
707 goto unmap;
708 }
709 printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
710
711 taa.taa_pa = pa;
712 for (i = 0; i < td->td_nports; i++) {
713 taa.taa_port = i;
714
715 config_found(self, &taa, thtc_print);
716 }
717
718 return;
719
720 unmap:
721 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
722 sc->sc_mems = 0;
723 }
724
725 int
726 thtc_print(void *aux, const char *pnp)
727 {
728 struct tht_attach_args *taa = aux;
729
730 if (pnp != NULL)
731 printf("\"%s\" at %s", tht_cd.cd_name, pnp);
732
733 printf(" port %d", taa->taa_port);
734
735 return (UNCONF);
736 }
737
738 int
739 tht_match(struct device *parent, void *match, void *aux)
740 {
741 return (1);
742 }
743
744 void
745 tht_attach(struct device *parent, struct device *self, void *aux)
746 {
747 struct thtc_softc *csc = (struct thtc_softc *)parent;
748 struct tht_softc *sc = (struct tht_softc *)self;
749 struct tht_attach_args *taa = aux;
750 struct ifnet *ifp;
751
752 sc->sc_thtc = csc;
753 sc->sc_port = taa->taa_port;
754 sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
755 rw_init(&sc->sc_lock, "thtioc");
756
757 if (bus_space_subregion(csc->sc_memt, csc->sc_memh,
758 THT_PORT_REGION(sc->sc_port), THT_PORT_SIZE,
759 &sc->sc_memh) != 0) {
760 printf(": unable to map port registers\n");
761 return;
762 }
763
764 if (tht_sw_reset(sc) != 0) {
765 printf(": unable to reset port\n");
766 /* bus_space(9) says we don't have to free subregions */
767 return;
768 }
769
770 tht_lladdr_read(sc);
771 bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
772
773 ifp = &sc->sc_ac.ac_if;
774 ifp->if_softc = sc;
775 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
776 ifp->if_capabilities = IFCAP_VLAN_MTU;
777 ifp->if_ioctl = tht_ioctl;
778 ifp->if_start = tht_start;
779 ifp->if_watchdog = tht_watchdog;
780 ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
781 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
782 ifq_init_maxlen(&ifp->if_snd, 400);
783
784 ifmedia_init(&sc->sc_media, 0, tht_media_change, tht_media_status);
785 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
786 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
787
788 if_attach(ifp);
789 ether_ifattach(ifp);
790
791 printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
792
793 config_mountroot(self, tht_mountroot);
794 }
795
796 void
797 tht_mountroot(struct device *self)
798 {
799 struct tht_softc *sc = (struct tht_softc *)self;
800
801 if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
802 return;
803
804 if (tht_fw_load(sc) != 0)
805 printf("%s: firmware load failed\n", DEVNAME(sc));
806
807 tht_sw_reset(sc);
808
809 tht_fifo_free(sc, &sc->sc_txt);
810
811 tht_link_state(sc);
812 tht_write(sc, THT_REG_IMR, sc->sc_imr);
813 }
814
815 int
816 tht_intr(void *arg)
817 {
818 struct thtc_softc *thtc = arg;
819 struct tht_softc *sc = arg;
820 struct device *d;
821 struct ifnet *ifp;
822 u_int32_t isr;
823 int rv = 0;
824
825 for (d = TAILQ_NEXT(&thtc->sc_dev, dv_list); d != NULL;
826 d = TAILQ_NEXT(d, dv_list)) {
827 sc = (struct tht_softc *)d;
828
829 isr = tht_read(sc, THT_REG_ISR);
830 if (isr == 0x0) {
831 tht_write(sc, THT_REG_IMR, sc->sc_imr);
832 continue;
833 }
834 rv = 1;
835
836 DPRINTF(THT_D_INTR, "%s: isr: 0x%b\n", DEVNAME(sc), isr, THT_FMT_ISR);
837
838 if (ISSET(isr, THT_REG_ISR_LINKCHG(0) | THT_REG_ISR_LINKCHG(1)))
839 tht_link_state(sc);
840
841 ifp = &sc->sc_ac.ac_if;
842 if (ifp->if_flags & IFF_RUNNING) {
843 if (ISSET(isr, THT_REG_ISR_RXD(0)))
844 tht_rxd(sc);
845
846 if (ISSET(isr, THT_REG_ISR_RXF(0)))
847 tht_rxf_fill(sc, 0);
848
849 if (ISSET(isr, THT_REG_ISR_TXF(0)))
850 tht_txf(sc);
851
852 tht_start(ifp);
853 }
854 tht_write(sc, THT_REG_IMR, sc->sc_imr);
855 }
856 return (rv);
857 }
858
859 int
860 tht_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
861 {
862 struct tht_softc *sc = ifp->if_softc;
863 struct ifreq *ifr = (struct ifreq *)addr;
864 int s, error = 0;
865
866 rw_enter_write(&sc->sc_lock);
867 s = splnet();
868
869 switch (cmd) {
870 case SIOCSIFADDR:
871 ifp->if_flags |= IFF_UP;
872 /* FALLTHROUGH */
873
874 case SIOCSIFFLAGS:
875 if (ifp->if_flags & IFF_UP) {
876 if (ifp->if_flags & IFF_RUNNING)
877 error = ENETRESET;
878 else
879 tht_up(sc);
880 } else {
881 if (ifp->if_flags & IFF_RUNNING)
882 tht_down(sc);
883 }
884 break;
885
886 case SIOCGIFMEDIA:
887 case SIOCSIFMEDIA:
888 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
889 break;
890
891 default:
892 error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
893 }
894
895 if (error == ENETRESET) {
896 if (ifp->if_flags & IFF_RUNNING)
897 tht_iff(sc);
898 error = 0;
899 }
900
901 splx(s);
902 rw_exit_write(&sc->sc_lock);
903
904 return (error);
905 }
906
907 void
908 tht_up(struct tht_softc *sc)
909 {
910 struct ifnet *ifp = &sc->sc_ac.ac_if;
911
912 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
913 return;
914 }
915
916 if (tht_pkt_alloc(sc, &sc->sc_tx_list, THT_TXT_PKT_NUM,
917 THT_TXT_SGL_LEN) != 0)
918 return;
919 if (tht_pkt_alloc(sc, &sc->sc_rx_list, THT_RXF_PKT_NUM,
920 THT_RXF_SGL_LEN) != 0)
921 goto free_tx_list;
922
923 if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
924 goto free_rx_list;
925 if (tht_fifo_alloc(sc, &sc->sc_rxf, &tht_rxf_desc) != 0)
926 goto free_txt;
927 if (tht_fifo_alloc(sc, &sc->sc_rxd, &tht_rxd_desc) != 0)
928 goto free_rxf;
929 if (tht_fifo_alloc(sc, &sc->sc_txf, &tht_txf_desc) != 0)
930 goto free_rxd;
931
932 tht_write(sc, THT_REG_10G_FRM_LEN, MCLBYTES - ETHER_ALIGN);
933 tht_write(sc, THT_REG_10G_PAUSE, 0x96);
934 tht_write(sc, THT_REG_10G_RX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
935 THT_REG_10G_SEC_EMPTY(0x80));
936 tht_write(sc, THT_REG_10G_TX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
937 THT_REG_10G_SEC_EMPTY(0xe0));
938 tht_write(sc, THT_REG_10G_RFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
939 THT_REG_10G_FIFO_AF(0x0));
940 tht_write(sc, THT_REG_10G_TFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
941 THT_REG_10G_FIFO_AF(0x0));
942 tht_write(sc, THT_REG_10G_CTL, THT_REG_10G_CTL_TX_EN |
943 THT_REG_10G_CTL_RX_EN | THT_REG_10G_CTL_PAD |
944 THT_REG_10G_CTL_PROMISC);
945
946 tht_write(sc, THT_REG_VGLB, 0);
947
948 tht_write(sc, THT_REG_RX_MAX_FRAME, MCLBYTES - ETHER_ALIGN);
949
950 tht_write(sc, THT_REG_RDINTCM(0), THT_REG_RDINTCM_PKT_TH(12) |
951 THT_REG_RDINTCM_RXF_TH(4) | THT_REG_RDINTCM_COAL_RC |
952 THT_REG_RDINTCM_COAL(0x20));
953 tht_write(sc, THT_REG_TDINTCM(0), THT_REG_TDINTCM_PKT_TH(12) |
954 THT_REG_TDINTCM_COAL_RC | THT_REG_TDINTCM_COAL(0x20));
955
956 bcopy(sc->sc_ac.ac_enaddr, sc->sc_lladdr, ETHER_ADDR_LEN);
957 tht_lladdr_write(sc);
958
959 /* populate rxf fifo */
960 tht_rxf_fill(sc, 1);
961
962 /* program promiscuous mode and multicast filters */
963 tht_iff(sc);
964
965 ifp->if_flags |= IFF_RUNNING;
966 ifq_clr_oactive(&ifp->if_snd);
967
968 /* enable interrupts */
969 sc->sc_imr = THT_IMR_UP(sc->sc_port);
970 tht_write(sc, THT_REG_IMR, sc->sc_imr);
971
972 return;
973
974 free_rxd:
975 tht_fifo_free(sc, &sc->sc_rxd);
976 free_rxf:
977 tht_fifo_free(sc, &sc->sc_rxf);
978 free_txt:
979 tht_fifo_free(sc, &sc->sc_txt);
980
981 tht_sw_reset(sc);
982
983 free_rx_list:
984 tht_pkt_free(sc, &sc->sc_rx_list);
985 free_tx_list:
986 tht_pkt_free(sc, &sc->sc_tx_list);
987 }
988
989 void
990 tht_iff(struct tht_softc *sc)
991 {
992 struct ifnet *ifp = &sc->sc_ac.ac_if;
993 struct ether_multi *enm;
994 struct ether_multistep step;
995 u_int32_t rxf;
996 u_int8_t imf[THT_REG_RX_MCST_HASH_SIZE];
997 u_int8_t hash;
998 int i;
999
1000 ifp->if_flags &= ~IFF_ALLMULTI;
1001
1002 rxf = THT_REG_RX_FLT_OSEN | THT_REG_RX_FLT_AM | THT_REG_RX_FLT_AB;
1003 for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
1004 tht_write(sc, THT_REG_RX_MAC_MCST0(i), 0);
1005 tht_write(sc, THT_REG_RX_MAC_MCST1(i), 0);
1006 }
1007 memset(imf, 0x00, sizeof(imf));
1008
1009 if (ifp->if_flags & IFF_PROMISC) {
1010 ifp->if_flags |= IFF_ALLMULTI;
1011 rxf |= THT_REG_RX_FLT_PRM_ALL;
1012 } else if (sc->sc_ac.ac_multirangecnt > 0) {
1013 ifp->if_flags |= IFF_ALLMULTI;
1014 memset(imf, 0xff, sizeof(imf));
1015 } else {
1016 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1017
1018 #if 0
1019 /* fill the perfect multicast filters */
1020 for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
1021 if (enm == NULL)
1022 break;
1023
1024 tht_write(sc, THT_REG_RX_MAC_MCST0(i),
1025 (enm->enm_addrlo[0] << 0) |
1026 (enm->enm_addrlo[1] << 8) |
1027 (enm->enm_addrlo[2] << 16) |
1028 (enm->enm_addrlo[3] << 24));
1029 tht_write(sc, THT_REG_RX_MAC_MCST1(i),
1030 (enm->enm_addrlo[4] << 0) |
1031 (enm->enm_addrlo[5] << 8));
1032
1033 ETHER_NEXT_MULTI(step, enm);
1034 }
1035 #endif
1036
1037 /* fill the imperfect multicast filter with what's left */
1038 while (enm != NULL) {
1039 hash = 0x00;
1040 for (i = 0; i < ETHER_ADDR_LEN; i++)
1041 hash ^= enm->enm_addrlo[i];
1042 setbit(imf, hash);
1043
1044 ETHER_NEXT_MULTI(step, enm);
1045 }
1046 }
1047
1048 tht_write_region(sc, THT_REG_RX_MCST_HASH, imf, sizeof(imf));
1049 tht_write(sc, THT_REG_RX_FLT, rxf);
1050 }
1051
1052 void
1053 tht_down(struct tht_softc *sc)
1054 {
1055 struct ifnet *ifp = &sc->sc_ac.ac_if;
1056
1057 if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
1058 return;
1059 }
1060
1061 ifp->if_flags &= ~(IFF_RUNNING | IFF_ALLMULTI);
1062 ifq_clr_oactive(&ifp->if_snd);
1063
1064 while (tht_fifo_writable(sc, &sc->sc_txt) < sc->sc_txt.tf_len &&
1065 tht_fifo_readable(sc, &sc->sc_txf) > 0)
1066 tsleep_nsec(sc, 0, "thtdown", SEC_TO_NSEC(1));
1067
1068 sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
1069 tht_write(sc, THT_REG_IMR, sc->sc_imr);
1070
1071 tht_sw_reset(sc);
1072
1073 tht_fifo_free(sc, &sc->sc_txf);
1074 tht_fifo_free(sc, &sc->sc_rxd);
1075 tht_fifo_free(sc, &sc->sc_rxf);
1076 tht_fifo_free(sc, &sc->sc_txt);
1077
1078 /* free mbufs that were on the rxf fifo */
1079 tht_rxf_drain(sc);
1080
1081 tht_pkt_free(sc, &sc->sc_rx_list);
1082 tht_pkt_free(sc, &sc->sc_tx_list);
1083 }
1084
1085 void
1086 tht_start(struct ifnet *ifp)
1087 {
1088 struct tht_softc *sc = ifp->if_softc;
1089 struct tht_pkt *pkt;
1090 struct tht_tx_task txt;
1091 u_int32_t flags;
1092 struct mbuf *m;
1093 int bc;
1094
1095 if (!(ifp->if_flags & IFF_RUNNING))
1096 return;
1097 if (ifq_is_oactive(&ifp->if_snd))
1098 return;
1099 if (ifq_empty(&ifp->if_snd))
1100 return;
1101
1102 if (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_DESC_LEN)
1103 return;
1104
1105 bzero(&txt, sizeof(txt));
1106
1107 tht_fifo_pre(sc, &sc->sc_txt);
1108
1109 do {
1110 m = ifq_deq_begin(&ifp->if_snd);
1111 if (m == NULL)
1112 break;
1113
1114 pkt = tht_pkt_get(&sc->sc_tx_list);
1115 if (pkt == NULL) {
1116 ifq_deq_rollback(&ifp->if_snd, m);
1117 ifq_set_oactive(&ifp->if_snd);
1118 break;
1119 }
1120
1121 ifq_deq_commit(&ifp->if_snd, m);
1122 if (tht_load_pkt(sc, pkt, m) != 0) {
1123 m_freem(m);
1124 tht_pkt_put(&sc->sc_tx_list, pkt);
1125 ifp->if_oerrors++;
1126 break;
1127 }
1128 /* thou shalt not use m after this point, only pkt->tp_m */
1129
1130 #if NBPFILTER > 0
1131 if (ifp->if_bpf)
1132 bpf_mtap(ifp->if_bpf, pkt->tp_m, BPF_DIRECTION_OUT);
1133 #endif
1134
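		/* the descriptor is the fixed task header plus one pbd
		 * per dma segment, padded to an lword boundary below */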
1135 bc = sizeof(txt) +
1136 sizeof(struct tht_pbd) * pkt->tp_dmap->dm_nsegs;
1137
1138 flags = THT_TXT_TYPE | LWORDS(bc);
1139 txt.flags = htole32(flags);
1140 txt.len = htole16(pkt->tp_m->m_pkthdr.len);
1141 txt.uid = pkt->tp_id;
1142
1143 DPRINTF(THT_D_TX, "%s: txt uid 0x%llx flags 0x%08x len %d\n",
1144 DEVNAME(sc), pkt->tp_id, flags, pkt->tp_m->m_pkthdr.len);
1145
1146 tht_fifo_write(sc, &sc->sc_txt, &txt, sizeof(txt));
1147 tht_fifo_write_dmap(sc, &sc->sc_txt, pkt->tp_dmap);
1148 tht_fifo_write_pad(sc, &sc->sc_txt, bc);
1149
1150 bus_dmamap_sync(sc->sc_thtc->sc_dmat, pkt->tp_dmap, 0,
1151 pkt->tp_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1152
1153 } while (sc->sc_txt.tf_ready > THT_FIFO_DESC_LEN);
1154
1155 tht_fifo_post(sc, &sc->sc_txt);
1156 }
1157
1158 int
1159 tht_load_pkt(struct tht_softc *sc, struct tht_pkt *pkt, struct mbuf *m)
1160 {
1161 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1162 bus_dmamap_t dmap = pkt->tp_dmap;
1163 struct mbuf *m0 = NULL;
1164
1165 switch(bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) {
1166 case 0:
1167 pkt->tp_m = m;
1168 break;
1169
1170 case EFBIG: /* mbuf chain is too fragmented */
1171 MGETHDR(m0, M_DONTWAIT, MT_DATA);
1172 if (m0 == NULL)
1173 return (ENOBUFS);
1174 if (m->m_pkthdr.len > MHLEN) {
1175 MCLGET(m0, M_DONTWAIT);
1176 if (!(m0->m_flags & M_EXT)) {
1177 m_freem(m0);
1178 return (ENOBUFS);
1179 }
1180 }
1181 m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
1182 m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
1183 if (bus_dmamap_load_mbuf(dmat, dmap, m0, BUS_DMA_NOWAIT)) {
1184 m_freem(m0);
1185 return (ENOBUFS);
1186 }
1187
1188 m_freem(m);
1189 pkt->tp_m = m0;
1190 break;
1191
1192 default:
1193 return (ENOBUFS);
1194 }
1195
1196 return (0);
1197 }
1198
1199 void
1200 tht_txf(struct tht_softc *sc)
1201 {
1202 struct ifnet *ifp = &sc->sc_ac.ac_if;
1203 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1204 bus_dmamap_t dmap;
1205 struct tht_tx_free txf;
1206 struct tht_pkt *pkt;
1207
1208 if (tht_fifo_readable(sc, &sc->sc_txf) < sizeof(txf))
1209 return;
1210
1211 tht_fifo_pre(sc, &sc->sc_txf);
1212
1213 do {
1214 tht_fifo_read(sc, &sc->sc_txf, &txf, sizeof(txf));
1215
1216 DPRINTF(THT_D_TX, "%s: txf uid 0x%llx\n", DEVNAME(sc), txf.uid);
1217
1218 pkt = &sc->sc_tx_list.tpl_pkts[txf.uid];
1219 dmap = pkt->tp_dmap;
1220
1221 bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1222 BUS_DMASYNC_POSTWRITE);
1223 bus_dmamap_unload(dmat, dmap);
1224
1225 m_freem(pkt->tp_m);
1226
1227 tht_pkt_put(&sc->sc_tx_list, pkt);
1228
1229 } while (sc->sc_txf.tf_ready >= sizeof(txf));
1230
1231 ifq_clr_oactive(&ifp->if_snd);
1232
1233 tht_fifo_post(sc, &sc->sc_txf);
1234 }
1235
1236 void
1237 tht_rxf_fill(struct tht_softc *sc, int wait)
1238 {
1239 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1240 bus_dmamap_t dmap;
1241 struct tht_rx_free rxf;
1242 struct tht_pkt *pkt;
1243 struct mbuf *m;
1244 int bc;
1245
1246 if (tht_fifo_writable(sc, &sc->sc_rxf) <= THT_FIFO_DESC_LEN)
1247 return;
1248
1249 tht_fifo_pre(sc, &sc->sc_rxf);
1250
1251 for (;;) {
1252 if ((pkt = tht_pkt_get(&sc->sc_rx_list)) == NULL)
1253 goto done;
1254
1255 MGETHDR(m, wait ? M_WAIT : M_DONTWAIT, MT_DATA);
1256 if (m == NULL)
1257 goto put_pkt;
1258
1259 MCLGET(m, wait ? M_WAIT : M_DONTWAIT);
1260 if (!ISSET(m->m_flags, M_EXT))
1261 goto free_m;
1262
1263 m->m_data += ETHER_ALIGN;
1264 m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
1265
1266 dmap = pkt->tp_dmap;
1267 if (bus_dmamap_load_mbuf(dmat, dmap, m,
1268 wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0)
1269 goto free_m;
1270
1271 pkt->tp_m = m;
1272
1273 bc = sizeof(rxf) + sizeof(struct tht_pbd) * dmap->dm_nsegs;
1274
1275 rxf.bc = htole16(LWORDS(bc));
1276 rxf.type = htole16(THT_RXF_TYPE);
1277 rxf.uid = pkt->tp_id;
1278
1279 tht_fifo_write(sc, &sc->sc_rxf, &rxf, sizeof(rxf));
1280 tht_fifo_write_dmap(sc, &sc->sc_rxf, dmap);
1281 tht_fifo_write_pad(sc, &sc->sc_rxf, bc);
1282
1283 bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1284 BUS_DMASYNC_PREREAD);
1285
1286 if (sc->sc_rxf.tf_ready <= THT_FIFO_DESC_LEN)
1287 goto done;
1288 }
1289
1290 free_m:
1291 m_freem(m);
1292 put_pkt:
1293 tht_pkt_put(&sc->sc_rx_list, pkt);
1294 done:
1295 tht_fifo_post(sc, &sc->sc_rxf);
1296 }
1297
1298 void
1299 tht_rxf_drain(struct tht_softc *sc)
1300 {
1301 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1302 bus_dmamap_t dmap;
1303 struct tht_pkt *pkt;
1304
1305 while ((pkt = tht_pkt_used(&sc->sc_rx_list)) != NULL) {
1306 dmap = pkt->tp_dmap;
1307
1308 bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1309 BUS_DMASYNC_POSTREAD);
1310 bus_dmamap_unload(dmat, dmap);
1311
1312 m_freem(pkt->tp_m);
1313
1314 tht_pkt_put(&sc->sc_rx_list, pkt);
1315 }
1316 }
1317
1318 void
1319 tht_rxd(struct tht_softc *sc)
1320 {
1321 struct ifnet *ifp = &sc->sc_ac.ac_if;
1322 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1323 bus_dmamap_t dmap;
1324 struct tht_rx_desc rxd;
1325 struct tht_pkt *pkt;
1326 struct mbuf *m;
1327 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1328 int bc;
1329 u_int32_t flags;
1330
1331 if (tht_fifo_readable(sc, &sc->sc_rxd) < sizeof(rxd))
1332 return;
1333
1334 tht_fifo_pre(sc, &sc->sc_rxd);
1335
1336 do {
1337 tht_fifo_read(sc, &sc->sc_rxd, &rxd, sizeof(rxd));
1338
1339 flags = letoh32(rxd.flags);
1340 bc = THT_RXD_FLAGS_BC(flags) * 8;
1341 bc -= sizeof(rxd);
1342 pkt = &sc->sc_rx_list.tpl_pkts[rxd.uid];
1343
1344 dmap = pkt->tp_dmap;
1345
1346 bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1347 BUS_DMASYNC_POSTREAD);
1348 bus_dmamap_unload(dmat, dmap);
1349
1350 m = pkt->tp_m;
1351 m->m_pkthdr.len = m->m_len = letoh16(rxd.len);
1352
1353 /* XXX process type 3 rx descriptors */
1354
1355 ml_enqueue(&ml, m);
1356
1357 tht_pkt_put(&sc->sc_rx_list, pkt);
1358
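		/* drain the remainder of the descriptor (e.g. type 3/4
		 * extension info) which isn't parsed yet */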
1359 while (bc > 0) {
1360 static u_int32_t pad;
1361
1362 tht_fifo_read(sc, &sc->sc_rxd, &pad, sizeof(pad));
1363 bc -= sizeof(pad);
1364 }
1365 } while (sc->sc_rxd.tf_ready >= sizeof(rxd));
1366
1367 tht_fifo_post(sc, &sc->sc_rxd);
1368
1369 if_input(ifp, &ml);
1370
1371 /* put more pkts on the fifo */
1372 tht_rxf_fill(sc, 0);
1373 }
1374
1375 void
1376 tht_watchdog(struct ifnet *ifp)
1377 {
1378 /* do nothing */
1379 }
1380
1381 int
1382 tht_media_change(struct ifnet *ifp)
1383 {
1384 /* ignore */
1385 return (0);
1386 }
1387
1388 void
1389 tht_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1390 {
1391 struct tht_softc *sc = ifp->if_softc;
1392
1393 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1394 imr->ifm_status = IFM_AVALID;
1395
1396 tht_link_state(sc);
1397
1398 if (LINK_STATE_IS_UP(ifp->if_link_state))
1399 imr->ifm_status |= IFM_ACTIVE;
1400 }
1401
1402 int
1403 tht_fifo_alloc(struct tht_softc *sc, struct tht_fifo *tf,
1404 struct tht_fifo_desc *tfd)
1405 {
1406 u_int64_t dva;
1407
1408 tf->tf_len = THT_FIFO_SIZE(tfd->tfd_size);
1409 tf->tf_mem = tht_dmamem_alloc(sc, tf->tf_len, THT_FIFO_ALIGN);
1410 if (tf->tf_mem == NULL)
1411 return (1);
1412
1413 tf->tf_desc = tfd;
1414 tf->tf_rptr = tf->tf_wptr = 0;
1415
1416 bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1417 0, tf->tf_len, THT_FIFO_PRE_SYNC(tfd));
1418
1419 dva = THT_DMA_DVA(tf->tf_mem);
1420 tht_write(sc, tfd->tfd_cfg0, (u_int32_t)dva | tfd->tfd_size);
1421 tht_write(sc, tfd->tfd_cfg1, (u_int32_t)(dva >> 32));
1422
1423 return (0);
1424 }
1425
1426 void
1427 tht_fifo_free(struct tht_softc *sc, struct tht_fifo *tf)
1428 {
1429 bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1430 0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
1431 tht_dmamem_free(sc, tf->tf_mem);
1432 }
1433
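/*
 * tht_fifo_readable/tht_fifo_writable refresh the hardware pointer and
 * return how many bytes can be consumed or produced before the software
 * pointer would catch up to it, wrapping at tf_len.
 */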
1434 size_t
1435 tht_fifo_readable(struct tht_softc *sc, struct tht_fifo *tf)
1436 {
1437 tf->tf_wptr = tht_read(sc, tf->tf_desc->tfd_wptr);
1438 tf->tf_wptr &= THT_FIFO_PTR_MASK;
1439 tf->tf_ready = tf->tf_wptr - tf->tf_rptr;
1440 if (tf->tf_ready < 0)
1441 tf->tf_ready += tf->tf_len;
1442
1443 DPRINTF(THT_D_FIFO, "%s: fifo rdable wptr: %d rptr: %d ready: %d\n",
1444 DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1445
1446 return (tf->tf_ready);
1447 }
1448
1449 size_t
1450 tht_fifo_writable(struct tht_softc *sc, struct tht_fifo *tf)
1451 {
1452 tf->tf_rptr = tht_read(sc, tf->tf_desc->tfd_rptr);
1453 tf->tf_rptr &= THT_FIFO_PTR_MASK;
1454 tf->tf_ready = tf->tf_rptr - tf->tf_wptr;
1455 if (tf->tf_ready <= 0)
1456 tf->tf_ready += tf->tf_len;
1457
1458 DPRINTF(THT_D_FIFO, "%s: fifo wrable wptr: %d rptr: %d ready: %d\n",
1459 DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1460
1461 return (tf->tf_ready);
1462 }
1463
1464 void
1465 tht_fifo_pre(struct tht_softc *sc, struct tht_fifo *tf)
1466 {
1467 bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1468 0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
1469 }
1470
1471 void
1472 tht_fifo_read(struct tht_softc *sc, struct tht_fifo *tf,
1473 void *buf, size_t buflen)
1474 {
1475 u_int8_t *fifo = THT_DMA_KVA(tf->tf_mem);
1476 u_int8_t *desc = buf;
1477 size_t len;
1478
1479 tf->tf_ready -= buflen;
1480
1481 len = tf->tf_len - tf->tf_rptr;
1482
1483 if (len < buflen) {
1484 memcpy(desc, fifo + tf->tf_rptr, len);
1485
1486 buflen -= len;
1487 desc += len;
1488
1489 tf->tf_rptr = 0;
1490 }
1491
1492 memcpy(desc, fifo + tf->tf_rptr, buflen);
1493 tf->tf_rptr += buflen;
1494
1495 DPRINTF(THT_D_FIFO, "%s: fifo rd wptr: %d rptr: %d ready: %d\n",
1496 DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1497 }
1498
1499 void
1500 tht_fifo_write(struct tht_softc *sc, struct tht_fifo *tf,
1501 void *buf, size_t buflen)
1502 {
1503 u_int8_t *fifo = THT_DMA_KVA(tf->tf_mem);
1504 u_int8_t *desc = buf;
1505 size_t len;
1506
1507 tf->tf_ready -= buflen;
1508
1509 len = tf->tf_len - tf->tf_wptr;
1510
1511 if (len < buflen) {
1512 memcpy(fifo + tf->tf_wptr, desc, len);
1513
1514 buflen -= len;
1515 desc += len;
1516
1517 tf->tf_wptr = 0;
1518 }
1519
1520 memcpy(fifo + tf->tf_wptr, desc, buflen);
1521 tf->tf_wptr += buflen;
1522 tf->tf_wptr %= tf->tf_len;
1523
1524 DPRINTF(THT_D_FIFO, "%s: fifo wr wptr: %d rptr: %d ready: %d\n",
1525 DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1526 }
1527
1528 void
1529 tht_fifo_write_dmap(struct tht_softc *sc, struct tht_fifo *tf,
1530 bus_dmamap_t dmap)
1531 {
1532 struct tht_pbd pbd;
1533 u_int64_t dva;
1534 int i;
1535
1536 for (i = 0; i < dmap->dm_nsegs; i++) {
1537 dva = dmap->dm_segs[i].ds_addr;
1538
1539 pbd.addr_lo = htole32(dva);
1540 pbd.addr_hi = htole32(dva >> 32);
1541 pbd.len = htole32(dmap->dm_segs[i].ds_len);
1542
1543 tht_fifo_write(sc, tf, &pbd, sizeof(pbd));
1544 }
1545 }
1546
1547 void
1548 tht_fifo_write_pad(struct tht_softc *sc, struct tht_fifo *tf, int bc)
1549 {
1550 static const u_int32_t pad = 0x0;
1551
1552 /* this assumes writes are multiples of 4 bytes; pad short writes out to the next 8 byte lword boundary */
1553 if (bc % 8)
1554 tht_fifo_write(sc, tf, (void *)&pad, sizeof(pad));
1555 }
1556
1557 void
1558 tht_fifo_post(struct tht_softc *sc, struct tht_fifo *tf)
1559 {
1560 bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1561 0, tf->tf_len, THT_FIFO_PRE_SYNC(tf->tf_desc));
1562 if (tf->tf_desc->tfd_write)
1563 tht_write(sc, tf->tf_desc->tfd_wptr, tf->tf_wptr);
1564 else
1565 tht_write(sc, tf->tf_desc->tfd_rptr, tf->tf_rptr);
1566
1567 DPRINTF(THT_D_FIFO, "%s: fifo post wptr: %d rptr: %d\n", DEVNAME(sc),
1568 tf->tf_wptr, tf->tf_rptr);
1569 }
1570
1571 static const bus_size_t tht_mac_regs[3] = {
1572 THT_REG_RX_UNC_MAC2, THT_REG_RX_UNC_MAC1, THT_REG_RX_UNC_MAC0
1573 };
1574
1575 void
1576 tht_lladdr_read(struct tht_softc *sc)
1577 {
1578 int i;
1579
1580 for (i = 0; i < nitems(tht_mac_regs); i++)
1581 sc->sc_lladdr[i] = betoh16(tht_read(sc, tht_mac_regs[i]));
1582 }
1583
1584 void
1585 tht_lladdr_write(struct tht_softc *sc)
1586 {
1587 int i;
1588
1589 for (i = 0; i < nitems(tht_mac_regs); i++)
1590 tht_write(sc, tht_mac_regs[i], htobe16(sc->sc_lladdr[i]));
1591 }
1592
1593 #define tht_swrst_set(_s, _r) tht_write((_s), (_r), 0x1)
1594 #define tht_swrst_clr(_s, _r) tht_write((_s), (_r), 0x0)
1595 int
1596 tht_sw_reset(struct tht_softc *sc)
1597 {
1598 int i;
1599
1600 /* this follows SW Reset process in 8.8 of the doco */
1601
1602 /* 1. disable rx */
1603 tht_clr(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);
1604
1605 /* 2. initiate port disable */
1606 tht_swrst_set(sc, THT_REG_DIS_PRT);
1607
1608 /* 3. initiate queue disable */
1609 tht_swrst_set(sc, THT_REG_DIS_QU_0);
1610 tht_swrst_set(sc, THT_REG_DIS_QU_1);
1611
1612 /* 4. wait for successful finish of previous tasks */
1613 if (!tht_wait_set(sc, THT_REG_RST_PRT, THT_REG_RST_PRT_ACTIVE, 1000))
1614 return (1);
1615
1616 /* 5. Reset interrupt registers */
1617 tht_write(sc, THT_REG_IMR, 0x0); /* 5.a */
1618 tht_read(sc, THT_REG_ISR); /* 5.b */
1619 for (i = 0; i < THT_NQUEUES; i++) {
1620 tht_write(sc, THT_REG_RDINTCM(i), 0x0); /* 5.c/5.d */
1621 tht_write(sc, THT_REG_TDINTCM(i), 0x0); /* 5.e */
1622 }
1623
1624 /* 6. initiate queue reset */
1625 tht_swrst_set(sc, THT_REG_RST_QU_0);
1626 tht_swrst_set(sc, THT_REG_RST_QU_1);
1627
1628 /* 7. initiate port reset */
1629 tht_swrst_set(sc, THT_REG_RST_PRT);
1630
1631 /* 8. clear txt/rxf/rxd/txf read and write ptrs */
1632 for (i = 0; i < THT_NQUEUES; i++) {
1633 tht_write(sc, THT_REG_TXT_RPTR(i), 0);
1634 tht_write(sc, THT_REG_RXF_RPTR(i), 0);
1635 tht_write(sc, THT_REG_RXD_RPTR(i), 0);
1636 tht_write(sc, THT_REG_TXF_RPTR(i), 0);
1637
1638 tht_write(sc, THT_REG_TXT_WPTR(i), 0);
1639 tht_write(sc, THT_REG_RXF_WPTR(i), 0);
1640 tht_write(sc, THT_REG_RXD_WPTR(i), 0);
1641 tht_write(sc, THT_REG_TXF_WPTR(i), 0);
1642 }
1643
1644 /* 9. unset port disable */
1645 tht_swrst_clr(sc, THT_REG_DIS_PRT);
1646
1647 /* 10. unset queue disable */
1648 tht_swrst_clr(sc, THT_REG_DIS_QU_0);
1649 tht_swrst_clr(sc, THT_REG_DIS_QU_1);
1650
1651 /* 11. unset queue reset */
1652 tht_swrst_clr(sc, THT_REG_RST_QU_0);
1653 tht_swrst_clr(sc, THT_REG_RST_QU_1);
1654
1655 /* 12. unset port reset */
1656 tht_swrst_clr(sc, THT_REG_RST_PRT);
1657
1658 /* 13. enable rx */
1659 tht_set(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);
1660
1661 return (0);
1662 }
1663
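/*
 * the firmware image is streamed to the chip through the tx task fifo in
 * chunks sized to the writable space, then THT_REG_INIT_STATUS is polled
 * until it goes non-zero.
 */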
1664 int
1665 tht_fw_load(struct tht_softc *sc)
1666 {
1667 u_int8_t *fw, *buf;
1668 size_t fwlen, wrlen;
1669 int error = 1, msecs, ret;
1670
1671 if (loadfirmware("tht", &fw, &fwlen) != 0)
1672 return (1);
1673
1674 if ((fwlen % 8) != 0)
1675 goto err;
1676
1677 buf = fw;
1678 while (fwlen > 0) {
1679 while (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_GAP) {
1680 ret = tsleep_nsec(sc, PCATCH, "thtfw",
1681 MSEC_TO_NSEC(10));
1682 if (ret == EINTR)
1683 goto err;
1684 }
1685
1686 wrlen = MIN(sc->sc_txt.tf_ready - THT_FIFO_GAP, fwlen);
1687 tht_fifo_pre(sc, &sc->sc_txt);
1688 tht_fifo_write(sc, &sc->sc_txt, buf, wrlen);
1689 tht_fifo_post(sc, &sc->sc_txt);
1690
1691 fwlen -= wrlen;
1692 buf += wrlen;
1693 }
1694
1695 for (msecs = 0; msecs < 2000; msecs += 10) {
1696 if (tht_read(sc, THT_REG_INIT_STATUS) != 0) {
1697 error = 0;
1698 break;
1699 }
1700 ret = tsleep_nsec(sc, PCATCH, "thtinit", MSEC_TO_NSEC(10));
1701 if (ret == EINTR)
1702 goto err;
1703 }
1704
1705 tht_write(sc, THT_REG_INIT_SEMAPHORE, 0x1);
1706
1707 err:
1708 free(fw, M_DEVBUF, fwlen);
1709 return (error);
1710 }
1711
1712 void
1713 tht_link_state(struct tht_softc *sc)
1714 {
1715 static const struct timeval interval = { 0, 10000 };
1716 struct ifnet *ifp = &sc->sc_ac.ac_if;
1717 int link_state = LINK_STATE_DOWN;
1718
1719 if (!ratecheck(&sc->sc_mediacheck, &interval))
1720 return;
1721
1722 if (tht_read(sc, THT_REG_MAC_LNK_STAT) & THT_REG_MAC_LNK_STAT_LINK)
1723 link_state = LINK_STATE_FULL_DUPLEX;
1724
1725 if (ifp->if_link_state != link_state) {
1726 ifp->if_link_state = link_state;
1727 if_link_state_change(ifp);
1728 }
1729
1730 if (LINK_STATE_IS_UP(ifp->if_link_state))
1731 ifp->if_baudrate = IF_Gbps(10);
1732 else
1733 ifp->if_baudrate = 0;
1734 }
1735
1736 u_int32_t
1737 tht_read(struct tht_softc *sc, bus_size_t r)
1738 {
1739 bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
1740 BUS_SPACE_BARRIER_READ);
1741 return (bus_space_read_4(sc->sc_thtc->sc_memt, sc->sc_memh, r));
1742 }
1743
1744 void
1745 tht_write(struct tht_softc *sc, bus_size_t r, u_int32_t v)
1746 {
1747 bus_space_write_4(sc->sc_thtc->sc_memt, sc->sc_memh, r, v);
1748 bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
1749 BUS_SPACE_BARRIER_WRITE);
1750 }
1751
1752 void
1753 tht_write_region(struct tht_softc *sc, bus_size_t r, void *buf, size_t len)
1754 {
1755 bus_space_write_raw_region_4(sc->sc_thtc->sc_memt, sc->sc_memh, r,
1756 buf, len);
1757 bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, len,
1758 BUS_SPACE_BARRIER_WRITE);
1759 }
1760
1761 int
1762 tht_wait_eq(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1763 int timeout)
1764 {
1765 while ((tht_read(sc, r) & m) != v) {
1766 if (timeout == 0)
1767 return (0);
1768
1769 delay(1000);
1770 timeout--;
1771 }
1772
1773 return (1);
1774 }
1775
1776 int
1777 tht_wait_ne(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1778 int timeout)
1779 {
1780 while ((tht_read(sc, r) & m) == v) {
1781 if (timeout == 0)
1782 return (0);
1783
1784 delay(1000);
1785 timeout--;
1786 }
1787
1788 return (1);
1789 }
1790
1791 struct tht_dmamem *
1792 tht_dmamem_alloc(struct tht_softc *sc, bus_size_t size, bus_size_t align)
1793 {
1794 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1795 struct tht_dmamem *tdm;
1796 int nsegs;
1797
1798 tdm = malloc(sizeof(struct tht_dmamem), M_DEVBUF, M_WAITOK | M_ZERO);
1799 tdm->tdm_size = size;
1800
1801 if (bus_dmamap_create(dmat, size, 1, size, 0,
1802 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
1803 goto tdmfree;
1804
1805 if (bus_dmamem_alloc(dmat, size, align, 0, &tdm->tdm_seg, 1, &nsegs,
1806 BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
1807 goto destroy;
1808
1809 if (bus_dmamem_map(dmat, &tdm->tdm_seg, nsegs, size, &tdm->tdm_kva,
1810 BUS_DMA_WAITOK) != 0)
1811 goto free;
1812
1813 if (bus_dmamap_load(dmat, tdm->tdm_map, tdm->tdm_kva, size,
1814 NULL, BUS_DMA_WAITOK) != 0)
1815 goto unmap;
1816
1817 return (tdm);
1818
1819 unmap:
1820 bus_dmamem_unmap(dmat, tdm->tdm_kva, size);
1821 free:
1822 bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
1823 destroy:
1824 bus_dmamap_destroy(dmat, tdm->tdm_map);
1825 tdmfree:
1826 free(tdm, M_DEVBUF, 0);
1827
1828 return (NULL);
1829 }
1830
1831 void
1832 tht_dmamem_free(struct tht_softc *sc, struct tht_dmamem *tdm)
1833 {
1834 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1835
1836 bus_dmamap_unload(dmat, tdm->tdm_map);
1837 bus_dmamem_unmap(dmat, tdm->tdm_kva, tdm->tdm_size);
1838 bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
1839 bus_dmamap_destroy(dmat, tdm->tdm_map);
1840 free(tdm, M_DEVBUF, 0);
1841 }
1842
1843 int
1844 tht_pkt_alloc(struct tht_softc *sc, struct tht_pkt_list *tpl, int npkts,
1845 int nsegs)
1846 {
1847 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1848 struct tht_pkt *pkt;
1849 int i;
1850
1851 tpl->tpl_pkts = mallocarray(npkts, sizeof(struct tht_pkt),
1852 M_DEVBUF, M_WAITOK | M_ZERO);
1853
1854 TAILQ_INIT(&tpl->tpl_free);
1855 TAILQ_INIT(&tpl->tpl_used);
1856 for (i = 0; i < npkts; i++) {
1857 pkt = &tpl->tpl_pkts[i];
1858
1859 pkt->tp_id = i;
1860 if (bus_dmamap_create(dmat, THT_PBD_PKTLEN, nsegs,
1861 THT_PBD_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1862 &pkt->tp_dmap) != 0) {
1863 tht_pkt_free(sc, tpl);
1864 return (1);
1865 }
1866
1867 TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
1868 }
1869
1870 return (0);
1871 }
1872
1873 void
1874 tht_pkt_free(struct tht_softc *sc, struct tht_pkt_list *tpl)
1875 {
1876 bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1877 struct tht_pkt *pkt;
1878
1879 while ((pkt = tht_pkt_get(tpl)) != NULL)
1880 bus_dmamap_destroy(dmat, pkt->tp_dmap);
1881 free(tpl->tpl_pkts, M_DEVBUF, 0);
1882 tpl->tpl_pkts = NULL;
1883 }
1884
1885 void
1886 tht_pkt_put(struct tht_pkt_list *tpl, struct tht_pkt *pkt)
1887 {
1888 TAILQ_REMOVE(&tpl->tpl_used, pkt, tp_link);
1889 TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
1890 }
1891
1892 struct tht_pkt *
1893 tht_pkt_get(struct tht_pkt_list *tpl)
1894 {
1895 struct tht_pkt *pkt;
1896
1897 pkt = TAILQ_FIRST(&tpl->tpl_free);
1898 if (pkt != NULL) {
1899 TAILQ_REMOVE(&tpl->tpl_free, pkt, tp_link);
1900 TAILQ_INSERT_TAIL(&tpl->tpl_used, pkt, tp_link);
1901
1902 }
1903
1904 return (pkt);
1905 }
1906
1907 struct tht_pkt *
1908 tht_pkt_used(struct tht_pkt_list *tpl)
1909 {
1910 return (TAILQ_FIRST(&tpl->tpl_used));
1911 }
1912