1 /*	$OpenBSD: if_tht.c,v 1.117 2008/05/13 00:52:12 brad Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the Tehuti TN30xx multi port 10Gb Ethernet chipsets,
21  * see http://www.tehutinetworks.net/.
22  *
23  * This driver was made possible because Tehuti networks provided
24  * hardware and documentation. Thanks!
25  */
26 
27 #include "bpfilter.h"
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/sockio.h>
32 #include <sys/mbuf.h>
33 #include <sys/kernel.h>
34 #include <sys/socket.h>
35 #include <sys/malloc.h>
36 #include <sys/device.h>
37 #include <sys/proc.h>
38 #include <sys/queue.h>
39 #include <sys/rwlock.h>
40 #include <sys/time.h>
41 
42 #include <machine/bus.h>
43 
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcidevs.h>
47 
48 #include <net/if.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/if_types.h>
52 
53 #if NBPFILTER > 0
54 #include <net/bpf.h>
55 #endif
56 
57 #ifdef INET
58 #include <netinet/in.h>
59 #include <netinet/if_ether.h>
60 #endif
61 
62 #ifdef THT_DEBUG
63 #define THT_D_FIFO		(1<<0)
64 #define THT_D_TX		(1<<1)
65 #define THT_D_RX		(1<<2)
66 #define THT_D_INTR		(1<<3)
67 
68 int thtdebug = THT_D_TX | THT_D_RX | THT_D_INTR;
69 
70 #define DPRINTF(l, f...)	do { if (thtdebug & (l)) printf(f); } while (0)
71 #else
72 #define DPRINTF(l, f...)
73 #endif
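/*
 * A sketch of how to use this: build the kernel with THT_DEBUG defined
 * (e.g. via an "option THT_DEBUG" line in the kernel config) and set
 * thtdebug to the THT_D_* bits of interest, such as THT_D_FIFO | THT_D_RX,
 * to limit which DPRINTF() categories are printed.
 */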
74 
75 /* registers */
76 
77 #define THT_PCI_BAR		0x10
78 
79 #define _Q(_q)			((_q) * 4)
80 
81 /* General Configuration */
82 #define THT_REG_END_SEL		0x5448 /* PCI Endian Select */
83 #define THT_REG_CLKPLL		0x5000
84 #define  THT_REG_CLKPLL_PLLLK		(1<<9) /* PLL is locked */
85 #define  THT_REG_CLKPLL_RSTEND		(1<<8) /* Reset ended */
86 #define  THT_REG_CLKPLL_TXF_DIS		(1<<3) /* TX Free disabled */
87 #define  THT_REG_CLKPLL_VNT_STOP	(1<<2) /* VENETO Stop */
88 #define  THT_REG_CLKPLL_PLLRST		(1<<1) /* PLL Reset */
89 #define  THT_REG_CLKPLL_SFTRST		(1<<0) /* Software Reset */
90 /* Descriptors and FIFO Registers */
91 #define THT_REG_TXT_CFG0(_q)	(0x4040 + _Q(_q)) /* CFG0 TX Task queues */
92 #define THT_REG_RXF_CFG0(_q)	(0x4050 + _Q(_q)) /* CFG0 RX Free queues */
93 #define THT_REG_RXD_CFG0(_q)	(0x4060 + _Q(_q)) /* CFG0 RX DSC queues */
94 #define THT_REG_TXF_CFG0(_q)	(0x4070 + _Q(_q)) /* CFG0 TX Free queues */
95 #define THT_REG_TXT_CFG1(_q)	(0x4000 + _Q(_q)) /* CFG1 TX Task queues */
96 #define THT_REG_RXF_CFG1(_q)	(0x4010 + _Q(_q)) /* CFG1 RX Free queues */
97 #define THT_REG_RXD_CFG1(_q)	(0x4020 + _Q(_q)) /* CFG1 RX DSC queues */
98 #define THT_REG_TXF_CFG1(_q)	(0x4030 + _Q(_q)) /* CFG1 TX Free queues */
99 #define THT_REG_TXT_RPTR(_q)	(0x40c0 + _Q(_q)) /* TX Task read ptr */
100 #define THT_REG_RXF_RPTR(_q)	(0x40d0 + _Q(_q)) /* RX Free read ptr */
101 #define THT_REG_RXD_RPTR(_q)	(0x40e0 + _Q(_q)) /* RX DSC read ptr */
102 #define THT_REG_TXF_RPTR(_q)	(0x40f0 + _Q(_q)) /* TX Free read ptr */
103 #define THT_REG_TXT_WPTR(_q)	(0x4080 + _Q(_q)) /* TX Task write ptr */
104 #define THT_REG_RXF_WPTR(_q)	(0x4090 + _Q(_q)) /* RX Free write ptr */
105 #define THT_REG_RXD_WPTR(_q)	(0x40a0 + _Q(_q)) /* RX DSC write ptr */
106 #define THT_REG_TXF_WPTR(_q)	(0x40b0 + _Q(_q)) /* TX Free write ptr */
107 #define THT_REG_HTB_ADDR	0x4100 /* HTB Addressing Mechanism enable */
108 #define THT_REG_HTB_ADDR_HI	0x4110 /* High HTB Address */
109 #define THT_REG_HTB_ST_TMR	0x3290 /* HTB Timer */
110 #define THT_REG_RDINTCM(_q)	(0x5120 + _Q(_q)) /* RX DSC Intr Coalescing */
111 #define  THT_REG_RDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
112 #define  THT_REG_RDINTCM_RXF_TH(_c)	((_c)<<16) /* rxf intr req thresh */
113 #define  THT_REG_RDINTCM_COAL_RC	(1<<15) /* coalescing timer recharge */
114 #define  THT_REG_RDINTCM_COAL(_c)	(_c) /* coalescing timer */
115 #define THT_REG_TDINTCM(_q)	(0x5130 + _Q(_q)) /* TX DSC Intr Coalescing */
116 #define  THT_REG_TDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
117 #define  THT_REG_TDINTCM_COAL_RC	(1<<15) /* coalescing timer recharge */
118 #define  THT_REG_TDINTCM_COAL(_c)	(_c) /* coalescing timer */
119 /* 10G Ethernet MAC */
120 #define THT_REG_10G_REV		0x6000 /* Revision */
121 #define THT_REG_10G_SCR		0x6004 /* Scratch */
122 #define THT_REG_10G_CTL		0x6008 /* Control/Status */
123 #define  THT_REG_10G_CTL_CMD_FRAME_EN	(1<<13) /* cmd frame enable */
124 #define  THT_REG_10G_CTL_SW_RESET	(1<<12) /* sw reset */
125 #define  THT_REG_10G_CTL_STATS_AUTO_CLR	(1<<11) /* auto clear statistics */
126 #define  THT_REG_10G_CTL_LOOPBACK	(1<<10) /* enable loopback */
127 #define  THT_REG_10G_CTL_TX_ADDR_INS	(1<<9) /* set mac on tx */
128 #define  THT_REG_10G_CTL_PAUSE_IGNORE	(1<<8) /* ignore pause */
129 #define  THT_REG_10G_CTL_PAUSE_FWD	(1<<7) /* forward pause */
130 #define  THT_REG_10G_CTL_CRC_FWD	(1<<6) /* crc forward */
131 #define  THT_REG_10G_CTL_PAD		(1<<5) /* frame padding */
132 #define  THT_REG_10G_CTL_PROMISC	(1<<4) /* promiscuous mode */
133 #define  THT_REG_10G_CTL_WAN_MODE	(1<<3) /* WAN mode */
134 #define  THT_REG_10G_CTL_RX_EN		(1<<1) /* RX enable */
135 #define  THT_REG_10G_CTL_TX_EN		(1<<0) /* TX enable */
136 #define THT_REG_10G_FRM_LEN	0x6014 /* Frame Length */
137 #define THT_REG_10G_PAUSE	0x6018 /* Pause Quanta */
138 #define THT_REG_10G_RX_SEC	0x601c /* RX Section */
139 #define THT_REG_10G_TX_SEC	0x6020 /* TX Section */
140 #define  THT_REG_10G_SEC_AVAIL(_t)	(_t) /* section available thresh */
141 #define  THT_REG_10G_SEC_EMPTY(_t)	((_t)<<16) /* section empty avail */
142 #define THT_REG_10G_RFIFO_AEF	0x6024 /* RX FIFO Almost Empty/Full */
143 #define THT_REG_10G_TFIFO_AEF	0x6028 /* TX FIFO Almost Empty/Full */
144 #define  THT_REG_10G_FIFO_AE(_t)	(_t) /* almost empty */
145 #define  THT_REG_10G_FIFO_AF(_t)	((_t)<<16) /* almost full */
146 #define THT_REG_10G_SM_STAT	0x6030 /* MDIO Status */
147 #define THT_REG_10G_SM_CMD	0x6034 /* MDIO Command */
148 #define THT_REG_10G_SM_DAT	0x6038 /* MDIO Data */
149 #define THT_REG_10G_SM_ADD	0x603c /* MDIO Address */
150 #define THT_REG_10G_STAT	0x6040 /* Status */
151 /* Statistic Counters */
152 /* XXX todo */
153 /* Status Registers */
154 #define THT_REG_MAC_LNK_STAT	0x0200 /* Link Status */
155 #define  THT_REG_MAC_LNK_STAT_DIS	(1<<4) /* Mac Stats read disable */
156 #define  THT_REG_MAC_LNK_STAT_LINK	(1<<2) /* Link State */
157 #define  THT_REG_MAC_LNK_STAT_REM_FAULT	(1<<1) /* Remote Fault */
158 #define  THT_REG_MAC_LNK_STAT_LOC_FAULT	(1<<0) /* Local Fault */
159 /* Interrupt Registers */
160 #define THT_REG_ISR		0x5100 /* Interrupt Status */
161 #define THT_REG_ISR_LINKCHG(_p)		(1<<(27+(_p))) /* link changed */
162 #define THT_REG_ISR_GPIO		(1<<26) /* GPIO */
163 #define THT_REG_ISR_RFRSH		(1<<25) /* DDR Refresh */
164 #define THT_REG_ISR_SWI			(1<<23) /* software interrupt */
165 #define THT_REG_ISR_RXF(_q)		(1<<(19+(_q))) /* rx free fifo */
166 #define THT_REG_ISR_TXF(_q)		(1<<(15+(_q))) /* tx free fifo */
167 #define THT_REG_ISR_RXD(_q)		(1<<(11+(_q))) /* rx desc fifo */
168 #define THT_REG_ISR_TMR(_t)		(1<<(6+(_t))) /* timer */
169 #define THT_REG_ISR_VNT			(1<<5) /* optistrata */
170 #define THT_REG_ISR_RxFL		(1<<4) /* RX Full */
171 #define THT_REG_ISR_TR			(1<<2) /* table read */
172 #define THT_REG_ISR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
173 #define THT_REG_ISR_GPLE_CLR		(1<<0) /* pcie timeout */
174 #define THT_FMT_ISR		"\020" "\035LINKCHG1" "\034LINKCHG0" \
175 				    "\033GPIO" "\032RFRSH" "\030SWI" \
176 				    "\027RXF3" "\026RXF2" "\025RXF1" \
177 				    "\024RXF0" "\023TXF3" "\022TXF2" \
178 				    "\021TXF1" "\020TXF0" "\017RXD3" \
179 				    "\016RXD2" "\015RXD1" "\014RXD0" \
180 				    "\012TMR3" "\011TMR2" "\010TMR1" \
181 				    "\007TMR0" "\006VNT" "\005RxFL" \
182 				    "\003TR" "\002PCI_LNK_INT" \
183 				    "\001GPLE_CLR"
184 #define THT_REG_ISR_GTI		0x5080 /* GTI Interrupt Status */
185 #define THT_REG_IMR		0x5110 /* Interrupt Mask */
186 #define THT_REG_IMR_LINKCHG(_p)		(1<<(27+(_p))) /* link changed */
187 #define THT_REG_IMR_GPIO		(1<<26) /* GPIO */
188 #define THT_REG_IMR_RFRSH		(1<<25) /* DDR Refresh */
189 #define THT_REG_IMR_SWI			(1<<23) /* software interrupt */
190 #define THT_REG_IMR_RXF(_q)		(1<<(19+(_q))) /* rx free fifo */
191 #define THT_REG_IMR_TXF(_q)		(1<<(15+(_q))) /* tx free fifo */
192 #define THT_REG_IMR_RXD(_q)		(1<<(11+(_q))) /* rx desc fifo */
193 #define THT_REG_IMR_TMR(_t)		(1<<(6+(_t))) /* timer */
194 #define THT_REG_IMR_VNT			(1<<5) /* optistrata */
195 #define THT_REG_IMR_RxFL		(1<<4) /* RX Full */
196 #define THT_REG_IMR_TR			(1<<2) /* table read */
197 #define THT_REG_IMR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
198 #define THT_REG_IMR_GPLE_CLR		(1<<0) /* pcie timeout */
199 #define THT_REG_IMR_GTI		0x5090 /* GTI Interrupt Mask */
200 #define THT_REG_ISR_MSK		0x5140 /* ISR Masked */
201 /* Global Counters */
202 /* XXX todo */
203 /* DDR2 SDRAM Controller Registers */
204 /* XXX TBD */
205 /* EEPROM Registers */
206 /* XXX todo */
207 /* Init arbitration and status registers */
208 #define THT_REG_INIT_SEMAPHORE	0x5170 /* Init Semaphore */
209 #define THT_REG_INIT_STATUS	0x5180 /* Init Status */
210 /* PCI Credits Registers */
211 /* XXX todo */
212 /* TX Arbitration Registers */
213 #define THT_REG_TXTSK_PR(_q)	(0x41b0 + _Q(_q)) /* TX Queue Priority */
214 /* RX Part Registers */
215 #define THT_REG_RX_FLT		0x1240 /* RX Filter Configuration */
216 #define  THT_REG_RX_FLT_ATXER		(1<<15) /* accept with xfer err */
217 #define  THT_REG_RX_FLT_ATRM		(1<<14) /* accept with term err */
218 #define  THT_REG_RX_FLT_AFTSQ		(1<<13) /* accept with fault seq */
219 #define  THT_REG_RX_FLT_OSEN		(1<<12) /* enable pkts */
220 #define  THT_REG_RX_FLT_APHER		(1<<11) /* accept with phy err */
221 #define  THT_REG_RX_FLT_TXFC		(1<<10) /* TX flow control */
222 #define  THT_REG_RX_FLT_FDA		(1<<8) /* filter direct address */
223 #define  THT_REG_RX_FLT_AOF		(1<<7) /* accept overflow frame */
224 #define  THT_REG_RX_FLT_ACF		(1<<6) /* accept control frame */
225 #define  THT_REG_RX_FLT_ARUNT		(1<<5) /* accept runt */
226 #define  THT_REG_RX_FLT_ACRC		(1<<4) /* accept crc error */
227 #define  THT_REG_RX_FLT_AM		(1<<3) /* accept multicast */
228 #define  THT_REG_RX_FLT_AB		(1<<2) /* accept broadcast */
229 #define  THT_REG_RX_FLT_PRM_MASK	0x3 /* promiscuous mode */
230 #define  THT_REG_RX_FLT_PRM_NORMAL	0x0 /* normal mode */
231 #define  THT_REG_RX_FLT_PRM_ALL		0x1 /* pass all incoming frames */
232 #define THT_REG_RX_MAX_FRAME	0x12c0 /* Max Frame Size */
233 #define THT_REG_RX_UNC_MAC0	0x1250 /* MAC Address low word */
234 #define THT_REG_RX_UNC_MAC1	0x1260 /* MAC Address mid word */
235 #define THT_REG_RX_UNC_MAC2	0x1270 /* MAC Address high word */
236 #define THT_REG_RX_MAC_MCST0(_m) (0x1a80 + (_m)*8)
237 #define THT_REG_RX_MAC_MCST1(_m) (0x1a84 + (_m)*8)
238 #define  THT_REG_RX_MAC_MCST_CNT	15
239 #define THT_REG_RX_MCST_HASH	0x1a00 /* imperfect multicast filter hash */
240 #define  THT_REG_RX_MCST_HASH_SIZE	(256 / NBBY)
241 /* OptiStrata Debug Registers */
242 #define THT_REG_VPC		0x2300 /* Program Counter */
243 #define THT_REG_VLI		0x2310 /* Last Interrupt */
244 #define THT_REG_VIC		0x2320 /* Interrupts Count */
245 #define THT_REG_VTMR		0x2330 /* Timer */
246 #define THT_REG_VGLB		0x2340 /* Global */
247 /* SW Reset Registers */
248 #define THT_REG_RST_PRT		0x7000 /* Reset Port */
249 #define  THT_REG_RST_PRT_ACTIVE		0x1 /* port reset is active */
250 #define THT_REG_DIS_PRT		0x7010 /* Disable Port */
251 #define THT_REG_RST_QU_0	0x7020 /* Reset Queue 0 */
252 #define THT_REG_RST_QU_1	0x7028 /* Reset Queue 1 */
253 #define THT_REG_DIS_QU_0	0x7030 /* Disable Queue 0 */
254 #define THT_REG_DIS_QU_1	0x7038 /* Disable Queue 1 */
255 
256 #define THT_PORT_SIZE		0x8000
257 #define THT_PORT_REGION(_p)	((_p) * THT_PORT_SIZE)
258 #define THT_NQUEUES		4
259 
260 #define THT_FIFO_ALIGN		4096
261 #define THT_FIFO_SIZE_4k	0x0
262 #define THT_FIFO_SIZE_8k	0x1
263 #define THT_FIFO_SIZE_16k	0x2
264 #define THT_FIFO_SIZE_32k	0x3
265 #define THT_FIFO_SIZE(_r)	(4096 * (1<<(_r)))
266 #define THT_FIFO_GAP		8 /* keep 8 bytes between ptrs */
267 #define THT_FIFO_PTR_MASK	0x00007ff8 /* rptr/wptr mask */
268 
269 #define THT_FIFO_DESC_LEN	208 /* a descriptor can't be bigger than this */
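/*
 * The THT_FIFO_SIZE_* codes select a power of two fifo size, e.g.
 * THT_FIFO_SIZE(THT_FIFO_SIZE_16k) == 4096 * (1<<2) == 16384 bytes.
 * THT_FIFO_PTR_MASK keeps the hardware read/write pointers inside the
 * largest (32k) fifo while preserving their 8 byte granularity.
 */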
270 
271 #define THT_IMR_DOWN(_p)	(THT_REG_IMR_LINKCHG(_p))
272 #define THT_IMR_UP(_p)		(THT_REG_IMR_LINKCHG(_p) | \
273 				    THT_REG_IMR_RXF(0) | THT_REG_IMR_TXF(0) | \
274 				    THT_REG_IMR_RXD(0))
275 
276 /* hardware structures (we're using the 64 bit variants) */
277 
278 /* physical buffer descriptor */
279 struct tht_pbd {
280 	u_int32_t		addr_lo;
281 	u_int32_t		addr_hi;
282 	u_int32_t		len;
283 } __packed;
284 #define THT_PBD_PKTLEN		(64 * 1024)
285 
286 /* rx free fifo */
287 struct tht_rx_free {
288 	u_int16_t		bc; /* buffer count (0:4) */
289 	u_int16_t		type;
290 
291 	u_int64_t		uid;
292 
293 	/* followed by a pbd list */
294 } __packed;
295 #define THT_RXF_TYPE		1
296 #define THT_RXF_1ST_PDB_LEN	128
297 #define THT_RXF_SGL_LEN		((THT_FIFO_DESC_LEN - \
298 				    sizeof(struct tht_rx_free)) / \
299 				    sizeof(struct tht_pbd))
300 #define THT_RXF_PKT_NUM		128
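/*
 * Sizing sketch: with the __packed 12 byte tht_rx_free header and 12 byte
 * tht_pbd entries above, THT_RXF_SGL_LEN works out to (208 - 12) / 12 = 16
 * scatter/gather entries per rx free descriptor.
 */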
301 
302 /* rx descriptor */
303 struct tht_rx_desc {
304 	u_int32_t		flags;
305 #define THT_RXD_FLAGS_BC(_f)		((_f) & 0x1f) /* buffer count */
306 #define THT_RXD_FLAGS_RXFQ(_f)		(((_f)>>8) & 0x3) /* rxf queue id */
307 #define THT_RXD_FLAGS_TO		(1<<15)
308 #define THT_RXD_FLAGS_TYPE(_f)		(((_f)>>16) & 0xf) /* desc type */
309 #define THT_RXD_FLAGS_OVF		(1<<21) /* overflow error */
310 #define THT_RXD_FLAGS_RUNT		(1<<22) /* runt error */
311 #define THT_RXD_FLAGS_CRC		(1<<23) /* crc error */
312 #define THT_RXD_FLAGS_UDPCS		(1<<24) /* udp checksum error */
313 #define THT_RXD_FLAGS_TCPCS		(1<<25) /* tcp checksum error */
314 #define THT_RXD_FLAGS_IPCS		(1<<26) /* ip checksum error */
315 #define THT_RXD_FLAGS_PKT_ID		0x70000000
316 #define THT_RXD_FLAGS_PKT_ID_NONIP	0x00000000
317 #define THT_RXD_FLAGS_PKT_ID_TCP4	0x10000000
318 #define THT_RXD_FLAGS_PKT_ID_UDP4	0x20000000
319 #define THT_RXD_FLAGS_PKT_ID_IPV4	0x30000000
320 #define THT_RXD_FLAGS_PKT_ID_TCP6	0x50000000
321 #define THT_RXD_FLAGS_PKT_ID_UDP6	0x60000000
322 #define THT_RXD_FLAGS_PKT_ID_IPV6	0x70000000
323 #define THT_RXD_FLAGS_VTAG		(1<<31)
324 	u_int16_t		len;
325 	u_int16_t		vlan;
326 #define THT_RXD_VLAN_ID(_v)		((_v) & 0xfff)
327 #define THT_RXD_VLAN_CFI		(1<<12)
328 #define THT_RXD_VLAN_PRI(_v)		(((_v) >> 13) & 0x7)
329 
330 	u_int64_t		uid;
331 } __packed;
332 #define THT_RXD_TYPE		2
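/*
 * Note that the UDPCS/TCPCS/IPCS bits are error indications: tht_rxd()
 * reports a good checksum to the stack only when the corresponding bit
 * is clear.
 */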
333 
334 /* rx descriptor type 3: data chain instruction */
335 struct tht_rx_desc_dc {
336 	/* preceded by tht_rx_desc */
337 
338 	u_int16_t		cd_offset;
339 	u_int16_t		flags;
340 
341 	u_int8_t		data[4];
342 } __packed;
343 #define THT_RXD_TYPE_DC		3
344 
345 /* rx descriptor type 4: rss (recv side scaling) information */
346 struct tht_rx_desc_rss {
347 	/* preceded by tht_rx_desc */
348 
349 	u_int8_t		rss_hft;
350 	u_int8_t		rss_type;
351 	u_int8_t		rss_tcpu;
352 	u_int8_t		reserved;
353 
354 	u_int32_t		rss_hash;
355 } __packed;
356 #define THT_RXD_TYPE_RSS	4
357 
358 /* tx task fifo */
359 struct tht_tx_task {
360 	u_int32_t		flags;
361 #define THT_TXT_FLAGS_BC(_f)	(_f) /* buffer count */
362 #define THT_TXT_FLAGS_UDPCS	(1<<5) /* udp checksum */
363 #define THT_TXT_FLAGS_TCPCS	(1<<6) /* tcp checksum */
364 #define THT_TXT_FLAGS_IPCS	(1<<7) /* ip checksum */
365 #define THT_TXT_FLAGS_VTAG	(1<<8) /* insert vlan tag */
366 #define THT_TXT_FLAGS_LGSND	(1<<9) /* tcp large send enabled */
367 #define THT_TXT_FLAGS_FRAG	(1<<10) /* ip fragmentation enabled */
368 #define THT_TXT_FLAGS_CFI	(1<<12) /* canonical format indicator */
369 #define THT_TXT_FLAGS_PRIO(_f)	((_f)<<13) /* vlan priority */
370 #define THT_TXT_FLAGS_VLAN(_f)	((_f)<<20) /* vlan id */
371 	u_int16_t		mss_mtu;
372 	u_int16_t		len;
373 
374 	u_int64_t		uid;
375 
376 	/* followed by a pbd list */
377 } __packed;
378 #define THT_TXT_TYPE		(3<<16)
379 #define THT_TXT_SGL_LEN		((THT_FIFO_DESC_LEN - \
380 				    sizeof(struct tht_tx_task)) / \
381 				    sizeof(struct tht_pbd))
382 #define THT_TXT_PKT_NUM		128
383 
384 /* tx free fifo */
385 struct tht_tx_free {
386 	u_int32_t		status;
387 
388 	u_int64_t		uid;
389 
390 	u_int32_t		pad;
391 } __packed;
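/*
 * tht_txf() relies on the uid from the tx task descriptor being echoed
 * back in this tx free descriptor so it can look the completed tht_pkt
 * up again via its tp_id index.
 */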
392 
393 /* pci controller autoconf glue */
394 
395 struct thtc_softc {
396 	struct device		sc_dev;
397 
398 	bus_dma_tag_t		sc_dmat;
399 
400 	bus_space_tag_t		sc_memt;
401 	bus_space_handle_t	sc_memh;
402 	bus_size_t		sc_mems;
403 };
404 
405 int			thtc_match(struct device *, void *, void *);
406 void			thtc_attach(struct device *, struct device *, void *);
407 int			thtc_print(void *, const char *);
408 
409 struct cfattach thtc_ca = {
410 	sizeof(struct thtc_softc), thtc_match, thtc_attach
411 };
412 
413 struct cfdriver thtc_cd = {
414 	NULL, "thtc", DV_DULL
415 };
416 
417 /* glue between the controller and the port */
418 
419 struct tht_attach_args {
420 	int			taa_port;
421 
422 	struct pci_attach_args	*taa_pa;
423 	pci_intr_handle_t	taa_ih;
424 };
425 
426 /* tht itself */
427 
428 struct tht_dmamem {
429 	bus_dmamap_t		tdm_map;
430 	bus_dma_segment_t	tdm_seg;
431 	size_t			tdm_size;
432 	caddr_t			tdm_kva;
433 };
434 #define THT_DMA_MAP(_tdm)	((_tdm)->tdm_map)
435 #define THT_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
436 #define THT_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)
437 
438 struct tht_fifo_desc {
439 	bus_size_t		tfd_cfg0;
440 	bus_size_t		tfd_cfg1;
441 	bus_size_t		tfd_rptr;
442 	bus_size_t		tfd_wptr;
443 	u_int32_t		tfd_size;
444 	int			tfd_write;
445 };
446 #define THT_FIFO_PRE_SYNC(_d)	((_d)->tfd_write ? \
447 				    BUS_DMASYNC_PREWRITE : \
448 				    BUS_DMASYNC_PREREAD)
449 #define THT_FIFO_POST_SYNC(_d)	((_d)->tfd_write ? \
450 				    BUS_DMASYNC_POSTWRITE : \
451 				    BUS_DMASYNC_POSTREAD)
452 
453 struct tht_fifo {
454 	struct tht_fifo_desc	*tf_desc;
455 	struct tht_dmamem	*tf_mem;
456 	int			tf_len;
457 	int			tf_rptr;
458 	int			tf_wptr;
459 	int			tf_ready;
460 };
461 
462 struct tht_pkt {
463 	u_int64_t		tp_id;
464 
465 	bus_dmamap_t		tp_dmap;
466 	struct mbuf		*tp_m;
467 
468 	TAILQ_ENTRY(tht_pkt)	tp_link;
469 };
470 
471 struct tht_pkt_list {
472 	struct tht_pkt		*tpl_pkts;
473 	TAILQ_HEAD(, tht_pkt)	tpl_free;
474 	TAILQ_HEAD(, tht_pkt)	tpl_used;
475 };
476 
477 struct tht_softc {
478 	struct device		sc_dev;
479 	struct thtc_softc	*sc_thtc;
480 	int			sc_port;
481 
482 	void			*sc_ih;
483 
484 	bus_space_handle_t	sc_memh;
485 
486 	struct arpcom		sc_ac;
487 	struct ifmedia		sc_media;
488 	struct timeval		sc_mediacheck;
489 
490 	u_int16_t		sc_lladdr[3];
491 
492 	struct tht_pkt_list	sc_tx_list;
493 	struct tht_pkt_list	sc_rx_list;
494 
495 	struct tht_fifo		sc_txt;
496 	struct tht_fifo		sc_rxf;
497 	struct tht_fifo		sc_rxd;
498 	struct tht_fifo		sc_txf;
499 
500 	u_int32_t		sc_imr;
501 
502 	struct rwlock		sc_lock;
503 };
504 
505 int			tht_match(struct device *, void *, void *);
506 void			tht_attach(struct device *, struct device *, void *);
507 void			tht_mountroot(void *);
508 int			tht_intr(void *);
509 
510 struct cfattach tht_ca = {
511 	sizeof(struct tht_softc), tht_match, tht_attach
512 };
513 
514 struct cfdriver tht_cd = {
515 	NULL, "tht", DV_IFNET
516 };
517 
518 /* pkts */
519 int			tht_pkt_alloc(struct tht_softc *,
520 			    struct tht_pkt_list *, int, int);
521 void			tht_pkt_free(struct tht_softc *,
522 			    struct tht_pkt_list *);
523 void			tht_pkt_put(struct tht_pkt_list *, struct tht_pkt *);
524 struct tht_pkt 		*tht_pkt_get(struct tht_pkt_list *);
525 struct tht_pkt		*tht_pkt_used(struct tht_pkt_list *);
526 
527 /* fifos */
528 
529 struct tht_fifo_desc tht_txt_desc = {
530 	THT_REG_TXT_CFG0(0),
531 	THT_REG_TXT_CFG1(0),
532 	THT_REG_TXT_RPTR(0),
533 	THT_REG_TXT_WPTR(0),
534 	THT_FIFO_SIZE_16k,
535 	1
536 };
537 
538 struct tht_fifo_desc tht_rxf_desc = {
539 	THT_REG_RXF_CFG0(0),
540 	THT_REG_RXF_CFG1(0),
541 	THT_REG_RXF_RPTR(0),
542 	THT_REG_RXF_WPTR(0),
543 	THT_FIFO_SIZE_16k,
544 	1
545 };
546 
547 struct tht_fifo_desc tht_rxd_desc = {
548 	THT_REG_RXD_CFG0(0),
549 	THT_REG_RXD_CFG1(0),
550 	THT_REG_RXD_RPTR(0),
551 	THT_REG_RXD_WPTR(0),
552 	THT_FIFO_SIZE_16k,
553 	0
554 };
555 
556 struct tht_fifo_desc tht_txf_desc = {
557 	THT_REG_TXF_CFG0(0),
558 	THT_REG_TXF_CFG1(0),
559 	THT_REG_TXF_RPTR(0),
560 	THT_REG_TXF_WPTR(0),
561 	THT_FIFO_SIZE_4k,
562 	0
563 };
564 
565 int			tht_fifo_alloc(struct tht_softc *, struct tht_fifo *,
566 			    struct tht_fifo_desc *);
567 void			tht_fifo_free(struct tht_softc *, struct tht_fifo *);
568 
569 size_t			tht_fifo_readable(struct tht_softc *,
570 			    struct tht_fifo *);
571 size_t			tht_fifo_writable(struct tht_softc *,
572 			    struct tht_fifo *);
573 void			tht_fifo_pre(struct tht_softc *,
574 			    struct tht_fifo *);
575 void			tht_fifo_read(struct tht_softc *, struct tht_fifo *,
576 			    void *, size_t);
577 void			tht_fifo_write(struct tht_softc *, struct tht_fifo *,
578 			    void *, size_t);
579 void			tht_fifo_write_dmap(struct tht_softc *,
580 			    struct tht_fifo *, bus_dmamap_t);
581 void			tht_fifo_write_pad(struct tht_softc *,
582 			    struct tht_fifo *, int);
583 void			tht_fifo_post(struct tht_softc *,
584 			    struct tht_fifo *);
585 
586 /* port operations */
587 void			tht_lladdr_read(struct tht_softc *);
588 void			tht_lladdr_write(struct tht_softc *);
589 int			tht_sw_reset(struct tht_softc *);
590 int			tht_fw_load(struct tht_softc *);
591 void			tht_fw_tick(void *arg);
592 void			tht_link_state(struct tht_softc *);
593 
594 /* interface operations */
595 int			tht_ioctl(struct ifnet *, u_long, caddr_t);
596 void			tht_watchdog(struct ifnet *);
597 void			tht_start(struct ifnet *);
598 int			tht_load_pkt(struct tht_softc *, struct tht_pkt *,
599 			    struct mbuf *);
600 void			tht_txf(struct tht_softc *sc);
601 
602 void			tht_rxf_fill(struct tht_softc *, int);
603 void			tht_rxf_drain(struct tht_softc *);
604 void			tht_rxd(struct tht_softc *);
605 
606 void			tht_up(struct tht_softc *);
607 void			tht_iff(struct tht_softc *);
608 void			tht_down(struct tht_softc *);
609 
610 /* ifmedia operations */
611 int			tht_media_change(struct ifnet *);
612 void			tht_media_status(struct ifnet *, struct ifmediareq *);
613 
614 /* wrapper around dma memory */
615 struct tht_dmamem	*tht_dmamem_alloc(struct tht_softc *, bus_size_t,
616 			    bus_size_t);
617 void			tht_dmamem_free(struct tht_softc *,
618 			    struct tht_dmamem *);
619 
620 /* bus space operations */
621 u_int32_t		tht_read(struct tht_softc *, bus_size_t);
622 void			tht_write(struct tht_softc *, bus_size_t, u_int32_t);
623 void			tht_write_region(struct tht_softc *, bus_size_t,
624 			    void *, size_t);
625 int			tht_wait_eq(struct tht_softc *, bus_size_t, u_int32_t,
626 			    u_int32_t, int);
627 int			tht_wait_ne(struct tht_softc *, bus_size_t, u_int32_t,
628 			    u_int32_t, int);
629 
630 #define tht_set(_s, _r, _b)		tht_write((_s), (_r), \
631 					    tht_read((_s), (_r)) | (_b))
632 #define tht_clr(_s, _r, _b)		tht_write((_s), (_r), \
633 					    tht_read((_s), (_r)) & ~(_b))
634 #define tht_wait_set(_s, _r, _b, _t)	tht_wait_eq((_s), (_r), \
635 					    (_b), (_b), (_t))
636 
637 
638 /* misc */
639 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
640 #define sizeofa(_a)	(sizeof(_a) / sizeof((_a)[0]))
641 #define LWORDS(_b)	(((_b) + 7) >> 3)
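/*
 * LWORDS() converts a byte count to 8 byte words, rounding up,
 * e.g. LWORDS(12) == 2; descriptor lengths are expressed in these units.
 */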
642 
643 
644 struct thtc_device {
645 	pci_vendor_id_t		td_vendor;
646 	pci_vendor_id_t		td_product;
647 	u_int			td_nports;
648 };
649 
650 const struct thtc_device *thtc_lookup(struct pci_attach_args *);
651 
652 static const struct thtc_device thtc_devices[] = {
653 	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3009, 1 },
654 	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3010, 1 },
655 	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3014, 2 }
656 };
657 
658 const struct thtc_device *
659 thtc_lookup(struct pci_attach_args *pa)
660 {
661 	int				i;
662 	const struct thtc_device	*td;
663 
664 	for (i = 0; i < sizeofa(thtc_devices); i++) {
665 		td = &thtc_devices[i];
666 		if (td->td_vendor == PCI_VENDOR(pa->pa_id) &&
667 		    td->td_product == PCI_PRODUCT(pa->pa_id))
668 			return (td);
669 	}
670 
671 	return (NULL);
672 }
673 
674 int
675 thtc_match(struct device *parent, void *match, void *aux)
676 {
677 	struct pci_attach_args		*pa = aux;
678 
679 	if (thtc_lookup(pa) != NULL)
680 		return (1);
681 
682 	return (0);
683 }
684 
685 void
686 thtc_attach(struct device *parent, struct device *self, void *aux)
687 {
688 	struct thtc_softc		*sc = (struct thtc_softc *)self;
689 	struct pci_attach_args		*pa = aux;
690 	pcireg_t			memtype;
691 	const struct thtc_device	*td;
692 	struct tht_attach_args		taa;
693 	int				i;
694 
695 	bzero(&taa, sizeof(taa));
696 	td = thtc_lookup(pa);
697 
698 	sc->sc_dmat = pa->pa_dmat;
699 
700 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, THT_PCI_BAR);
701 	if (pci_mapreg_map(pa, THT_PCI_BAR, memtype, 0, &sc->sc_memt,
702 	    &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
703 		printf(": unable to map host registers\n");
704 		return;
705 	}
706 
707 	if (pci_intr_map(pa, &taa.taa_ih) != 0) {
708 		printf(": unable to map interrupt\n");
709 		goto unmap;
710 	}
711 	printf(": %s\n", pci_intr_string(pa->pa_pc, taa.taa_ih));
712 
713 	taa.taa_pa = pa;
714 	for (i = 0; i < td->td_nports; i++) {
715 		taa.taa_port = i;
716 
717 		config_found(self, &taa, thtc_print);
718 	}
719 
720 	return;
721 
722 unmap:
723 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
724 	sc->sc_mems = 0;
725 }
726 
727 int
728 thtc_print(void *aux, const char *pnp)
729 {
730 	struct tht_attach_args		*taa = aux;
731 
732 	if (pnp != NULL)
733 		printf("\"%s\" at %s", tht_cd.cd_name, pnp);
734 
735 	printf(" port %d", taa->taa_port);
736 
737 	return (UNCONF);
738 }
739 
740 int
741 tht_match(struct device *parent, void *match, void *aux)
742 {
743 	return (1);
744 }
745 
746 void
747 tht_attach(struct device *parent, struct device *self, void *aux)
748 {
749 	struct thtc_softc		*csc = (struct thtc_softc *)parent;
750 	struct tht_softc		*sc = (struct tht_softc *)self;
751 	struct tht_attach_args		*taa = aux;
752 	struct ifnet			*ifp;
753 
754 	sc->sc_thtc = csc;
755 	sc->sc_port = taa->taa_port;
756 	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
757 	rw_init(&sc->sc_lock, "thtioc");
758 
759 	if (bus_space_subregion(csc->sc_memt, csc->sc_memh,
760 	    THT_PORT_REGION(sc->sc_port), THT_PORT_SIZE,
761 	    &sc->sc_memh) != 0) {
762 		printf(": unable to map port registers\n");
763 		return;
764 	}
765 
766 	if (tht_sw_reset(sc) != 0) {
767 		printf(": unable to reset port\n");
768 		/* bus_space(9) says we don't have to free subregions */
769 		return;
770 	}
771 
772 	sc->sc_ih = pci_intr_establish(taa->taa_pa->pa_pc, taa->taa_ih,
773 	    IPL_NET, tht_intr, sc, DEVNAME(sc));
774 	if (sc->sc_ih == NULL) {
775 		printf(": unable to establish interrupt\n");
776 		/* bus_space(9) says we don't have to free subregions */
777 		return;
778 	}
779 
780 	tht_lladdr_read(sc);
781 	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
782 
783 	ifp = &sc->sc_ac.ac_if;
784 	ifp->if_softc = sc;
785 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
786 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
787 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
788 	ifp->if_ioctl = tht_ioctl;
789 	ifp->if_start = tht_start;
790 	ifp->if_watchdog = tht_watchdog;
791 	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
792 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
793 	IFQ_SET_MAXLEN(&ifp->if_snd, 400);
794 	IFQ_SET_READY(&ifp->if_snd);
795 
796 	ifmedia_init(&sc->sc_media, 0, tht_media_change, tht_media_status);
797 	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
798 	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
799 
800 	if_attach(ifp);
801 	ether_ifattach(ifp);
802 
803 	printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
804 
805 	mountroothook_establish(tht_mountroot, sc);
806 }
807 
808 void
809 tht_mountroot(void *arg)
810 {
811 	struct tht_softc		*sc = arg;
812 
813 	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
814 		return;
815 
816 	if (tht_fw_load(sc) != 0)
817 		printf("%s: firmware load failed\n", DEVNAME(sc));
818 
819 	tht_sw_reset(sc);
820 
821 	tht_fifo_free(sc, &sc->sc_txt);
822 
823 	tht_link_state(sc);
824 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
825 }
826 
827 int
828 tht_intr(void *arg)
829 {
830 	struct tht_softc		*sc = arg;
831 	struct ifnet			*ifp;
832 	u_int32_t			isr;
833 
834 	isr = tht_read(sc, THT_REG_ISR);
835 	if (isr == 0x0) {
836 		tht_write(sc, THT_REG_IMR, sc->sc_imr);
837 		return (0);
838 	}
839 
840 	DPRINTF(THT_D_INTR, "%s: isr: 0x%b\n", DEVNAME(sc), isr, THT_FMT_ISR);
841 
842 	if (ISSET(isr, THT_REG_ISR_LINKCHG(0) | THT_REG_ISR_LINKCHG(1)))
843 		tht_link_state(sc);
844 
845 	ifp = &sc->sc_ac.ac_if;
846 	if (ifp->if_flags & IFF_RUNNING) {
847 		if (ISSET(isr, THT_REG_ISR_RXD(0)))
848 			tht_rxd(sc);
849 
850 		if (ISSET(isr, THT_REG_ISR_RXF(0)))
851 			tht_rxf_fill(sc, 0);
852 
853 		if (ISSET(isr, THT_REG_ISR_TXF(0)))
854 			tht_txf(sc);
855 
856 		tht_start(ifp);
857 	}
858 
859 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
860 	return (1);
861 }
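/*
 * tht_intr() always rewrites THT_REG_IMR with the current sc_imr on the
 * way out, including the isr == 0 case where the interrupt wasn't ours,
 * so the mask chosen by tht_up()/tht_down() stays in effect.
 */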
862 
863 int
864 tht_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
865 {
866 	struct tht_softc		*sc = ifp->if_softc;
867 	struct ifreq			*ifr = (struct ifreq *)addr;
868 	struct ifaddr			*ifa;
869 	int				error;
870 	int				s;
871 
872 	rw_enter_write(&sc->sc_lock);
873 	s = splnet();
874 
875 	error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
876 	if (error > 0)
877 		goto err;
878 
879 	switch (cmd) {
880 	case SIOCSIFADDR:
881 		ifa = (struct ifaddr *)addr;
882 
883 #ifdef INET
884 		if (ifa->ifa_addr->sa_family == AF_INET)
885 			arp_ifinit(&sc->sc_ac, ifa);
886 #endif
887 
888 		ifp->if_flags |= IFF_UP;
889 		/* FALLTHROUGH */
890 	case SIOCSIFFLAGS:
891 		if (ifp->if_flags & IFF_UP) {
892 			if (ifp->if_flags & IFF_RUNNING)
893 				tht_iff(sc);
894 			else
895 				tht_up(sc);
896 		} else {
897 			if (ifp->if_flags & IFF_RUNNING)
898 				tht_down(sc);
899 		}
900 		break;
901 
902 	case SIOCSIFMTU:
903 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
904 			error = EINVAL;
905 		else
906 			ifp->if_mtu = ifr->ifr_mtu;
907 		break;
908 
909 	case SIOCADDMULTI:
910 		error = ether_addmulti(ifr, &sc->sc_ac);
911 		break;
912 	case SIOCDELMULTI:
913 		error = ether_delmulti(ifr, &sc->sc_ac);
914 		break;
915 
916 	case SIOCGIFMEDIA:
917 	case SIOCSIFMEDIA:
918 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
919 		break;
920 
921 	default:
922 		error = ENOTTY;
923 		break;
924 	}
925 
926 	if (error == ENETRESET) {
927 		if (ifp->if_flags & IFF_RUNNING)
928 			tht_iff(sc);
929 		error = 0;
930 	}
931 
932 err:
933 	splx(s);
934 	rw_exit_write(&sc->sc_lock);
935 
936 	return (error);
937 }
938 
939 void
940 tht_up(struct tht_softc *sc)
941 {
942 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
943 
944 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
945 		return;
946 	}
947 
948 	if (tht_pkt_alloc(sc, &sc->sc_tx_list, THT_TXT_PKT_NUM,
949 	    THT_TXT_SGL_LEN) != 0)
950 		return;
951 	if (tht_pkt_alloc(sc, &sc->sc_rx_list, THT_RXF_PKT_NUM,
952 	    THT_RXF_SGL_LEN) != 0)
953 		goto free_tx_list;
954 
955 	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
956 		goto free_rx_list;
957 	if (tht_fifo_alloc(sc, &sc->sc_rxf, &tht_rxf_desc) != 0)
958 		goto free_txt;
959 	if (tht_fifo_alloc(sc, &sc->sc_rxd, &tht_rxd_desc) != 0)
960 		goto free_rxf;
961 	if (tht_fifo_alloc(sc, &sc->sc_txf, &tht_txf_desc) != 0)
962 		goto free_rxd;
963 
964 	tht_write(sc, THT_REG_10G_FRM_LEN, MCLBYTES - ETHER_ALIGN);
965 	tht_write(sc, THT_REG_10G_PAUSE, 0x96);
966 	tht_write(sc, THT_REG_10G_RX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
967 	    THT_REG_10G_SEC_EMPTY(0x80));
968 	tht_write(sc, THT_REG_10G_TX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
969 	    THT_REG_10G_SEC_EMPTY(0xe0));
970 	tht_write(sc, THT_REG_10G_RFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
971 	    THT_REG_10G_FIFO_AF(0x0));
972 	tht_write(sc, THT_REG_10G_TFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
973 	    THT_REG_10G_FIFO_AF(0x0));
974 	tht_write(sc, THT_REG_10G_CTL, THT_REG_10G_CTL_TX_EN |
975 	    THT_REG_10G_CTL_RX_EN | THT_REG_10G_CTL_PAD |
976 	    THT_REG_10G_CTL_PROMISC);
977 
978 	tht_write(sc, THT_REG_VGLB, 0);
979 
980 	tht_write(sc, THT_REG_RX_MAX_FRAME, MCLBYTES - ETHER_ALIGN);
981 
982 	tht_write(sc, THT_REG_RDINTCM(0), THT_REG_RDINTCM_PKT_TH(12) |
983 	    THT_REG_RDINTCM_RXF_TH(4) | THT_REG_RDINTCM_COAL_RC |
984 	    THT_REG_RDINTCM_COAL(0x20));
985 	tht_write(sc, THT_REG_TDINTCM(0), THT_REG_TDINTCM_PKT_TH(12) |
986 	    THT_REG_TDINTCM_COAL_RC | THT_REG_TDINTCM_COAL(0x20));
987 
988 	bcopy(sc->sc_ac.ac_enaddr, sc->sc_lladdr, ETHER_ADDR_LEN);
989 	tht_lladdr_write(sc);
990 
991 	/* populate rxf fifo */
992 	tht_rxf_fill(sc, 1);
993 
994 	tht_iff(sc);
995 
996 	ifp->if_flags |= IFF_RUNNING;
997 	ifp->if_flags &= ~IFF_OACTIVE;
998 
999 	/* enable interrupts */
1000 	sc->sc_imr = THT_IMR_UP(sc->sc_port);
1001 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
1002 
1003 	return;
1004 
1005 free_rxd:
1006 	tht_fifo_free(sc, &sc->sc_rxd);
1007 free_rxf:
1008 	tht_fifo_free(sc, &sc->sc_rxf);
1009 free_txt:
1010 	tht_fifo_free(sc, &sc->sc_txt);
1011 
1012 	tht_sw_reset(sc);
1013 
1014 free_rx_list:
1015 	tht_pkt_free(sc, &sc->sc_rx_list);
1016 free_tx_list:
1017 	tht_pkt_free(sc, &sc->sc_tx_list);
1018 }
1019 
1020 void
1021 tht_iff(struct tht_softc *sc)
1022 {
1023 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1024 	struct ether_multi		*enm;
1025 	struct ether_multistep		step;
1026 	u_int32_t			rxf;
1027 	u_int8_t			imf[THT_REG_RX_MCST_HASH_SIZE];
1028 	u_int8_t			hash;
1029 	int				i;
1030 
1031 	ifp->if_flags &= ~IFF_ALLMULTI;
1032 
1033 	rxf = THT_REG_RX_FLT_OSEN | THT_REG_RX_FLT_AM | THT_REG_RX_FLT_AB;
1034 	for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
1035 		tht_write(sc, THT_REG_RX_MAC_MCST0(i), 0);
1036 		tht_write(sc, THT_REG_RX_MAC_MCST1(i), 0);
1037 	}
1038 	memset(imf, 0x00, sizeof(imf));
1039 
1040 	if (ifp->if_flags & IFF_PROMISC)
1041 		rxf |= THT_REG_RX_FLT_PRM_ALL;
1042 	else if (sc->sc_ac.ac_multirangecnt > 0) {
1043 		ifp->if_flags |= IFF_ALLMULTI;
1044 		memset(imf, 0xff, sizeof(imf));
1045 	} else {
1046 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1047 
1048 #if 0
1049 		/* fill the perfect multicast filters */
1050 		for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
1051 			if (enm == NULL)
1052 				break;
1053 
1054 			tht_write(sc, THT_REG_RX_MAC_MCST0(i),
1055 			    (enm->enm_addrlo[0] << 0) |
1056 			    (enm->enm_addrlo[1] << 8) |
1057 			    (enm->enm_addrlo[2] << 16) |
1058 			    (enm->enm_addrlo[3] << 24));
1059 			tht_write(sc, THT_REG_RX_MAC_MCST1(i),
1060 			    (enm->enm_addrlo[4] << 0) |
1061 			    (enm->enm_addrlo[5] << 8));
1062 
1063 			ETHER_NEXT_MULTI(step, enm);
1064 		}
1065 #endif
1066 
1067 		/* fill the imperfect multicast filter with what's left */
1068 		while (enm != NULL) {
1069 			hash = 0x00;
1070 			for (i = 0; i < ETHER_ADDR_LEN; i++)
1071 				hash ^= enm->enm_addrlo[i];
1072 			setbit(imf, hash);
1073 
1074 			ETHER_NEXT_MULTI(step, enm);
1075 		}
1076 	}
1077 
1078 	tht_write_region(sc, THT_REG_RX_MCST_HASH, imf, sizeof(imf));
1079 	tht_write(sc, THT_REG_RX_FLT, rxf);
1080 }
1081 
1082 void
1083 tht_down(struct tht_softc *sc)
1084 {
1085 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1086 
1087 	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
1088 		return;
1089 	}
1090 
1091 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE | IFF_ALLMULTI);
1092 
1093 	while (tht_fifo_writable(sc, &sc->sc_txt) < sc->sc_txt.tf_len &&
1094 	    tht_fifo_readable(sc, &sc->sc_txf) > 0)
1095 		tsleep(sc, 0, "thtdown", hz);
1096 
1097 	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
1098 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
1099 
1100 	tht_sw_reset(sc);
1101 
1102 	tht_fifo_free(sc, &sc->sc_txf);
1103 	tht_fifo_free(sc, &sc->sc_rxd);
1104 	tht_fifo_free(sc, &sc->sc_rxf);
1105 	tht_fifo_free(sc, &sc->sc_txt);
1106 
1107 	/* free mbufs that were on the rxf fifo */
1108 	tht_rxf_drain(sc);
1109 
1110 	tht_pkt_free(sc, &sc->sc_rx_list);
1111 	tht_pkt_free(sc, &sc->sc_tx_list);
1112 }
1113 
1114 void
1115 tht_start(struct ifnet *ifp)
1116 {
1117 	struct tht_softc		*sc = ifp->if_softc;
1118 	struct tht_pkt			*pkt;
1119 	struct tht_tx_task		txt;
1120 	u_int32_t			flags;
1121 	struct mbuf			*m;
1122 	int				bc;
1123 
1124 	if (!(ifp->if_flags & IFF_RUNNING))
1125 		return;
1126 	if (ifp->if_flags & IFF_OACTIVE)
1127 		return;
1128 	if (IFQ_IS_EMPTY(&ifp->if_snd))
1129 		return;
1130 
1131 	if (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_DESC_LEN)
1132 		return;
1133 
1134 	bzero(&txt, sizeof(txt));
1135 
1136 	tht_fifo_pre(sc, &sc->sc_txt);
1137 
1138 	do {
1139 		IFQ_POLL(&ifp->if_snd, m);
1140 		if (m == NULL)
1141 			break;
1142 
1143 		pkt = tht_pkt_get(&sc->sc_tx_list);
1144 		if (pkt == NULL) {
1145 			ifp->if_flags |= IFF_OACTIVE;
1146 			break;
1147 		}
1148 
1149 		IFQ_DEQUEUE(&ifp->if_snd, m);
1150 		if (tht_load_pkt(sc, pkt, m) != 0) {
1151 			m_freem(m);
1152 			tht_pkt_put(&sc->sc_tx_list, pkt);
1153 			ifp->if_oerrors++;
1154 			break;
1155 		}
1156 		/* thou shalt not use m after this point, only pkt->tp_m */
1157 
1158 #if NBPFILTER > 0
1159 		if (ifp->if_bpf)
1160 			bpf_mtap(ifp->if_bpf, pkt->tp_m, BPF_DIRECTION_OUT);
1161 #endif
1162 
1163 		bc = sizeof(txt) +
1164 		    sizeof(struct tht_pbd) * pkt->tp_dmap->dm_nsegs;
1165 
1166 		flags = THT_TXT_TYPE | THT_TXT_FLAGS_UDPCS |
1167 		    THT_TXT_FLAGS_TCPCS | THT_TXT_FLAGS_IPCS | LWORDS(bc);
1168 		txt.flags = htole32(flags);
1169 		txt.len = htole16(pkt->tp_m->m_pkthdr.len);
1170 		txt.uid = pkt->tp_id;
1171 
1172 		DPRINTF(THT_D_TX, "%s: txt uid 0x%llx flags 0x%08x len %d\n",
1173 		    DEVNAME(sc), pkt->tp_id, flags, pkt->tp_m->m_pkthdr.len);
1174 
1175 		tht_fifo_write(sc, &sc->sc_txt, &txt, sizeof(txt));
1176 		tht_fifo_write_dmap(sc, &sc->sc_txt, pkt->tp_dmap);
1177 		tht_fifo_write_pad(sc, &sc->sc_txt, bc);
1178 
1179 		bus_dmamap_sync(sc->sc_thtc->sc_dmat, pkt->tp_dmap, 0,
1180 		    pkt->tp_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1181 
1182 		ifp->if_opackets++;
1183 
1184 	} while (sc->sc_txt.tf_ready > THT_FIFO_DESC_LEN);
1185 
1186 	tht_fifo_post(sc, &sc->sc_txt);
1187 }
1188 
1189 int
1190 tht_load_pkt(struct tht_softc *sc, struct tht_pkt *pkt, struct mbuf *m)
1191 {
1192 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1193 	bus_dmamap_t			dmap = pkt->tp_dmap;
1194 	struct mbuf			*m0 = NULL;
1195 
1196 	switch(bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) {
1197 	case 0:
1198 		pkt->tp_m = m;
1199 		break;
1200 
1201 	case EFBIG: /* mbuf chain is too fragmented */
1202 		MGETHDR(m0, M_DONTWAIT, MT_DATA);
1203 		if (m0 == NULL)
1204 			return (ENOBUFS);
1205 		if (m->m_pkthdr.len > MHLEN) {
1206 			MCLGET(m0, M_DONTWAIT);
1207 			if (!(m0->m_flags & M_EXT)) {
1208 				m_freem(m0);
1209 				return (ENOBUFS);
1210 			}
1211 		}
1212 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
1213 		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
1214 		if (bus_dmamap_load_mbuf(dmat, dmap, m0, BUS_DMA_NOWAIT)) {
1215 			m_freem(m0);
1216 			return (ENOBUFS);
1217 		}
1218 
1219 		m_freem(m);
1220 		pkt->tp_m = m0;
1221 		break;
1222 
1223 	default:
1224 		return (ENOBUFS);
1225 	}
1226 
1227 	return (0);
1228 }
1229 
1230 void
1231 tht_txf(struct tht_softc *sc)
1232 {
1233 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1234 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1235 	bus_dmamap_t			dmap;
1236 	struct tht_tx_free		txf;
1237 	struct tht_pkt			*pkt;
1238 
1239 	if (tht_fifo_readable(sc, &sc->sc_txf) < sizeof(txf))
1240 		return;
1241 
1242 	tht_fifo_pre(sc, &sc->sc_txf);
1243 
1244 	do {
1245 		tht_fifo_read(sc, &sc->sc_txf, &txf, sizeof(txf));
1246 
1247 		DPRINTF(THT_D_TX, "%s: txf uid 0x%llx\n", DEVNAME(sc), txf.uid);
1248 
1249 		pkt = &sc->sc_tx_list.tpl_pkts[txf.uid];
1250 		dmap = pkt->tp_dmap;
1251 
1252 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1253 		    BUS_DMASYNC_POSTWRITE);
1254 		bus_dmamap_unload(dmat, dmap);
1255 
1256 		m_freem(pkt->tp_m);
1257 
1258 		tht_pkt_put(&sc->sc_tx_list, pkt);
1259 
1260 	} while (sc->sc_txf.tf_ready >= sizeof(txf));
1261 
1262 	ifp->if_flags &= ~IFF_OACTIVE;
1263 
1264 	tht_fifo_post(sc, &sc->sc_txf);
1265 }
1266 
1267 void
1268 tht_rxf_fill(struct tht_softc *sc, int wait)
1269 {
1270 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1271 	bus_dmamap_t			dmap;
1272 	struct tht_rx_free		rxf;
1273 	struct tht_pkt			*pkt;
1274 	struct mbuf			*m;
1275 	int				bc;
1276 
1277 	if (tht_fifo_writable(sc, &sc->sc_rxf) <= THT_FIFO_DESC_LEN)
1278 		return;
1279 
1280 	tht_fifo_pre(sc, &sc->sc_rxf);
1281 
1282 	for (;;) {
1283 		if ((pkt = tht_pkt_get(&sc->sc_rx_list)) == NULL)
1284 			goto done;
1285 
1286 		MGETHDR(m, wait ? M_WAIT : M_DONTWAIT, MT_DATA);
1287 		if (m == NULL)
1288 			goto put_pkt;
1289 
1290 		MCLGET(m, wait ? M_WAIT : M_DONTWAIT);
1291 		if (!ISSET(m->m_flags, M_EXT))
1292 			goto free_m;
1293 
1294 		m->m_data += ETHER_ALIGN;
1295 		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
1296 
1297 		dmap = pkt->tp_dmap;
1298 		if (bus_dmamap_load_mbuf(dmat, dmap, m,
1299 		    wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0)
1300 			goto free_m;
1301 
1302 		pkt->tp_m = m;
1303 
1304 		bc = sizeof(rxf) + sizeof(struct tht_pbd) * dmap->dm_nsegs;
1305 
1306 		rxf.bc = htole16(LWORDS(bc));
1307 		rxf.type = htole16(THT_RXF_TYPE);
1308 		rxf.uid = pkt->tp_id;
1309 
1310 		tht_fifo_write(sc, &sc->sc_rxf, &rxf, sizeof(rxf));
1311 		tht_fifo_write_dmap(sc, &sc->sc_rxf, dmap);
1312 		tht_fifo_write_pad(sc, &sc->sc_rxf, bc);
1313 
1314 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1315 		    BUS_DMASYNC_PREREAD);
1316 
1317 		if (sc->sc_rxf.tf_ready <= THT_FIFO_DESC_LEN)
1318 			goto done;
1319 	}
1320 
1321 free_m:
1322 	m_freem(m);
1323 put_pkt:
1324 	tht_pkt_put(&sc->sc_rx_list, pkt);
1325 done:
1326 	tht_fifo_post(sc, &sc->sc_rxf);
1327 }
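/*
 * Each rx buffer above is a single mbuf cluster with its data pointer
 * advanced by ETHER_ALIGN so the IP header ends up 32 bit aligned; the
 * MCLBYTES - ETHER_ALIGN buffer size matches the maximum frame length
 * programmed into the chip by tht_up().
 */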
1328 
1329 void
1330 tht_rxf_drain(struct tht_softc *sc)
1331 {
1332 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1333 	bus_dmamap_t			dmap;
1334 	struct tht_pkt			*pkt;
1335 
1336 	while ((pkt = tht_pkt_used(&sc->sc_rx_list)) != NULL) {
1337 		dmap = pkt->tp_dmap;
1338 
1339 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1340 		    BUS_DMASYNC_POSTREAD);
1341 		bus_dmamap_unload(dmat, dmap);
1342 
1343 		m_freem(pkt->tp_m);
1344 
1345 		tht_pkt_put(&sc->sc_rx_list, pkt);
1346 	}
1347 }
1348 
1349 void
1350 tht_rxd(struct tht_softc *sc)
1351 {
1352 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1353 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1354 	bus_dmamap_t			dmap;
1355 	struct tht_rx_desc		rxd;
1356 	struct tht_pkt			*pkt;
1357 	struct mbuf			*m;
1358 	int				bc;
1359 	u_int32_t			flags;
1360 
1361 	if (tht_fifo_readable(sc, &sc->sc_rxd) < sizeof(rxd))
1362 		return;
1363 
1364 	tht_fifo_pre(sc, &sc->sc_rxd);
1365 
1366 	do {
1367 		tht_fifo_read(sc, &sc->sc_rxd, &rxd, sizeof(rxd));
1368 
1369 		flags = letoh32(rxd.flags);
1370 		bc = THT_RXD_FLAGS_BC(flags) * 8;
1371 		bc -= sizeof(rxd);
1372 		pkt = &sc->sc_rx_list.tpl_pkts[rxd.uid];
1373 
1374 		dmap = pkt->tp_dmap;
1375 
1376 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1377 		    BUS_DMASYNC_POSTREAD);
1378 		bus_dmamap_unload(dmat, dmap);
1379 
1380 		m = pkt->tp_m;
1381 		m->m_pkthdr.rcvif = ifp;
1382 		m->m_pkthdr.len = m->m_len = letoh16(rxd.len);
1383 
1384 		if (!ISSET(flags, THT_RXD_FLAGS_IPCS))
1385 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1386 		if (!ISSET(flags, THT_RXD_FLAGS_TCPCS))
1387 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
1388 		if (!ISSET(flags, THT_RXD_FLAGS_UDPCS))
1389 			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
1390 
1391 		/* XXX process type 3 rx descriptors */
1392 
1393 #if NBPFILTER > 0
1394 		if (ifp->if_bpf)
1395 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1396 #endif
1397 
1398 		ether_input_mbuf(ifp, m);
1399 
1400 		tht_pkt_put(&sc->sc_rx_list, pkt);
1401 
1402 		while (bc > 0) {
1403 			static u_int32_t pad;
1404 
1405 			tht_fifo_read(sc, &sc->sc_rxd, &pad, sizeof(pad));
1406 			bc -= sizeof(pad);
1407 		}
1408 
1409 		ifp->if_ipackets++;
1410 
1411 	} while (sc->sc_rxd.tf_ready >= sizeof(rxd));
1412 
1413 	tht_fifo_post(sc, &sc->sc_rxd);
1414 
1415 	/* put more pkts on the fifo */
1416 	tht_rxf_fill(sc, 0);
1417 }
1418 
1419 void
1420 tht_watchdog(struct ifnet *ifp)
1421 {
1422 	/* do nothing */
1423 }
1424 
1425 int
1426 tht_media_change(struct ifnet *ifp)
1427 {
1428 	/* ignore */
1429 	return (0);
1430 }
1431 
1432 void
1433 tht_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1434 {
1435 	struct tht_softc		*sc = ifp->if_softc;
1436 
1437 	imr->ifm_active = IFM_ETHER | IFM_AUTO;
1438 	imr->ifm_status = IFM_AVALID;
1439 
1440 	tht_link_state(sc);
1441 
1442 	if (LINK_STATE_IS_UP(ifp->if_link_state))
1443 		imr->ifm_status |= IFM_ACTIVE;
1444 }
1445 
1446 int
1447 tht_fifo_alloc(struct tht_softc *sc, struct tht_fifo *tf,
1448     struct tht_fifo_desc *tfd)
1449 {
1450 	u_int64_t			dva;
1451 
1452 	tf->tf_len = THT_FIFO_SIZE(tfd->tfd_size);
1453 	tf->tf_mem = tht_dmamem_alloc(sc, tf->tf_len, THT_FIFO_ALIGN);
1454 	if (tf->tf_mem == NULL)
1455 		return (1);
1456 
1457 	tf->tf_desc = tfd;
1458 	tf->tf_rptr = tf->tf_wptr = 0;
1459 
1460 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1461 	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tfd));
1462 
1463 	dva = THT_DMA_DVA(tf->tf_mem);
1464 	tht_write(sc, tfd->tfd_cfg0, (u_int32_t)dva | tfd->tfd_size);
1465 	tht_write(sc, tfd->tfd_cfg1, (u_int32_t)(dva >> 32));
1466 
1467 	return (0);
1468 }
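/*
 * Fifo memory is a single physically contiguous DMA segment (see
 * tht_dmamem_alloc()) aligned to THT_FIFO_ALIGN, which leaves the low
 * bits of the base address clear so the tfd_size code can be or'ed into
 * the CFG0 register above.
 */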
1469 
1470 void
1471 tht_fifo_free(struct tht_softc *sc, struct tht_fifo *tf)
1472 {
1473 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1474 	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
1475 	tht_dmamem_free(sc, tf->tf_mem);
1476 }
1477 
1478 size_t
1479 tht_fifo_readable(struct tht_softc *sc, struct tht_fifo *tf)
1480 {
1481 	tf->tf_wptr = tht_read(sc, tf->tf_desc->tfd_wptr);
1482 	tf->tf_wptr &= THT_FIFO_PTR_MASK;
1483 	tf->tf_ready = tf->tf_wptr - tf->tf_rptr;
1484 	if (tf->tf_ready < 0)
1485 		tf->tf_ready += tf->tf_len;
1486 
1487 	DPRINTF(THT_D_FIFO, "%s: fifo rdable wptr: %d rptr: %d ready: %d\n",
1488 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1489 
1490 	return (tf->tf_ready);
1491 }
1492 
1493 size_t
1494 tht_fifo_writable(struct tht_softc *sc, struct tht_fifo *tf)
1495 {
1496 	tf->tf_rptr = tht_read(sc, tf->tf_desc->tfd_rptr);
1497 	tf->tf_rptr &= THT_FIFO_PTR_MASK;
1498 	tf->tf_ready = tf->tf_rptr - tf->tf_wptr;
1499 	if (tf->tf_ready <= 0)
1500 		tf->tf_ready += tf->tf_len;
1501 
1502 	DPRINTF(THT_D_FIFO, "%s: fifo wrable wptr: %d rptr: %d ready: %d\n",
1503 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1504 
1505 	return (tf->tf_ready);
1506 }
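/*
 * A sketch of the ring arithmetic: tf_ready is the distance from the
 * software owned pointer to the hardware owned one, wrapped by tf_len,
 * so equal pointers mean an empty fifo (fully writable).  Callers keep
 * at least THT_FIFO_GAP, or a whole THT_FIFO_DESC_LEN, in reserve so the
 * write pointer never catches up with the read pointer.
 */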
1507 
1508 void
1509 tht_fifo_pre(struct tht_softc *sc, struct tht_fifo *tf)
1510 {
1511 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1512 	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
1513 }
1514 
1515 void
1516 tht_fifo_read(struct tht_softc *sc, struct tht_fifo *tf,
1517     void *buf, size_t buflen)
1518 {
1519 	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
1520 	u_int8_t			*desc = buf;
1521 	size_t				len;
1522 
1523 	tf->tf_ready -= buflen;
1524 
1525 	len = tf->tf_len - tf->tf_rptr;
1526 
1527 	if (len < buflen) {
1528 		memcpy(desc, fifo + tf->tf_rptr, len);
1529 
1530 		buflen -= len;
1531 		desc += len;
1532 
1533 		tf->tf_rptr = 0;
1534 	}
1535 
1536 	memcpy(desc, fifo + tf->tf_rptr, buflen);
1537 	tf->tf_rptr += buflen;
1538 
1539 	DPRINTF(THT_D_FIFO, "%s: fifo rd wptr: %d rptr: %d ready: %d\n",
1540 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1541 }
1542 
1543 void
1544 tht_fifo_write(struct tht_softc *sc, struct tht_fifo *tf,
1545     void *buf, size_t buflen)
1546 {
1547 	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
1548 	u_int8_t			*desc = buf;
1549 	size_t				len;
1550 
1551 	tf->tf_ready -= buflen;
1552 
1553 	len = tf->tf_len - tf->tf_wptr;
1554 
1555 	if (len < buflen) {
1556 		memcpy(fifo + tf->tf_wptr, desc, len);
1557 
1558 		buflen -= len;
1559 		desc += len;
1560 
1561 		tf->tf_wptr = 0;
1562 	}
1563 
1564 	memcpy(fifo + tf->tf_wptr, desc, buflen);
1565 	tf->tf_wptr += buflen;
1566 	tf->tf_wptr %= tf->tf_len;
1567 
1568 	DPRINTF(THT_D_FIFO, "%s: fifo wr wptr: %d rptr: %d ready: %d\n",
1569 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1570 }
1571 
1572 void
1573 tht_fifo_write_dmap(struct tht_softc *sc, struct tht_fifo *tf,
1574     bus_dmamap_t dmap)
1575 {
1576 	struct tht_pbd			pbd;
1577 	u_int64_t			dva;
1578 	int				i;
1579 
1580 	for (i = 0; i < dmap->dm_nsegs; i++) {
1581 		dva = dmap->dm_segs[i].ds_addr;
1582 
1583 		pbd.addr_lo = htole32(dva);
1584 		pbd.addr_hi = htole32(dva >> 32);
1585 		pbd.len = htole32(dmap->dm_segs[i].ds_len);
1586 
1587 		tht_fifo_write(sc, tf, &pbd, sizeof(pbd));
1588 	}
1589 }
1590 
1591 void
1592 tht_fifo_write_pad(struct tht_softc *sc, struct tht_fifo *tf, int bc)
1593 {
1594 	static const u_int32_t pad = 0x0;
1595 
1596 	/* this assumes you'll only ever be writing multiples of 4 bytes */
1597 	if (bc % 8)
1598 		tht_fifo_write(sc, tf, (void *)&pad, sizeof(pad));
1599 }
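/*
 * Descriptor lengths are counted in 8 byte words (see LWORDS()), so a
 * descriptor whose byte count is only a multiple of 4 gets four bytes of
 * zero padding written here to round it up to a full word.
 */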
1600 
1601 void
1602 tht_fifo_post(struct tht_softc *sc, struct tht_fifo *tf)
1603 {
1604 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1605 	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tf->tf_desc));
1606 	if (tf->tf_desc->tfd_write)
1607 		tht_write(sc, tf->tf_desc->tfd_wptr, tf->tf_wptr);
1608 	else
1609 		tht_write(sc, tf->tf_desc->tfd_rptr, tf->tf_rptr);
1610 
1611 	DPRINTF(THT_D_FIFO, "%s: fifo post wptr: %d rptr: %d\n", DEVNAME(sc),
1612 	    tf->tf_wptr, tf->tf_rptr);
1613 }
1614 
1615 static const bus_size_t tht_mac_regs[3] = {
1616     THT_REG_RX_UNC_MAC2, THT_REG_RX_UNC_MAC1, THT_REG_RX_UNC_MAC0
1617 };
1618 
1619 void
1620 tht_lladdr_read(struct tht_softc *sc)
1621 {
1622 	int				i;
1623 
1624 	for (i = 0; i < sizeofa(tht_mac_regs); i++)
1625 		sc->sc_lladdr[i] = betoh16(tht_read(sc, tht_mac_regs[i]));
1626 }
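/*
 * The unicast address registers hold the station address as three 16 bit
 * words; tht_mac_regs[] walks them high word (MAC2) to low word (MAC0) so
 * sc_lladdr[] lines up with the address bytes copied to and from
 * ac_enaddr in tht_attach() and tht_up().
 */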
1627 
1628 void
1629 tht_lladdr_write(struct tht_softc *sc)
1630 {
1631 	int				i;
1632 
1633 	for (i = 0; i < sizeofa(tht_mac_regs); i++)
1634 		tht_write(sc, tht_mac_regs[i], htobe16(sc->sc_lladdr[i]));
1635 }
1636 
1637 #define tht_swrst_set(_s, _r) tht_write((_s), (_r), 0x1)
1638 #define tht_swrst_clr(_s, _r) tht_write((_s), (_r), 0x0)
1639 int
1640 tht_sw_reset(struct tht_softc *sc)
1641 {
1642 	int				i;
1643 
1644 	/* this follows the SW Reset process in section 8.8 of the doco */
1645 
1646 	/* 1. disable rx */
1647 	tht_clr(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);
1648 
1649 	/* 2. initiate port disable */
1650 	tht_swrst_set(sc, THT_REG_DIS_PRT);
1651 
1652 	/* 3. initiate queue disable */
1653 	tht_swrst_set(sc, THT_REG_DIS_QU_0);
1654 	tht_swrst_set(sc, THT_REG_DIS_QU_1);
1655 
1656 	/* 4. wait for successful finish of previous tasks */
1657 	if (!tht_wait_set(sc, THT_REG_RST_PRT, THT_REG_RST_PRT_ACTIVE, 1000))
1658 		return (1);
1659 
1660 	/* 5. Reset interrupt registers */
1661 	tht_write(sc, THT_REG_IMR, 0x0); /* 5.a */
1662 	tht_read(sc, THT_REG_ISR); /* 5.b */
1663 	for (i = 0; i < THT_NQUEUES; i++) {
1664 		tht_write(sc, THT_REG_RDINTCM(i), 0x0); /* 5.c/5.d */
1665 		tht_write(sc, THT_REG_TDINTCM(i), 0x0); /* 5.e */
1666 	}
1667 
1668 	/* 6. initiate queue reset */
1669 	tht_swrst_set(sc, THT_REG_RST_QU_0);
1670 	tht_swrst_set(sc, THT_REG_RST_QU_1);
1671 
1672 	/* 7. initiate port reset */
1673 	tht_swrst_set(sc, THT_REG_RST_PRT);
1674 
1675 	/* 8. clear txt/rxf/rxd/txf read and write ptrs */
1676 	for (i = 0; i < THT_NQUEUES; i++) {
1677 		tht_write(sc, THT_REG_TXT_RPTR(i), 0);
1678 		tht_write(sc, THT_REG_RXF_RPTR(i), 0);
1679 		tht_write(sc, THT_REG_RXD_RPTR(i), 0);
1680 		tht_write(sc, THT_REG_TXF_RPTR(i), 0);
1681 
1682 		tht_write(sc, THT_REG_TXT_WPTR(i), 0);
1683 		tht_write(sc, THT_REG_RXF_WPTR(i), 0);
1684 		tht_write(sc, THT_REG_RXD_WPTR(i), 0);
1685 		tht_write(sc, THT_REG_TXF_WPTR(i), 0);
1686 	}
1687 
1688 	/* 9. unset port disable */
1689 	tht_swrst_clr(sc, THT_REG_DIS_PRT);
1690 
1691 	/* 10. unset queue disable */
1692 	tht_swrst_clr(sc, THT_REG_DIS_QU_0);
1693 	tht_swrst_clr(sc, THT_REG_DIS_QU_1);
1694 
1695 	/* 11. unset queue reset */
1696 	tht_swrst_clr(sc, THT_REG_RST_QU_0);
1697 	tht_swrst_clr(sc, THT_REG_RST_QU_1);
1698 
1699 	/* 12. unset port reset */
1700 	tht_swrst_clr(sc, THT_REG_RST_PRT);
1701 
1702 	/* 13. enable rx */
1703 	tht_set(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);
1704 
1705 	return (0);
1706 }
1707 
1708 int
1709 tht_fw_load(struct tht_softc *sc)
1710 {
1711 	struct timeout			ticker;
1712 	volatile int			ok = 1;
1713 	u_int8_t			*fw, *buf;
1714 	size_t				fwlen, wrlen;
1715 	int				error = 1;
1716 
1717 	if (loadfirmware("tht", &fw, &fwlen) != 0)
1718 		return (1);
1719 
1720 	if ((fwlen % 8) != 0)
1721 		goto err;
1722 
1723 	buf = fw;
1724 	while (fwlen > 0) {
1725 		while (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_GAP) {
1726 			if (tsleep(sc, PCATCH, "thtfw", 1) == EINTR)
1727 				goto err;
1728 		}
1729 
1730 		wrlen = MIN(sc->sc_txt.tf_ready - THT_FIFO_GAP, fwlen);
1731 		tht_fifo_pre(sc, &sc->sc_txt);
1732 		tht_fifo_write(sc, &sc->sc_txt, buf, wrlen);
1733 		tht_fifo_post(sc, &sc->sc_txt);
1734 
1735 		fwlen -= wrlen;
1736 		buf += wrlen;
1737 	}
1738 
1739 	timeout_set(&ticker, tht_fw_tick, (void *)&ok);
1740 	timeout_add(&ticker, 2*hz);
1741 	while (ok) {
1742 		if (tht_read(sc, THT_REG_INIT_STATUS) != 0) {
1743 			error = 0;
1744 			break;
1745 		}
1746 
1747 		if (tsleep(sc, PCATCH, "thtinit", 1) == EINTR)
1748 			goto err;
1749 	}
1750 	timeout_del(&ticker);
1751 
1752 	tht_write(sc, THT_REG_INIT_SEMAPHORE, 0x1);
1753 
1754 err:
1755 	free(fw, M_DEVBUF);
1756 	return (error);
1757 }
1758 
1759 void
1760 tht_fw_tick(void *arg)
1761 {
1762 	volatile int			*ok = arg;
1763 
1764 	*ok = 0;
1765 }
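/*
 * tht_fw_tick() is the timeout(9) handler armed in tht_fw_load(): it
 * clears the ok flag after two seconds so the THT_REG_INIT_STATUS polling
 * loop gives up instead of waiting forever for firmware that never starts.
 */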
1766 
1767 void
1768 tht_link_state(struct tht_softc *sc)
1769 {
1770 	static const struct timeval	interval = { 0, 10000 };
1771 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1772 	int				link_state = LINK_STATE_DOWN;
1773 
1774 	if (!ratecheck(&sc->sc_mediacheck, &interval))
1775 		return;
1776 
1777 	if (tht_read(sc, THT_REG_MAC_LNK_STAT) & THT_REG_MAC_LNK_STAT_LINK)
1778 		link_state = LINK_STATE_FULL_DUPLEX;
1779 
1780 	if (ifp->if_link_state != link_state) {
1781 		ifp->if_link_state = link_state;
1782 		if_link_state_change(ifp);
1783 	}
1784 
1785 	if (LINK_STATE_IS_UP(ifp->if_link_state))
1786 		ifp->if_baudrate = IF_Gbps(10);
1787 	else
1788 		ifp->if_baudrate = 0;
1789 }
1790 
1791 u_int32_t
1792 tht_read(struct tht_softc *sc, bus_size_t r)
1793 {
1794 	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
1795 	    BUS_SPACE_BARRIER_READ);
1796 	return (bus_space_read_4(sc->sc_thtc->sc_memt, sc->sc_memh, r));
1797 }
1798 
1799 void
1800 tht_write(struct tht_softc *sc, bus_size_t r, u_int32_t v)
1801 {
1802 	bus_space_write_4(sc->sc_thtc->sc_memt, sc->sc_memh, r, v);
1803 	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
1804 	    BUS_SPACE_BARRIER_WRITE);
1805 }
1806 
1807 void
1808 tht_write_region(struct tht_softc *sc, bus_size_t r, void *buf, size_t len)
1809 {
1810 	bus_space_write_raw_region_4(sc->sc_thtc->sc_memt, sc->sc_memh, r,
1811 	    buf, len);
1812 	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, len,
1813 	    BUS_SPACE_BARRIER_WRITE);
1814 }
1815 
1816 int
1817 tht_wait_eq(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1818     int timeout)
1819 {
1820 	while ((tht_read(sc, r) & m) != v) {
1821 		if (timeout == 0)
1822 			return (0);
1823 
1824 		delay(1000);
1825 		timeout--;
1826 	}
1827 
1828 	return (1);
1829 }
1830 
1831 int
1832 tht_wait_ne(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1833     int timeout)
1834 {
1835 	while ((tht_read(sc, r) & m) == v) {
1836 		if (timeout == 0)
1837 			return (0);
1838 
1839 		delay(1000);
1840 		timeout--;
1841 	}
1842 
1843 	return (1);
1844 }
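/*
 * Both wait helpers poll in 1ms steps, so the timeout argument is roughly
 * a count of milliseconds; tht_sw_reset(), for example, allows about a
 * second for the port reset to become active.
 */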
1845 
1846 struct tht_dmamem *
1847 tht_dmamem_alloc(struct tht_softc *sc, bus_size_t size, bus_size_t align)
1848 {
1849 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1850 	struct tht_dmamem		*tdm;
1851 	int				nsegs;
1852 
1853 	tdm = malloc(sizeof(struct tht_dmamem), M_DEVBUF, M_WAITOK | M_ZERO);
1854 	tdm->tdm_size = size;
1855 
1856 	if (bus_dmamap_create(dmat, size, 1, size, 0,
1857 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
1858 		goto tdmfree;
1859 
1860 	if (bus_dmamem_alloc(dmat, size, align, 0, &tdm->tdm_seg, 1, &nsegs,
1861 	    BUS_DMA_WAITOK) != 0)
1862 		goto destroy;
1863 
1864 	if (bus_dmamem_map(dmat, &tdm->tdm_seg, nsegs, size, &tdm->tdm_kva,
1865 	    BUS_DMA_WAITOK) != 0)
1866 		goto free;
1867 
1868 	if (bus_dmamap_load(dmat, tdm->tdm_map, tdm->tdm_kva, size,
1869 	    NULL, BUS_DMA_WAITOK) != 0)
1870 		goto unmap;
1871 
1872 	bzero(tdm->tdm_kva, size);
1873 
1874 	return (tdm);
1875 
1876 unmap:
1877 	bus_dmamem_unmap(dmat, tdm->tdm_kva, size);
1878 free:
1879 	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
1880 destroy:
1881 	bus_dmamap_destroy(dmat, tdm->tdm_map);
1882 tdmfree:
1883 	free(tdm, M_DEVBUF);
1884 
1885 	return (NULL);
1886 }
1887 
1888 void
1889 tht_dmamem_free(struct tht_softc *sc, struct tht_dmamem *tdm)
1890 {
1891 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1892 
1893 	bus_dmamap_unload(dmat, tdm->tdm_map);
1894 	bus_dmamem_unmap(dmat, tdm->tdm_kva, tdm->tdm_size);
1895 	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
1896 	bus_dmamap_destroy(dmat, tdm->tdm_map);
1897 	free(tdm, M_DEVBUF);
1898 }
1899 
1900 int
1901 tht_pkt_alloc(struct tht_softc *sc, struct tht_pkt_list *tpl, int npkts,
1902     int nsegs)
1903 {
1904 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1905 	struct tht_pkt			*pkt;
1906 	int				i;
1907 
1908 	tpl->tpl_pkts = malloc(sizeof(struct tht_pkt) * npkts, M_DEVBUF,
1909 	    M_WAITOK | M_ZERO);
1910 
1911 	TAILQ_INIT(&tpl->tpl_free);
1912 	TAILQ_INIT(&tpl->tpl_used);
1913 	for (i = 0; i < npkts; i++) {
1914 		pkt = &tpl->tpl_pkts[i];
1915 
1916 		pkt->tp_id = i;
1917 		if (bus_dmamap_create(dmat, THT_PBD_PKTLEN, nsegs,
1918 		    THT_PBD_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1919 		    &pkt->tp_dmap) != 0) {
1920 			tht_pkt_free(sc, tpl);
1921 			return (1);
1922 		}
1923 
1924 		TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
1925 	}
1926 
1927 	return (0);
1928 }
1929 
1930 void
1931 tht_pkt_free(struct tht_softc *sc, struct tht_pkt_list *tpl)
1932 {
1933 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1934 	struct tht_pkt			*pkt;
1935 
1936 	while ((pkt = tht_pkt_get(tpl)) != NULL)
1937 		bus_dmamap_destroy(dmat, pkt->tp_dmap);
1938 	free(tpl->tpl_pkts, M_DEVBUF);
1939 	tpl->tpl_pkts = NULL;
1940 }
1941 
1942 void
1943 tht_pkt_put(struct tht_pkt_list *tpl, struct tht_pkt *pkt)
1944 {
1945 	TAILQ_REMOVE(&tpl->tpl_used, pkt, tp_link);
1946 	TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
1947 }
1948 
1949 struct tht_pkt *
1950 tht_pkt_get(struct tht_pkt_list *tpl)
1951 {
1952 	struct tht_pkt			*pkt;
1953 
1954 	pkt = TAILQ_FIRST(&tpl->tpl_free);
1955 	if (pkt != NULL) {
1956 		TAILQ_REMOVE(&tpl->tpl_free, pkt, tp_link);
1957 		TAILQ_INSERT_TAIL(&tpl->tpl_used, pkt, tp_link);
1958 
1959 	}
1960 
1961 	return (pkt);
1962 }
1963 
1964 struct tht_pkt *
1965 tht_pkt_used(struct tht_pkt_list *tpl)
1966 {
1967 	return (TAILQ_FIRST(&tpl->tpl_used));
1968 }
1969