1 /*	$OpenBSD: if_nxe.c,v 1.80 2023/11/10 15:51:20 bluhm Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/sockio.h>
24 #include <sys/mbuf.h>
25 #include <sys/kernel.h>
26 #include <sys/socket.h>
27 #include <sys/malloc.h>
28 #include <sys/device.h>
29 #include <sys/proc.h>
30 #include <sys/queue.h>
31 #include <sys/timeout.h>
32 #include <sys/sensors.h>
33 #include <sys/rwlock.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <net/if.h>
42 #include <net/if_dl.h>
43 #include <net/if_media.h>
44 
45 #if NBPFILTER > 0
46 #include <net/bpf.h>
47 #endif
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #ifdef NXE_DEBUG
53 int nxedebug = 0;
54 
55 #define DPRINTF(l, f...)	do { if (nxedebug & (l)) printf(f); } while (0)
56 #define DASSERT(_a)		assert(_a)
57 #else
58 #define DPRINTF(l, f...)
59 #define DASSERT(_a)
60 #endif
61 
62 /* this driver likes firmware around this version */
63 #define NXE_VERSION_MAJOR	3
64 #define NXE_VERSION_MINOR	4
65 #define NXE_VERSION_BUILD	31
66 #define NXE_VERSION \
67     ((NXE_VERSION_MAJOR << 16)|(NXE_VERSION_MINOR << 8)|(NXE_VERSION_BUILD))
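/*
 * e.g. firmware 3.4.31 packs to NXE_VERSION 0x0003041f:
 * (3 << 16) | (4 << 8) | 31
 */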
68 
69 
70 /*
71  * PCI configuration space registers
72  */
73 
74 #define NXE_PCI_BAR_MEM		0x10 /* bar 0 */
75 #define NXE_PCI_BAR_MEM_128MB		(128 * 1024 * 1024)
76 #define NXE_PCI_BAR_DOORBELL	0x20 /* bar 4 */
77 
78 /*
79  * doorbell register space
80  */
81 
82 #define NXE_DB			0x00000000
83 #define  NXE_DB_PEGID			0x00000003
84 #define  NXE_DB_PEGID_RX		0x00000001 /* rx unit */
85 #define  NXE_DB_PEGID_TX		0x00000002 /* tx unit */
86 #define  NXE_DB_PRIVID			0x00000004 /* must be set */
87 #define  NXE_DB_COUNT(_c)		((_c)<<3) /* count */
88 #define  NXE_DB_CTXID(_c)		((_c)<<18) /* context id */
89 #define  NXE_DB_OPCODE_RX_PROD		0x00000000
90 #define  NXE_DB_OPCODE_RX_JUMBO_PROD	0x10000000
91 #define  NXE_DB_OPCODE_RX_LRO_PROD	0x20000000
92 #define  NXE_DB_OPCODE_CMD_PROD		0x30000000
93 #define  NXE_DB_OPCODE_UPD_CONS		0x40000000
94 #define  NXE_DB_OPCODE_RESET_CTX	0x50000000
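/*
 * a doorbell word is built by ORing these fields together; for example
 * nxe_rx_start() below kicks the rx ring with:
 *
 *	nxe_doorbell(sc, NXE_DB_PEGID_RX | NXE_DB_PRIVID |
 *	    NXE_DB_OPCODE_RX_PROD |
 *	    NXE_DB_COUNT(nr->nr_slot) | NXE_DB_CTXID(sc->sc_function));
 */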
95 
96 /*
97  * register space
98  */
99 
100 /* different PCI functions use different registers sometimes */
101 #define _F(_f)			((_f) * 0x20)
102 
103 /*
104  * driver ref section 4.2
105  *
106  * All the hardware registers are mapped in memory. Apart from the registers
107  * for the individual hardware blocks, the memory map includes a large number
108  * of software definable registers.
109  *
110  * The following table gives the memory map in the PCI address space.
111  */
112 
113 #define NXE_MAP_DDR_NET		0x00000000
114 #define NXE_MAP_DDR_MD		0x02000000
115 #define NXE_MAP_QDR_NET		0x04000000
116 #define NXE_MAP_DIRECT_CRB	0x04400000
117 #define NXE_MAP_OCM0		0x05000000
118 #define NXE_MAP_OCM1		0x05100000
119 #define NXE_MAP_CRB		0x06000000
120 
121 /*
122  * Since there are a large number of registers, they do not fit in a single
123  * PCI addressing range, so two windows are defined. The window region starts
124  * at NXE_MAP_CRB and extends to the end of the register map. Which window is
125  * active is selected using the NXE_WIN_CRB register. The format of the
126  * NXE_WIN_CRB register is as follows:
127  */
128 
129 #define NXE_WIN_CRB(_f)		(0x06110210 + _F(_f))
130 #define  NXE_WIN_CRB_0			(0<<25)
131 #define  NXE_WIN_CRB_1			(1<<25)
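/*
 * the driver tracks the active window in sc->sc_window (checked by the
 * DASSERT()s below); nxe_crb_set() presumably selects it by writing
 * NXE_WIN_CRB_0 or NXE_WIN_CRB_1 to NXE_WIN_CRB(sc->sc_function).
 */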
132 
133 /*
134  * The memory map inside the register windows is divided into a set of blocks.
135  * Each register block is owned by one hardware agent. The following table
136  * gives the memory map of the various register blocks in window 0. These
137  * registers are all in the CRB register space, so the offsets given here are
138  * relative to the base of the CRB offset region (NXE_MAP_CRB).
139  */
140 
141 #define NXE_W0_PCIE		0x00100000 /* PCI Express */
142 #define NXE_W0_NIU		0x00600000 /* Network Interface Unit */
143 #define NXE_W0_PPE_0		0x01100000 /* Protocol Processing Engine 0 */
144 #define NXE_W0_PPE_1		0x01200000 /* Protocol Processing Engine 1 */
145 #define NXE_W0_PPE_2		0x01300000 /* Protocol Processing Engine 2 */
146 #define NXE_W0_PPE_3		0x01400000 /* Protocol Processing Engine 3 */
147 #define NXE_W0_PPE_D		0x01500000 /* PPE D-cache */
148 #define NXE_W0_PPE_I		0x01600000 /* PPE I-cache */
149 
150 /*
151  * These are the register blocks inside window 1.
152  */
153 
154 #define NXE_W1_PCIE		0x00100000
155 #define NXE_W1_SW		0x00200000
156 #define NXE_W1_SIR		0x01200000
157 #define NXE_W1_ROMUSB		0x01300000
158 
159 /*
160  * Global registers
161  */
162 #define NXE_BOOTLD_START	0x00010000
163 
164 
165 /*
166  * driver ref section 5
167  *
168  * CRB Window Register Descriptions
169  */
170 
171 /*
172  * PCI Express Registers
173  *
174  * Despite being in the CRB window space, these registers can be accessed via
175  * either window. This means they are accessible "globally", without offsetting
176  * relative to the start of the CRB window space.
177  */
178 
179 /* Interrupts */
180 #define NXE_ISR_VECTOR		0x06110100 /* Interrupt Vector */
181 #define NXE_ISR_MASK		0x06110104 /* Interrupt Mask */
182 #define NXE_ISR_TARGET_STATUS	0x06110118
183 #define NXE_ISR_TARGET_MASK	0x06110128
184 #define  NXE_ISR_MINE(_f)		(0x08 << (_f))
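/*
 * nxe_intr() claims an interrupt only if the bit for its PCI function is
 * set in NXE_1_SW_INT_VECTOR, i.e.
 * ISSET(nxe_crb_read(sc, NXE_1_SW_INT_VECTOR), NXE_ISR_MINE(sc->sc_function)).
 */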
185 
186 /* lock registers (semaphores between chipset and driver) */
187 #define NXE_SEM_ROM_LOCK	0x0611c010 /* ROM access lock */
188 #define NXE_SEM_ROM_UNLOCK	0x0611c014
189 #define NXE_SEM_PHY_LOCK	0x0611c018 /* PHY access lock */
190 #define NXE_SEM_PHY_UNLOCK	0x0611c01c
191 #define  NXE_SEM_DONE			0x1
192 
193 /*
194  * Network Interface Unit (NIU) Registers
195  */
196 
197 #define NXE_0_NIU_MODE		0x00600000
198 #define  NXE_0_NIU_MODE_XGE		(1<<2) /* XGE interface enabled */
199 #define  NXE_0_NIU_MODE_GBE		(1<<1) /* 4 GbE interfaces enabled */
200 #define NXE_0_NIU_SINGLE_TERM	0x00600004
201 #define NXE_0_NIU_INT_MASK	0x00600040
202 
203 #define NXE_0_NIU_RESET_XG	0x0060001c /* reset XG */
204 #define NXE_0_NIU_RESET_FIFO	0x00600088 /* reset sys fifos */
205 
206 #define _P(_p)			((_p) * 0x10000)
207 
208 #define NXE_0_XG_CFG0(_p)	(0x00670000 + _P(_p))
209 #define  NXE_0_XG_CFG0_TX_EN		(1<<0) /* TX enable */
210 #define  NXE_0_XG_CFG0_TX_SYNC		(1<<1) /* TX synced */
211 #define  NXE_0_XG_CFG0_RX_EN		(1<<2) /* RX enable */
212 #define  NXE_0_XG_CFG0_RX_SYNC		(1<<3) /* RX synced */
213 #define  NXE_0_XG_CFG0_TX_FLOWCTL	(1<<4) /* enable pause frame gen */
214 #define  NXE_0_XG_CFG0_RX_FLOWCTL	(1<<5) /* act on rxed pause frames */
215 #define  NXE_0_XG_CFG0_LOOPBACK		(1<<8) /* tx appears on rx */
216 #define  NXE_0_XG_CFG0_TX_RST_PB	(1<<15) /* reset frm tx proto block */
217 #define  NXE_0_XG_CFG0_RX_RST_PB	(1<<16) /* reset frm rx proto block */
218 #define  NXE_0_XG_CFG0_TX_RST_MAC	(1<<17) /* reset frm tx multiplexer */
219 #define  NXE_0_XG_CFG0_RX_RST_MAC	(1<<18) /* reset ctl frms and timers */
220 #define  NXE_0_XG_CFG0_SOFT_RST		(1<<31) /* soft reset */
221 #define NXE_0_XG_CFG1(_p)	(0x00670004 + _P(_p))
222 #define  NXE_0_XG_CFG1_REM_CRC		(1<<0) /* enable crc removal */
223 #define  NXE_0_XG_CFG1_CRC_EN		(1<<1) /* append crc to tx frames */
224 #define  NXE_0_XG_CFG1_NO_MAX		(1<<5) /* rx all frames despite size */
225 #define  NXE_0_XG_CFG1_WIRE_LO_ERR	(1<<6) /* recognize local err */
226 #define  NXE_0_XG_CFG1_PAUSE_FR_DIS	(1<<8) /* disable pause frame detect */
227 #define  NXE_0_XG_CFG1_SEQ_ERR_EN	(1<<10) /* enable seq err detection */
228 #define  NXE_0_XG_CFG1_MULTICAST	(1<<12) /* accept all multicast */
229 #define  NXE_0_XG_CFG1_PROMISC		(1<<13) /* accept all frames */
230 #define NXE_0_XG_IPG(_p)	(0x00670008 + _P(_p))
231 #define NXE_0_XG_MAC_LO(_p)	(0x00670010 + _P(_p))
232 #define NXE_0_XG_MAC_HI(_p)	(0x0067000c + _P(_p))
233 #define NXE_0_XG_STATUS(_p)	(0x00670018 + _P(_p))
234 #define NXE_0_XG_MTU(_p)	(0x0067001c + _P(_p))
235 #define NXE_0_XG_PAUSE_FRM(_p)	(0x00670020 + _P(_p))
236 #define NXE_0_XG_TX_BYTES(_p)	(0x00670024 + _P(_p))
237 #define NXE_0_XG_TX_PKTS(_p)	(0x00670028 + _P(_p))
238 #define NXE_0_XG_RX_BYTES(_p)	(0x0067002c + _P(_p))
239 #define NXE_0_XG_RX_PKTS(_p)	(0x00670030 + _P(_p))
240 #define NXE_0_XG_AGGR_ERRS(_p)	(0x00670034 + _P(_p))
241 #define NXE_0_XG_MCAST_PKTS(_p)	(0x00670038 + _P(_p))
242 #define NXE_0_XG_UCAST_PKTS(_p)	(0x0067003c + _P(_p))
243 #define NXE_0_XG_CRC_ERRS(_p)	(0x00670040 + _P(_p))
244 #define NXE_0_XG_OVERSIZE(_p)	(0x00670044 + _P(_p))
245 #define NXE_0_XG_UNDERSIZE(_p)	(0x00670048 + _P(_p))
246 #define NXE_0_XG_LOCAL_ERRS(_p)	(0x0067004c + _P(_p))
247 #define NXE_0_XG_REMOTE_ERRS(_p) (0x00670050 + _P(_p))
248 #define NXE_0_XG_CNTL_CHARS(_p)	(0x00670054 + _P(_p))
249 #define NXE_0_XG_PAUSE_PKTS(_p)	(0x00670058 + _P(_p))
250 
251 /*
252  * Software Defined Registers
253  */
254 
255 /* chipset state registers */
256 #define NXE_1_SW_ROM_LOCK_ID	0x00202100
257 #define  NXE_1_SW_ROM_LOCK_ID_DRV	0x0d417340
258 #define NXE_1_SW_PHY_LOCK_ID	0x00202120
259 #define  NXE_1_SW_PHY_LOCK_ID_DRV	0x44524956
260 
261 /* firmware version */
262 #define NXE_1_SW_FWVER_MAJOR	0x00202150 /* Major f/w version */
263 #define NXE_1_SW_FWVER_MINOR	0x00202154 /* Minor f/w version */
264 #define NXE_1_SW_FWVER_BUILD	0x00202158 /* Build/Sub f/w version */
265 
266 /* misc */
267 #define NXE_1_SW_CMD_ADDR_HI	0x00202218 /* cmd ring phys addr */
268 #define NXE_1_SW_CMD_ADDR_LO	0x0020221c /* cmd ring phys addr */
269 #define NXE_1_SW_CMD_SIZE	0x002022c8 /* entries in the cmd ring */
270 #define NXE_1_SW_DUMMY_ADDR_HI	0x0020223c /* hi address of dummy buf */
271 #define NXE_1_SW_DUMMY_ADDR_LO	0x00202240 /* lo address of dummy buf */
272 #define  NXE_1_SW_DUMMY_ADDR_LEN	1024
273 
274 static const u_int32_t nxe_regmap[][4] = {
275 #define NXE_1_SW_CMD_PRODUCER(_f)	(nxe_regmap[0][(_f)])
276     { 0x00202208, 0x002023ac, 0x002023b8, 0x002023d0 },
277 #define NXE_1_SW_CMD_CONSUMER(_f)	(nxe_regmap[1][(_f)])
278     { 0x0020220c, 0x002023b0, 0x002023bc, 0x002023d4 },
279 
280 #define NXE_1_SW_CONTEXT(_p)		(nxe_regmap[2][(_p)])
281 #define NXE_1_SW_CONTEXT_SIG(_p)	(0xdee0 | (_p))
282     { 0x0020238c, 0x00202390, 0x0020239c, 0x002023a4 },
283 #define NXE_1_SW_CONTEXT_ADDR_LO(_p)	(nxe_regmap[3][(_p)])
284     { 0x00202388, 0x00202390, 0x00202398, 0x002023a0 },
285 #define NXE_1_SW_CONTEXT_ADDR_HI(_p)	(nxe_regmap[4][(_p)])
286     { 0x002023c0, 0x002023c4, 0x002023c8, 0x002023cc },
287 
288 #define NXE_1_SW_INT_MASK(_p)		(nxe_regmap[5][(_p)])
289     { 0x002023d8, 0x002023e0, 0x002023e4, 0x002023e8 },
290 
291 #define NXE_1_SW_RX_PRODUCER(_c)	(nxe_regmap[6][(_c)])
292     { 0x00202300, 0x00202344, 0x002023d8, 0x0020242c },
293 #define NXE_1_SW_RX_CONSUMER(_c)	(nxe_regmap[7][(_c)])
294     { 0x00202304, 0x00202348, 0x002023dc, 0x00202430 },
295 #define NXE_1_SW_RX_RING(_c)		(nxe_regmap[8][(_c)])
296     { 0x00202308, 0x0020234c, 0x002023f0, 0x00202434 },
297 #define NXE_1_SW_RX_SIZE(_c)		(nxe_regmap[9][(_c)])
298     { 0x0020230c, 0x00202350, 0x002023f4, 0x00202438 },
299 
300 #define NXE_1_SW_RX_JUMBO_PRODUCER(_c)	(nxe_regmap[10][(_c)])
301     { 0x00202310, 0x00202354, 0x002023f8, 0x0020243c },
302 #define NXE_1_SW_RX_JUMBO_CONSUMER(_c)	(nxe_regmap[11][(_c)])
303     { 0x00202314, 0x00202358, 0x002023fc, 0x00202440 },
304 #define NXE_1_SW_RX_JUMBO_RING(_c)	(nxe_regmap[12][(_c)])
305     { 0x00202318, 0x0020235c, 0x00202400, 0x00202444 },
306 #define NXE_1_SW_RX_JUMBO_SIZE(_c)	(nxe_regmap[13][(_c)])
307     { 0x0020231c, 0x00202360, 0x00202404, 0x00202448 },
308 
309 #define NXE_1_SW_RX_LRO_PRODUCER(_c)	(nxe_regmap[14][(_c)])
310     { 0x00202320, 0x00202364, 0x00202408, 0x0020244c },
311 #define NXE_1_SW_RX_LRO_CONSUMER(_c)	(nxe_regmap[15][(_c)])
312     { 0x00202324, 0x00202368, 0x0020240c, 0x00202450 },
313 #define NXE_1_SW_RX_LRO_RING(_c)	(nxe_regmap[16][(_c)])
314     { 0x00202328, 0x0020236c, 0x00202410, 0x00202454 },
315 #define NXE_1_SW_RX_LRO_SIZE(_c)	(nxe_regmap[17][(_c)])
316     { 0x0020232c, 0x00202370, 0x00202414, 0x00202458 },
317 
318 #define NXE_1_SW_STATUS_RING(_c)	(nxe_regmap[18][(_c)])
319     { 0x00202330, 0x00202374, 0x00202418, 0x0020245c },
320 #define NXE_1_SW_STATUS_PRODUCER(_c)	(nxe_regmap[19][(_c)])
321     { 0x00202334, 0x00202378, 0x0020241c, 0x00202460 },
322 #define NXE_1_SW_STATUS_CONSUMER(_c)	(nxe_regmap[20][(_c)])
323     { 0x00202338, 0x0020237c, 0x00202420, 0x00202464 },
324 #define NXE_1_SW_STATUS_STATE(_c)	(nxe_regmap[21][(_c)])
325 #define  NXE_1_SW_STATUS_STATE_READY		0x0000ff01
326     { 0x0020233c, 0x00202380, 0x00202424, 0x00202468 },
327 #define NXE_1_SW_STATUS_SIZE(_c)	(nxe_regmap[22][(_c)])
328     { 0x00202340, 0x00202384, 0x00202428, 0x0020246c }
329 };
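/*
 * these software registers are not at a fixed stride per function/context,
 * so they are looked up via the table above; e.g. the tx path kicks the
 * chip with
 * nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), nr->nr_slot).
 */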
330 
331 
332 #define NXE_1_SW_BOOTLD_CONFIG	0x002021fc
333 #define  NXE_1_SW_BOOTLD_CONFIG_ROM	0x00000000
334 #define  NXE_1_SW_BOOTLD_CONFIG_RAM	0x12345678
335 
336 #define NXE_1_SW_CMDPEG_STATE	0x00202250 /* init status */
337 #define  NXE_1_SW_CMDPEG_STATE_START	0xff00 /* init starting */
338 #define  NXE_1_SW_CMDPEG_STATE_DONE	0xff01 /* init complete */
339 #define  NXE_1_SW_CMDPEG_STATE_ACK	0xf00f /* init ack */
340 #define  NXE_1_SW_CMDPEG_STATE_ERROR	0xffff /* init failed */
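/*
 * firmware boot handshake: nxe_mountroot() waits for the firmware to post
 * _DONE and then writes _ACK back; nxe_up_fw() treats an existing _ACK as
 * "already initialised" and skips the handshake.
 */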
341 
342 #define NXE_1_SW_XG_STATE	0x00202294 /* phy state */
343 #define  NXE_1_SW_XG_STATE_PORT(_r, _p)	(((_r)>>8*(_p))&0xff)
344 #define  NXE_1_SW_XG_STATE_UP		(1<<4)
345 #define  NXE_1_SW_XG_STATE_DOWN		(1<<5)
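/*
 * per-port link state: the port is up when
 * NXE_1_SW_XG_STATE_PORT(r, port) & NXE_1_SW_XG_STATE_UP is set,
 * see nxe_link_state() below.
 */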
346 
347 #define NXE_1_SW_MPORT_MODE	0x002022c4
348 #define  NXE_1_SW_MPORT_MODE_SINGLE	0x1111
349 #define  NXE_1_SW_MPORT_MODE_MULTI	0x2222
350 
351 #define NXE_1_SW_INT_VECTOR	0x002022d4
352 
353 #define NXE_1_SW_NIC_CAP_HOST	0x002023a8 /* host capabilities */
354 #define NXE_1_SW_NIC_CAP_FW	0x002023dc /* firmware capabilities */
355 #define  NXE_1_SW_NIC_CAP_PORTINTR	0x1 /* per port interrupts */
356 #define NXE_1_SW_DRIVER_VER	0x002024a0 /* host driver version */
357 
358 
359 #define NXE_1_SW_TEMP		0x002023b4 /* Temperature sensor */
360 #define  NXE_1_SW_TEMP_STATE(_x)	((_x)&0xffff) /* Temp state */
361 #define  NXE_1_SW_TEMP_STATE_NONE	0x0000
362 #define  NXE_1_SW_TEMP_STATE_OK		0x0001
363 #define  NXE_1_SW_TEMP_STATE_WARN	0x0002
364 #define  NXE_1_SW_TEMP_STATE_CRIT	0x0003
365 #define  NXE_1_SW_TEMP_VAL(_x)		(((_x)>>16)&0xffff) /* Temp value */
366 
367 #define NXE_1_SW_V2P(_f)	(0x00202490+((_f)*4)) /* virtual to phys */
368 
369 /*
370  * ROMUSB Registers
371  */
372 #define NXE_1_ROMUSB_STATUS	0x01300004 /* ROM Status */
373 #define  NXE_1_ROMUSB_STATUS_DONE	(1<<1)
374 #define NXE_1_ROMUSB_SW_RESET	0x01300008
375 #define NXE_1_ROMUSB_SW_RESET_DEF	0xffffffff
376 #define NXE_1_ROMUSB_SW_RESET_BOOT	0x0080000f
377 
378 #define NXE_1_CASPER_RESET	0x01300038
379 #define  NXE_1_CASPER_RESET_ENABLE	0x1
380 #define  NXE_1_CASPER_RESET_DISABLE	0x1
381 
382 #define NXE_1_GLB_PEGTUNE	0x0130005c /* reset register */
383 #define  NXE_1_GLB_PEGTUNE_DONE		0x00000001
384 
385 #define NXE_1_GLB_CHIPCLKCTL	0x013000a8
386 #define NXE_1_GLB_CHIPCLKCTL_ON		0x00003fff
387 
388 /* ROM Registers */
389 #define NXE_1_ROM_CONTROL	0x01310000
390 #define NXE_1_ROM_OPCODE	0x01310004
391 #define  NXE_1_ROM_OPCODE_READ		0x0000000b
392 #define NXE_1_ROM_ADDR		0x01310008
393 #define NXE_1_ROM_WDATA		0x0131000c
394 #define NXE_1_ROM_ABYTE_CNT	0x01310010
395 #define NXE_1_ROM_DBYTE_CNT	0x01310014 /* dummy byte count */
396 #define NXE_1_ROM_RDATA		0x01310018
397 #define NXE_1_ROM_AGT_TAG	0x0131001c
398 #define NXE_1_ROM_TIME_PARM	0x01310020
399 #define NXE_1_ROM_CLK_DIV	0x01310024
400 #define NXE_1_ROM_MISS_INSTR	0x01310028
401 
402 /*
403  * flash memory layout
404  *
405  * These are offsets of memory accessible via the ROM Registers above
406  */
407 #define NXE_FLASH_CRBINIT	0x00000000 /* crb init section */
408 #define NXE_FLASH_BRDCFG	0x00004000 /* board config */
409 #define NXE_FLASH_INITCODE	0x00006000 /* pegtune code */
410 #define NXE_FLASH_BOOTLD	0x00010000 /* boot loader */
411 #define NXE_FLASH_IMAGE		0x00043000 /* compressed image */
412 #define NXE_FLASH_SECONDARY	0x00200000 /* backup image */
413 #define NXE_FLASH_PXE		0x003d0000 /* pxe image */
414 #define NXE_FLASH_USER		0x003e8000 /* user region for new boards */
415 #define NXE_FLASH_VPD		0x003e8c00 /* vendor private data */
416 #define NXE_FLASH_LICENSE	0x003e9000 /* firmware license */
417 #define NXE_FLASH_FIXED		0x003f0000 /* backup of crbinit */
418 
419 
420 /*
421  * misc hardware details
422  */
423 #define NXE_MAX_PORTS		4
424 #define NXE_MAX_PORT_LLADDRS	32
425 #define NXE_MAX_PKTLEN		(64 * 1024)
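/*
 * the flash user region stores NXE_MAX_PORT_LLADDRS lladdrs per port;
 * nxe_user_info() only uses the first entry for the interface address.
 */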
426 
427 
428 /*
429  * hardware structures
430  */
431 
432 struct nxe_info {
433 	u_int32_t		ni_hdrver;
434 #define NXE_INFO_HDRVER_1		0x00000001
435 
436 	u_int32_t		ni_board_mfg;
437 	u_int32_t		ni_board_type;
438 #define NXE_BRDTYPE_P1_BD		0x0000
439 #define NXE_BRDTYPE_P1_SB		0x0001
440 #define NXE_BRDTYPE_P1_SMAX		0x0002
441 #define NXE_BRDTYPE_P1_SOCK		0x0003
442 #define NXE_BRDTYPE_P2_SOCK_31		0x0008
443 #define NXE_BRDTYPE_P2_SOCK_35		0x0009
444 #define NXE_BRDTYPE_P2_SB35_4G		0x000a
445 #define NXE_BRDTYPE_P2_SB31_10G		0x000b
446 #define NXE_BRDTYPE_P2_SB31_2G		0x000c
447 #define NXE_BRDTYPE_P2_SB31_10G_IMEZ	0x000d
448 #define NXE_BRDTYPE_P2_SB31_10G_HMEZ	0x000e
449 #define NXE_BRDTYPE_P2_SB31_10G_CX4	0x000f
450 	u_int32_t		ni_board_num;
451 
452 	u_int32_t		ni_chip_id;
453 	u_int32_t		ni_chip_minor;
454 	u_int32_t		ni_chip_major;
455 	u_int32_t		ni_chip_pkg;
456 	u_int32_t		ni_chip_lot;
457 
458 	u_int32_t		ni_port_mask;
459 	u_int32_t		ni_peg_mask;
460 	u_int32_t		ni_icache;
461 	u_int32_t		ni_dcache;
462 	u_int32_t		ni_casper;
463 
464 	u_int32_t		ni_lladdr0_low;
465 	u_int32_t		ni_lladdr1_low;
466 	u_int32_t		ni_lladdr2_low;
467 	u_int32_t		ni_lladdr3_low;
468 
469 	u_int32_t		ni_mnsync_mode;
470 	u_int32_t		ni_mnsync_shift_cclk;
471 	u_int32_t		ni_mnsync_shift_mclk;
472 	u_int32_t		ni_mnwb_enable;
473 	u_int32_t		ni_mnfreq_crystal;
474 	u_int32_t		ni_mnfreq_speed;
475 	u_int32_t		ni_mnorg;
476 	u_int32_t		ni_mndepth;
477 	u_int32_t		ni_mnranks0;
478 	u_int32_t		ni_mnranks1;
479 	u_int32_t		ni_mnrd_latency0;
480 	u_int32_t		ni_mnrd_latency1;
481 	u_int32_t		ni_mnrd_latency2;
482 	u_int32_t		ni_mnrd_latency3;
483 	u_int32_t		ni_mnrd_latency4;
484 	u_int32_t		ni_mnrd_latency5;
485 	u_int32_t		ni_mnrd_latency6;
486 	u_int32_t		ni_mnrd_latency7;
487 	u_int32_t		ni_mnrd_latency8;
488 	u_int32_t		ni_mndll[18];
489 	u_int32_t		ni_mnddr_mode;
490 	u_int32_t		ni_mnddr_extmode;
491 	u_int32_t		ni_mntiming0;
492 	u_int32_t		ni_mntiming1;
493 	u_int32_t		ni_mntiming2;
494 
495 	u_int32_t		ni_snsync_mode;
496 	u_int32_t		ni_snpt_mode;
497 	u_int32_t		ni_snecc_enable;
498 	u_int32_t		ni_snwb_enable;
499 	u_int32_t		ni_snfreq_crystal;
500 	u_int32_t		ni_snfreq_speed;
501 	u_int32_t		ni_snorg;
502 	u_int32_t		ni_sndepth;
503 	u_int32_t		ni_sndll;
504 	u_int32_t		ni_snrd_latency;
505 
506 	u_int32_t		ni_lladdr0_high;
507 	u_int32_t		ni_lladdr1_high;
508 	u_int32_t		ni_lladdr2_high;
509 	u_int32_t		ni_lladdr3_high;
510 
511 	u_int32_t		ni_magic;
512 #define NXE_INFO_MAGIC			0x12345678
513 
514 	u_int32_t		ni_mnrd_imm;
515 	u_int32_t		ni_mndll_override;
516 } __packed;
517 
518 struct nxe_imageinfo {
519 	u_int32_t		nim_bootld_ver;
520 	u_int32_t		nim_bootld_size;
521 
522 	u_int8_t		nim_img_ver_major;
523 	u_int8_t		nim_img_ver_minor;
524 	u_int16_t		nim_img_ver_build;
525 
526 	u_int32_t		min_img_size;
527 } __packed;
528 
529 struct nxe_lladdr {
530 	u_int8_t		pad[2];
531 	u_int8_t		lladdr[6];
532 } __packed;
533 
534 struct nxe_userinfo {
535 	u_int8_t		nu_flash_md5[1024];
536 
537 	struct nxe_imageinfo	nu_imageinfo;
538 
539 	u_int32_t		nu_primary;
540 	u_int32_t		nu_secondary;
541 
542 	u_int64_t		nu_lladdr[NXE_MAX_PORTS][NXE_MAX_PORT_LLADDRS];
543 
544 	u_int32_t		nu_subsys_id;
545 
546 	u_int8_t		nu_serial[32];
547 
548 	u_int32_t		nu_bios_ver;
549 } __packed;
550 
551 /* hw structures actually used in the io path */
552 
553 struct nxe_ctx_ring {
554 	u_int64_t		r_addr;
555 	u_int32_t		r_size;
556 	u_int32_t		r_reserved;
557 };
558 
559 #define NXE_RING_RX		0
560 #define NXE_RING_RX_JUMBO	1
561 #define NXE_RING_RX_LRO		2
562 #define NXE_NRING		3
563 
564 struct nxe_ctx {
565 	u_int64_t		ctx_cmd_consumer_addr;
566 
567 	struct nxe_ctx_ring	ctx_cmd_ring;
568 
569 	struct nxe_ctx_ring	ctx_rx_rings[NXE_NRING];
570 
571 	u_int64_t		ctx_status_ring_addr;
572 	u_int32_t		ctx_status_ring_size;
573 
574 	u_int32_t		ctx_id;
575 } __packed;
576 
577 struct nxe_tx_desc {
578 	u_int8_t		tx_tcp_offset;
579 	u_int8_t		tx_ip_offset;
580 	u_int16_t		tx_flags;
581 #define NXE_TXD_F_OPCODE_TX		(0x01 << 7)
582 
583 	u_int8_t		tx_nbufs;
584 	u_int16_t		tx_length; /* XXX who makes a 24bit field? */
585 	u_int8_t		tx_length_hi;
586 
587 	u_int64_t		tx_addr_2;
588 
589 	u_int16_t		tx_id;
590 	u_int16_t		tx_mss;
591 
592 	u_int8_t		tx_port;
593 	u_int8_t		tx_tso_hdr_len;
594 	u_int16_t		tx_ipsec_id;
595 
596 	u_int64_t		tx_addr_3;
597 
598 	u_int64_t		tx_addr_1;
599 
600 	u_int16_t		tx_slen_1;
601 	u_int16_t		tx_slen_2;
602 	u_int16_t		tx_slen_3;
603 	u_int16_t		tx_slen_4;
604 
605 	u_int64_t		tx_addr_4;
606 
607 	u_int64_t		tx_reserved;
608 } __packed;
609 #define NXE_TXD_SEGS		4
610 #define NXE_TXD_DESCS		8
611 #define NXE_TXD_MAX_SEGS	(NXE_TXD_SEGS * NXE_TXD_DESCS)
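/*
 * a single tx packet may be spread over up to NXE_TXD_DESCS chained
 * descriptors of NXE_TXD_SEGS dma segments each, so tx dmamaps are
 * created with NXE_TXD_MAX_SEGS (32) segments.
 */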
612 
613 struct nxe_rx_desc {
614 	u_int16_t		rx_id;
615 	u_int16_t		rx_flags;
616 	u_int32_t		rx_len; /* packet length */
617 	u_int64_t		rx_addr;
618 } __packed;
619 #define NXE_RXD_MAX_SEGS		1
620 
621 struct nxe_status_desc {
622 	u_int8_t		st_lro;
623 	u_int8_t		st_owner;
624 	u_int16_t		st_id;
625 	u_int16_t		st_len;
626 	u_int16_t		st_flags;
627 } __packed;
628 
629 /*
630  * driver definitions
631  */
632 
633 struct nxe_board {
634 	u_int32_t		brd_type;
635 	u_int			brd_mode;
636 };
637 
638 struct nxe_dmamem {
639 	bus_dmamap_t		ndm_map;
640 	bus_dma_segment_t	ndm_seg;
641 	size_t			ndm_size;
642 	caddr_t			ndm_kva;
643 };
644 #define NXE_DMA_MAP(_ndm)	((_ndm)->ndm_map)
645 #define NXE_DMA_LEN(_ndm)	((_ndm)->ndm_size)
646 #define NXE_DMA_DVA(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_addr)
647 #define NXE_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva)
648 
649 struct nxe_pkt {
650 	int			pkt_id;
651 	bus_dmamap_t		pkt_dmap;
652 	struct mbuf		*pkt_m;
653 	TAILQ_ENTRY(nxe_pkt)	pkt_link;
654 };
655 
656 struct nxe_pkt_list {
657 	struct nxe_pkt		*npl_pkts;
658 	TAILQ_HEAD(, nxe_pkt)	npl_free;
659 	TAILQ_HEAD(, nxe_pkt)	npl_used;
660 };
661 
662 struct nxe_ring {
663 	struct nxe_dmamem	*nr_dmamem;
664 	u_int8_t		*nr_pos;
665 
666 	u_int			nr_slot;
667 	int			nr_ready;
668 
669 	size_t			nr_desclen;
670 	u_int			nr_nentries;
671 };
672 
673 /*
674  * autoconf glue
675  */
676 
677 struct nxe_softc {
678 	struct device		sc_dev;
679 
680 	bus_dma_tag_t		sc_dmat;
681 
682 	bus_space_tag_t		sc_memt;
683 	bus_space_handle_t	sc_memh;
684 	bus_size_t		sc_mems;
685 	bus_space_handle_t	sc_crbh;
686 	bus_space_tag_t		sc_dbt;
687 	bus_space_handle_t	sc_dbh;
688 	bus_size_t		sc_dbs;
689 
690 	void			*sc_ih;
691 
692 	int			sc_function;
693 	int			sc_port;
694 	int			sc_window;
695 
696 	const struct nxe_board	*sc_board;
697 	u_int			sc_fw_major;
698 	u_int			sc_fw_minor;
699 	u_int			sc_fw_build;
700 
701 	struct arpcom		sc_ac;
702 	struct ifmedia		sc_media;
703 
704 	struct nxe_pkt_list	*sc_tx_pkts;
705 	struct nxe_pkt_list	*sc_rx_pkts;
706 
707 	/* allocations for the hw */
708 	struct nxe_dmamem	*sc_dummy_dma;
709 	struct nxe_dmamem	*sc_dummy_rx;
710 
711 	struct nxe_dmamem	*sc_ctx;
712 	u_int32_t		*sc_cmd_consumer;
713 	u_int32_t		sc_cmd_consumer_cur;
714 
715 	struct nxe_ring		*sc_cmd_ring;
716 	struct nxe_ring		*sc_rx_rings[NXE_NRING];
717 	struct nxe_ring		*sc_status_ring;
718 
719 	/* monitoring */
720 	struct timeout		sc_tick;
721 	struct ksensor		sc_sensor;
722 	struct ksensordev	sc_sensor_dev;
723 
724 	/* ioctl lock */
725 	struct rwlock		sc_lock;
726 };
727 
728 int			nxe_match(struct device *, void *, void *);
729 void			nxe_attach(struct device *, struct device *, void *);
730 int			nxe_intr(void *);
731 
732 const struct cfattach nxe_ca = {
733 	sizeof(struct nxe_softc),
734 	nxe_match,
735 	nxe_attach
736 };
737 
738 struct cfdriver nxe_cd = {
739 	NULL,
740 	"nxe",
741 	DV_IFNET
742 };
743 
744 /* init code */
745 int			nxe_pci_map(struct nxe_softc *,
746 			    struct pci_attach_args *);
747 void			nxe_pci_unmap(struct nxe_softc *);
748 
749 int			nxe_board_info(struct nxe_softc *);
750 int			nxe_user_info(struct nxe_softc *);
751 int			nxe_init(struct nxe_softc *);
752 void			nxe_uninit(struct nxe_softc *);
753 void			nxe_mountroot(struct device *);
754 
755 /* chip state */
756 void			nxe_tick(void *);
757 void			nxe_link_state(struct nxe_softc *);
758 
759 /* interface operations */
760 int			nxe_ioctl(struct ifnet *, u_long, caddr_t);
761 void			nxe_start(struct ifnet *);
762 int			nxe_complete(struct nxe_softc *);
763 void			nxe_watchdog(struct ifnet *);
764 
765 void			nxe_rx_start(struct nxe_softc *);
766 
767 void			nxe_up(struct nxe_softc *);
768 void			nxe_lladdr(struct nxe_softc *);
769 void			nxe_iff(struct nxe_softc *);
770 void			nxe_down(struct nxe_softc *);
771 
772 int			nxe_up_fw(struct nxe_softc *);
773 
774 /* ifmedia operations */
775 int			nxe_media_change(struct ifnet *);
776 void			nxe_media_status(struct ifnet *, struct ifmediareq *);
777 
778 
779 /* ring handling */
780 struct nxe_ring		*nxe_ring_alloc(struct nxe_softc *, size_t, u_int);
781 void			nxe_ring_sync(struct nxe_softc *, struct nxe_ring *,
782 			    int);
783 void			nxe_ring_free(struct nxe_softc *, struct nxe_ring *);
784 int			nxe_ring_readable(struct nxe_ring *, int);
785 int			nxe_ring_writeable(struct nxe_ring *, int);
786 void			*nxe_ring_cur(struct nxe_softc *, struct nxe_ring *);
787 void			*nxe_ring_next(struct nxe_softc *, struct nxe_ring *);
788 
789 struct mbuf		*nxe_load_pkt(struct nxe_softc *, bus_dmamap_t,
790 			    struct mbuf *);
791 struct mbuf		*nxe_coalesce_m(struct mbuf *);
792 
793 /* pkts */
794 struct nxe_pkt_list	*nxe_pkt_alloc(struct nxe_softc *, u_int, int);
795 void			nxe_pkt_free(struct nxe_softc *,
796 			    struct nxe_pkt_list *);
797 void			nxe_pkt_put(struct nxe_pkt_list *, struct nxe_pkt *);
798 struct nxe_pkt		*nxe_pkt_get(struct nxe_pkt_list *);
799 struct nxe_pkt		*nxe_pkt_used(struct nxe_pkt_list *);
800 
801 
802 /* wrapper around dmaable memory allocations */
803 struct nxe_dmamem	*nxe_dmamem_alloc(struct nxe_softc *, bus_size_t,
804 			    bus_size_t);
805 void			nxe_dmamem_free(struct nxe_softc *,
806 			    struct nxe_dmamem *);
807 
808 /* low level hardware access goo */
809 u_int32_t		nxe_read(struct nxe_softc *, bus_size_t);
810 void			nxe_write(struct nxe_softc *, bus_size_t, u_int32_t);
811 int			nxe_wait(struct nxe_softc *, bus_size_t, u_int32_t,
812 			    u_int32_t, u_int);
813 
814 void			nxe_doorbell(struct nxe_softc *, u_int32_t);
815 
816 int			nxe_crb_set(struct nxe_softc *, int);
817 u_int32_t		nxe_crb_read(struct nxe_softc *, bus_size_t);
818 void			nxe_crb_write(struct nxe_softc *, bus_size_t,
819 			    u_int32_t);
820 int			nxe_crb_wait(struct nxe_softc *, bus_size_t,
821 			    u_int32_t, u_int32_t, u_int);
822 
823 int			nxe_rom_lock(struct nxe_softc *);
824 void			nxe_rom_unlock(struct nxe_softc *);
825 int			nxe_rom_read(struct nxe_softc *, u_int32_t,
826 			    u_int32_t *);
827 int			nxe_rom_read_region(struct nxe_softc *, u_int32_t,
828 			    void *, size_t);
829 
830 
831 /* misc bits */
832 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
833 
834 /* let's go! */
835 
836 const struct pci_matchid nxe_devices[] = {
837 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_10GXXR },
838 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_10GCX4 },
839 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_4GCU },
840 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_IMEZ },
841 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_HMEZ },
842 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_IMEZ_2 },
843 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_HMEZ_2 }
844 };
845 
846 const struct nxe_board nxe_boards[] = {
847 	{ NXE_BRDTYPE_P2_SB35_4G,	NXE_0_NIU_MODE_GBE },
848 	{ NXE_BRDTYPE_P2_SB31_10G,	NXE_0_NIU_MODE_XGE },
849 	{ NXE_BRDTYPE_P2_SB31_2G,	NXE_0_NIU_MODE_GBE },
850 	{ NXE_BRDTYPE_P2_SB31_10G_IMEZ,	NXE_0_NIU_MODE_XGE },
851 	{ NXE_BRDTYPE_P2_SB31_10G_HMEZ,	NXE_0_NIU_MODE_XGE },
852 	{ NXE_BRDTYPE_P2_SB31_10G_CX4,	NXE_0_NIU_MODE_XGE }
853 };
854 
855 int
856 nxe_match(struct device *parent, void *match, void *aux)
857 {
858 	struct pci_attach_args		*pa = aux;
859 
860 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_NETWORK)
861 		return (0);
862 
863 	return (pci_matchbyid(pa, nxe_devices, nitems(nxe_devices)));
864 }
865 
866 void
867 nxe_attach(struct device *parent, struct device *self, void *aux)
868 {
869 	struct nxe_softc		*sc = (struct nxe_softc *)self;
870 	struct pci_attach_args		*pa = aux;
871 	pci_intr_handle_t		ih;
872 	struct ifnet			*ifp;
873 
874 	sc->sc_dmat = pa->pa_dmat;
875 	sc->sc_function = pa->pa_function;
876 	sc->sc_window = -1;
877 
878 	rw_init(&sc->sc_lock, NULL);
879 
880 	if (nxe_pci_map(sc, pa) != 0) {
881 		/* error already printed by nxe_pci_map() */
882 		return;
883 	}
884 
885 	nxe_crb_set(sc, 1);
886 
887 	if (nxe_board_info(sc) != 0) {
888 		/* error already printed by nxe_board_info() */
889 		goto unmap;
890 	}
891 
892 	if (nxe_user_info(sc) != 0) {
893 		/* error already printed by nxe_user_info() */
894 		goto unmap;
895 	}
896 
897 	if (nxe_init(sc) != 0) {
898 		/* error already printed by nxe_init() */
899 		goto unmap;
900 	}
901 
902 	if (pci_intr_map(pa, &ih) != 0) {
903 		printf(": unable to map interrupt\n");
904 		goto uninit;
905 	}
906 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
907 	    nxe_intr, sc, DEVNAME(sc));
908 	if (sc->sc_ih == NULL) {
909 		printf(": unable to establish interrupt\n");
910 		goto uninit;
911 	}
912 
913 	ifp = &sc->sc_ac.ac_if;
914 	ifp->if_softc = sc;
915 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
916 	ifp->if_capabilities = IFCAP_VLAN_MTU;
917 	ifp->if_ioctl = nxe_ioctl;
918 	ifp->if_start = nxe_start;
919 	ifp->if_watchdog = nxe_watchdog;
920 	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN;
921 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
922 	ifq_init_maxlen(&ifp->if_snd, 512); /* XXX */
923 
924 	ifmedia_init(&sc->sc_media, 0, nxe_media_change, nxe_media_status);
925 	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
926 	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
927 
928 	if_attach(ifp);
929 	ether_ifattach(ifp);
930 
931 	printf(": %s firmware %d.%d.%d address %s\n",
932 	    pci_intr_string(pa->pa_pc, ih),
933 	    sc->sc_fw_major, sc->sc_fw_minor, sc->sc_fw_build,
934 	    ether_sprintf(sc->sc_ac.ac_enaddr));
935 	return;
936 
937 uninit:
938 	nxe_uninit(sc);
939 unmap:
940 	nxe_pci_unmap(sc);
941 }
942 
943 int
944 nxe_pci_map(struct nxe_softc *sc, struct pci_attach_args *pa)
945 {
946 	pcireg_t			memtype;
947 
948 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NXE_PCI_BAR_MEM);
949 	if (pci_mapreg_map(pa, NXE_PCI_BAR_MEM, memtype, 0, &sc->sc_memt,
950 	    &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
951 		printf(": unable to map host registers\n");
952 		return (1);
953 	}
954 	if (sc->sc_mems != NXE_PCI_BAR_MEM_128MB) {
955 		printf(": unexpected register map size\n");
956 		goto unmap_mem;
957 	}
958 
959 	/* set up the CRB window */
960 	if (bus_space_subregion(sc->sc_memt, sc->sc_memh, NXE_MAP_CRB,
961 	    sc->sc_mems - NXE_MAP_CRB, &sc->sc_crbh) != 0) {
962 		printf(": unable to create CRB window\n");
963 		goto unmap_mem;
964 	}
965 
966 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NXE_PCI_BAR_DOORBELL);
967 	if (pci_mapreg_map(pa, NXE_PCI_BAR_DOORBELL, memtype, 0, &sc->sc_dbt,
968 	    &sc->sc_dbh, NULL, &sc->sc_dbs, 0) != 0) {
969 		printf(": unable to map doorbell registers\n");
970 		/* bus_space(9) says I don't have to unmap subregions */
971 		goto unmap_mem;
972 	}
973 
974 	config_mountroot(&sc->sc_dev, nxe_mountroot);
975 	return (0);
976 
977 unmap_mem:
978 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
979 	sc->sc_mems = 0;
980 	return (1);
981 }
982 
983 void
984 nxe_pci_unmap(struct nxe_softc *sc)
985 {
986 	bus_space_unmap(sc->sc_dbt, sc->sc_dbh, sc->sc_dbs);
987 	sc->sc_dbs = 0;
988 	/* bus_space(9) says I don't have to unmap the CRB subregion */
989 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
990 	sc->sc_mems = 0;
991 }
992 
993 int
994 nxe_intr(void *xsc)
995 {
996 	struct nxe_softc		*sc = xsc;
997 	u_int32_t			vector;
998 
999 	DASSERT(sc->sc_window == 1);
1000 
1001 	vector = nxe_crb_read(sc, NXE_1_SW_INT_VECTOR);
1002 	if (!ISSET(vector, NXE_ISR_MINE(sc->sc_function)))
1003 		return (0);
1004 
1005 	nxe_crb_write(sc, NXE_1_SW_INT_VECTOR, 0x80 << sc->sc_function);
1006 
1007 	/* the interrupt is mine! we should do some work now */
1008 
1009 	return (1);
1010 }
1011 
1012 int
1013 nxe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
1014 {
1015 	struct nxe_softc		*sc = ifp->if_softc;
1016 	struct ifreq			*ifr = (struct ifreq *)addr;
1017 	int				s, error = 0;
1018 
1019 	rw_enter_write(&sc->sc_lock);
1020 	s = splnet();
1021 
1022 	timeout_del(&sc->sc_tick);
1023 
1024 	switch (cmd) {
1025 	case SIOCSIFADDR:
1026 		SET(ifp->if_flags, IFF_UP);
1027 		/* FALLTHROUGH */
1028 
1029 	case SIOCSIFFLAGS:
1030 		if (ISSET(ifp->if_flags, IFF_UP)) {
1031 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1032 				error = ENETRESET;
1033 			else
1034 				nxe_up(sc);
1035 		} else {
1036 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1037 				nxe_down(sc);
1038 		}
1039 		break;
1040 
1041 	case SIOCGIFMEDIA:
1042 	case SIOCSIFMEDIA:
1043 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1044 		break;
1045 
1046 	default:
1047 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
1048 	}
1049 
1050 	if (error == ENETRESET) {
1051 		if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1052 			nxe_crb_set(sc, 0);
1053 			nxe_iff(sc);
1054 			nxe_crb_set(sc, 1);
1055 		}
1056 		error = 0;
1057 	}
1058 
1059 	nxe_tick(sc);
1060 
1061 	splx(s);
1062 	rw_exit_write(&sc->sc_lock);
1063 	return (error);
1064 }
1065 
1066 void
1067 nxe_up(struct nxe_softc *sc)
1068 {
1069 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1070 	static const u_int		rx_ring_sizes[] = { 16384, 1024, 128 };
1071 	struct {
1072 		struct nxe_ctx			ctx;
1073 		u_int32_t			cmd_consumer;
1074 	} __packed			*dmamem;
1075 	struct nxe_ctx			*ctx;
1076 	struct nxe_ctx_ring		*ring;
1077 	struct nxe_ring			*nr;
1078 	u_int64_t			dva;
1079 	u_int32_t			intr_scheme;
1080 	int				i;
1081 
1082 	if (nxe_up_fw(sc) != 0)
1083 		return;
1084 
1085 	/* allocate pkt lists */
1086 	sc->sc_tx_pkts = nxe_pkt_alloc(sc, 128, NXE_TXD_MAX_SEGS);
1087 	if (sc->sc_tx_pkts == NULL)
1088 		return;
1089 	sc->sc_rx_pkts = nxe_pkt_alloc(sc, 128, NXE_RXD_MAX_SEGS);
1090 	if (sc->sc_rx_pkts == NULL)
1091 		goto free_tx_pkts;
1092 
1093 	/* allocate the context memory and the consumer field */
1094 	sc->sc_ctx = nxe_dmamem_alloc(sc, sizeof(*dmamem), PAGE_SIZE);
1095 	if (sc->sc_ctx == NULL)
1096 		goto free_rx_pkts;
1097 
1098 	dmamem = NXE_DMA_KVA(sc->sc_ctx);
1099 	dva = NXE_DMA_DVA(sc->sc_ctx);
1100 
1101 	ctx = &dmamem->ctx;
1102 	ctx->ctx_cmd_consumer_addr = htole64(dva + sizeof(dmamem->ctx));
1103 	ctx->ctx_id = htole32(sc->sc_function);
1104 
1105 	sc->sc_cmd_consumer = &dmamem->cmd_consumer;
1106 	sc->sc_cmd_consumer_cur = 0;
1107 
1108 	/* allocate the cmd/tx ring */
1109 	sc->sc_cmd_ring = nxe_ring_alloc(sc,
1110 	    sizeof(struct nxe_tx_desc), 1024 /* XXX */);
1111 	if (sc->sc_cmd_ring == NULL)
1112 		goto free_ctx;
1113 
1114 	ctx->ctx_cmd_ring.r_addr =
1115 	    htole64(NXE_DMA_DVA(sc->sc_cmd_ring->nr_dmamem));
1116 	ctx->ctx_cmd_ring.r_size = htole32(sc->sc_cmd_ring->nr_nentries);
1117 
1118 	/* allocate the status ring */
1119 	sc->sc_status_ring = nxe_ring_alloc(sc,
1120 	    sizeof(struct nxe_status_desc), 16384 /* XXX */);
1121 	if (sc->sc_status_ring == NULL)
1122 		goto free_cmd_ring;
1123 
1124 	ctx->ctx_status_ring_addr =
1125 	    htole64(NXE_DMA_DVA(sc->sc_status_ring->nr_dmamem));
1126 	ctx->ctx_status_ring_size = htole32(sc->sc_status_ring->nr_nentries);
1127 
1128 	/* allocate something to point the jumbo and lro rings at */
1129 	sc->sc_dummy_rx = nxe_dmamem_alloc(sc, NXE_MAX_PKTLEN, PAGE_SIZE);
1130 	if (sc->sc_dummy_rx == NULL)
1131 		goto free_status_ring;
1132 
1133 	/* allocate the rx rings */
1134 	for (i = 0; i < NXE_NRING; i++) {
1135 		ring = &ctx->ctx_rx_rings[i];
1136 		nr = nxe_ring_alloc(sc, sizeof(struct nxe_rx_desc),
1137 		    rx_ring_sizes[i]);
1138 		if (nr == NULL)
1139 			goto free_rx_rings;
1140 
1141 		ring->r_addr = htole64(NXE_DMA_DVA(nr->nr_dmamem));
1142 		ring->r_size = htole32(nr->nr_nentries);
1143 
1144 		sc->sc_rx_rings[i] = nr;
1145 		nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_PREWRITE);
1146 	}
1147 
1148 	/* nothing can possibly go wrong now */
1149 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_rx),
1150 	    0, NXE_DMA_LEN(sc->sc_dummy_rx), BUS_DMASYNC_PREREAD);
1151 	nxe_ring_sync(sc, sc->sc_status_ring, BUS_DMASYNC_PREREAD);
1152 	nxe_ring_sync(sc, sc->sc_cmd_ring, BUS_DMASYNC_PREWRITE);
1153 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1154 	    0, NXE_DMA_LEN(sc->sc_ctx),
1155 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1156 
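	/*
	 * hand the context block to the firmware: write its dma address and
	 * then post the per-port signature, which presumably triggers the
	 * firmware to go and read it.
	 */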
1157 	nxe_crb_write(sc, NXE_1_SW_CONTEXT_ADDR_LO(sc->sc_function),
1158 	    (u_int32_t)dva);
1159 	nxe_crb_write(sc, NXE_1_SW_CONTEXT_ADDR_HI(sc->sc_function),
1160 	    (u_int32_t)(dva >> 32));
1161 	nxe_crb_write(sc, NXE_1_SW_CONTEXT(sc->sc_port),
1162 	    NXE_1_SW_CONTEXT_SIG(sc->sc_port));
1163 
1164 	nxe_crb_set(sc, 0);
1165 	nxe_crb_write(sc, NXE_0_XG_MTU(sc->sc_function),
1166 	    MCLBYTES - ETHER_ALIGN);
1167 	nxe_lladdr(sc);
1168 	nxe_iff(sc);
1169 	nxe_crb_set(sc, 1);
1170 
1171 	SET(ifp->if_flags, IFF_RUNNING);
1172 	ifq_clr_oactive(&ifp->if_snd);
1173 
1174 	/* enable interrupts */
1175 	intr_scheme = nxe_crb_read(sc, NXE_1_SW_NIC_CAP_FW);
1176 	if (intr_scheme != NXE_1_SW_NIC_CAP_PORTINTR)
1177 		nxe_write(sc, NXE_ISR_MASK, 0x77f);
1178 	nxe_crb_write(sc, NXE_1_SW_INT_MASK(sc->sc_function), 0x1);
1179 	if (intr_scheme != NXE_1_SW_NIC_CAP_PORTINTR)
1180 		nxe_crb_write(sc, NXE_1_SW_INT_VECTOR, 0x0);
1181 	nxe_write(sc, NXE_ISR_TARGET_MASK, 0xbff);
1182 
1183 	return;
1184 
1185 free_rx_rings:
1186 	while (i > 0) {
1187 		i--;
1188 		nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_POSTWRITE);
1189 		nxe_ring_free(sc, sc->sc_rx_rings[i]);
1190 	}
1191 
1192 	nxe_dmamem_free(sc, sc->sc_dummy_rx);
1193 free_status_ring:
1194 	nxe_ring_free(sc, sc->sc_status_ring);
1195 free_cmd_ring:
1196 	nxe_ring_free(sc, sc->sc_cmd_ring);
1197 free_ctx:
1198 	nxe_dmamem_free(sc, sc->sc_ctx);
1199 free_rx_pkts:
1200 	nxe_pkt_free(sc, sc->sc_rx_pkts);
1201 free_tx_pkts:
1202 	nxe_pkt_free(sc, sc->sc_tx_pkts);
1203 }
1204 
1205 int
1206 nxe_up_fw(struct nxe_softc *sc)
1207 {
1208 	u_int32_t			r;
1209 
1210 	r = nxe_crb_read(sc, NXE_1_SW_CMDPEG_STATE);
1211 	if (r == NXE_1_SW_CMDPEG_STATE_ACK)
1212 		return (0);
1213 
1214 	if (r != NXE_1_SW_CMDPEG_STATE_DONE)
1215 		return (1);
1216 
1217 	nxe_crb_write(sc, NXE_1_SW_NIC_CAP_HOST, NXE_1_SW_NIC_CAP_PORTINTR);
1218 	nxe_crb_write(sc, NXE_1_SW_MPORT_MODE, NXE_1_SW_MPORT_MODE_MULTI);
1219 	nxe_crb_write(sc, NXE_1_SW_CMDPEG_STATE, NXE_1_SW_CMDPEG_STATE_ACK);
1220 
1221 	/* XXX busy wait in a process context is naughty */
1222 	if (!nxe_crb_wait(sc, NXE_1_SW_STATUS_STATE(sc->sc_function),
1223 	    0xffffffff, NXE_1_SW_STATUS_STATE_READY, 1000))
1224 		return (1);
1225 
1226 	return (0);
1227 }
1228 
1229 void
1230 nxe_lladdr(struct nxe_softc *sc)
1231 {
1232 	u_int8_t			*lladdr = sc->sc_ac.ac_enaddr;
1233 
1234 	DASSERT(sc->sc_window == 0);
1235 
1236 	nxe_crb_write(sc, NXE_0_XG_MAC_LO(sc->sc_port),
1237 	    (lladdr[0] << 16) | (lladdr[1] << 24));
1238 	nxe_crb_write(sc, NXE_0_XG_MAC_HI(sc->sc_port),
1239 	    (lladdr[2] << 0)  | (lladdr[3] << 8) |
1240 	    (lladdr[4] << 16) | (lladdr[5] << 24));
1241 }
1242 
1243 void
1244 nxe_iff(struct nxe_softc *sc)
1245 {
1246 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1247 	u_int32_t			cfg1 = 0x1447; /* XXX */
1248 
1249 	DASSERT(sc->sc_window == 0);
1250 
1251 	CLR(ifp->if_flags, IFF_ALLMULTI);
1252 
1253 	if (ISSET(ifp->if_flags, IFF_PROMISC) || sc->sc_ac.ac_multicnt > 0) {
1254 		SET(ifp->if_flags, IFF_ALLMULTI);
1255 		if (ISSET(ifp->if_flags, IFF_PROMISC))
1256 			cfg1 |= NXE_0_XG_CFG1_PROMISC;
1257 		else
1258 			cfg1 |= NXE_0_XG_CFG1_MULTICAST;
1259 	}
1260 
1261 	nxe_crb_write(sc, NXE_0_XG_CFG0(sc->sc_port),
1262 	    NXE_0_XG_CFG0_TX_EN | NXE_0_XG_CFG0_RX_EN);
1263 	nxe_crb_write(sc, NXE_0_XG_CFG1(sc->sc_port), cfg1);
1264 }
1265 
1266 void
1267 nxe_down(struct nxe_softc *sc)
1268 {
1269 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1270 	int				i;
1271 
1272 	CLR(ifp->if_flags, IFF_RUNNING | IFF_ALLMULTI);
1273 	ifq_clr_oactive(&ifp->if_snd);
1274 
1275 	/* XXX turn the chip off */
1276 
1277 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1278 	    0, NXE_DMA_LEN(sc->sc_ctx),
1279 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1280 	nxe_ring_sync(sc, sc->sc_cmd_ring, BUS_DMASYNC_POSTWRITE);
1281 	nxe_ring_sync(sc, sc->sc_status_ring, BUS_DMASYNC_POSTREAD);
1282 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_rx),
1283 	    0, NXE_DMA_LEN(sc->sc_dummy_rx), BUS_DMASYNC_POSTREAD);
1284 
1285 	for (i = 0; i < NXE_NRING; i++) {
1286 		nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_POSTWRITE);
1287 		nxe_ring_free(sc, sc->sc_rx_rings[i]);
1288 	}
1289 	nxe_dmamem_free(sc, sc->sc_dummy_rx);
1290 	nxe_ring_free(sc, sc->sc_status_ring);
1291 	nxe_ring_free(sc, sc->sc_cmd_ring);
1292 	nxe_dmamem_free(sc, sc->sc_ctx);
1293 	nxe_pkt_free(sc, sc->sc_rx_pkts);
1294 	nxe_pkt_free(sc, sc->sc_tx_pkts);
1295 }
1296 
1297 void
1298 nxe_start(struct ifnet *ifp)
1299 {
1300 	struct nxe_softc		*sc = ifp->if_softc;
1301 	struct nxe_ring			*nr = sc->sc_cmd_ring;
1302 	struct nxe_tx_desc		*txd;
1303 	struct nxe_pkt			*pkt;
1304 	struct mbuf			*m;
1305 	bus_dmamap_t			dmap;
1306 	bus_dma_segment_t		*segs;
1307 	int				nsegs;
1308 
1309 	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1310 	    ifq_is_oactive(&ifp->if_snd) ||
1311 	    ifq_empty(&ifp->if_snd))
1312 		return;
1313 
1314 	if (nxe_ring_writeable(nr, sc->sc_cmd_consumer_cur) < NXE_TXD_DESCS) {
1315 		ifq_set_oactive(&ifp->if_snd);
1316 		return;
1317 	}
1318 
1319 	nxe_ring_sync(sc, nr, BUS_DMASYNC_POSTWRITE);
1320 	txd = nxe_ring_cur(sc, nr);
1321 	bzero(txd, sizeof(struct nxe_tx_desc));
1322 
1323 	do {
1324 		m = ifq_deq_begin(&ifp->if_snd);
1325 		if (m == NULL)
1326 			break;
1327 
1328 		pkt = nxe_pkt_get(sc->sc_tx_pkts);
1329 		if (pkt == NULL) {
1330 			ifq_deq_rollback(&ifp->if_snd, m);
1331 			ifq_set_oactive(&ifp->if_snd);
1332 			break;
1333 		}
1334 
1335 		ifq_deq_commit(&ifp->if_snd, m);
1336 
1337 		dmap = pkt->pkt_dmap;
1338 		m = nxe_load_pkt(sc, dmap, m);
1339 		if (m == NULL) {
1340 			nxe_pkt_put(sc->sc_tx_pkts, pkt);
1341 			ifp->if_oerrors++;
1342 			break;
1343 		}
1344 
1345 #if NBPFILTER > 0
1346 		if (ifp->if_bpf)
1347 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1348 #endif
1349 
1350 		pkt->pkt_m = m;
1351 
1352 		txd->tx_flags = htole16(NXE_TXD_F_OPCODE_TX);
1353 		txd->tx_nbufs = dmap->dm_nsegs;
1354 		txd->tx_length = htole16(dmap->dm_mapsize);
1355 		txd->tx_port = sc->sc_port;
1356 
1357 		segs = dmap->dm_segs;
1358 		nsegs = dmap->dm_nsegs;
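		/*
		 * each tx descriptor carries up to NXE_TXD_SEGS dma segments;
		 * the switch below deliberately falls through from case 4
		 * down to case 1 to fill however many slots are in use.
		 */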
1359 		do {
1360 			switch ((nsegs > NXE_TXD_SEGS) ?
1361 			    NXE_TXD_SEGS : nsegs) {
1362 			case 4:
1363 				txd->tx_addr_4 = htole64(segs[3].ds_addr);
1364 				txd->tx_slen_4 = htole16(segs[3].ds_len);
1365 			case 3:
1366 				txd->tx_addr_3 = htole64(segs[2].ds_addr);
1367 				txd->tx_slen_3 = htole16(segs[2].ds_len);
1368 			case 2:
1369 				txd->tx_addr_2 = htole64(segs[1].ds_addr);
1370 				txd->tx_slen_2 = htole16(segs[1].ds_len);
1371 			case 1:
1372 				txd->tx_addr_1 = htole64(segs[0].ds_addr);
1373 				txd->tx_slen_1 = htole16(segs[0].ds_len);
1374 				break;
1375 			default:
1376 				panic("%s: unexpected segments in tx map",
1377 				    DEVNAME(sc));
1378 			}
1379 
1380 			nsegs -= NXE_TXD_SEGS;
1381 			segs += NXE_TXD_SEGS;
1382 
1383 			pkt->pkt_id = nr->nr_slot;
1384 
1385 			txd = nxe_ring_next(sc, nr);
1386 			bzero(txd, sizeof(struct nxe_tx_desc));
1387 		} while (nsegs > 0);
1388 
1389 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1390 		    BUS_DMASYNC_PREWRITE);
1391 
1392 	} while (nr->nr_ready >= NXE_TXD_DESCS);
1393 
1394 	nxe_ring_sync(sc, nr, BUS_DMASYNC_PREWRITE);
1395 	nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), nr->nr_slot);
1396 }
1397 
1398 int
1399 nxe_complete(struct nxe_softc *sc)
1400 {
1401 	struct nxe_pkt			*pkt;
1402 	int				new_cons, cur_cons;
1403 	int				rv = 0;
1404 
1405 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1406 	    0, NXE_DMA_LEN(sc->sc_ctx),
1407 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1408 	new_cons = letoh32(*sc->sc_cmd_consumer);
1409 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1410 	    0, NXE_DMA_LEN(sc->sc_ctx),
1411 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1412 
1413 	cur_cons = sc->sc_cmd_consumer_cur;
1414 	pkt = nxe_pkt_used(sc->sc_tx_pkts);
1415 
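	/*
	 * walk from our cached consumer index up to the one the chip
	 * reported, unloading and freeing each tx packet that completed.
	 */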
1416 	while (pkt != NULL && cur_cons != new_cons) {
1417 		if (pkt->pkt_id == cur_cons) {
1418 			bus_dmamap_sync(sc->sc_dmat, pkt->pkt_dmap,
1419 			    0, pkt->pkt_dmap->dm_mapsize,
1420 			    BUS_DMASYNC_POSTWRITE);
1421 			bus_dmamap_unload(sc->sc_dmat, pkt->pkt_dmap);
1422 
1423 			m_freem(pkt->pkt_m);
1424 
1425 			nxe_pkt_put(sc->sc_tx_pkts, pkt);
1426 
1427 			pkt = nxe_pkt_used(sc->sc_tx_pkts);
1428 		}
1429 
1430 		cur_cons++;
1431 		cur_cons %= sc->sc_cmd_ring->nr_nentries;
1432 
1433 		rv = 1;
1434 	}
1435 
1436 	if (rv == 1) {
1437 		sc->sc_cmd_consumer_cur = cur_cons;
1438 		ifq_clr_oactive(&sc->sc_ac.ac_if.if_snd);
1439 	}
1440 
1441 	return (rv);
1442 }
1443 
1444 struct mbuf *
1445 nxe_coalesce_m(struct mbuf *m)
1446 {
1447 	struct mbuf			*m0;
1448 
1449 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
1450 	if (m0 == NULL)
1451 		goto err;
1452 
1453 	if (m->m_pkthdr.len > MHLEN) {
1454 		MCLGET(m0, M_DONTWAIT);
1455 		if (!(m0->m_flags & M_EXT)) {
1456 			m_freem(m0);
1457 			m0 = NULL;
1458 			goto err;
1459 		}
1460 	}
1461 
1462 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
1463 	m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
1464 
1465 err:
1466 	m_freem(m);
1467 	return (m0);
1468 }
1469 
1470 struct mbuf *
1471 nxe_load_pkt(struct nxe_softc *sc, bus_dmamap_t dmap, struct mbuf *m)
1472 {
1473 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT)) {
1474 	case 0:
1475 		break;
1476 
1477 	case EFBIG:
1478 		m = nxe_coalesce_m(m);
1479 		if (m == NULL)
1480 			break;
1481 
1482 		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
1483 		    BUS_DMA_NOWAIT) == 0)
1484 			break;
1485 
1486 		/* we get here on error */
1487 		/* FALLTHROUGH */
1488 	default:
1489 		m_freem(m);
1490 		m = NULL;
1491 		break;
1492 	}
1493 
1494 	return (m);
1495 }
1496 
1497 void
1498 nxe_rx_start(struct nxe_softc *sc)
1499 {
1500 	struct nxe_ring			*nr = sc->sc_rx_rings[NXE_RING_RX];
1501 	struct nxe_rx_desc		*rxd;
1502 	struct nxe_pkt			*pkt;
1503 	struct mbuf			*m;
1504 
1505 	if (nxe_ring_writeable(nr, 0) == 0)
1506 		return;
1507 
1508 	nxe_ring_sync(sc, nr, BUS_DMASYNC_POSTWRITE);
1509 	rxd = nxe_ring_cur(sc, nr);
1510 
1511 	for (;;) {
1512 		pkt = nxe_pkt_get(sc->sc_rx_pkts);
1513 		if (pkt == NULL)
1514 			goto done;
1515 
1516 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1517 		if (m == NULL)
1518 			goto put_pkt;
1519 
1520 		MCLGET(m, M_DONTWAIT);
1521 		if (!ISSET(m->m_flags, M_EXT))
1522 			goto free_m;
1523 
1524 		m->m_data += ETHER_ALIGN;
1525 		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
1526 
1527 		if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->pkt_dmap, m,
1528 		    BUS_DMA_NOWAIT) != 0)
1529 			goto free_m;
1530 
1531 		pkt->pkt_m = m;
1532 
1533 		bzero(rxd, sizeof(struct nxe_rx_desc));
1534 		rxd->rx_len = htole32(m->m_len);
1535 		rxd->rx_id = pkt->pkt_id;
1536 		rxd->rx_addr = htole64(pkt->pkt_dmap->dm_segs[0].ds_addr);
1537 
1538 		bus_dmamap_sync(sc->sc_dmat, pkt->pkt_dmap, 0,
1539 		    pkt->pkt_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
1540 
1541 		rxd = nxe_ring_next(sc, nr);
1542 
1543 		if (nr->nr_ready == 0)
1544 			goto done;
1545 	}
1546 
1547 free_m:
1548 	m_freem(m);
1549 put_pkt:
1550 	nxe_pkt_put(sc->sc_rx_pkts, pkt);
1551 done:
1552 	nxe_ring_sync(sc, nr, BUS_DMASYNC_PREWRITE);
1553 	nxe_crb_write(sc, NXE_1_SW_RX_PRODUCER(sc->sc_function), nr->nr_slot);
1554 	nxe_doorbell(sc, NXE_DB_PEGID_RX | NXE_DB_PRIVID |
1555 	    NXE_DB_OPCODE_RX_PROD |
1556 	    NXE_DB_COUNT(nr->nr_slot) | NXE_DB_CTXID(sc->sc_function));
1557 }
1558 
1559 void
1560 nxe_watchdog(struct ifnet *ifp)
1561 {
1562 	/* do nothing */
1563 }
1564 
1565 int
1566 nxe_media_change(struct ifnet *ifp)
1567 {
1568 	/* ignore for now */
1569 	return (0);
1570 }
1571 
1572 void
1573 nxe_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1574 {
1575 	struct nxe_softc		*sc = ifp->if_softc;
1576 
1577 	imr->ifm_active = IFM_ETHER | IFM_AUTO;
1578 	imr->ifm_status = IFM_AVALID;
1579 
1580 	nxe_link_state(sc);
1581 	if (LINK_STATE_IS_UP(ifp->if_link_state))
1582 		imr->ifm_status |= IFM_ACTIVE;
1583 }
1584 
1585 void
1586 nxe_link_state(struct nxe_softc *sc)
1587 {
1588 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1589 	int				link_state = LINK_STATE_DOWN;
1590 	u_int32_t			r;
1591 
1592 	DASSERT(sc->sc_window == 1);
1593 
1594 	r = nxe_crb_read(sc, NXE_1_SW_XG_STATE);
1595 	if (NXE_1_SW_XG_STATE_PORT(r, sc->sc_function) & NXE_1_SW_XG_STATE_UP)
1596 		link_state = LINK_STATE_UP;
1597 
1598 	if (ifp->if_link_state != link_state) {
1599 		ifp->if_link_state = link_state;
1600 		if_link_state_change(ifp);
1601 	}
1602 }
1603 
1604 int
1605 nxe_board_info(struct nxe_softc *sc)
1606 {
1607 	struct nxe_info			*ni;
1608 	int				rv = 1;
1609 	int				i;
1610 
1611 	ni = malloc(sizeof(struct nxe_info), M_TEMP, M_NOWAIT);
1612 	if (ni == NULL) {
1613 		printf(": unable to allocate temporary memory\n");
1614 		return (1);
1615 	}
1616 
1617 	if (nxe_rom_read_region(sc, NXE_FLASH_BRDCFG, ni,
1618 	    sizeof(struct nxe_info)) != 0) {
1619 		printf(": unable to read board info\n");
1620 		goto out;
1621 	}
1622 
1623 	if (ni->ni_hdrver != NXE_INFO_HDRVER_1) {
1624 		printf(": unexpected board info header version 0x%08x\n",
1625 		    ni->ni_hdrver);
1626 		goto out;
1627 	}
1628 	if (ni->ni_magic != NXE_INFO_MAGIC) {
1629 		printf(": board info magic is invalid\n");
1630 		goto out;
1631 	}
1632 
1633 	for (i = 0; i < nitems(nxe_boards); i++) {
1634 		if (ni->ni_board_type == nxe_boards[i].brd_type) {
1635 			sc->sc_board = &nxe_boards[i];
1636 			break;
1637 		}
1638 	}
1639 	if (sc->sc_board == NULL) {
1640 		printf(": unknown board type %04x\n", ni->ni_board_type);
1641 		goto out;
1642 	}
1643 
1644 	rv = 0;
1645 out:
1646 	free(ni, M_TEMP, 0);
1647 	return (rv);
1648 }
1649 
1650 int
1651 nxe_user_info(struct nxe_softc *sc)
1652 {
1653 	struct nxe_userinfo		*nu;
1654 	u_int64_t			lladdr;
1655 	struct nxe_lladdr		*la;
1656 	int				rv = 1;
1657 
1658 	nu = malloc(sizeof(struct nxe_userinfo), M_TEMP, M_NOWAIT);
1659 	if (nu == NULL) {
1660 		printf(": unable to allocate temp memory\n");
1661 		return (1);
1662 	}
1663 	if (nxe_rom_read_region(sc, NXE_FLASH_USER, nu,
1664 	    sizeof(struct nxe_userinfo)) != 0) {
1665 		printf(": unable to read user info\n");
1666 		goto out;
1667 	}
1668 
1669 	sc->sc_fw_major = nu->nu_imageinfo.nim_img_ver_major;
1670 	sc->sc_fw_minor = nu->nu_imageinfo.nim_img_ver_minor;
1671 	sc->sc_fw_build = letoh16(nu->nu_imageinfo.nim_img_ver_build);
1672 
1673 	if (sc->sc_fw_major != NXE_VERSION_MAJOR ||
1674 	    sc->sc_fw_minor != NXE_VERSION_MINOR) {
1677 		printf(": firmware %d.%d.%d is unsupported by this driver\n",
1678 		    sc->sc_fw_major, sc->sc_fw_minor, sc->sc_fw_build);
1679 		goto out;
1680 	}
1681 
1682 	lladdr = swap64(nu->nu_lladdr[sc->sc_function][0]);
1683 	la = (struct nxe_lladdr *)&lladdr;
1684 	bcopy(la->lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1685 
1686 	rv = 0;
1687 out:
1688 	free(nu, M_TEMP, 0);
1689 	return (rv);
1690 }
1691 
1692 int
1693 nxe_init(struct nxe_softc *sc)
1694 {
1695 	u_int64_t			dva;
1696 	u_int32_t			r;
1697 
1698 	/* stop the chip from processing */
1699 	nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), 0);
1700 	nxe_crb_write(sc, NXE_1_SW_CMD_CONSUMER(sc->sc_function), 0);
1701 	nxe_crb_write(sc, NXE_1_SW_CMD_ADDR_HI, 0);
1702 	nxe_crb_write(sc, NXE_1_SW_CMD_ADDR_LO, 0);
1703 
1704 	/*
1705 	 * if this is the first port on the device it needs some special
1706 	 * treatment to get things going.
1707 	 */
1708 	if (sc->sc_function == 0) {
1709 		/* init adapter offload */
1710 		sc->sc_dummy_dma = nxe_dmamem_alloc(sc,
1711 		    NXE_1_SW_DUMMY_ADDR_LEN, PAGE_SIZE);
1712 		if (sc->sc_dummy_dma == NULL) {
1713 			printf(": unable to allocate dummy memory\n");
1714 			return (1);
1715 		}
1716 
1717 		bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1718 		    0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_PREREAD);
1719 
1720 		dva = NXE_DMA_DVA(sc->sc_dummy_dma);
1721 		nxe_crb_write(sc, NXE_1_SW_DUMMY_ADDR_HI, dva >> 32);
1722 		nxe_crb_write(sc, NXE_1_SW_DUMMY_ADDR_LO, dva);
1723 
1724 		r = nxe_crb_read(sc, NXE_1_SW_BOOTLD_CONFIG);
1725 		if (r == 0x55555555) {
1726 			r = nxe_crb_read(sc, NXE_1_ROMUSB_SW_RESET);
1727 			if (r != NXE_1_ROMUSB_SW_RESET_BOOT) {
1728 				printf(": unexpected boot state\n");
1729 				goto err;
1730 			}
1731 
1732 			/* clear */
1733 			nxe_crb_write(sc, NXE_1_SW_BOOTLD_CONFIG, 0);
1734 		}
1735 
1736 		/* start the device up */
1737 		nxe_crb_write(sc, NXE_1_SW_DRIVER_VER, NXE_VERSION);
1738 		nxe_crb_write(sc, NXE_1_GLB_PEGTUNE, NXE_1_GLB_PEGTUNE_DONE);
1739 
1740 		/*
1741 		 * the firmware takes a long time to boot, so we'll check
1742 		 * it later on, and again when we want to bring a port up.
1743 		 */
1744 	}
1745 
1746 	return (0);
1747 
1748 err:
1749 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1750 	    0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_POSTREAD);
1751 	nxe_dmamem_free(sc, sc->sc_dummy_dma);
1752 	return (1);
1753 }
1754 
1755 void
1756 nxe_uninit(struct nxe_softc *sc)
1757 {
1758 	if (sc->sc_function == 0) {
1759 		bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1760 		    0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_POSTREAD);
1761 		nxe_dmamem_free(sc, sc->sc_dummy_dma);
1762 	}
1763 }
1764 
1765 void
1766 nxe_mountroot(struct device *self)
1767 {
1768 	struct nxe_softc		*sc = (struct nxe_softc *)self;
1769 
1770 	DASSERT(sc->sc_window == 1);
1771 
1772 	if (!nxe_crb_wait(sc, NXE_1_SW_CMDPEG_STATE, 0xffffffff,
1773 	    NXE_1_SW_CMDPEG_STATE_DONE, 10000)) {
1774 		printf("%s: firmware bootstrap failed, code 0x%08x\n",
1775 		    DEVNAME(sc), nxe_crb_read(sc, NXE_1_SW_CMDPEG_STATE));
1776 		return;
1777 	}
1778 
1779 	sc->sc_port = nxe_crb_read(sc, NXE_1_SW_V2P(sc->sc_function));
1780 	if (sc->sc_port == 0x55555555)
1781 		sc->sc_port = sc->sc_function;
1782 
1783 	nxe_crb_write(sc, NXE_1_SW_NIC_CAP_HOST, NXE_1_SW_NIC_CAP_PORTINTR);
1784 	nxe_crb_write(sc, NXE_1_SW_MPORT_MODE, NXE_1_SW_MPORT_MODE_MULTI);
1785 	nxe_crb_write(sc, NXE_1_SW_CMDPEG_STATE, NXE_1_SW_CMDPEG_STATE_ACK);
1786 
1787 	sc->sc_sensor.type = SENSOR_TEMP;
1788 	strlcpy(sc->sc_sensor_dev.xname, DEVNAME(sc),
1789 	    sizeof(sc->sc_sensor_dev.xname));
1790 	sensor_attach(&sc->sc_sensor_dev, &sc->sc_sensor);
1791 	sensordev_install(&sc->sc_sensor_dev);
1792 
1793 	timeout_set(&sc->sc_tick, nxe_tick, sc);
1794 	nxe_tick(sc);
1795 }
1796 
1797 void
1798 nxe_tick(void *xsc)
1799 {
1800 	struct nxe_softc		*sc = xsc;
1801 	u_int32_t			temp;
1802 	int				window;
1803 	int				s;
1804 
1805 	s = splnet();
1806 	window = nxe_crb_set(sc, 1);
1807 	temp = nxe_crb_read(sc, NXE_1_SW_TEMP);
1808 	nxe_link_state(sc);
1809 	nxe_crb_set(sc, window);
1810 	splx(s);
1811 
1812 	sc->sc_sensor.value = NXE_1_SW_TEMP_VAL(temp) * 1000000 + 273150000;
1813 	sc->sc_sensor.flags = 0;
1814 
1815 	switch (NXE_1_SW_TEMP_STATE(temp)) {
1816 	case NXE_1_SW_TEMP_STATE_NONE:
1817 		sc->sc_sensor.status = SENSOR_S_UNSPEC;
1818 		break;
1819 	case NXE_1_SW_TEMP_STATE_OK:
1820 		sc->sc_sensor.status = SENSOR_S_OK;
1821 		break;
1822 	case NXE_1_SW_TEMP_STATE_WARN:
1823 		sc->sc_sensor.status = SENSOR_S_WARN;
1824 		break;
1825 	case NXE_1_SW_TEMP_STATE_CRIT:
1826 		/* we should probably bring things down if this is true */
1827 		sc->sc_sensor.status = SENSOR_S_CRIT;
1828 		break;
1829 	default:
1830 		sc->sc_sensor.flags = SENSOR_FUNKNOWN;
1831 		break;
1832 	}
1833 
1834 	timeout_add_sec(&sc->sc_tick, 5);
1835 }
1836 
1837 
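/*
 * descriptor ring helpers. a ring is a single physically contiguous
 * dma allocation of nr_nentries fixed-size descriptors; nr_slot and
 * nr_pos track the next descriptor in software while the chip's
 * producer/consumer indexes are read from and written to its own
 * registers. nxe_ring_alloc() can fail, so callers must check for
 * NULL.
 */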
1838 struct nxe_ring *
1839 nxe_ring_alloc(struct nxe_softc *sc, size_t desclen, u_int nentries)
1840 {
1841 	struct nxe_ring			*nr;
1842 
1843 	nr = malloc(sizeof(struct nxe_ring), M_DEVBUF, M_WAITOK);
1844 
1845 	nr->nr_dmamem = nxe_dmamem_alloc(sc, desclen * nentries, PAGE_SIZE);
1846 	if (nr->nr_dmamem == NULL) {
1847 		free(nr, M_DEVBUF, 0);
1848 		return (NULL);
1849 	}
1850 
1851 	nr->nr_pos = NXE_DMA_KVA(nr->nr_dmamem);
1852 	nr->nr_slot = 0;
1853 	nr->nr_desclen = desclen;
1854 	nr->nr_nentries = nentries;
1855 
1856 	return (nr);
1857 }
1858 
1859 void
1860 nxe_ring_sync(struct nxe_softc *sc, struct nxe_ring *nr, int flags)
1861 {
1862 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(nr->nr_dmamem),
1863 	    0, NXE_DMA_LEN(nr->nr_dmamem), flags);
1864 }
1865 
1866 void
1867 nxe_ring_free(struct nxe_softc *sc, struct nxe_ring *nr)
1868 {
1869 	nxe_dmamem_free(sc, nr->nr_dmamem);
1870 	free(nr, M_DEVBUF, 0);
1871 }
1872 
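/*
 * work out how many descriptors are available relative to a hardware
 * index, modulo the ring size. for example, with 1024 entries,
 * nr_slot == 1020 and a producer of 4, 4 - 1020 + 1024 == 8 entries
 * are readable. the writeable variant uses <= so a consumer that has
 * caught up with nr_slot reports a completely free ring rather than
 * no space at all.
 */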
1873 int
1874 nxe_ring_readable(struct nxe_ring *nr, int producer)
1875 {
1876 	nr->nr_ready = producer - nr->nr_slot;
1877 	if (nr->nr_ready < 0)
1878 		nr->nr_ready += nr->nr_nentries;
1879 
1880 	return (nr->nr_ready);
1881 }
1882 
1883 int
1884 nxe_ring_writeable(struct nxe_ring *nr, int consumer)
1885 {
1886 	nr->nr_ready = consumer - nr->nr_slot;
1887 	if (nr->nr_ready <= 0)
1888 		nr->nr_ready += nr->nr_nentries;
1889 
1890 	return (nr->nr_ready);
1891 }
1892 
1893 void *
1894 nxe_ring_cur(struct nxe_softc *sc, struct nxe_ring *nr)
1895 {
1896 	return (nr->nr_pos);
1897 }
1898 
1899 void *
1900 nxe_ring_next(struct nxe_softc *sc, struct nxe_ring *nr)
1901 {
1902 	if (++nr->nr_slot >= nr->nr_nentries) {
1903 		nr->nr_slot = 0;
1904 		nr->nr_pos = NXE_DMA_KVA(nr->nr_dmamem);
1905 	} else
1906 		nr->nr_pos += nr->nr_desclen;
1907 
1908 	nr->nr_ready--;
1909 
1910 	return (nr->nr_pos);
1911 }
1912 
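/*
 * packet lists are fixed pools of dma maps used by the tx and rx
 * paths. each nxe_pkt carries its index (pkt_id) so completions can
 * find their way back to it, and moves between the free and used
 * tailqs as it is handed to and reclaimed from the chip.
 */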
1913 struct nxe_pkt_list *
1914 nxe_pkt_alloc(struct nxe_softc *sc, u_int npkts, int nsegs)
1915 {
1916 	struct nxe_pkt_list		*npl;
1917 	struct nxe_pkt			*pkt;
1918 	int				i;
1919 
1920 	npl = malloc(sizeof(*npl), M_DEVBUF, M_WAITOK | M_ZERO);
1921 	pkt = mallocarray(npkts, sizeof(*pkt), M_DEVBUF, M_WAITOK | M_ZERO);
1922 
1923 	npl->npl_pkts = pkt;
1924 	TAILQ_INIT(&npl->npl_free);
1925 	TAILQ_INIT(&npl->npl_used);
1926 	for (i = 0; i < npkts; i++) {
1927 		pkt = &npl->npl_pkts[i];
1928 
1929 		pkt->pkt_id = i;
1930 		if (bus_dmamap_create(sc->sc_dmat, NXE_MAX_PKTLEN, nsegs,
1931 		    NXE_MAX_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1932 		    &pkt->pkt_dmap) != 0) {
1933 			nxe_pkt_free(sc, npl);
1934 			return (NULL);
1935 		}
1936 
1937 		TAILQ_INSERT_TAIL(&npl->npl_free, pkt, pkt_link);
1938 	}
1939 
1940 	return (npl);
1941 }
1942 
1943 void
1944 nxe_pkt_free(struct nxe_softc *sc, struct nxe_pkt_list *npl)
1945 {
1946 	struct nxe_pkt			*pkt;
1947 
1948 	while ((pkt = nxe_pkt_get(npl)) != NULL)
1949 		bus_dmamap_destroy(sc->sc_dmat, pkt->pkt_dmap);
1950 
1951 	free(npl->npl_pkts, M_DEVBUF, 0);
1952 	free(npl, M_DEVBUF, sizeof *npl);
1953 }
1954 
1955 struct nxe_pkt *
1956 nxe_pkt_get(struct nxe_pkt_list *npl)
1957 {
1958 	struct nxe_pkt			*pkt;
1959 
1960 	pkt = TAILQ_FIRST(&npl->npl_free);
1961 	if (pkt != NULL) {
1962 		TAILQ_REMOVE(&npl->npl_free, pkt, pkt_link);
1963 		TAILQ_INSERT_TAIL(&npl->npl_used, pkt, pkt_link);
1964 	}
1965 
1966 	return (pkt);
1967 }
1968 
1969 void
1970 nxe_pkt_put(struct nxe_pkt_list *npl, struct nxe_pkt *pkt)
1971 {
1972 	TAILQ_REMOVE(&npl->npl_used, pkt, pkt_link);
1973 	TAILQ_INSERT_TAIL(&npl->npl_free, pkt, pkt_link);
1975 }
1976 
1977 struct nxe_pkt *
1978 nxe_pkt_used(struct nxe_pkt_list *npl)
1979 {
1980 	return (TAILQ_FIRST(&npl->npl_used));
1981 }
1982 
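/*
 * single segment dma memory allocator used for the rings and the
 * dummy firmware buffer. this follows the usual four step bus_dma(9)
 * dance (create the map, allocate the segment, map it into kva, load
 * the map) and unwinds in reverse order on failure.
 */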
1983 struct nxe_dmamem *
1984 nxe_dmamem_alloc(struct nxe_softc *sc, bus_size_t size, bus_size_t align)
1985 {
1986 	struct nxe_dmamem		*ndm;
1987 	int				nsegs;
1988 
1989 	ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
1990 	ndm->ndm_size = size;
1991 
1992 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1993 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
1994 		goto ndmfree;
1995 
1996 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &ndm->ndm_seg, 1,
1997 	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
1998 		goto destroy;
1999 
2000 	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
2001 	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
2002 		goto free;
2003 
2004 	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
2005 	    NULL, BUS_DMA_WAITOK) != 0)
2006 		goto unmap;
2007 
2008 	return (ndm);
2009 
2010 unmap:
2011 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
2012 free:
2013 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2014 destroy:
2015 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2016 ndmfree:
2017 	free(ndm, M_DEVBUF, sizeof *ndm);
2018 
2019 	return (NULL);
2020 }
2021 
2022 void
2023 nxe_dmamem_free(struct nxe_softc *sc, struct nxe_dmamem *ndm)
2024 {
2025 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
2026 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2027 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2028 	free(ndm, M_DEVBUF, sizeof *ndm);
2029 }
2030 
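/*
 * low level register accessors for the memory bar. reads and writes
 * are wrapped with bus_space barriers so register ordering survives
 * posted writes; nxe_wait() polls a register with a 1ms delay per
 * iteration and returns 0 on timeout, 1 once the masked value
 * matches.
 */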
2031 u_int32_t
2032 nxe_read(struct nxe_softc *sc, bus_size_t r)
2033 {
2034 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, 4,
2035 	    BUS_SPACE_BARRIER_READ);
2036 	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, r));
2037 }
2038 
2039 void
2040 nxe_write(struct nxe_softc *sc, bus_size_t r, u_int32_t v)
2041 {
2042 	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, v);
2043 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, 4,
2044 	    BUS_SPACE_BARRIER_WRITE);
2045 }
2046 
2047 int
2048 nxe_wait(struct nxe_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
2049     u_int timeout)
2050 {
2051 	while ((nxe_read(sc, r) & m) != v) {
2052 		if (timeout == 0)
2053 			return (0);
2054 
2055 		delay(1000);
2056 		timeout--;
2057 	}
2058 
2059 	return (1);
2060 }
2061 
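/*
 * the doorbell is how producer/consumer updates are handed to the
 * firmware: the caller builds a 32 bit command out of the NXE_DB_*
 * bits (peg id, privileged bit, count, context id and opcode) and
 * posts it with a single write.
 */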
2062 void
2063 nxe_doorbell(struct nxe_softc *sc, u_int32_t v)
2064 {
2065 	bus_space_write_4(sc->sc_memt, sc->sc_memh, NXE_DB, v);
2066 	bus_space_barrier(sc->sc_memt, sc->sc_memh, NXE_DB, 4,
2067 	    BUS_SPACE_BARRIER_WRITE);
2068 }
2069 
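/*
 * the crb register space is windowed: only half of it is visible at
 * a time, selected per pci function. nxe_crb_set() flips the window
 * and returns the previous setting so callers can restore it when
 * they are done, e.g. as nxe_tick() does:
 *
 *	window = nxe_crb_set(sc, 1);
 *	temp = nxe_crb_read(sc, NXE_1_SW_TEMP);
 *	nxe_crb_set(sc, window);
 */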
2070 int
2071 nxe_crb_set(struct nxe_softc *sc, int window)
2072 {
2073 	int			oldwindow = sc->sc_window;
2074 	u_int32_t		r;
2075 
2076 	if (sc->sc_window != window) {
2077 		sc->sc_window = window;
2078 
2079 		r = window ? NXE_WIN_CRB_1 : NXE_WIN_CRB_0;
2080 		nxe_write(sc, NXE_WIN_CRB(sc->sc_function), r);
2081 
2082 		if (nxe_read(sc, NXE_WIN_CRB(sc->sc_function)) != r)
2083 			printf("%s: crb window hasn't moved\n", DEVNAME(sc));
2084 	}
2085 
2086 	return (oldwindow);
2087 }
2088 
2089 u_int32_t
2090 nxe_crb_read(struct nxe_softc *sc, bus_size_t r)
2091 {
2092 	bus_space_barrier(sc->sc_memt, sc->sc_crbh, r, 4,
2093 	    BUS_SPACE_BARRIER_READ);
2094 	return (bus_space_read_4(sc->sc_memt, sc->sc_crbh, r));
2095 }
2096 
2097 void
2098 nxe_crb_write(struct nxe_softc *sc, bus_size_t r, u_int32_t v)
2099 {
2100 	bus_space_write_4(sc->sc_memt, sc->sc_crbh, r, v);
2101 	bus_space_barrier(sc->sc_memt, sc->sc_crbh, r, 4,
2102 	    BUS_SPACE_BARRIER_WRITE);
2103 }
2104 
2105 int
2106 nxe_crb_wait(struct nxe_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
2107     u_int timeout)
2108 {
2109 	while ((nxe_crb_read(sc, r) & m) != v) {
2110 		if (timeout == 0)
2111 			return (0);
2112 
2113 		delay(1000);
2114 		timeout--;
2115 	}
2116 
2117 	return (1);
2118 }
2119 
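/*
 * access to the expansion rom is arbitrated with a hardware
 * semaphore: polling NXE_SEM_ROM_LOCK until it reports done takes
 * the lock, and a read of NXE_SEM_ROM_UNLOCK drops it again.
 */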
2120 int
2121 nxe_rom_lock(struct nxe_softc *sc)
2122 {
2123 	if (!nxe_wait(sc, NXE_SEM_ROM_LOCK, 0xffffffff,
2124 	    NXE_SEM_DONE, 10000))
2125 		return (1);
2126 	nxe_crb_write(sc, NXE_1_SW_ROM_LOCK_ID, NXE_1_SW_ROM_LOCK_ID);
2127 
2128 	return (0);
2129 }
2130 
2131 void
2132 nxe_rom_unlock(struct nxe_softc *sc)
2133 {
2134 	nxe_read(sc, NXE_SEM_ROM_UNLOCK);
2135 }
2136 
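/*
 * read a single 32 bit word from the rom through the romusb block:
 * take the semaphore, program the address and transfer counts, issue
 * the READ opcode, wait for the status register to report done and
 * pick the result out of the rdata register. nxe_rom_read_region()
 * below loops this to pull larger structures out of flash a word at
 * a time.
 */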
2137 int
2138 nxe_rom_read(struct nxe_softc *sc, u_int32_t r, u_int32_t *v)
2139 {
2140 	int			rv = 1;
2141 
2142 	DASSERT(sc->sc_window == 1);
2143 
2144 	if (nxe_rom_lock(sc) != 0)
2145 		return (1);
2146 
2147 	/* set the rom address */
2148 	nxe_crb_write(sc, NXE_1_ROM_ADDR, r);
2149 
2150 	/* set the xfer len */
2151 	nxe_crb_write(sc, NXE_1_ROM_ABYTE_CNT, 3);
2152 	delay(100); /* used to prevent bursting on the chipset */
2153 	nxe_crb_write(sc, NXE_1_ROM_DBYTE_CNT, 0);
2154 
2155 	/* set opcode and wait for completion */
2156 	nxe_crb_write(sc, NXE_1_ROM_OPCODE, NXE_1_ROM_OPCODE_READ);
2157 	if (!nxe_crb_wait(sc, NXE_1_ROMUSB_STATUS, NXE_1_ROMUSB_STATUS_DONE,
2158 	    NXE_1_ROMUSB_STATUS_DONE, 100))
2159 		goto err;
2160 
2161 	/* reset counters */
2162 	nxe_crb_write(sc, NXE_1_ROM_ABYTE_CNT, 0);
2163 	delay(100);
2164 	nxe_crb_write(sc, NXE_1_ROM_DBYTE_CNT, 0);
2165 
2166 	*v = nxe_crb_read(sc, NXE_1_ROM_RDATA);
2167 
2168 	rv = 0;
2169 err:
2170 	nxe_rom_unlock(sc);
2171 	return (rv);
2172 }
2173 
2174 int
2175 nxe_rom_read_region(struct nxe_softc *sc, u_int32_t r, void *buf,
2176     size_t buflen)
2177 {
2178 	u_int32_t		*databuf = buf;
2179 	int			i;
2180 
2181 #ifdef NXE_DEBUG
2182 	if ((buflen % 4) != 0)
2183 		panic("nxe_rom_read_region: buflen is wrong (%zu)", buflen);
2184 #endif
2185 
2186 	buflen = buflen / 4;
2187 	for (i = 0; i < buflen; i++) {
2188 		if (nxe_rom_read(sc, r, &databuf[i]) != 0)
2189 			return (1);
2190 
2191 		r += sizeof(u_int32_t);
2192 	}
2193 
2194 	return (0);
2195 }
2196