1 /*	$OpenBSD: if_nxe.c,v 1.62 2011/02/15 12:37:59 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/sockio.h>
24 #include <sys/mbuf.h>
25 #include <sys/kernel.h>
26 #include <sys/socket.h>
27 #include <sys/malloc.h>
28 #include <sys/device.h>
29 #include <sys/proc.h>
30 #include <sys/queue.h>
31 #include <sys/timeout.h>
32 #include <sys/sensors.h>
33 #include <sys/rwlock.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <net/if.h>
42 #include <net/if_dl.h>
43 #include <net/if_media.h>
44 #include <net/if_types.h>
45 
46 #if NBPFILTER > 0
47 #include <net/bpf.h>
48 #endif
49 
50 #ifdef INET
51 #include <netinet/in.h>
52 #include <netinet/if_ether.h>
53 #endif
54 
55 #ifdef NXE_DEBUG
56 int nxedebug = 0;
57 
58 #define DPRINTF(l, f...)	do { if (nxedebug & (l)) printf(f); } while (0)
59 #define DASSERT(_a)		assert(_a)
60 #else
61 #define DPRINTF(l, f...)
62 #define DASSERT(_a)
63 #endif
64 
65 /* this driver likes firmware around this version */
66 #define NXE_VERSION_MAJOR	3
67 #define NXE_VERSION_MINOR	4
68 #define NXE_VERSION_BUILD	31
69 #define NXE_VERSION \
70     ((NXE_VERSION_MAJOR << 16)|(NXE_VERSION_MINOR << 8)|(NXE_VERSION_BUILD))
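/* eg firmware 3.4.31 packs into NXE_VERSION as 0x0003041f */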
71 
72 
73 /*
74  * PCI configuration space registers
75  */
76 
77 #define NXE_PCI_BAR_MEM		0x10 /* bar 0 */
78 #define NXE_PCI_BAR_MEM_128MB		(128 * 1024 * 1024)
79 #define NXE_PCI_BAR_DOORBELL	0x20 /* bar 4 */
80 
81 /*
82  * doorbell register space
83  */
84 
85 #define NXE_DB			0x00000000
86 #define  NXE_DB_PEGID			0x00000003
87 #define  NXE_DB_PEGID_RX		0x00000001 /* rx unit */
88 #define  NXE_DB_PEGID_TX		0x00000002 /* tx unit */
89 #define  NXE_DB_PRIVID			0x00000004 /* must be set */
90 #define  NXE_DB_COUNT(_c)		((_c)<<3) /* count */
91 #define  NXE_DB_CTXID(_c)		((_c)<<18) /* context id */
92 #define  NXE_DB_OPCODE_RX_PROD		0x00000000
93 #define  NXE_DB_OPCODE_RX_JUMBO_PROD	0x10000000
94 #define  NXE_DB_OPCODE_RX_LRO_PROD	0x20000000
95 #define  NXE_DB_OPCODE_CMD_PROD		0x30000000
96 #define  NXE_DB_OPCODE_UPD_CONS		0x40000000
97 #define  NXE_DB_OPCODE_RESET_CTX	0x50000000
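
/*
 * Illustrative recap of how these bits combine (this mirrors how
 * nxe_rx_start() below rings the rx doorbell for its context):
 *
 *	nxe_doorbell(sc, NXE_DB_PEGID_RX | NXE_DB_PRIVID |
 *	    NXE_DB_OPCODE_RX_PROD |
 *	    NXE_DB_COUNT(nr->nr_slot) | NXE_DB_CTXID(sc->sc_function));
 */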
98 
99 /*
100  * register space
101  */
102 
103 /* different PCI functions use different registers sometimes */
104 #define _F(_f)			((_f) * 0x20)
105 
106 /*
107  * driver ref section 4.2
108  *
109  * All the hardware registers are mapped in memory. Apart from the registers
110  * for the individual hardware blocks, the memory map includes a large number
111  * of software definable registers.
112  *
113  * The following table gives the memory map in the PCI address space.
114  */
115 
116 #define NXE_MAP_DDR_NET		0x00000000
117 #define NXE_MAP_DDR_MD		0x02000000
118 #define NXE_MAP_QDR_NET		0x04000000
119 #define NXE_MAP_DIRECT_CRB	0x04400000
120 #define NXE_MAP_OCM0		0x05000000
121 #define NXE_MAP_OCM1		0x05100000
122 #define NXE_MAP_CRB		0x06000000
123 
124 /*
125  * Since there are a large number of registers, they do not fit in a single
126  * PCI addressing range, so two windows are defined. The window starts at
127  * NXE_MAP_CRB and extends to the end of the register map. The active
128  * window is selected with the NXE_WIN_CRB register. The format of the
129  * NXE_WIN_CRB register is as follows:
130  */
131 
132 #define NXE_WIN_CRB(_f)		(0x06110210 + _F(_f))
133 #define  NXE_WIN_CRB_0			(0<<25)
134 #define  NXE_WIN_CRB_1			(1<<25)
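
/*
 * A rough sketch of window selection (the real work is done by
 * nxe_crb_set()): switching PCI function f to window 1 amounts to
 * something like
 *
 *	nxe_write(sc, NXE_WIN_CRB(f), NXE_WIN_CRB_1);
 *	(void)nxe_read(sc, NXE_WIN_CRB(f));	(read back to post the write)
 */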
135 
136 /*
137  * The memory map inside the register windows is divided into a set of blocks.
138  * Each register block is owned by one hardware agent. The following table
139  * gives the memory map of the various register blocks in window 0. These
140  * registers are all in the CRB register space, so the offsets given here are
141  * relative to the base of the CRB offset region (NXE_MAP_CRB).
142  */
143 
144 #define NXE_W0_PCIE		0x00100000 /* PCI Express */
145 #define NXE_W0_NIU		0x00600000 /* Network Interface Unit */
146 #define NXE_W0_PPE_0		0x01100000 /* Protocol Processing Engine 0 */
147 #define NXE_W0_PPE_1		0x01200000 /* Protocol Processing Engine 1 */
148 #define NXE_W0_PPE_2		0x01300000 /* Protocol Processing Engine 2 */
149 #define NXE_W0_PPE_3		0x01400000 /* Protocol Processing Engine 3 */
150 #define NXE_W0_PPE_D		0x01500000 /* PPE D-cache */
151 #define NXE_W0_PPE_I		0x01600000 /* PPE I-cache */
152 
153 /*
154  * These are the register blocks inside window 1.
155  */
156 
157 #define NXE_W1_PCIE		0x00100000
158 #define NXE_W1_SW		0x00200000
159 #define NXE_W1_SIR		0x01200000
160 #define NXE_W1_ROMUSB		0x01300000
161 
162 /*
163  * Global registers
164  */
165 #define NXE_BOOTLD_START	0x00010000
166 
167 
168 /*
169  * driver ref section 5
170  *
171  * CRB Window Register Descriptions
172  */
173 
174 /*
175  * PCI Express Registers
176  *
177  * Despite being in the CRB window space, they can be accessed via both
178  * windows. This means they are accessible "globally" without going relative
179  * to the start of the CRB window space.
180  */
181 
182 /* Interrupts */
183 #define NXE_ISR_VECTOR		0x06110100 /* Interrupt Vector */
184 #define NXE_ISR_MASK		0x06110104 /* Interrupt Mask */
185 #define NXE_ISR_TARGET_STATUS	0x06110118
186 #define NXE_ISR_TARGET_MASK	0x06110128
187 #define  NXE_ISR_MINE(_f)		(0x08 << (_f))
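/* eg NXE_ISR_MINE(2) is 0x20; nxe_intr() tests this bit to claim the intr */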
188 
189 /* lock registers (semaphores between chipset and driver) */
190 #define NXE_SEM_ROM_LOCK	0x0611c010 /* ROM access lock */
191 #define NXE_SEM_ROM_UNLOCK	0x0611c014
192 #define NXE_SEM_PHY_LOCK	0x0611c018 /* PHY access lock */
193 #define NXE_SEM_PHY_UNLOCK	0x0611c01c
194 #define  NXE_SEM_DONE			0x1
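
/*
 * Rough sketch of the semaphore handshake (the real code lives in
 * nxe_rom_lock()/nxe_rom_unlock()): a read of the lock register that
 * returns NXE_SEM_DONE means the semaphore was taken, and a read of the
 * matching unlock register drops it again, roughly:
 *
 *	while (!(nxe_read(sc, NXE_SEM_ROM_LOCK) & NXE_SEM_DONE))
 *		delay(100);
 *	(access the rom)
 *	nxe_read(sc, NXE_SEM_ROM_UNLOCK);
 */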
195 
196 /*
197  * Network Interface Unit (NIU) Registers
198  */
199 
200 #define NXE_0_NIU_MODE		0x00600000
201 #define  NXE_0_NIU_MODE_XGE		(1<<2) /* XGE interface enabled */
202 #define  NXE_0_NIU_MODE_GBE		(1<<1) /* 4 GbE interfaces enabled */
203 #define NXE_0_NIU_SINGLE_TERM	0x00600004
204 #define NXE_0_NIU_INT_MASK	0x00600040
205 
206 #define NXE_0_NIU_RESET_XG	0x0060001c /* reset XG */
207 #define NXE_0_NIU_RESET_FIFO	0x00600088 /* reset sys fifos */
208 
209 #define _P(_p)			((_p) * 0x10000)
210 
211 #define NXE_0_XG_CFG0(_p)	(0x00670000 + _P(_p))
212 #define  NXE_0_XG_CFG0_TX_EN		(1<<0) /* TX enable */
213 #define  NXE_0_XG_CFG0_TX_SYNC		(1<<1) /* TX synced */
214 #define  NXE_0_XG_CFG0_RX_EN		(1<<2) /* RX enable */
215 #define  NXE_0_XG_CFG0_RX_SYNC		(1<<3) /* RX synced */
216 #define  NXE_0_XG_CFG0_TX_FLOWCTL	(1<<4) /* enable pause frame gen */
217 #define  NXE_0_XG_CFG0_RX_FLOWCTL	(1<<5) /* act on rxed pause frames */
218 #define  NXE_0_XG_CFG0_LOOPBACK		(1<<8) /* tx appears on rx */
219 #define  NXE_0_XG_CFG0_TX_RST_PB	(1<<15) /* reset frm tx proto block */
220 #define  NXE_0_XG_CFG0_RX_RST_PB	(1<<16) /* reset frm rx proto block */
221 #define  NXE_0_XG_CFG0_TX_RST_MAC	(1<<17) /* reset frm tx multiplexer */
222 #define  NXE_0_XG_CFG0_RX_RST_MAC	(1<<18) /* reset ctl frms and timers */
223 #define  NXE_0_XG_CFG0_SOFT_RST		(1<<31) /* soft reset */
224 #define NXE_0_XG_CFG1(_p)	(0x00670004 + _P(_p))
225 #define  NXE_0_XG_CFG1_REM_CRC		(1<<0) /* enable crc removal */
226 #define  NXE_0_XG_CFG1_CRC_EN		(1<<1) /* append crc to tx frames */
227 #define  NXE_0_XG_CFG1_NO_MAX		(1<<5) /* rx all frames despite size */
228 #define  NXE_0_XG_CFG1_WIRE_LO_ERR	(1<<6) /* recognize local err */
229 #define  NXE_0_XG_CFG1_PAUSE_FR_DIS	(1<<8) /* disable pause frame detect */
230 #define  NXE_0_XG_CFG1_SEQ_ERR_EN	(1<<10) /* enable seq err detection */
231 #define  NXE_0_XG_CFG1_MULTICAST	(1<<12) /* accept all multicast */
232 #define  NXE_0_XG_CFG1_PROMISC		(1<<13) /* accept all frames */
233 #define NXE_0_XG_IPG(_p)	(0x00670008 + _P(_p))
234 #define NXE_0_XG_MAC_LO(_p)	(0x00670010 + _P(_p))
235 #define NXE_0_XG_MAC_HI(_p)	(0x0067000c + _P(_p))
236 #define NXE_0_XG_STATUS(_p)	(0x00670018 + _P(_p))
237 #define NXE_0_XG_MTU(_p)	(0x0067001c + _P(_p))
238 #define NXE_0_XG_PAUSE_FRM(_p)	(0x00670020 + _P(_p))
239 #define NXE_0_XG_TX_BYTES(_p)	(0x00670024 + _P(_p))
240 #define NXE_0_XG_TX_PKTS(_p)	(0x00670028 + _P(_p))
241 #define NXE_0_XG_RX_BYTES(_p)	(0x0067002c + _P(_p))
242 #define NXE_0_XG_RX_PKTS(_p)	(0x00670030 + _P(_p))
243 #define NXE_0_XG_AGGR_ERRS(_p)	(0x00670034 + _P(_p))
244 #define NXE_0_XG_MCAST_PKTS(_p)	(0x00670038 + _P(_p))
245 #define NXE_0_XG_UCAST_PKTS(_p)	(0x0067003c + _P(_p))
246 #define NXE_0_XG_CRC_ERRS(_p)	(0x00670040 + _P(_p))
247 #define NXE_0_XG_OVERSIZE(_p)	(0x00670044 + _P(_p))
248 #define NXE_0_XG_UNDERSIZE(_p)	(0x00670048 + _P(_p))
249 #define NXE_0_XG_LOCAL_ERRS(_p)	(0x0067004c + _P(_p))
250 #define NXE_0_XG_REMOTE_ERRS(_p) (0x00670050 + _P(_p))
251 #define NXE_0_XG_CNTL_CHARS(_p)	(0x00670054 + _P(_p))
252 #define NXE_0_XG_PAUSE_PKTS(_p)	(0x00670058 + _P(_p))
253 
254 /*
255  * Software Defined Registers
256  */
257 
258 /* chipset state registers */
259 #define NXE_1_SW_ROM_LOCK_ID	0x00202100
260 #define  NXE_1_SW_ROM_LOCK_ID_DRV	0x0d417340
261 #define NXE_1_SW_PHY_LOCK_ID	0x00202120
262 #define  NXE_1_SW_PHY_LOCK_ID_DRV	0x44524956
263 
264 /* firmware version */
265 #define NXE_1_SW_FWVER_MAJOR	0x00202150 /* Major f/w version */
266 #define NXE_1_SW_FWVER_MINOR	0x00202154 /* Minor f/w version */
267 #define NXE_1_SW_FWVER_BUILD	0x00202158 /* Build/Sub f/w version */
268 
269 /* misc */
270 #define NXE_1_SW_CMD_ADDR_HI	0x00202218 /* cmd ring phys addr */
271 #define NXE_1_SW_CMD_ADDR_LO	0x0020221c /* cmd ring phys addr */
272 #define NXE_1_SW_CMD_SIZE	0x002022c8 /* entries in the cmd ring */
273 #define NXE_1_SW_DUMMY_ADDR_HI	0x0020223c /* hi address of dummy buf */
274 #define NXE_1_SW_DUMMY_ADDR_LO	0x00202240 /* lo address of dummy buf */
275 #define  NXE_1_SW_DUMMY_ADDR_LEN	1024
276 
277 static const u_int32_t nxe_regmap[][4] = {
278 #define NXE_1_SW_CMD_PRODUCER(_f)	(nxe_regmap[0][(_f)])
279     { 0x00202208, 0x002023ac, 0x002023b8, 0x002023d0 },
280 #define NXE_1_SW_CMD_CONSUMER(_f)	(nxe_regmap[1][(_f)])
281     { 0x0020220c, 0x002023b0, 0x002023bc, 0x002023d4 },
282 
283 #define NXE_1_SW_CONTEXT(_p)		(nxe_regmap[2][(_p)])
284 #define NXE_1_SW_CONTEXT_SIG(_p)	(0xdee0 | (_p))
285     { 0x0020238c, 0x00202390, 0x0020239c, 0x002023a4 },
286 #define NXE_1_SW_CONTEXT_ADDR_LO(_p)	(nxe_regmap[3][(_p)])
287     { 0x00202388, 0x00202390, 0x00202398, 0x002023a0 },
288 #define NXE_1_SW_CONTEXT_ADDR_HI(_p)	(nxe_regmap[4][(_p)])
289     { 0x002023c0, 0x002023c4, 0x002023c8, 0x002023cc },
290 
291 #define NXE_1_SW_INT_MASK(_p)		(nxe_regmap[5][(_p)])
292     { 0x002023d8, 0x002023e0, 0x002023e4, 0x002023e8 },
293 
294 #define NXE_1_SW_RX_PRODUCER(_c)	(nxe_regmap[6][(_c)])
295     { 0x00202300, 0x00202344, 0x002023d8, 0x0020242c },
296 #define NXE_1_SW_RX_CONSUMER(_c)	(nxe_regmap[7][(_c)])
297     { 0x00202304, 0x00202348, 0x002023dc, 0x00202430 },
298 #define NXE_1_SW_RX_RING(_c)		(nxe_regmap[8][(_c)])
299     { 0x00202308, 0x0020234c, 0x002023f0, 0x00202434 },
300 #define NXE_1_SW_RX_SIZE(_c)		(nxe_regmap[9][(_c)])
301     { 0x0020230c, 0x00202350, 0x002023f4, 0x00202438 },
302 
303 #define NXE_1_SW_RX_JUMBO_PRODUCER(_c)	(nxe_regmap[10][(_c)])
304     { 0x00202310, 0x00202354, 0x002023f8, 0x0020243c },
305 #define NXE_1_SW_RX_JUMBO_CONSUMER(_c)	(nxe_regmap[11][(_c)])
306     { 0x00202314, 0x00202358, 0x002023fc, 0x00202440 },
307 #define NXE_1_SW_RX_JUMBO_RING(_c)	(nxe_regmap[12][(_c)])
308     { 0x00202318, 0x0020235c, 0x00202400, 0x00202444 },
309 #define NXE_1_SW_RX_JUMBO_SIZE(_c)	(nxe_regmap[13][(_c)])
310     { 0x0020231c, 0x00202360, 0x00202404, 0x00202448 },
311 
312 #define NXE_1_SW_RX_LRO_PRODUCER(_c)	(nxe_regmap[14][(_c)])
313     { 0x00202320, 0x00202364, 0x00202408, 0x0020244c },
314 #define NXE_1_SW_RX_LRO_CONSUMER(_c)	(nxe_regmap[15][(_c)])
315     { 0x00202324, 0x00202368, 0x0020240c, 0x00202450 },
316 #define NXE_1_SW_RX_LRO_RING(_c)	(nxe_regmap[16][(_c)])
317     { 0x00202328, 0x0020236c, 0x00202410, 0x00202454 },
318 #define NXE_1_SW_RX_LRO_SIZE(_c)	(nxe_regmap[17][(_c)])
319     { 0x0020232c, 0x00202370, 0x00202414, 0x00202458 },
320 
321 #define NXE_1_SW_STATUS_RING(_c)	(nxe_regmap[18][(_c)])
322     { 0x00202330, 0x00202374, 0x00202418, 0x0020245c },
323 #define NXE_1_SW_STATUS_PRODUCER(_c)	(nxe_regmap[19][(_c)])
324     { 0x00202334, 0x00202378, 0x0020241c, 0x00202460 },
325 #define NXE_1_SW_STATUS_CONSUMER(_c)	(nxe_regmap[20][(_c)])
326     { 0x00202338, 0x0020237c, 0x00202420, 0x00202464 },
327 #define NXE_1_SW_STATUS_STATE(_c)	(nxe_regmap[21][(_c)])
328 #define  NXE_1_SW_STATUS_STATE_READY		0x0000ff01
329     { 0x0020233c, 0x00202380, 0x00202424, 0x00202468 },
330 #define NXE_1_SW_STATUS_SIZE(_c)	(nxe_regmap[22][(_c)])
331     { 0x00202340, 0x00202384, 0x00202428, 0x0020246c }
332 };
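
/*
 * Example (illustrative): NXE_1_SW_CMD_PRODUCER(2) expands to
 * nxe_regmap[0][2], ie the cmd ring producer register for PCI function 2
 * sits at CRB offset 0x002023b8.
 */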
333 
334 
335 #define NXE_1_SW_BOOTLD_CONFIG	0x002021fc
336 #define  NXE_1_SW_BOOTLD_CONFIG_ROM	0x00000000
337 #define  NXE_1_SW_BOOTLD_CONFIG_RAM	0x12345678
338 
339 #define NXE_1_SW_CMDPEG_STATE	0x00202250 /* init status */
340 #define  NXE_1_SW_CMDPEG_STATE_START	0xff00 /* init starting */
341 #define  NXE_1_SW_CMDPEG_STATE_DONE	0xff01 /* init complete */
342 #define  NXE_1_SW_CMDPEG_STATE_ACK	0xf00f /* init ack */
343 #define  NXE_1_SW_CMDPEG_STATE_ERROR	0xffff /* init failed */
344 
345 #define NXE_1_SW_XG_STATE	0x00202294 /* phy state */
346 #define  NXE_1_SW_XG_STATE_PORT(_r, _p)	(((_r)>>8*(_p))&0xff)
347 #define  NXE_1_SW_XG_STATE_UP		(1<<4)
348 #define  NXE_1_SW_XG_STATE_DOWN		(1<<5)
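
/*
 * Example (illustrative, see nxe_link_state()): with the raw register in r,
 * port 2 is up when (NXE_1_SW_XG_STATE_PORT(r, 2) & NXE_1_SW_XG_STATE_UP)
 * is non-zero, ie bit 4 of the third byte.
 */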
349 
350 #define NXE_1_SW_MPORT_MODE	0x002022c4
351 #define  NXE_1_SW_MPORT_MODE_SINGLE	0x1111
352 #define  NXE_1_SW_MPORT_MODE_MULTI	0x2222
353 
354 #define NXE_1_SW_INT_VECTOR	0x002022d4
355 
356 #define NXE_1_SW_NIC_CAP_HOST	0x002023a8 /* host capabilities */
357 #define NXE_1_SW_NIC_CAP_FW	0x002023dc /* firmware capabilities */
358 #define  NXE_1_SW_NIC_CAP_PORTINTR	0x1 /* per port interrupts */
359 #define NXE_1_SW_DRIVER_VER	0x002024a0 /* host driver version */
360 
361 
362 #define NXE_1_SW_TEMP		0x002023b4 /* Temperature sensor */
363 #define  NXE_1_SW_TEMP_STATE(_x)	((_x)&0xffff) /* Temp state */
364 #define  NXE_1_SW_TEMP_STATE_NONE	0x0000
365 #define  NXE_1_SW_TEMP_STATE_OK		0x0001
366 #define  NXE_1_SW_TEMP_STATE_WARN	0x0002
367 #define  NXE_1_SW_TEMP_STATE_CRIT	0x0003
368 #define  NXE_1_SW_TEMP_VAL(_x)		(((_x)>>16)&0xffff) /* Temp value */
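/* eg a raw value of 0x002a0001 decodes to state OK at 42 degC */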
369 
370 #define NXE_1_SW_V2P(_f)	(0x00202490+((_f)*4)) /* virtual to phys */
371 
372 /*
373  * ROMUSB Registers
374  */
375 #define NXE_1_ROMUSB_STATUS	0x01300004 /* ROM Status */
376 #define  NXE_1_ROMUSB_STATUS_DONE	(1<<1)
377 #define NXE_1_ROMUSB_SW_RESET	0x01300008
378 #define NXE_1_ROMUSB_SW_RESET_DEF	0xffffffff
379 #define NXE_1_ROMUSB_SW_RESET_BOOT	0x0080000f
380 
381 #define NXE_1_CASPER_RESET	0x01300038
382 #define  NXE_1_CASPER_RESET_ENABLE	0x1
383 #define  NXE_1_CASPER_RESET_DISABLE	0x1
384 
385 #define NXE_1_GLB_PEGTUNE	0x0130005c /* reset register */
386 #define  NXE_1_GLB_PEGTUNE_DONE		0x00000001
387 
388 #define NXE_1_GLB_CHIPCLKCTL	0x013000a8
389 #define NXE_1_GLB_CHIPCLKCTL_ON		0x00003fff
390 
391 /* ROM Registers */
392 #define NXE_1_ROM_CONTROL	0x01310000
393 #define NXE_1_ROM_OPCODE	0x01310004
394 #define  NXE_1_ROM_OPCODE_READ		0x0000000b
395 #define NXE_1_ROM_ADDR		0x01310008
396 #define NXE_1_ROM_WDATA		0x0131000c
397 #define NXE_1_ROM_ABYTE_CNT	0x01310010
398 #define NXE_1_ROM_DBYTE_CNT	0x01310014 /* dummy byte count */
399 #define NXE_1_ROM_RDATA		0x01310018
400 #define NXE_1_ROM_AGT_TAG	0x0131001c
401 #define NXE_1_ROM_TIME_PARM	0x01310020
402 #define NXE_1_ROM_CLK_DIV	0x01310024
403 #define NXE_1_ROM_MISS_INSTR	0x01310028
404 
405 /*
406  * flash memory layout
407  *
408  * These are offsets of memory accessible via the ROM Registers above
409  */
410 #define NXE_FLASH_CRBINIT	0x00000000 /* crb init section */
411 #define NXE_FLASH_BRDCFG	0x00004000 /* board config */
412 #define NXE_FLASH_INITCODE	0x00006000 /* pegtune code */
413 #define NXE_FLASH_BOOTLD	0x00010000 /* boot loader */
414 #define NXE_FLASH_IMAGE		0x00043000 /* compressed image */
415 #define NXE_FLASH_SECONDARY	0x00200000 /* backup image */
416 #define NXE_FLASH_PXE		0x003d0000 /* pxe image */
417 #define NXE_FLASH_USER		0x003e8000 /* user region for new boards */
418 #define NXE_FLASH_VPD		0x003e8c00 /* vendor private data */
419 #define NXE_FLASH_LICENSE	0x003e9000 /* firmware license */
420 #define NXE_FLASH_FIXED		0x003f0000 /* backup of crbinit */
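
/*
 * These regions are pulled out via the ROM registers with
 * nxe_rom_read_region(), eg the board config used by nxe_board_info():
 *
 *	nxe_rom_read_region(sc, NXE_FLASH_BRDCFG, ni, sizeof(struct nxe_info));
 */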
421 
422 
423 /*
424  * misc hardware details
425  */
426 #define NXE_MAX_PORTS		4
427 #define NXE_MAX_PORT_LLADDRS	32
428 #define NXE_MAX_PKTLEN		(64 * 1024)
429 
430 
431 /*
432  * hardware structures
433  */
434 
435 struct nxe_info {
436 	u_int32_t		ni_hdrver;
437 #define NXE_INFO_HDRVER_1		0x00000001
438 
439 	u_int32_t		ni_board_mfg;
440 	u_int32_t		ni_board_type;
441 #define NXE_BRDTYPE_P1_BD		0x0000
442 #define NXE_BRDTYPE_P1_SB		0x0001
443 #define NXE_BRDTYPE_P1_SMAX		0x0002
444 #define NXE_BRDTYPE_P1_SOCK		0x0003
445 #define NXE_BRDTYPE_P2_SOCK_31		0x0008
446 #define NXE_BRDTYPE_P2_SOCK_35		0x0009
447 #define NXE_BRDTYPE_P2_SB35_4G		0x000a
448 #define NXE_BRDTYPE_P2_SB31_10G		0x000b
449 #define NXE_BRDTYPE_P2_SB31_2G		0x000c
450 #define NXE_BRDTYPE_P2_SB31_10G_IMEZ	0x000d
451 #define NXE_BRDTYPE_P2_SB31_10G_HMEZ	0x000e
452 #define NXE_BRDTYPE_P2_SB31_10G_CX4	0x000f
453 	u_int32_t		ni_board_num;
454 
455 	u_int32_t		ni_chip_id;
456 	u_int32_t		ni_chip_minor;
457 	u_int32_t		ni_chip_major;
458 	u_int32_t		ni_chip_pkg;
459 	u_int32_t		ni_chip_lot;
460 
461 	u_int32_t		ni_port_mask;
462 	u_int32_t		ni_peg_mask;
463 	u_int32_t		ni_icache;
464 	u_int32_t		ni_dcache;
465 	u_int32_t		ni_casper;
466 
467 	u_int32_t		ni_lladdr0_low;
468 	u_int32_t		ni_lladdr1_low;
469 	u_int32_t		ni_lladdr2_low;
470 	u_int32_t		ni_lladdr3_low;
471 
472 	u_int32_t		ni_mnsync_mode;
473 	u_int32_t		ni_mnsync_shift_cclk;
474 	u_int32_t		ni_mnsync_shift_mclk;
475 	u_int32_t		ni_mnwb_enable;
476 	u_int32_t		ni_mnfreq_crystal;
477 	u_int32_t		ni_mnfreq_speed;
478 	u_int32_t		ni_mnorg;
479 	u_int32_t		ni_mndepth;
480 	u_int32_t		ni_mnranks0;
481 	u_int32_t		ni_mnranks1;
482 	u_int32_t		ni_mnrd_latency0;
483 	u_int32_t		ni_mnrd_latency1;
484 	u_int32_t		ni_mnrd_latency2;
485 	u_int32_t		ni_mnrd_latency3;
486 	u_int32_t		ni_mnrd_latency4;
487 	u_int32_t		ni_mnrd_latency5;
488 	u_int32_t		ni_mnrd_latency6;
489 	u_int32_t		ni_mnrd_latency7;
490 	u_int32_t		ni_mnrd_latency8;
491 	u_int32_t		ni_mndll[18];
492 	u_int32_t		ni_mnddr_mode;
493 	u_int32_t		ni_mnddr_extmode;
494 	u_int32_t		ni_mntiming0;
495 	u_int32_t		ni_mntiming1;
496 	u_int32_t		ni_mntiming2;
497 
498 	u_int32_t		ni_snsync_mode;
499 	u_int32_t		ni_snpt_mode;
500 	u_int32_t		ni_snecc_enable;
501 	u_int32_t		ni_snwb_enable;
502 	u_int32_t		ni_snfreq_crystal;
503 	u_int32_t		ni_snfreq_speed;
504 	u_int32_t		ni_snorg;
505 	u_int32_t		ni_sndepth;
506 	u_int32_t		ni_sndll;
507 	u_int32_t		ni_snrd_latency;
508 
509 	u_int32_t		ni_lladdr0_high;
510 	u_int32_t		ni_lladdr1_high;
511 	u_int32_t		ni_lladdr2_high;
512 	u_int32_t		ni_lladdr3_high;
513 
514 	u_int32_t		ni_magic;
515 #define NXE_INFO_MAGIC			0x12345678
516 
517 	u_int32_t		ni_mnrd_imm;
518 	u_int32_t		ni_mndll_override;
519 } __packed;
520 
521 struct nxe_imageinfo {
522 	u_int32_t		nim_bootld_ver;
523 	u_int32_t		nim_bootld_size;
524 
525 	u_int8_t		nim_img_ver_major;
526 	u_int8_t		nim_img_ver_minor;
527 	u_int16_t		nim_img_ver_build;
528 
529 	u_int32_t		min_img_size;
530 } __packed;
531 
532 struct nxe_lladdr {
533 	u_int8_t		pad[2];
534 	u_int8_t		lladdr[6];
535 } __packed;
536 
537 struct nxe_userinfo {
538 	u_int8_t		nu_flash_md5[1024];
539 
540 	struct nxe_imageinfo	nu_imageinfo;
541 
542 	u_int32_t		nu_primary;
543 	u_int32_t		nu_secondary;
544 
545 	u_int64_t		nu_lladdr[NXE_MAX_PORTS][NXE_MAX_PORT_LLADDRS];
546 
547 	u_int32_t		nu_subsys_id;
548 
549 	u_int8_t		nu_serial[32];
550 
551 	u_int32_t		nu_bios_ver;
552 } __packed;
553 
554 /* hw structures actually used in the io path */
555 
556 struct nxe_ctx_ring {
557 	u_int64_t		r_addr;
558 	u_int32_t		r_size;
559 	u_int32_t		r_reserved;
560 };
561 
562 #define NXE_RING_RX		0
563 #define NXE_RING_RX_JUMBO	1
564 #define NXE_RING_RX_LRO		2
565 #define NXE_NRING		3
566 
567 struct nxe_ctx {
568 	u_int64_t		ctx_cmd_consumer_addr;
569 
570 	struct nxe_ctx_ring	ctx_cmd_ring;
571 
572 	struct nxe_ctx_ring	ctx_rx_rings[NXE_NRING];
573 
574 	u_int64_t		ctx_status_ring_addr;
575 	u_int32_t		ctx_status_ring_size;
576 
577 	u_int32_t		ctx_id;
578 } __packed;
579 
580 struct nxe_tx_desc {
581 	u_int8_t		tx_tcp_offset;
582 	u_int8_t		tx_ip_offset;
583 	u_int16_t		tx_flags;
584 #define NXE_TXD_F_OPCODE_TX		(0x01 << 7)
585 
586 	u_int8_t		tx_nbufs;
587 	u_int16_t		tx_length; /* XXX who makes a 24bit field? */
588 	u_int8_t		tx_length_hi;
589 
590 	u_int64_t		tx_addr_2;
591 
592 	u_int16_t		tx_id;
593 	u_int16_t		tx_mss;
594 
595 	u_int8_t		tx_port;
596 	u_int8_t		tx_tso_hdr_len;
597 	u_int16_t		tx_ipsec_id;
598 
599 	u_int64_t		tx_addr_3;
600 
601 	u_int64_t		tx_addr_1;
602 
603 	u_int16_t		tx_slen_1;
604 	u_int16_t		tx_slen_2;
605 	u_int16_t		tx_slen_3;
606 	u_int16_t		tx_slen_4;
607 
608 	u_int64_t		tx_addr_4;
609 
610 	u_int64_t		tx_reserved;
611 } __packed;
612 #define NXE_TXD_SEGS		4
613 #define NXE_TXD_DESCS		8
614 #define NXE_TXD_MAX_SEGS	(NXE_TXD_SEGS * NXE_TXD_DESCS)
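
/*
 * Illustrative only (frames here never exceed 16 bits of length, so the
 * driver only fills tx_length): the 24 bit frame length is split across
 * tx_length (low 16 bits) and tx_length_hi (high 8 bits), so a hypothetical
 * helper would fill it like this:
 *
 *	txd->tx_length = htole16(len & 0xffff);
 *	txd->tx_length_hi = (len >> 16) & 0xff;
 */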
615 
616 struct nxe_rx_desc {
617 	u_int16_t		rx_id;
618 	u_int16_t		rx_flags;
619 	u_int32_t		rx_len; /* packet length */
620 	u_int64_t		rx_addr;
621 } __packed;
622 #define NXE_RXD_MAX_SEGS		1
623 
624 struct nxe_status_desc {
625 	u_int8_t		st_lro;
626 	u_int8_t		st_owner;
627 	u_int16_t		st_id;
628 	u_int16_t		st_len;
629 	u_int16_t		st_flags;
630 } __packed;
631 
632 /*
633  * driver definitions
634  */
635 
636 struct nxe_board {
637 	u_int32_t		brd_type;
638 	u_int			brd_mode;
639 };
640 
641 struct nxe_dmamem {
642 	bus_dmamap_t		ndm_map;
643 	bus_dma_segment_t	ndm_seg;
644 	size_t			ndm_size;
645 	caddr_t			ndm_kva;
646 };
647 #define NXE_DMA_MAP(_ndm)	((_ndm)->ndm_map)
648 #define NXE_DMA_LEN(_ndm)	((_ndm)->ndm_size)
649 #define NXE_DMA_DVA(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_addr)
650 #define NXE_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva)
651 
652 struct nxe_pkt {
653 	int			pkt_id;
654 	bus_dmamap_t		pkt_dmap;
655 	struct mbuf		*pkt_m;
656 	TAILQ_ENTRY(nxe_pkt)	pkt_link;
657 };
658 
659 struct nxe_pkt_list {
660 	struct nxe_pkt		*npl_pkts;
661 	TAILQ_HEAD(, nxe_pkt)	npl_free;
662 	TAILQ_HEAD(, nxe_pkt)	npl_used;
663 };
664 
665 struct nxe_ring {
666 	struct nxe_dmamem	*nr_dmamem;
667 	u_int8_t		*nr_pos;
668 
669 	u_int			nr_slot;
670 	int			nr_ready;
671 
672 	size_t			nr_desclen;
673 	u_int			nr_nentries;
674 };
675 
676 /*
677  * autoconf glue
678  */
679 
680 struct nxe_softc {
681 	struct device		sc_dev;
682 
683 	bus_dma_tag_t		sc_dmat;
684 
685 	bus_space_tag_t		sc_memt;
686 	bus_space_handle_t	sc_memh;
687 	bus_size_t		sc_mems;
688 	bus_space_handle_t	sc_crbh;
689 	bus_space_tag_t		sc_dbt;
690 	bus_space_handle_t	sc_dbh;
691 	bus_size_t		sc_dbs;
692 
693 	void			*sc_ih;
694 
695 	int			sc_function;
696 	int			sc_port;
697 	int			sc_window;
698 
699 	const struct nxe_board	*sc_board;
700 	u_int			sc_fw_major;
701 	u_int			sc_fw_minor;
702 	u_int			sc_fw_build;
703 
704 	struct arpcom		sc_ac;
705 	struct ifmedia		sc_media;
706 
707 	struct nxe_pkt_list	*sc_tx_pkts;
708 	struct nxe_pkt_list	*sc_rx_pkts;
709 
710 	/* allocations for the hw */
711 	struct nxe_dmamem	*sc_dummy_dma;
712 	struct nxe_dmamem	*sc_dummy_rx;
713 
714 	struct nxe_dmamem	*sc_ctx;
715 	u_int32_t		*sc_cmd_consumer;
716 	u_int32_t		sc_cmd_consumer_cur;
717 
718 	struct nxe_ring		*sc_cmd_ring;
719 	struct nxe_ring		*sc_rx_rings[NXE_NRING];
720 	struct nxe_ring		*sc_status_ring;
721 
722 	/* monitoring */
723 	struct timeout		sc_tick;
724 	struct ksensor		sc_sensor;
725 	struct ksensordev	sc_sensor_dev;
726 
727 	/* ioctl lock */
728 	struct rwlock		sc_lock;
729 };
730 
731 int			nxe_match(struct device *, void *, void *);
732 void			nxe_attach(struct device *, struct device *, void *);
733 int			nxe_intr(void *);
734 
735 struct cfattach nxe_ca = {
736 	sizeof(struct nxe_softc),
737 	nxe_match,
738 	nxe_attach
739 };
740 
741 struct cfdriver nxe_cd = {
742 	NULL,
743 	"nxe",
744 	DV_IFNET
745 };
746 
747 /* init code */
748 int			nxe_pci_map(struct nxe_softc *,
749 			    struct pci_attach_args *);
750 void			nxe_pci_unmap(struct nxe_softc *);
751 
752 int			nxe_board_info(struct nxe_softc *);
753 int			nxe_user_info(struct nxe_softc *);
754 int			nxe_init(struct nxe_softc *);
755 void			nxe_uninit(struct nxe_softc *);
756 void			nxe_mountroot(void *);
757 
758 /* chip state */
759 void			nxe_tick(void *);
760 void			nxe_link_state(struct nxe_softc *);
761 
762 /* interface operations */
763 int			nxe_ioctl(struct ifnet *, u_long, caddr_t);
764 void			nxe_start(struct ifnet *);
765 int			nxe_complete(struct nxe_softc *);
766 void			nxe_watchdog(struct ifnet *);
767 
768 void			nxe_rx_start(struct nxe_softc *);
769 
770 void			nxe_up(struct nxe_softc *);
771 void			nxe_lladdr(struct nxe_softc *);
772 void			nxe_iff(struct nxe_softc *);
773 void			nxe_down(struct nxe_softc *);
774 
775 int			nxe_up_fw(struct nxe_softc *);
776 
777 /* ifmedia operations */
778 int			nxe_media_change(struct ifnet *);
779 void			nxe_media_status(struct ifnet *, struct ifmediareq *);
780 
781 
782 /* ring handling */
783 struct nxe_ring		*nxe_ring_alloc(struct nxe_softc *, size_t, u_int);
784 void			nxe_ring_sync(struct nxe_softc *, struct nxe_ring *,
785 			    int);
786 void			nxe_ring_free(struct nxe_softc *, struct nxe_ring *);
787 int			nxe_ring_readable(struct nxe_ring *, int);
788 int			nxe_ring_writeable(struct nxe_ring *, int);
789 void			*nxe_ring_cur(struct nxe_softc *, struct nxe_ring *);
790 void			*nxe_ring_next(struct nxe_softc *, struct nxe_ring *);
791 
792 struct mbuf		*nxe_load_pkt(struct nxe_softc *, bus_dmamap_t,
793 			    struct mbuf *);
794 struct mbuf		*nxe_coalesce_m(struct mbuf *);
795 
796 /* pkts */
797 struct nxe_pkt_list	*nxe_pkt_alloc(struct nxe_softc *, u_int, int);
798 void			nxe_pkt_free(struct nxe_softc *,
799 			    struct nxe_pkt_list *);
800 void			nxe_pkt_put(struct nxe_pkt_list *, struct nxe_pkt *);
801 struct nxe_pkt		*nxe_pkt_get(struct nxe_pkt_list *);
802 struct nxe_pkt		*nxe_pkt_used(struct nxe_pkt_list *);
803 
804 
805 /* wrapper around dmaable memory allocations */
806 struct nxe_dmamem	*nxe_dmamem_alloc(struct nxe_softc *, bus_size_t,
807 			    bus_size_t);
808 void			nxe_dmamem_free(struct nxe_softc *,
809 			    struct nxe_dmamem *);
810 
811 /* low level hardware access goo */
812 u_int32_t		nxe_read(struct nxe_softc *, bus_size_t);
813 void			nxe_write(struct nxe_softc *, bus_size_t, u_int32_t);
814 int			nxe_wait(struct nxe_softc *, bus_size_t, u_int32_t,
815 			    u_int32_t, u_int);
816 
817 void			nxe_doorbell(struct nxe_softc *, u_int32_t);
818 
819 int			nxe_crb_set(struct nxe_softc *, int);
820 u_int32_t		nxe_crb_read(struct nxe_softc *, bus_size_t);
821 void			nxe_crb_write(struct nxe_softc *, bus_size_t,
822 			    u_int32_t);
823 int			nxe_crb_wait(struct nxe_softc *, bus_size_t,
824 			    u_int32_t, u_int32_t, u_int);
825 
826 int			nxe_rom_lock(struct nxe_softc *);
827 void			nxe_rom_unlock(struct nxe_softc *);
828 int			nxe_rom_read(struct nxe_softc *, u_int32_t,
829 			    u_int32_t *);
830 int			nxe_rom_read_region(struct nxe_softc *, u_int32_t,
831 			    void *, size_t);
832 
833 
834 /* misc bits */
835 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
836 
837 /* let's go! */
838 
839 const struct pci_matchid nxe_devices[] = {
840 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_10GXxR },
841 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_10GCX4 },
842 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_4GCU },
843 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_IMEZ },
844 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_HMEZ },
845 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_IMEZ_2 },
846 	{ PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_HMEZ_2 }
847 };
848 
849 const struct nxe_board nxe_boards[] = {
850 	{ NXE_BRDTYPE_P2_SB35_4G,	NXE_0_NIU_MODE_GBE },
851 	{ NXE_BRDTYPE_P2_SB31_10G,	NXE_0_NIU_MODE_XGE },
852 	{ NXE_BRDTYPE_P2_SB31_2G,	NXE_0_NIU_MODE_GBE },
853 	{ NXE_BRDTYPE_P2_SB31_10G_IMEZ,	NXE_0_NIU_MODE_XGE },
854 	{ NXE_BRDTYPE_P2_SB31_10G_HMEZ,	NXE_0_NIU_MODE_XGE },
855 	{ NXE_BRDTYPE_P2_SB31_10G_CX4,	NXE_0_NIU_MODE_XGE }
856 };
857 
858 int
859 nxe_match(struct device *parent, void *match, void *aux)
860 {
861 	struct pci_attach_args		*pa = aux;
862 
863 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_NETWORK)
864 		return (0);
865 
866 	return (pci_matchbyid(pa, nxe_devices, nitems(nxe_devices)));
867 }
868 
869 void
870 nxe_attach(struct device *parent, struct device *self, void *aux)
871 {
872 	struct nxe_softc		*sc = (struct nxe_softc *)self;
873 	struct pci_attach_args		*pa = aux;
874 	pci_intr_handle_t		ih;
875 	struct ifnet			*ifp;
876 
877 	sc->sc_dmat = pa->pa_dmat;
878 	sc->sc_function = pa->pa_function;
879 	sc->sc_window = -1;
880 
881 	rw_init(&sc->sc_lock, NULL);
882 
883 	if (nxe_pci_map(sc, pa) != 0) {
884 		/* error already printed by nxe_pci_map() */
885 		return;
886 	}
887 
888 	nxe_crb_set(sc, 1);
889 
890 	if (nxe_board_info(sc) != 0) {
891 		/* error already printed by nxe_board_info() */
892 		goto unmap;
893 	}
894 
895 	if (nxe_user_info(sc) != 0) {
896 		/* error already printed by nxe_user_info() */
897 		goto unmap;
898 	}
899 
900 	if (nxe_init(sc) != 0) {
901 		/* error already printed by nxe_init() */
902 		goto unmap;
903 	}
904 
905 	if (pci_intr_map(pa, &ih) != 0) {
906 		printf(": unable to map interrupt\n");
907 		goto uninit;
908 	}
909 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
910 	    nxe_intr, sc, DEVNAME(sc));
911 	if (sc->sc_ih == NULL) {
912 		printf(": unable to establish interrupt\n");
913 		goto uninit;
914 	}
915 
916 	ifp = &sc->sc_ac.ac_if;
917 	ifp->if_softc = sc;
918 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
919 	ifp->if_capabilities = IFCAP_VLAN_MTU;
920 	ifp->if_ioctl = nxe_ioctl;
921 	ifp->if_start = nxe_start;
922 	ifp->if_watchdog = nxe_watchdog;
923 	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN;
924 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
925 	IFQ_SET_MAXLEN(&ifp->if_snd, 512); /* XXX */
926 	IFQ_SET_READY(&ifp->if_snd);
927 
928 	ifmedia_init(&sc->sc_media, 0, nxe_media_change, nxe_media_status);
929 	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
930 	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
931 
932 	if_attach(ifp);
933 	ether_ifattach(ifp);
934 
935 	printf(": %s firmware %d.%d.%d address %s\n",
936 	    pci_intr_string(pa->pa_pc, ih),
937 	    sc->sc_fw_major, sc->sc_fw_minor, sc->sc_fw_build,
938 	    ether_sprintf(sc->sc_ac.ac_enaddr));
939 	return;
940 
941 uninit:
942 	nxe_uninit(sc);
943 unmap:
944 	nxe_pci_unmap(sc);
945 }
946 
947 int
948 nxe_pci_map(struct nxe_softc *sc, struct pci_attach_args *pa)
949 {
950 	pcireg_t			memtype;
951 
952 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NXE_PCI_BAR_MEM);
953 	if (pci_mapreg_map(pa, NXE_PCI_BAR_MEM, memtype, 0, &sc->sc_memt,
954 	    &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
955 		printf(": unable to map host registers\n");
956 		return (1);
957 	}
958 	if (sc->sc_mems != NXE_PCI_BAR_MEM_128MB) {
959 		printf(": unexpected register map size\n");
960 		goto unmap_mem;
961 	}
962 
963 	/* set up the CRB window */
964 	if (bus_space_subregion(sc->sc_memt, sc->sc_memh, NXE_MAP_CRB,
965 	    sc->sc_mems - NXE_MAP_CRB, &sc->sc_crbh) != 0) {
966 		printf(": unable to create CRB window\n");
967 		goto unmap_mem;
968 	}
969 
970 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NXE_PCI_BAR_DOORBELL);
971 	if (pci_mapreg_map(pa, NXE_PCI_BAR_DOORBELL, memtype, 0, &sc->sc_dbt,
972 	    &sc->sc_dbh, NULL, &sc->sc_dbs, 0) != 0) {
973 		printf(": unable to map doorbell registers\n");
974 		/* bus_space(9) says I don't have to unmap subregions */
975 		goto unmap_mem;
976 	}
977 
978 	mountroothook_establish(nxe_mountroot, sc);
979 	return (0);
980 
981 unmap_mem:
982 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
983 	sc->sc_mems = 0;
984 	return (1);
985 }
986 
987 void
988 nxe_pci_unmap(struct nxe_softc *sc)
989 {
990 	bus_space_unmap(sc->sc_dbt, sc->sc_dbh, sc->sc_dbs);
991 	sc->sc_dbs = 0;
992 	/* bus_space(9) says I don't have to unmap the crb subregion */
993 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
994 	sc->sc_mems = 0;
995 }
996 
997 int
998 nxe_intr(void *xsc)
999 {
1000 	struct nxe_softc		*sc = xsc;
1001 	u_int32_t			vector;
1002 
1003 	DASSERT(sc->sc_window == 1);
1004 
1005 	vector = nxe_crb_read(sc, NXE_1_SW_INT_VECTOR);
1006 	if (!ISSET(vector, NXE_ISR_MINE(sc->sc_function)))
1007 		return (0);
1008 
1009 	nxe_crb_write(sc, NXE_1_SW_INT_VECTOR, 0x80 << sc->sc_function);
1010 
1011 	/* the interrupt is mine! we should do some work now */
1012 
1013 	return (1);
1014 }
1015 
1016 int
1017 nxe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
1018 {
1019 	struct nxe_softc		*sc = ifp->if_softc;
1020 	struct ifaddr			*ifa = (struct ifaddr *)addr;
1021 	struct ifreq			*ifr = (struct ifreq *)addr;
1022 	int				s, error = 0;
1023 
1024 	rw_enter_write(&sc->sc_lock);
1025 	s = splnet();
1026 
1027 	timeout_del(&sc->sc_tick);
1028 
1029 	switch (cmd) {
1030 	case SIOCSIFADDR:
1031 		SET(ifp->if_flags, IFF_UP);
1032 #ifdef INET
1033 		if (ifa->ifa_addr->sa_family == AF_INET)
1034 			arp_ifinit(&sc->sc_ac, ifa);
1035 #endif
1036 		/* FALLTHROUGH */
1037 
1038 	case SIOCSIFFLAGS:
1039 		if (ISSET(ifp->if_flags, IFF_UP)) {
1040 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1041 				error = ENETRESET;
1042 			else
1043 				nxe_up(sc);
1044 		} else {
1045 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1046 				nxe_down(sc);
1047 		}
1048 		break;
1049 
1050 	case SIOCGIFMEDIA:
1051 	case SIOCSIFMEDIA:
1052 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1053 		break;
1054 
1055 	default:
1056 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
1057 	}
1058 
1059 	if (error == ENETRESET) {
1060 		if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1061 			nxe_crb_set(sc, 0);
1062 			nxe_iff(sc);
1063 			nxe_crb_set(sc, 1);
1064 		}
1065 		error = 0;
1066 	}
1067 
1068 	nxe_tick(sc);
1069 
1070 	splx(s);
1071 	rw_exit_write(&sc->sc_lock);
1072 	return (error);
1073 }
1074 
1075 void
1076 nxe_up(struct nxe_softc *sc)
1077 {
1078 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1079 	static const u_int		rx_ring_sizes[] = { 16384, 1024, 128 };
1080 	struct {
1081 		struct nxe_ctx			ctx;
1082 		u_int32_t			cmd_consumer;
1083 	} __packed			*dmamem;
1084 	struct nxe_ctx			*ctx;
1085 	struct nxe_ctx_ring		*ring;
1086 	struct nxe_ring			*nr;
1087 	u_int64_t			dva;
1088 	u_int32_t			intr_scheme;
1089 	int				i;
1090 
1091 	if (nxe_up_fw(sc) != 0)
1092 		return;
1093 
1094 	/* allocate pkt lists */
1095 	sc->sc_tx_pkts = nxe_pkt_alloc(sc, 128, NXE_TXD_MAX_SEGS);
1096 	if (sc->sc_tx_pkts == NULL)
1097 		return;
1098 	sc->sc_rx_pkts = nxe_pkt_alloc(sc, 128, NXE_RXD_MAX_SEGS);
1099 	if (sc->sc_rx_pkts == NULL)
1100 		goto free_tx_pkts;
1101 
1102 	/* allocate the context memory and the consumer field */
1103 	sc->sc_ctx = nxe_dmamem_alloc(sc, sizeof(*dmamem), PAGE_SIZE);
1104 	if (sc->sc_ctx == NULL)
1105 		goto free_rx_pkts;
1106 
1107 	dmamem = NXE_DMA_KVA(sc->sc_ctx);
1108 	dva = NXE_DMA_DVA(sc->sc_ctx);
1109 
1110 	ctx = &dmamem->ctx;
1111 	ctx->ctx_cmd_consumer_addr = htole64(dva + sizeof(dmamem->ctx));
1112 	ctx->ctx_id = htole32(sc->sc_function);
1113 
1114 	sc->sc_cmd_consumer = &dmamem->cmd_consumer;
1115 	sc->sc_cmd_consumer_cur = 0;
1116 
1117 	/* allocate the cmd/tx ring */
1118 	sc->sc_cmd_ring = nxe_ring_alloc(sc,
1119 	    sizeof(struct nxe_tx_desc), 1024 /* XXX */);
1120 	if (sc->sc_cmd_ring == NULL)
1121 		goto free_ctx;
1122 
1123 	ctx->ctx_cmd_ring.r_addr =
1124 	    htole64(NXE_DMA_DVA(sc->sc_cmd_ring->nr_dmamem));
1125 	ctx->ctx_cmd_ring.r_size = htole32(sc->sc_cmd_ring->nr_nentries);
1126 
1127 	/* allocate the status ring */
1128 	sc->sc_status_ring = nxe_ring_alloc(sc,
1129 	    sizeof(struct nxe_status_desc), 16384 /* XXX */);
1130 	if (sc->sc_status_ring == NULL)
1131 		goto free_cmd_ring;
1132 
1133 	ctx->ctx_status_ring_addr =
1134 	    htole64(NXE_DMA_DVA(sc->sc_status_ring->nr_dmamem));
1135 	ctx->ctx_status_ring_size = htole32(sc->sc_status_ring->nr_nentries);
1136 
1137 	/* allocate something to point the jumbo and lro rings at */
1138 	sc->sc_dummy_rx = nxe_dmamem_alloc(sc, NXE_MAX_PKTLEN, PAGE_SIZE);
1139 	if (sc->sc_dummy_rx == NULL)
1140 		goto free_status_ring;
1141 
1142 	/* allocate the rx rings */
1143 	for (i = 0; i < NXE_NRING; i++) {
1144 		ring = &ctx->ctx_rx_rings[i];
1145 		nr = nxe_ring_alloc(sc, sizeof(struct nxe_rx_desc),
1146 		    rx_ring_sizes[i]);
1147 		if (nr == NULL)
1148 			goto free_rx_rings;
1149 
1150 		ring->r_addr = htole64(NXE_DMA_DVA(nr->nr_dmamem));
1151 		ring->r_size = htole32(nr->nr_nentries);
1152 
1153 		sc->sc_rx_rings[i] = nr;
1154 		nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_PREWRITE);
1155 	}
1156 
1157 	/* nothing can possibly go wrong now */
1158 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_rx),
1159 	    0, NXE_DMA_LEN(sc->sc_dummy_rx), BUS_DMASYNC_PREREAD);
1160 	nxe_ring_sync(sc, sc->sc_status_ring, BUS_DMASYNC_PREREAD);
1161 	nxe_ring_sync(sc, sc->sc_cmd_ring, BUS_DMASYNC_PREWRITE);
1162 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1163 	    0, NXE_DMA_LEN(sc->sc_ctx),
1164 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1165 
1166 	nxe_crb_write(sc, NXE_1_SW_CONTEXT_ADDR_LO(sc->sc_function),
1167 	    (u_int32_t)dva);
1168 	nxe_crb_write(sc, NXE_1_SW_CONTEXT_ADDR_HI(sc->sc_function),
1169 	    (u_int32_t)(dva >> 32));
1170 	nxe_crb_write(sc, NXE_1_SW_CONTEXT(sc->sc_port),
1171 	    NXE_1_SW_CONTEXT_SIG(sc->sc_port));
1172 
1173 	nxe_crb_set(sc, 0);
1174 	nxe_crb_write(sc, NXE_0_XG_MTU(sc->sc_function),
1175 	    MCLBYTES - ETHER_ALIGN);
1176 	nxe_lladdr(sc);
1177 	nxe_iff(sc);
1178 	nxe_crb_set(sc, 1);
1179 
1180 	SET(ifp->if_flags, IFF_RUNNING);
1181 	CLR(ifp->if_flags, IFF_OACTIVE);
1182 
1183 	/* enable interrupts */
1184 	intr_scheme = nxe_crb_read(sc, NXE_1_SW_NIC_CAP_FW);
1185 	if (intr_scheme != NXE_1_SW_NIC_CAP_PORTINTR)
1186 		nxe_write(sc, NXE_ISR_MASK, 0x77f);
1187 	nxe_crb_write(sc, NXE_1_SW_INT_MASK(sc->sc_function), 0x1);
1188 	if (intr_scheme != NXE_1_SW_NIC_CAP_PORTINTR)
1189 		nxe_crb_write(sc, NXE_1_SW_INT_VECTOR, 0x0);
1190 	nxe_write(sc, NXE_ISR_TARGET_MASK, 0xbff);
1191 
1192 	return;
1193 
1194 free_rx_rings:
1195 	while (i > 0) {
1196 		i--;
1197 		nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_POSTWRITE);
1198 		nxe_ring_free(sc, sc->sc_rx_rings[i]);
1199 	}
1200 
1201 	nxe_dmamem_free(sc, sc->sc_dummy_rx);
1202 free_status_ring:
1203 	nxe_ring_free(sc, sc->sc_status_ring);
1204 free_cmd_ring:
1205 	nxe_ring_free(sc, sc->sc_cmd_ring);
1206 free_ctx:
1207 	nxe_dmamem_free(sc, sc->sc_ctx);
1208 free_rx_pkts:
1209 	nxe_pkt_free(sc, sc->sc_rx_pkts);
1210 free_tx_pkts:
1211 	nxe_pkt_free(sc, sc->sc_tx_pkts);
1212 }
1213 
1214 int
1215 nxe_up_fw(struct nxe_softc *sc)
1216 {
1217 	u_int32_t			r;
1218 
1219 	r = nxe_crb_read(sc, NXE_1_SW_CMDPEG_STATE);
1220 	if (r == NXE_1_SW_CMDPEG_STATE_ACK)
1221 		return (0);
1222 
1223 	if (r != NXE_1_SW_CMDPEG_STATE_DONE)
1224 		return (1);
1225 
1226 	nxe_crb_write(sc, NXE_1_SW_NIC_CAP_HOST, NXE_1_SW_NIC_CAP_PORTINTR);
1227 	nxe_crb_write(sc, NXE_1_SW_MPORT_MODE, NXE_1_SW_MPORT_MODE_MULTI);
1228 	nxe_crb_write(sc, NXE_1_SW_CMDPEG_STATE, NXE_1_SW_CMDPEG_STATE_ACK);
1229 
1230 	/* XXX busy wait in a process context is naughty */
1231 	if (!nxe_crb_wait(sc, NXE_1_SW_STATUS_STATE(sc->sc_function),
1232 	    0xffffffff, NXE_1_SW_STATUS_STATE_READY, 1000))
1233 		return (1);
1234 
1235 	return (0);
1236 }
1237 
1238 void
1239 nxe_lladdr(struct nxe_softc *sc)
1240 {
1241 	u_int8_t			*lladdr = sc->sc_ac.ac_enaddr;
1242 
1243 	DASSERT(sc->sc_window == 0);
1244 
1245 	nxe_crb_write(sc, NXE_0_XG_MAC_LO(sc->sc_port),
1246 	    (lladdr[0] << 16) | (lladdr[1] << 24));
1247 	nxe_crb_write(sc, NXE_0_XG_MAC_HI(sc->sc_port),
1248 	    (lladdr[2] << 0)  | (lladdr[3] << 8) |
1249 	    (lladdr[4] << 16) | (lladdr[5] << 24));
1250 }
1251 
1252 void
1253 nxe_iff(struct nxe_softc *sc)
1254 {
1255 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1256 	u_int32_t			cfg1 = 0x1447; /* XXX */
1257 
1258 	DASSERT(sc->sc_window == 0);
1259 
1260 	CLR(ifp->if_flags, IFF_ALLMULTI);
1261 	if (sc->sc_ac.ac_multirangecnt > 0 || sc->sc_ac.ac_multicnt > 0) {
1262 		cfg1 |= NXE_0_XG_CFG1_MULTICAST;
1263 		SET(ifp->if_flags, IFF_ALLMULTI);
1264 	}
1265 
1266 	if (ISSET(ifp->if_flags, IFF_PROMISC))
1267 		cfg1 |= NXE_0_XG_CFG1_PROMISC;
1268 
1269 	nxe_crb_write(sc, NXE_0_XG_CFG0(sc->sc_port),
1270 	    NXE_0_XG_CFG0_TX_EN | NXE_0_XG_CFG0_RX_EN);
1271 	nxe_crb_write(sc, NXE_0_XG_CFG1(sc->sc_port), cfg1);
1272 }
1273 
1274 void
1275 nxe_down(struct nxe_softc *sc)
1276 {
1277 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1278 	int				i;
1279 
1280 	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE | IFF_ALLMULTI);
1281 
1282 	/* XXX turn the chip off */
1283 
1284 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1285 	    0, NXE_DMA_LEN(sc->sc_ctx),
1286 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1287 	nxe_ring_sync(sc, sc->sc_cmd_ring, BUS_DMASYNC_POSTWRITE);
1288 	nxe_ring_sync(sc, sc->sc_status_ring, BUS_DMASYNC_POSTREAD);
1289 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_rx),
1290 	    0, NXE_DMA_LEN(sc->sc_dummy_rx), BUS_DMASYNC_POSTREAD);
1291 
1292 	for (i = 0; i < NXE_NRING; i++) {
1293 		nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_POSTWRITE);
1294 		nxe_ring_free(sc, sc->sc_rx_rings[i]);
1295 	}
1296 	nxe_dmamem_free(sc, sc->sc_dummy_rx);
1297 	nxe_ring_free(sc, sc->sc_status_ring);
1298 	nxe_ring_free(sc, sc->sc_cmd_ring);
1299 	nxe_dmamem_free(sc, sc->sc_ctx);
1300 	nxe_pkt_free(sc, sc->sc_rx_pkts);
1301 	nxe_pkt_free(sc, sc->sc_tx_pkts);
1302 }
1303 
1304 void
1305 nxe_start(struct ifnet *ifp)
1306 {
1307 	struct nxe_softc		*sc = ifp->if_softc;
1308 	struct nxe_ring			*nr = sc->sc_cmd_ring;
1309 	struct nxe_tx_desc		*txd;
1310 	struct nxe_pkt			*pkt;
1311 	struct mbuf			*m;
1312 	bus_dmamap_t			dmap;
1313 	bus_dma_segment_t		*segs;
1314 	int				nsegs;
1315 
1316 	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1317 	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
1318 	    IFQ_IS_EMPTY(&ifp->if_snd))
1319 		return;
1320 
1321 	if (nxe_ring_writeable(nr, sc->sc_cmd_consumer_cur) < NXE_TXD_DESCS) {
1322 		SET(ifp->if_flags, IFF_OACTIVE);
1323 		return;
1324 	}
1325 
1326 	nxe_ring_sync(sc, nr, BUS_DMASYNC_POSTWRITE);
1327 	txd = nxe_ring_cur(sc, nr);
1328 	bzero(txd, sizeof(struct nxe_tx_desc));
1329 
1330 	do {
1331 		IFQ_POLL(&ifp->if_snd, m);
1332 		if (m == NULL)
1333 			break;
1334 
1335 		pkt = nxe_pkt_get(sc->sc_tx_pkts);
1336 		if (pkt == NULL) {
1337 			SET(ifp->if_flags, IFF_OACTIVE);
1338 			break;
1339 		}
1340 
1341 		IFQ_DEQUEUE(&ifp->if_snd, m);
1342 
1343 		dmap = pkt->pkt_dmap;
1344 		m = nxe_load_pkt(sc, dmap, m);
1345 		if (m == NULL) {
1346 			nxe_pkt_put(sc->sc_tx_pkts, pkt);
1347 			ifp->if_oerrors++;
1348 			break;
1349 		}
1350 
1351 #if NBPFILTER > 0
1352 		if (ifp->if_bpf)
1353 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1354 #endif
1355 
1356 		pkt->pkt_m = m;
1357 
1358 		txd->tx_flags = htole16(NXE_TXD_F_OPCODE_TX);
1359 		txd->tx_nbufs = dmap->dm_nsegs;
1360 		txd->tx_length = htole16(dmap->dm_mapsize);
1361 		txd->tx_port = sc->sc_port;
1362 
1363 		segs = dmap->dm_segs;
1364 		nsegs = dmap->dm_nsegs;
1365 		do {
1366 			switch ((nsegs > NXE_TXD_SEGS) ?
1367 			    NXE_TXD_SEGS : nsegs) {
1368 			case 4:
1369 				txd->tx_addr_4 = htole64(segs[3].ds_addr);
1370 				txd->tx_slen_4 = htole16(segs[3].ds_len);
1371 			case 3:
1372 				txd->tx_addr_3 = htole64(segs[2].ds_addr);
1373 				txd->tx_slen_3 = htole16(segs[2].ds_len);
1374 			case 2:
1375 				txd->tx_addr_2 = htole64(segs[1].ds_addr);
1376 				txd->tx_slen_2 = htole16(segs[1].ds_len);
1377 			case 1:
1378 				txd->tx_addr_1 = htole64(segs[0].ds_addr);
1379 				txd->tx_slen_1 = htole16(segs[0].ds_len);
1380 				break;
1381 			default:
1382 				panic("%s: unexpected segments in tx map",
1383 				    DEVNAME(sc));
1384 			}
1385 
1386 			nsegs -= NXE_TXD_SEGS;
1387 			segs += NXE_TXD_SEGS;
1388 
1389 			pkt->pkt_id = nr->nr_slot;
1390 
1391 			txd = nxe_ring_next(sc, nr);
1392 			bzero(txd, sizeof(struct nxe_tx_desc));
1393 		} while (nsegs > 0);
1394 
1395 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1396 		    BUS_DMASYNC_PREWRITE);
1397 
1398 		ifp->if_opackets++;
1399 	} while (nr->nr_ready >= NXE_TXD_DESCS);
1400 
1401 	nxe_ring_sync(sc, nr, BUS_DMASYNC_PREWRITE);
1402 	nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), nr->nr_slot);
1403 }
1404 
1405 int
1406 nxe_complete(struct nxe_softc *sc)
1407 {
1408 	struct nxe_pkt			*pkt;
1409 	int				new_cons, cur_cons;
1410 	int				rv = 0;
1411 
1412 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1413 	    0, NXE_DMA_LEN(sc->sc_ctx),
1414 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1415 	new_cons = letoh32(*sc->sc_cmd_consumer);
1416 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1417 	    0, NXE_DMA_LEN(sc->sc_ctx),
1418 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1419 
1420 	cur_cons = sc->sc_cmd_consumer_cur;
1421 	pkt = nxe_pkt_used(sc->sc_tx_pkts);
1422 
1423 	while (pkt != NULL && cur_cons != new_cons) {
1424 		if (pkt->pkt_id == cur_cons) {
1425 			bus_dmamap_sync(sc->sc_dmat, pkt->pkt_dmap,
1426 			    0, pkt->pkt_dmap->dm_mapsize,
1427 			    BUS_DMASYNC_POSTWRITE);
1428 			bus_dmamap_unload(sc->sc_dmat, pkt->pkt_dmap);
1429 
1430 			m_freem(pkt->pkt_m);
1431 
1432 			nxe_pkt_put(sc->sc_tx_pkts, pkt);
1433 
1434 			pkt = nxe_pkt_used(sc->sc_tx_pkts);
1435 		}
1436 
1437 		cur_cons++;
1438 		cur_cons %= sc->sc_cmd_ring->nr_nentries;
1439 
1440 		rv = 1;
1441 	}
1442 
1443 	if (rv == 1) {
1444 		sc->sc_cmd_consumer_cur = cur_cons;
1445 		CLR(sc->sc_ac.ac_if.if_flags, IFF_OACTIVE);
1446 	}
1447 
1448 	return (rv);
1449 }
1450 
1451 struct mbuf *
1452 nxe_coalesce_m(struct mbuf *m)
1453 {
1454 	struct mbuf			*m0;
1455 
1456 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
1457 	if (m0 == NULL)
1458 		goto err;
1459 
1460 	if (m->m_pkthdr.len > MHLEN) {
1461 		MCLGET(m0, M_DONTWAIT);
1462 		if (!(m0->m_flags & M_EXT)) {
1463 			m_freem(m0);
1464 			m0 = NULL;
1465 			goto err;
1466 		}
1467 	}
1468 
1469 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
1470 	m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
1471 
1472 err:
1473 	m_freem(m);
1474 	return (m0);
1475 }
1476 
1477 struct mbuf *
1478 nxe_load_pkt(struct nxe_softc *sc, bus_dmamap_t dmap, struct mbuf *m)
1479 {
1480 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT)) {
1481 	case 0:
1482 		break;
1483 
1484 	case EFBIG:
1485 		m = nxe_coalesce_m(m);
1486 		if (m == NULL)
1487 			break;
1488 
1489 		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
1490 		    BUS_DMA_NOWAIT) == 0)
1491 			break;
1492 
1493 		/* we get here on error */
1494 		/* FALLTHROUGH */
1495 	default:
1496 		m_freem(m);
1497 		m = NULL;
1498 		break;
1499 	}
1500 
1501 	return (m);
1502 }
1503 
1504 void
1505 nxe_rx_start(struct nxe_softc *sc)
1506 {
1507 	struct nxe_ring			*nr = sc->sc_rx_rings[NXE_RING_RX];
1508 	struct nxe_rx_desc		*rxd;
1509 	struct nxe_pkt			*pkt;
1510 	struct mbuf			*m;
1511 
1512 	if (nxe_ring_writeable(nr, 0) == 0)
1513 		return;
1514 
1515 	nxe_ring_sync(sc, nr, BUS_DMASYNC_POSTWRITE);
1516 	rxd = nxe_ring_cur(sc, nr);
1517 
1518 	for (;;) {
1519 		pkt = nxe_pkt_get(sc->sc_rx_pkts);
1520 		if (pkt == NULL)
1521 			goto done;
1522 
1523 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1524 		if (m == NULL)
1525 			goto put_pkt;
1526 
1527 		MCLGET(m, M_DONTWAIT);
1528 		if (!ISSET(m->m_flags, M_EXT))
1529 			goto free_m;
1530 
1531 		m->m_data += ETHER_ALIGN;
1532 		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
1533 
1534 		if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->pkt_dmap, m,
1535 		    BUS_DMA_NOWAIT) != 0)
1536 			goto free_m;
1537 
1538 		pkt->pkt_m = m;
1539 
1540 		bzero(rxd, sizeof(struct nxe_rx_desc));
1541 		rxd->rx_len = htole32(m->m_len);
1542 		rxd->rx_id = pkt->pkt_id;
1543 		rxd->rx_addr = htole64(pkt->pkt_dmap->dm_segs[0].ds_addr);
1544 
1545 		bus_dmamap_sync(sc->sc_dmat, pkt->pkt_dmap, 0,
1546 		    pkt->pkt_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
1547 
1548 		rxd = nxe_ring_next(sc, nr);
1549 
1550 		if (nr->nr_ready == 0)
1551 			goto done;
1552 	}
1553 
1554 free_m:
1555 	m_freem(m);
1556 put_pkt:
1557 	nxe_pkt_put(sc->sc_rx_pkts, pkt);
1558 done:
1559 	nxe_ring_sync(sc, nr, BUS_DMASYNC_PREWRITE);
1560 	nxe_crb_write(sc, NXE_1_SW_RX_PRODUCER(sc->sc_function), nr->nr_slot);
1561 	nxe_doorbell(sc, NXE_DB_PEGID_RX | NXE_DB_PRIVID |
1562 	    NXE_DB_OPCODE_RX_PROD |
1563 	    NXE_DB_COUNT(nr->nr_slot) | NXE_DB_CTXID(sc->sc_function));
1564 }
1565 
1566 void
1567 nxe_watchdog(struct ifnet *ifp)
1568 {
1569 	/* do nothing */
1570 }
1571 
1572 int
1573 nxe_media_change(struct ifnet *ifp)
1574 {
1575 	/* ignore for now */
1576 	return (0);
1577 }
1578 
1579 void
1580 nxe_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1581 {
1582 	struct nxe_softc		*sc = ifp->if_softc;
1583 
1584 	imr->ifm_active = IFM_ETHER | IFM_AUTO;
1585 	imr->ifm_status = IFM_AVALID;
1586 
1587 	nxe_link_state(sc);
1588 	if (LINK_STATE_IS_UP(ifp->if_link_state))
1589 		imr->ifm_status |= IFM_ACTIVE;
1590 }
1591 
1592 void
1593 nxe_link_state(struct nxe_softc *sc)
1594 {
1595 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1596 	int				link_state = LINK_STATE_DOWN;
1597 	u_int32_t			r;
1598 
1599 	DASSERT(sc->sc_window == 1);
1600 
1601 	r = nxe_crb_read(sc, NXE_1_SW_XG_STATE);
1602 	if (NXE_1_SW_XG_STATE_PORT(r, sc->sc_function) & NXE_1_SW_XG_STATE_UP)
1603 		link_state = LINK_STATE_UP;
1604 
1605 	if (ifp->if_link_state != link_state) {
1606 		ifp->if_link_state = link_state;
1607 		if_link_state_change(ifp);
1608 	}
1609 }
1610 
1611 int
1612 nxe_board_info(struct nxe_softc *sc)
1613 {
1614 	struct nxe_info			*ni;
1615 	int				rv = 1;
1616 	int				i;
1617 
1618 	ni = malloc(sizeof(struct nxe_info), M_TEMP, M_NOWAIT);
1619 	if (ni == NULL) {
1620 		printf(": unable to allocate temporary memory\n");
1621 		return (1);
1622 	}
1623 
1624 	if (nxe_rom_read_region(sc, NXE_FLASH_BRDCFG, ni,
1625 	    sizeof(struct nxe_info)) != 0) {
1626 		printf(": unable to read board info\n");
1627 		goto out;
1628 	}
1629 
1630 	if (ni->ni_hdrver != NXE_INFO_HDRVER_1) {
1631 		printf(": unexpected board info header version 0x%08x\n",
1632 		    ni->ni_hdrver);
1633 		goto out;
1634 	}
1635 	if (ni->ni_magic != NXE_INFO_MAGIC) {
1636 		printf(": board info magic is invalid\n");
1637 		goto out;
1638 	}
1639 
1640 	for (i = 0; i < nitems(nxe_boards); i++) {
1641 		if (ni->ni_board_type == nxe_boards[i].brd_type) {
1642 			sc->sc_board = &nxe_boards[i];
1643 			break;
1644 		}
1645 	}
1646 	if (sc->sc_board == NULL) {
1647 		printf(": unknown board type %04x\n", ni->ni_board_type);
1648 		goto out;
1649 	}
1650 
1651 	rv = 0;
1652 out:
1653 	free(ni, M_TEMP);
1654 	return (rv);
1655 }
1656 
1657 int
1658 nxe_user_info(struct nxe_softc *sc)
1659 {
1660 	struct nxe_userinfo		*nu;
1661 	u_int64_t			lladdr;
1662 	struct nxe_lladdr		*la;
1663 	int				rv = 1;
1664 
1665 	nu = malloc(sizeof(struct nxe_userinfo), M_TEMP, M_NOWAIT);
1666 	if (nu == NULL) {
1667 		printf(": unable to allocate temp memory\n");
1668 		return (1);
1669 	}
1670 	if (nxe_rom_read_region(sc, NXE_FLASH_USER, nu,
1671 	    sizeof(struct nxe_userinfo)) != 0) {
1672 		printf(": unable to read user info\n");
1673 		goto out;
1674 	}
1675 
1676 	sc->sc_fw_major = nu->nu_imageinfo.nim_img_ver_major;
1677 	sc->sc_fw_minor = nu->nu_imageinfo.nim_img_ver_minor;
1678 	sc->sc_fw_build = letoh16(nu->nu_imageinfo.nim_img_ver_build);
1679 
1680 	if (sc->sc_fw_major > NXE_VERSION_MAJOR ||
1681 	    sc->sc_fw_major < NXE_VERSION_MAJOR ||
1682 	    sc->sc_fw_minor > NXE_VERSION_MINOR ||
1683 	    sc->sc_fw_minor < NXE_VERSION_MINOR) {
1684 		printf(": firmware %d.%d.%d is unsupported by this driver\n",
1685 		    sc->sc_fw_major, sc->sc_fw_minor, sc->sc_fw_build);
1686 		goto out;
1687 	}
1688 
1689 	lladdr = swap64(nu->nu_lladdr[sc->sc_function][0]);
1690 	la = (struct nxe_lladdr *)&lladdr;
1691 	bcopy(la->lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1692 
1693 	rv = 0;
1694 out:
1695 	free(nu, M_TEMP);
1696 	return (rv);
1697 }
1698 
1699 int
1700 nxe_init(struct nxe_softc *sc)
1701 {
1702 	u_int64_t			dva;
1703 	u_int32_t			r;
1704 
1705 	/* stop the chip from processing */
1706 	nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), 0);
1707 	nxe_crb_write(sc, NXE_1_SW_CMD_CONSUMER(sc->sc_function), 0);
1708 	nxe_crb_write(sc, NXE_1_SW_CMD_ADDR_HI, 0);
1709 	nxe_crb_write(sc, NXE_1_SW_CMD_ADDR_LO, 0);
1710 
1711 	/*
1712 	 * if this is the first port on the device it needs some special
1713 	 * treatment to get things going.
1714 	 */
1715 	if (sc->sc_function == 0) {
1716 		/* init adapter offload */
1717 		sc->sc_dummy_dma = nxe_dmamem_alloc(sc,
1718 		    NXE_1_SW_DUMMY_ADDR_LEN, PAGE_SIZE);
1719 		if (sc->sc_dummy_dma == NULL) {
1720 			printf(": unable to allocate dummy memory\n");
1721 			return (1);
1722 		}
1723 
1724 		bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1725 		    0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_PREREAD);
1726 
1727 		dva = NXE_DMA_DVA(sc->sc_dummy_dma);
1728 		nxe_crb_write(sc, NXE_1_SW_DUMMY_ADDR_HI, dva >> 32);
1729 		nxe_crb_write(sc, NXE_1_SW_DUMMY_ADDR_LO, dva);
1730 
1731 		r = nxe_crb_read(sc, NXE_1_SW_BOOTLD_CONFIG);
1732 		if (r == 0x55555555) {
1733 			r = nxe_crb_read(sc, NXE_1_ROMUSB_SW_RESET);
1734 			if (r != NXE_1_ROMUSB_SW_RESET_BOOT) {
1735 				printf(": unexpected boot state\n");
1736 				goto err;
1737 			}
1738 
1739 			/* clear */
1740 			nxe_crb_write(sc, NXE_1_SW_BOOTLD_CONFIG, 0);
1741 		}
1742 
1743 		/* start the device up */
1744 		nxe_crb_write(sc, NXE_1_SW_DRIVER_VER, NXE_VERSION);
1745 		nxe_crb_write(sc, NXE_1_GLB_PEGTUNE, NXE_1_GLB_PEGTUNE_DONE);
1746 
1747 		/*
1748 		 * the firmware takes a long time to boot, so we'll check
1749 		 * it later on, and again when we want to bring a port up.
1750 		 */
1751 	}
1752 
1753 	return (0);
1754 
1755 err:
1756 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1757 	    0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_POSTREAD);
1758 	nxe_dmamem_free(sc, sc->sc_dummy_dma);
1759 	return (1);
1760 }
1761 
1762 void
1763 nxe_uninit(struct nxe_softc *sc)
1764 {
1765 	if (sc->sc_function == 0) {
1766 		bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1767 		    0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_POSTREAD);
1768 		nxe_dmamem_free(sc, sc->sc_dummy_dma);
1769 	}
1770 }
1771 
1772 void
1773 nxe_mountroot(void *arg)
1774 {
1775 	struct nxe_softc		*sc = arg;
1776 
1777 	DASSERT(sc->sc_window == 1);
1778 
1779 	if (!nxe_crb_wait(sc, NXE_1_SW_CMDPEG_STATE, 0xffffffff,
1780 	    NXE_1_SW_CMDPEG_STATE_DONE, 10000)) {
1781 		printf("%s: firmware bootstrap failed, code 0x%08x\n",
1782 		    DEVNAME(sc), nxe_crb_read(sc, NXE_1_SW_CMDPEG_STATE));
1783 		return;
1784 	}
1785 
1786 	sc->sc_port = nxe_crb_read(sc, NXE_1_SW_V2P(sc->sc_function));
1787 	if (sc->sc_port == 0x55555555)
1788 		sc->sc_port = sc->sc_function;
1789 
1790 	nxe_crb_write(sc, NXE_1_SW_NIC_CAP_HOST, NXE_1_SW_NIC_CAP_PORTINTR);
1791 	nxe_crb_write(sc, NXE_1_SW_MPORT_MODE, NXE_1_SW_MPORT_MODE_MULTI);
1792 	nxe_crb_write(sc, NXE_1_SW_CMDPEG_STATE, NXE_1_SW_CMDPEG_STATE_ACK);
1793 
1794 	sc->sc_sensor.type = SENSOR_TEMP;
1795 	strlcpy(sc->sc_sensor_dev.xname, DEVNAME(sc),
1796 	    sizeof(sc->sc_sensor_dev.xname));
1797 	sensor_attach(&sc->sc_sensor_dev, &sc->sc_sensor);
1798 	sensordev_install(&sc->sc_sensor_dev);
1799 
1800 	timeout_set(&sc->sc_tick, nxe_tick, sc);
1801 	nxe_tick(sc);
1802 }
1803 
1804 void
1805 nxe_tick(void *xsc)
1806 {
1807 	struct nxe_softc		*sc = xsc;
1808 	u_int32_t			temp;
1809 	int				window;
1810 	int				s;
1811 
1812 	s = splnet();
1813 	window = nxe_crb_set(sc, 1);
1814 	temp = nxe_crb_read(sc, NXE_1_SW_TEMP);
1815 	nxe_link_state(sc);
1816 	nxe_crb_set(sc, window);
1817 	splx(s);
1818 
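	/*
	 * the chip reports the temperature in degrees Celsius; the sensors
	 * framework wants microkelvin, hence the conversion below.
	 */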
1819 	sc->sc_sensor.value = NXE_1_SW_TEMP_VAL(temp) * 1000000 + 273150000;
1820 	sc->sc_sensor.flags = 0;
1821 
1822 	switch (NXE_1_SW_TEMP_STATE(temp)) {
1823 	case NXE_1_SW_TEMP_STATE_NONE:
1824 		sc->sc_sensor.status = SENSOR_S_UNSPEC;
1825 		break;
1826 	case NXE_1_SW_TEMP_STATE_OK:
1827 		sc->sc_sensor.status = SENSOR_S_OK;
1828 		break;
1829 	case NXE_1_SW_TEMP_STATE_WARN:
1830 		sc->sc_sensor.status = SENSOR_S_WARN;
1831 		break;
1832 	case NXE_1_SW_TEMP_STATE_CRIT:
1833 		/* we should probably bring things down if this is true */
1834 		sc->sc_sensor.status = SENSOR_S_CRIT;
1835 		break;
1836 	default:
1837 		sc->sc_sensor.flags = SENSOR_FUNKNOWN;
1838 		break;
1839 	}
1840 
1841 	timeout_add_sec(&sc->sc_tick, 5);
1842 }
1843 
1844 
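/*
 * Descriptor ring helpers.
 *
 * A ring is a physically contiguous array of fixed-size descriptors shared
 * with the firmware. nr_slot and nr_pos track the driver's current index and
 * the kva of the matching descriptor. nxe_ring_readable() returns how far the
 * hardware producer has moved ahead of us, nxe_ring_writeable() how many
 * slots are free before we would catch up with the hardware consumer; both
 * cache the result in nr_ready, which nxe_ring_next() decrements as
 * descriptors are filled or consumed.
 *
 * A caller typically looks something like this (illustrative sketch only;
 * "ring", "consumer" and "desc" are hypothetical):
 *
 *	if (nxe_ring_writeable(ring, consumer) < slots_needed)
 *		return;
 *	desc = nxe_ring_cur(sc, ring);
 *	... fill in the descriptor ...
 *	desc = nxe_ring_next(sc, ring);
 */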
1845 struct nxe_ring *
1846 nxe_ring_alloc(struct nxe_softc *sc, size_t desclen, u_int nentries)
1847 {
1848 	struct nxe_ring			*nr;
1849 
1850 	nr = malloc(sizeof(struct nxe_ring), M_DEVBUF, M_WAITOK);
1851 
1852 	nr->nr_dmamem = nxe_dmamem_alloc(sc, desclen * nentries, PAGE_SIZE);
1853 	if (nr->nr_dmamem == NULL) {
1854 		free(nr, M_DEVBUF);
1855 		return (NULL);
1856 	}
1857 
1858 	nr->nr_pos = NXE_DMA_KVA(nr->nr_dmamem);
1859 	nr->nr_slot = 0;
1860 	nr->nr_desclen = desclen;
1861 	nr->nr_nentries = nentries;
1862 
1863 	return (nr);
1864 }
1865 
1866 void
1867 nxe_ring_sync(struct nxe_softc *sc, struct nxe_ring *nr, int flags)
1868 {
1869 	bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(nr->nr_dmamem),
1870 	    0, NXE_DMA_LEN(nr->nr_dmamem), flags);
1871 }
1872 
1873 void
1874 nxe_ring_free(struct nxe_softc *sc, struct nxe_ring *nr)
1875 {
1876 	nxe_dmamem_free(sc, nr->nr_dmamem);
1877 	free(nr, M_DEVBUF);
1878 }
1879 
1880 int
1881 nxe_ring_readable(struct nxe_ring *nr, int producer)
1882 {
1883 	nr->nr_ready = producer - nr->nr_slot;
1884 	if (nr->nr_ready < 0)
1885 		nr->nr_ready += nr->nr_nentries;
1886 
1887 	return (nr->nr_ready);
1888 }
1889 
1890 int
1891 nxe_ring_writeable(struct nxe_ring *nr, int consumer)
1892 {
1893 	nr->nr_ready = consumer - nr->nr_slot;
1894 	if (nr->nr_ready <= 0)
1895 		nr->nr_ready += nr->nr_nentries;
1896 
1897 	return (nr->nr_ready);
1898 }
1899 
1900 void *
1901 nxe_ring_cur(struct nxe_softc *sc, struct nxe_ring *nr)
1902 {
1903 	return (nr->nr_pos);
1904 }
1905 
1906 void *
1907 nxe_ring_next(struct nxe_softc *sc, struct nxe_ring *nr)
1908 {
1909 	if (++nr->nr_slot >= nr->nr_nentries) {
1910 		nr->nr_slot = 0;
1911 		nr->nr_pos = NXE_DMA_KVA(nr->nr_dmamem);
1912 	} else
1913 		nr->nr_pos += nr->nr_desclen;
1914 
1915 	nr->nr_ready--;
1916 
1917 	return (nr->nr_pos);
1918 }
1919 
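/*
 * Packet list helpers.
 *
 * A pkt list is a fixed pool of nxe_pkt structures, each with a preallocated
 * DMA map, used to tie mbufs to ring descriptors. Free and in-use packets
 * live on separate TAILQs: nxe_pkt_get() takes one off the free list,
 * nxe_pkt_put() returns it, and nxe_pkt_used() peeks at the oldest
 * outstanding packet.
 */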
1920 struct nxe_pkt_list *
1921 nxe_pkt_alloc(struct nxe_softc *sc, u_int npkts, int nsegs)
1922 {
1923 	struct nxe_pkt_list		*npl;
1924 	struct nxe_pkt			*pkt;
1925 	int				i;
1926 
1927 	npl = malloc(sizeof(*npl), M_DEVBUF, M_WAITOK | M_ZERO);
1928 	pkt = malloc(sizeof(*pkt) * npkts, M_DEVBUF, M_WAITOK | M_ZERO);
1929 
1930 	npl->npl_pkts = pkt;
1931 	TAILQ_INIT(&npl->npl_free);
1932 	TAILQ_INIT(&npl->npl_used);
1933 	for (i = 0; i < npkts; i++) {
1934 		pkt = &npl->npl_pkts[i];
1935 
1936 		pkt->pkt_id = i;
1937 		if (bus_dmamap_create(sc->sc_dmat, NXE_MAX_PKTLEN, nsegs,
1938 		    NXE_MAX_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1939 		    &pkt->pkt_dmap) != 0) {
1940 			nxe_pkt_free(sc, npl);
1941 			return (NULL);
1942 		}
1943 
1944 		TAILQ_INSERT_TAIL(&npl->npl_free, pkt, pkt_link);
1945 	}
1946 
1947 	return (npl);
1948 }
1949 
1950 void
1951 nxe_pkt_free(struct nxe_softc *sc, struct nxe_pkt_list *npl)
1952 {
1953 	struct nxe_pkt			*pkt;
1954 
1955 	while ((pkt = nxe_pkt_get(npl)) != NULL)
1956 		bus_dmamap_destroy(sc->sc_dmat, pkt->pkt_dmap);
1957 
1958 	free(npl->npl_pkts, M_DEVBUF);
1959 	free(npl, M_DEVBUF);
1960 }
1961 
1962 struct nxe_pkt *
1963 nxe_pkt_get(struct nxe_pkt_list *npl)
1964 {
1965 	struct nxe_pkt			*pkt;
1966 
1967 	pkt = TAILQ_FIRST(&npl->npl_free);
1968 	if (pkt != NULL) {
1969 		TAILQ_REMOVE(&npl->npl_free, pkt, pkt_link);
1970 		TAILQ_INSERT_TAIL(&npl->npl_used, pkt, pkt_link);
1971 	}
1972 
1973 	return (pkt);
1974 }
1975 
1976 void
1977 nxe_pkt_put(struct nxe_pkt_list *npl, struct nxe_pkt *pkt)
1978 {
1979 	TAILQ_REMOVE(&npl->npl_used, pkt, pkt_link);
1980 	TAILQ_INSERT_TAIL(&npl->npl_free, pkt, pkt_link);
1982 }
1983 
1984 struct nxe_pkt *
1985 nxe_pkt_used(struct nxe_pkt_list *npl)
1986 {
1987 	return (TAILQ_FIRST(&npl->npl_used));
1988 }
1989 
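/*
 * nxe_dmamem_alloc() wraps the usual bus_dma dance for a single physically
 * contiguous, zeroed allocation: create a map, allocate one segment, map it
 * into kernel virtual memory and load the map. Failure at any step unwinds
 * the previous ones.
 */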
1990 struct nxe_dmamem *
1991 nxe_dmamem_alloc(struct nxe_softc *sc, bus_size_t size, bus_size_t align)
1992 {
1993 	struct nxe_dmamem		*ndm;
1994 	int				nsegs;
1995 
1996 	ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
1997 	ndm->ndm_size = size;
1998 
1999 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2000 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
2001 		goto ndmfree;
2002 
2003 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &ndm->ndm_seg, 1,
2004 	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2005 		goto destroy;
2006 
2007 	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
2008 	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
2009 		goto free;
2010 
2011 	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
2012 	    NULL, BUS_DMA_WAITOK) != 0)
2013 		goto unmap;
2014 
2015 	return (ndm);
2016 
2017 unmap:
2018 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
2019 free:
2020 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2021 destroy:
2022 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2023 ndmfree:
2024 	free(ndm, M_DEVBUF);
2025 
2026 	return (NULL);
2027 }
2028 
2029 void
2030 nxe_dmamem_free(struct nxe_softc *sc, struct nxe_dmamem *ndm)
2031 {
2032 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
2033 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2034 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2035 	free(ndm, M_DEVBUF);
2036 }
2037 
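/*
 * Register access helpers. nxe_read()/nxe_write() touch the global register
 * space, nxe_crb_read()/nxe_crb_write() the currently selected CRB window.
 * The *_wait() variants poll a masked register until it reaches the wanted
 * value, delaying 1ms per iteration, so the timeout is in milliseconds; they
 * return 1 on success and 0 if the timeout expires.
 */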
2038 u_int32_t
2039 nxe_read(struct nxe_softc *sc, bus_size_t r)
2040 {
2041 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, 4,
2042 	    BUS_SPACE_BARRIER_READ);
2043 	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, r));
2044 }
2045 
2046 void
2047 nxe_write(struct nxe_softc *sc, bus_size_t r, u_int32_t v)
2048 {
2049 	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, v);
2050 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, 4,
2051 	    BUS_SPACE_BARRIER_WRITE);
2052 }
2053 
2054 int
2055 nxe_wait(struct nxe_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
2056     u_int timeout)
2057 {
2058 	while ((nxe_read(sc, r) & m) != v) {
2059 		if (timeout == 0)
2060 			return (0);
2061 
2062 		delay(1000);
2063 		timeout--;
2064 	}
2065 
2066 	return (1);
2067 }
2068 
2069 void
2070 nxe_doorbell(struct nxe_softc *sc, u_int32_t v)
2071 {
2072 	bus_space_write_4(sc->sc_memt, sc->sc_memh, NXE_DB, v);
2073 	bus_space_barrier(sc->sc_memt, sc->sc_memh, NXE_DB, 4,
2074 	    BUS_SPACE_BARRIER_WRITE);
2075 }
2076 
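/*
 * The CRB register space is reached through a movable window. nxe_crb_set()
 * selects window 0 or 1 for this PCI function, checks that the hardware
 * accepted the switch, and returns the previously selected window so the
 * caller can restore it afterwards (see nxe_tick() for an example).
 */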
2077 int
2078 nxe_crb_set(struct nxe_softc *sc, int window)
2079 {
2080 	int			oldwindow = sc->sc_window;
2081 	u_int32_t		r;
2082 
2083 	if (sc->sc_window != window) {
2084 		sc->sc_window = window;
2085 
2086 		r = window ? NXE_WIN_CRB_1 : NXE_WIN_CRB_0;
2087 		nxe_write(sc, NXE_WIN_CRB(sc->sc_function), r);
2088 
2089 		if (nxe_read(sc, NXE_WIN_CRB(sc->sc_function)) != r)
2090 			printf("%s: crb window hasn't moved\n", DEVNAME(sc));
2091 	}
2092 
2093 	return (oldwindow);
2094 }
2095 
2096 u_int32_t
2097 nxe_crb_read(struct nxe_softc *sc, bus_size_t r)
2098 {
2099 	bus_space_barrier(sc->sc_memt, sc->sc_crbh, r, 4,
2100 	    BUS_SPACE_BARRIER_READ);
2101 	return (bus_space_read_4(sc->sc_memt, sc->sc_crbh, r));
2102 }
2103 
2104 void
2105 nxe_crb_write(struct nxe_softc *sc, bus_size_t r, u_int32_t v)
2106 {
2107 	bus_space_write_4(sc->sc_memt, sc->sc_crbh, r, v);
2108 	bus_space_barrier(sc->sc_memt, sc->sc_crbh, r, 4,
2109 	    BUS_SPACE_BARRIER_WRITE);
2110 }
2111 
2112 int
2113 nxe_crb_wait(struct nxe_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
2114     u_int timeout)
2115 {
2116 	while ((nxe_crb_read(sc, r) & m) != v) {
2117 		if (timeout == 0)
2118 			return (0);
2119 
2120 		delay(1000);
2121 		timeout--;
2122 	}
2123 
2124 	return (1);
2125 }
2126 
2127 int
2128 nxe_rom_lock(struct nxe_softc *sc)
2129 {
2130 	if (!nxe_wait(sc, NXE_SEM_ROM_LOCK, 0xffffffff,
2131 	    NXE_SEM_DONE, 10000))
2132 		return (1);
2133 	nxe_crb_write(sc, NXE_1_SW_ROM_LOCK_ID, NXE_1_SW_ROM_LOCK_ID);
2134 
2135 	return (0);
2136 }
2137 
2138 void
2139 nxe_rom_unlock(struct nxe_softc *sc)
2140 {
2141 	nxe_read(sc, NXE_SEM_ROM_UNLOCK);
2142 }
2143 
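/*
 * nxe_rom_read() reads one 32bit word from the onboard flash: take the ROM
 * semaphore, program the address and transfer byte counts, issue the read
 * opcode, poll for completion and fetch the data register. The short delays
 * between the byte count writes keep the chipset from bursting. Returns 0 on
 * success, 1 on failure.
 */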
2144 int
2145 nxe_rom_read(struct nxe_softc *sc, u_int32_t r, u_int32_t *v)
2146 {
2147 	int			rv = 1;
2148 
2149 	DASSERT(sc->sc_window == 1);
2150 
2151 	if (nxe_rom_lock(sc) != 0)
2152 		return (1);
2153 
2154 	/* set the rom address */
2155 	nxe_crb_write(sc, NXE_1_ROM_ADDR, r);
2156 
2157 	/* set the xfer len */
2158 	nxe_crb_write(sc, NXE_1_ROM_ABYTE_CNT, 3);
2159 	delay(100); /* used to prevent bursting on the chipset */
2160 	nxe_crb_write(sc, NXE_1_ROM_DBYTE_CNT, 0);
2161 
2162 	/* set opcode and wait for completion */
2163 	nxe_crb_write(sc, NXE_1_ROM_OPCODE, NXE_1_ROM_OPCODE_READ);
2164 	if (!nxe_crb_wait(sc, NXE_1_ROMUSB_STATUS, NXE_1_ROMUSB_STATUS_DONE,
2165 	    NXE_1_ROMUSB_STATUS_DONE, 100))
2166 		goto err;
2167 
2168 	/* reset counters */
2169 	nxe_crb_write(sc, NXE_1_ROM_ABYTE_CNT, 0);
2170 	delay(100);
2171 	nxe_crb_write(sc, NXE_1_ROM_DBYTE_CNT, 0);
2172 
2173 	*v = nxe_crb_read(sc, NXE_1_ROM_RDATA);
2174 
2175 	rv = 0;
2176 err:
2177 	nxe_rom_unlock(sc);
2178 	return (rv);
2179 }
2180 
2181 int
2182 nxe_rom_read_region(struct nxe_softc *sc, u_int32_t r, void *buf,
2183     size_t buflen)
2184 {
2185 	u_int32_t		*databuf = buf;
2186 	int			i;
2187 
2188 #ifdef NXE_DEBUG
2189 	if ((buflen % 4) != 0)
2190 		panic("nxe_rom_read_region: buflen is wrong (%zu)", buflen);
2191 #endif
2192 
2193 	buflen = buflen / 4;
2194 	for (i = 0; i < buflen; i++) {
2195 		if (nxe_rom_read(sc, r, &databuf[i]) != 0)
2196 			return (1);
2197 
2198 		r += sizeof(u_int32_t);
2199 	}
2200 
2201 	return (0);
2202 }
2203