1 /* $OpenBSD: if_nxe.c,v 1.82 2024/09/04 07:54:52 mglocker Exp $ */
2
3 /*
4 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include "bpfilter.h"
20
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/sockio.h>
24 #include <sys/mbuf.h>
25 #include <sys/socket.h>
26 #include <sys/malloc.h>
27 #include <sys/device.h>
28 #include <sys/queue.h>
29 #include <sys/timeout.h>
30 #include <sys/sensors.h>
31 #include <sys/rwlock.h>
32
33 #include <machine/bus.h>
34
35 #include <dev/pci/pcireg.h>
36 #include <dev/pci/pcivar.h>
37 #include <dev/pci/pcidevs.h>
38
39 #include <net/if.h>
40 #include <net/if_media.h>
41
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45
46 #include <netinet/in.h>
47 #include <netinet/if_ether.h>
48
49 #ifdef NXE_DEBUG
50 int nxedebug = 0;
51
52 #define DPRINTF(l, f...) do { if (nxedebug & (l)) printf(f); } while (0)
53 #define DASSERT(_a) assert(_a)
54 #else
55 #define DPRINTF(l, f...)
56 #define DASSERT(_a)
57 #endif
58
59 /* this driver likes firmware around this version */
60 #define NXE_VERSION_MAJOR 3
61 #define NXE_VERSION_MINOR 4
62 #define NXE_VERSION_BUILD 31
63 #define NXE_VERSION \
64 ((NXE_VERSION_MAJOR << 16)|(NXE_VERSION_MINOR << 8)|(NXE_VERSION_BUILD))
65
66
67 /*
68 * PCI configuration space registers
69 */
70
71 #define NXE_PCI_BAR_MEM 0x10 /* bar 0 */
72 #define NXE_PCI_BAR_MEM_128MB (128 * 1024 * 1024)
73 #define NXE_PCI_BAR_DOORBELL 0x20 /* bar 4 */
74
75 /*
76 * doorbell register space
77 */
78
79 #define NXE_DB 0x00000000
80 #define NXE_DB_PEGID 0x00000003
81 #define NXE_DB_PEGID_RX 0x00000001 /* rx unit */
82 #define NXE_DB_PEGID_TX 0x00000002 /* tx unit */
83 #define NXE_DB_PRIVID 0x00000004 /* must be set */
84 #define NXE_DB_COUNT(_c) ((_c)<<3) /* count */
85 #define NXE_DB_CTXID(_c) ((_c)<<18) /* context id */
86 #define NXE_DB_OPCODE_RX_PROD 0x00000000
87 #define NXE_DB_OPCODE_RX_JUMBO_PROD 0x10000000
88 #define NXE_DB_OPCODE_RX_LRO_PROD 0x20000000
89 #define NXE_DB_OPCODE_CMD_PROD 0x30000000
90 #define NXE_DB_OPCODE_UPD_CONS 0x40000000
91 #define NXE_DB_OPCODE_RESET_CTX 0x50000000
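
/*
 * A doorbell word is built by ORing these fields together; for example,
 * nxe_rx_start() below composes its rx producer update roughly as
 * NXE_DB_PEGID_RX | NXE_DB_PRIVID | NXE_DB_OPCODE_RX_PROD |
 * NXE_DB_COUNT(slot) | NXE_DB_CTXID(ctx) before passing it to
 * nxe_doorbell().
 */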
92
93 /*
94 * register space
95 */
96
97 /* different PCI functions use different registers sometimes */
98 #define _F(_f) ((_f) * 0x20)
99
100 /*
101 * driver ref section 4.2
102 *
103 * All the hardware registers are mapped in memory. Apart from the registers
104 * for the individual hardware blocks, the memory map includes a large number
105 * of software definable registers.
106 *
107 * The following table gives the memory map in the PCI address space.
108 */
109
110 #define NXE_MAP_DDR_NET 0x00000000
111 #define NXE_MAP_DDR_MD 0x02000000
112 #define NXE_MAP_QDR_NET 0x04000000
113 #define NXE_MAP_DIRECT_CRB 0x04400000
114 #define NXE_MAP_OCM0 0x05000000
115 #define NXE_MAP_OCM1 0x05100000
116 #define NXE_MAP_CRB 0x06000000
117
118 /*
119 * Since there are a large number of registers they do not fit in a single
120 * PCI addressing range. Hence two windows are defined. The window starts at
121 * NXE_MAP_CRB, and extends to the end of the register map. The window is set
122 * using the NXE_WIN_CRB register. The format of the NXE_WIN_CRB
123 * register is as follows:
124 */
125
126 #define NXE_WIN_CRB(_f) (0x06110210 + _F(_f))
127 #define NXE_WIN_CRB_0 (0<<25)
128 #define NXE_WIN_CRB_1 (1<<25)
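
/*
 * Window selection sketch (inferred from the nxe_crb_set() callers in this
 * file): the per-function NXE_WIN_CRB(_f) register is written with
 * NXE_WIN_CRB_0 or NXE_WIN_CRB_1 to pick which window the CRB mapping
 * exposes, and sc_window caches the current selection so the DASSERTs
 * below can check it.
 */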
129
130 /*
131 * The memory map inside the register windows is divided into a set of blocks.
132 * Each register block is owned by one hardware agent. The following table
133 * gives the memory map of the various register blocks in window 0. These
134 * registers are all in the CRB register space, so the offsets given here are
135 * relative to the base of the CRB offset region (NXE_MAP_CRB).
136 */
137
138 #define NXE_W0_PCIE 0x00100000 /* PCI Express */
139 #define NXE_W0_NIU 0x00600000 /* Network Interface Unit */
140 #define NXE_W0_PPE_0 0x01100000 /* Protocol Processing Engine 0 */
141 #define NXE_W0_PPE_1 0x01200000 /* Protocol Processing Engine 1 */
142 #define NXE_W0_PPE_2 0x01300000 /* Protocol Processing Engine 2 */
143 #define NXE_W0_PPE_3 0x01400000 /* Protocol Processing Engine 3 */
144 #define NXE_W0_PPE_D 0x01500000 /* PPE D-cache */
145 #define NXE_W0_PPE_I 0x01600000 /* PPE I-cache */
146
147 /*
148 * These are the register blocks inside window 1.
149 */
150
151 #define NXE_W1_PCIE 0x00100000
152 #define NXE_W1_SW 0x00200000
153 #define NXE_W1_SIR 0x01200000
154 #define NXE_W1_ROMUSB 0x01300000
155
156 /*
157 * Global registers
158 */
159 #define NXE_BOOTLD_START 0x00010000
160
161
162 /*
163 * driver ref section 5
164 *
165 * CRB Window Register Descriptions
166 */
167
168 /*
169 * PCI Express Registers
170 *
171 * Despite being in the CRB window space, they can be accessed via both
172 * windows. This means they are accessible "globally" without going relative
173 * to the start of the CRB window space.
174 */
175
176 /* Interrupts */
177 #define NXE_ISR_VECTOR 0x06110100 /* Interrupt Vector */
178 #define NXE_ISR_MASK 0x06110104 /* Interrupt Mask */
179 #define NXE_ISR_TARGET_STATUS 0x06110118
180 #define NXE_ISR_TARGET_MASK 0x06110128
181 #define NXE_ISR_MINE(_f) (0x08 << (_f))
182
183 /* lock registers (semaphores between chipset and driver) */
184 #define NXE_SEM_ROM_LOCK 0x0611c010 /* ROM access lock */
185 #define NXE_SEM_ROM_UNLOCK 0x0611c014
186 #define NXE_SEM_PHY_LOCK 0x0611c018 /* PHY access lock */
187 #define NXE_SEM_PHY_UNLOCK 0x0611c01c
188 #define NXE_SEM_DONE 0x1
189
190 /*
191 * Network Interface Unit (NIU) Registers
192 */
193
194 #define NXE_0_NIU_MODE 0x00600000
195 #define NXE_0_NIU_MODE_XGE (1<<2) /* XGE interface enabled */
196 #define NXE_0_NIU_MODE_GBE (1<<1) /* 4 GbE interfaces enabled */
197 #define NXE_0_NIU_SINGLE_TERM 0x00600004
198 #define NXE_0_NIU_INT_MASK 0x00600040
199
200 #define NXE_0_NIU_RESET_XG 0x0060001c /* reset XG */
201 #define NXE_0_NIU_RESET_FIFO 0x00600088 /* reset sys fifos */
202
203 #define _P(_p) ((_p) * 0x10000)
204
205 #define NXE_0_XG_CFG0(_p) (0x00670000 + _P(_p))
206 #define NXE_0_XG_CFG0_TX_EN (1<<0) /* TX enable */
207 #define NXE_0_XG_CFG0_TX_SYNC (1<<1) /* TX synced */
208 #define NXE_0_XG_CFG0_RX_EN (1<<2) /* RX enable */
209 #define NXE_0_XG_CFG0_RX_SYNC (1<<3) /* RX synced */
210 #define NXE_0_XG_CFG0_TX_FLOWCTL (1<<4) /* enable pause frame gen */
211 #define NXE_0_XG_CFG0_RX_FLOWCTL (1<<5) /* act on rxed pause frames */
212 #define NXE_0_XG_CFG0_LOOPBACK (1<<8) /* tx appears on rx */
213 #define NXE_0_XG_CFG0_TX_RST_PB (1<<15) /* reset frm tx proto block */
214 #define NXE_0_XG_CFG0_RX_RST_PB (1<<16) /* reset frm rx proto block */
215 #define NXE_0_XG_CFG0_TX_RST_MAC (1<<17) /* reset frm tx multiplexer */
216 #define NXE_0_XG_CFG0_RX_RST_MAC (1<<18) /* reset ctl frms and timers */
217 #define NXE_0_XG_CFG0_SOFT_RST (1<<31) /* soft reset */
218 #define NXE_0_XG_CFG1(_p) (0x00670004 + _P(_p))
219 #define NXE_0_XG_CFG1_REM_CRC (1<<0) /* enable crc removal */
220 #define NXE_0_XG_CFG1_CRC_EN (1<<1) /* append crc to tx frames */
221 #define NXE_0_XG_CFG1_NO_MAX (1<<5) /* rx all frames despite size */
222 #define NXE_0_XG_CFG1_WIRE_LO_ERR (1<<6) /* recognize local err */
223 #define NXE_0_XG_CFG1_PAUSE_FR_DIS (1<<8) /* disable pause frame detect */
224 #define NXE_0_XG_CFG1_SEQ_ERR_EN (1<<10) /* enable seq err detection */
225 #define NXE_0_XG_CFG1_MULTICAST (1<<12) /* accept all multicast */
226 #define NXE_0_XG_CFG1_PROMISC (1<<13) /* accept all frames */
227 #define NXE_0_XG_IPG(_p) (0x00670008 + _P(_p))
228 #define NXE_0_XG_MAC_LO(_p) (0x00670010 + _P(_p))
229 #define NXE_0_XG_MAC_HI(_p) (0x0067000c + _P(_p))
230 #define NXE_0_XG_STATUS(_p) (0x00670018 + _P(_p))
231 #define NXE_0_XG_MTU(_p) (0x0067001c + _P(_p))
232 #define NXE_0_XG_PAUSE_FRM(_p) (0x00670020 + _P(_p))
233 #define NXE_0_XG_TX_BYTES(_p) (0x00670024 + _P(_p))
234 #define NXE_0_XG_TX_PKTS(_p) (0x00670028 + _P(_p))
235 #define NXE_0_XG_RX_BYTES(_p) (0x0067002c + _P(_p))
236 #define NXE_0_XG_RX_PKTS(_p) (0x00670030 + _P(_p))
237 #define NXE_0_XG_AGGR_ERRS(_p) (0x00670034 + _P(_p))
238 #define NXE_0_XG_MCAST_PKTS(_p) (0x00670038 + _P(_p))
239 #define NXE_0_XG_UCAST_PKTS(_p) (0x0067003c + _P(_p))
240 #define NXE_0_XG_CRC_ERRS(_p) (0x00670040 + _P(_p))
241 #define NXE_0_XG_OVERSIZE(_p) (0x00670044 + _P(_p))
242 #define NXE_0_XG_UNDERSIZE(_p) (0x00670048 + _P(_p))
243 #define NXE_0_XG_LOCAL_ERRS(_p) (0x0067004c + _P(_p))
244 #define NXE_0_XG_REMOTE_ERRS(_p) (0x00670050 + _P(_p))
245 #define NXE_0_XG_CNTL_CHARS(_p) (0x00670054 + _P(_p))
246 #define NXE_0_XG_PAUSE_PKTS(_p) (0x00670058 + _P(_p))
247
248 /*
249 * Software Defined Registers
250 */
251
252 /* chipset state registers */
253 #define NXE_1_SW_ROM_LOCK_ID 0x00202100
254 #define NXE_1_SW_ROM_LOCK_ID_DRV 0x0d417340
255 #define NXE_1_SW_PHY_LOCK_ID 0x00202120
256 #define NXE_1_SW_PHY_LOCK_ID_DRV 0x44524956
257
258 /* firmware version */
259 #define NXE_1_SW_FWVER_MAJOR 0x00202150 /* Major f/w version */
260 #define NXE_1_SW_FWVER_MINOR 0x00202154 /* Minor f/w version */
261 #define NXE_1_SW_FWVER_BUILD 0x00202158 /* Build/Sub f/w version */
262
263 /* misc */
264 #define NXE_1_SW_CMD_ADDR_HI 0x00202218 /* cmd ring phys addr */
265 #define NXE_1_SW_CMD_ADDR_LO 0x0020221c /* cmd ring phys addr */
266 #define NXE_1_SW_CMD_SIZE 0x002022c8 /* entries in the cmd ring */
267 #define NXE_1_SW_DUMMY_ADDR_HI 0x0020223c /* hi address of dummy buf */
268 #define NXE_1_SW_DUMMY_ADDR_LO 0x00202240 /* lo address of dummy buf */
269 #define NXE_1_SW_DUMMY_ADDR_LEN 1024
270
271 static const u_int32_t nxe_regmap[][4] = {
272 #define NXE_1_SW_CMD_PRODUCER(_f) (nxe_regmap[0][(_f)])
273 { 0x00202208, 0x002023ac, 0x002023b8, 0x002023d0 },
274 #define NXE_1_SW_CMD_CONSUMER(_f) (nxe_regmap[1][(_f)])
275 { 0x0020220c, 0x002023b0, 0x002023bc, 0x002023d4 },
276
277 #define NXE_1_SW_CONTEXT(_p) (nxe_regmap[2][(_p)])
278 #define NXE_1_SW_CONTEXT_SIG(_p) (0xdee0 | (_p))
279 { 0x0020238c, 0x00202390, 0x0020239c, 0x002023a4 },
280 #define NXE_1_SW_CONTEXT_ADDR_LO(_p) (nxe_regmap[3][(_p)])
281 { 0x00202388, 0x00202390, 0x00202398, 0x002023a0 },
282 #define NXE_1_SW_CONTEXT_ADDR_HI(_p) (nxe_regmap[4][(_p)])
283 { 0x002023c0, 0x002023c4, 0x002023c8, 0x002023cc },
284
285 #define NXE_1_SW_INT_MASK(_p) (nxe_regmap[5][(_p)])
286 { 0x002023d8, 0x002023e0, 0x002023e4, 0x002023e8 },
287
288 #define NXE_1_SW_RX_PRODUCER(_c) (nxe_regmap[6][(_c)])
289 { 0x00202300, 0x00202344, 0x002023d8, 0x0020242c },
290 #define NXE_1_SW_RX_CONSUMER(_c) (nxe_regmap[7][(_c)])
291 { 0x00202304, 0x00202348, 0x002023dc, 0x00202430 },
292 #define NXE_1_SW_RX_RING(_c) (nxe_regmap[8][(_c)])
293 { 0x00202308, 0x0020234c, 0x002023f0, 0x00202434 },
294 #define NXE_1_SW_RX_SIZE(_c) (nxe_regmap[9][(_c)])
295 { 0x0020230c, 0x00202350, 0x002023f4, 0x00202438 },
296
297 #define NXE_1_SW_RX_JUMBO_PRODUCER(_c) (nxe_regmap[10][(_c)])
298 { 0x00202310, 0x00202354, 0x002023f8, 0x0020243c },
299 #define NXE_1_SW_RX_JUMBO_CONSUMER(_c) (nxe_regmap[11][(_c)])
300 { 0x00202314, 0x00202358, 0x002023fc, 0x00202440 },
301 #define NXE_1_SW_RX_JUMBO_RING(_c) (nxe_regmap[12][(_c)])
302 { 0x00202318, 0x0020235c, 0x00202400, 0x00202444 },
303 #define NXE_1_SW_RX_JUMBO_SIZE(_c) (nxe_regmap[13][(_c)])
304 { 0x0020231c, 0x00202360, 0x00202404, 0x00202448 },
305
306 #define NXE_1_SW_RX_LRO_PRODUCER(_c) (nxe_regmap[14][(_c)])
307 { 0x00202320, 0x00202364, 0x00202408, 0x0020244c },
308 #define NXE_1_SW_RX_LRO_CONSUMER(_c) (nxe_regmap[15][(_c)])
309 { 0x00202324, 0x00202368, 0x0020240c, 0x00202450 },
310 #define NXE_1_SW_RX_LRO_RING(_c) (nxe_regmap[16][(_c)])
311 { 0x00202328, 0x0020236c, 0x00202410, 0x00202454 },
312 #define NXE_1_SW_RX_LRO_SIZE(_c) (nxe_regmap[17][(_c)])
313 { 0x0020232c, 0x00202370, 0x00202414, 0x00202458 },
314
315 #define NXE_1_SW_STATUS_RING(_c) (nxe_regmap[18][(_c)])
316 { 0x00202330, 0x00202374, 0x00202418, 0x0020245c },
317 #define NXE_1_SW_STATUS_PRODUCER(_c) (nxe_regmap[19][(_c)])
318 { 0x00202334, 0x00202378, 0x0020241c, 0x00202460 },
319 #define NXE_1_SW_STATUS_CONSUMER(_c) (nxe_regmap[20][(_c)])
320 { 0x00202338, 0x0020237c, 0x00202420, 0x00202464 },
321 #define NXE_1_SW_STATUS_STATE(_c) (nxe_regmap[21][(_c)])
322 #define NXE_1_SW_STATUS_STATE_READY 0x0000ff01
323 { 0x0020233c, 0x00202380, 0x00202424, 0x00202468 },
324 #define NXE_1_SW_STATUS_SIZE(_c) (nxe_regmap[22][(_c)])
325 { 0x00202340, 0x00202384, 0x00202428, 0x0020246c }
326 };
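
/*
 * The nxe_regmap table exists because these software defined registers do
 * not sit at a fixed per-function stride: each row lists the four
 * per-function (or per-context) addresses for one register and the
 * accessor macros above simply index into their row.
 */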
327
328
329 #define NXE_1_SW_BOOTLD_CONFIG 0x002021fc
330 #define NXE_1_SW_BOOTLD_CONFIG_ROM 0x00000000
331 #define NXE_1_SW_BOOTLD_CONFIG_RAM 0x12345678
332
333 #define NXE_1_SW_CMDPEG_STATE 0x00202250 /* init status */
334 #define NXE_1_SW_CMDPEG_STATE_START 0xff00 /* init starting */
335 #define NXE_1_SW_CMDPEG_STATE_DONE 0xff01 /* init complete */
336 #define NXE_1_SW_CMDPEG_STATE_ACK 0xf00f /* init ack */
337 #define NXE_1_SW_CMDPEG_STATE_ERROR 0xffff /* init failed */
338
339 #define NXE_1_SW_XG_STATE 0x00202294 /* phy state */
340 #define NXE_1_SW_XG_STATE_PORT(_r, _p) (((_r)>>8*(_p))&0xff)
341 #define NXE_1_SW_XG_STATE_UP (1<<4)
342 #define NXE_1_SW_XG_STATE_DOWN (1<<5)
343
344 #define NXE_1_SW_MPORT_MODE 0x002022c4
345 #define NXE_1_SW_MPORT_MODE_SINGLE 0x1111
346 #define NXE_1_SW_MPORT_MODE_MULTI 0x2222
347
348 #define NXE_1_SW_INT_VECTOR 0x002022d4
349
350 #define NXE_1_SW_NIC_CAP_HOST 0x002023a8 /* host capabilities */
351 #define NXE_1_SW_NIC_CAP_FW 0x002023dc /* firmware capabilities */
352 #define NXE_1_SW_NIC_CAP_PORTINTR 0x1 /* per port interrupts */
353 #define NXE_1_SW_DRIVER_VER 0x002024a0 /* host driver version */
354
355
356 #define NXE_1_SW_TEMP 0x002023b4 /* Temperature sensor */
357 #define NXE_1_SW_TEMP_STATE(_x) ((_x)&0xffff) /* Temp state */
358 #define NXE_1_SW_TEMP_STATE_NONE 0x0000
359 #define NXE_1_SW_TEMP_STATE_OK 0x0001
360 #define NXE_1_SW_TEMP_STATE_WARN 0x0002
361 #define NXE_1_SW_TEMP_STATE_CRIT 0x0003
362 #define NXE_1_SW_TEMP_VAL(_x) (((_x)>>16)&0xffff) /* Temp value */
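
/*
 * nxe_tick() below turns this register into a sensor reading:
 * NXE_1_SW_TEMP_VAL() is treated as degrees Celsius and converted to the
 * micro-degrees-Kelvin the sensors framework expects, i.e.
 * value * 1000000 + 273150000.
 */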
363
364 #define NXE_1_SW_V2P(_f) (0x00202490+((_f)*4)) /* virtual to phys */
365
366 /*
367 * ROMUSB Registers
368 */
369 #define NXE_1_ROMUSB_STATUS 0x01300004 /* ROM Status */
370 #define NXE_1_ROMUSB_STATUS_DONE (1<<1)
371 #define NXE_1_ROMUSB_SW_RESET 0x01300008
372 #define NXE_1_ROMUSB_SW_RESET_DEF 0xffffffff
373 #define NXE_1_ROMUSB_SW_RESET_BOOT 0x0080000f
374
375 #define NXE_1_CASPER_RESET 0x01300038
376 #define NXE_1_CASPER_RESET_ENABLE 0x1
377 #define NXE_1_CASPER_RESET_DISABLE 0x1
378
379 #define NXE_1_GLB_PEGTUNE 0x0130005c /* reset register */
380 #define NXE_1_GLB_PEGTUNE_DONE 0x00000001
381
382 #define NXE_1_GLB_CHIPCLKCTL 0x013000a8
383 #define NXE_1_GLB_CHIPCLKCTL_ON 0x00003fff
384
385 /* ROM Registers */
386 #define NXE_1_ROM_CONTROL 0x01310000
387 #define NXE_1_ROM_OPCODE 0x01310004
388 #define NXE_1_ROM_OPCODE_READ 0x0000000b
389 #define NXE_1_ROM_ADDR 0x01310008
390 #define NXE_1_ROM_WDATA 0x0131000c
391 #define NXE_1_ROM_ABYTE_CNT 0x01310010
392 #define NXE_1_ROM_DBYTE_CNT 0x01310014 /* dummy byte count */
393 #define NXE_1_ROM_RDATA 0x01310018
394 #define NXE_1_ROM_AGT_TAG 0x0131001c
395 #define NXE_1_ROM_TIME_PARM 0x01310020
396 #define NXE_1_ROM_CLK_DIV 0x01310024
397 #define NXE_1_ROM_MISS_INSTR 0x01310028
398
399 /*
400 * flash memory layout
401 *
402 * These are offsets of memory accessible via the ROM Registers above
403 */
404 #define NXE_FLASH_CRBINIT 0x00000000 /* crb init section */
405 #define NXE_FLASH_BRDCFG 0x00004000 /* board config */
406 #define NXE_FLASH_INITCODE 0x00006000 /* pegtune code */
407 #define NXE_FLASH_BOOTLD 0x00010000 /* boot loader */
408 #define NXE_FLASH_IMAGE 0x00043000 /* compressed image */
409 #define NXE_FLASH_SECONDARY 0x00200000 /* backup image */
410 #define NXE_FLASH_PXE 0x003d0000 /* pxe image */
411 #define NXE_FLASH_USER 0x003e8000 /* user region for new boards */
412 #define NXE_FLASH_VPD 0x003e8c00 /* vendor private data */
413 #define NXE_FLASH_LICENSE 0x003e9000 /* firmware license */
414 #define NXE_FLASH_FIXED 0x003f0000 /* backup of crbinit */
415
416
417 /*
418 * misc hardware details
419 */
420 #define NXE_MAX_PORTS 4
421 #define NXE_MAX_PORT_LLADDRS 32
422 #define NXE_MAX_PKTLEN (64 * 1024)
423
424
425 /*
426 * hardware structures
427 */
428
429 struct nxe_info {
430 u_int32_t ni_hdrver;
431 #define NXE_INFO_HDRVER_1 0x00000001
432
433 u_int32_t ni_board_mfg;
434 u_int32_t ni_board_type;
435 #define NXE_BRDTYPE_P1_BD 0x0000
436 #define NXE_BRDTYPE_P1_SB 0x0001
437 #define NXE_BRDTYPE_P1_SMAX 0x0002
438 #define NXE_BRDTYPE_P1_SOCK 0x0003
439 #define NXE_BRDTYPE_P2_SOCK_31 0x0008
440 #define NXE_BRDTYPE_P2_SOCK_35 0x0009
441 #define NXE_BRDTYPE_P2_SB35_4G 0x000a
442 #define NXE_BRDTYPE_P2_SB31_10G 0x000b
443 #define NXE_BRDTYPE_P2_SB31_2G 0x000c
444 #define NXE_BRDTYPE_P2_SB31_10G_IMEZ 0x000d
445 #define NXE_BRDTYPE_P2_SB31_10G_HMEZ 0x000e
446 #define NXE_BRDTYPE_P2_SB31_10G_CX4 0x000f
447 u_int32_t ni_board_num;
448
449 u_int32_t ni_chip_id;
450 u_int32_t ni_chip_minor;
451 u_int32_t ni_chip_major;
452 u_int32_t ni_chip_pkg;
453 u_int32_t ni_chip_lot;
454
455 u_int32_t ni_port_mask;
456 u_int32_t ni_peg_mask;
457 u_int32_t ni_icache;
458 u_int32_t ni_dcache;
459 u_int32_t ni_casper;
460
461 u_int32_t ni_lladdr0_low;
462 u_int32_t ni_lladdr1_low;
463 u_int32_t ni_lladdr2_low;
464 u_int32_t ni_lladdr3_low;
465
466 u_int32_t ni_mnsync_mode;
467 u_int32_t ni_mnsync_shift_cclk;
468 u_int32_t ni_mnsync_shift_mclk;
469 u_int32_t ni_mnwb_enable;
470 u_int32_t ni_mnfreq_crystal;
471 u_int32_t ni_mnfreq_speed;
472 u_int32_t ni_mnorg;
473 u_int32_t ni_mndepth;
474 u_int32_t ni_mnranks0;
475 u_int32_t ni_mnranks1;
476 u_int32_t ni_mnrd_latency0;
477 u_int32_t ni_mnrd_latency1;
478 u_int32_t ni_mnrd_latency2;
479 u_int32_t ni_mnrd_latency3;
480 u_int32_t ni_mnrd_latency4;
481 u_int32_t ni_mnrd_latency5;
482 u_int32_t ni_mnrd_latency6;
483 u_int32_t ni_mnrd_latency7;
484 u_int32_t ni_mnrd_latency8;
485 u_int32_t ni_mndll[18];
486 u_int32_t ni_mnddr_mode;
487 u_int32_t ni_mnddr_extmode;
488 u_int32_t ni_mntiming0;
489 u_int32_t ni_mntiming1;
490 u_int32_t ni_mntiming2;
491
492 u_int32_t ni_snsync_mode;
493 u_int32_t ni_snpt_mode;
494 u_int32_t ni_snecc_enable;
495 u_int32_t ni_snwb_enable;
496 u_int32_t ni_snfreq_crystal;
497 u_int32_t ni_snfreq_speed;
498 u_int32_t ni_snorg;
499 u_int32_t ni_sndepth;
500 u_int32_t ni_sndll;
501 u_int32_t ni_snrd_latency;
502
503 u_int32_t ni_lladdr0_high;
504 u_int32_t ni_lladdr1_high;
505 u_int32_t ni_lladdr2_high;
506 u_int32_t ni_lladdr3_high;
507
508 u_int32_t ni_magic;
509 #define NXE_INFO_MAGIC 0x12345678
510
511 u_int32_t ni_mnrd_imm;
512 u_int32_t ni_mndll_override;
513 } __packed;
514
515 struct nxe_imageinfo {
516 u_int32_t nim_bootld_ver;
517 u_int32_t nim_bootld_size;
518
519 u_int8_t nim_img_ver_major;
520 u_int8_t nim_img_ver_minor;
521 u_int16_t nim_img_ver_build;
522
523 u_int32_t min_img_size;
524 } __packed;
525
526 struct nxe_lladdr {
527 u_int8_t pad[2];
528 u_int8_t lladdr[6];
529 } __packed;
530
531 struct nxe_userinfo {
532 u_int8_t nu_flash_md5[1024];
533
534 struct nxe_imageinfo nu_imageinfo;
535
536 u_int32_t nu_primary;
537 u_int32_t nu_secondary;
538
539 u_int64_t nu_lladdr[NXE_MAX_PORTS][NXE_MAX_PORT_LLADDRS];
540
541 u_int32_t nu_subsys_id;
542
543 u_int8_t nu_serial[32];
544
545 u_int32_t nu_bios_ver;
546 } __packed;
547
548 /* hw structures actually used in the io path */
549
550 struct nxe_ctx_ring {
551 u_int64_t r_addr;
552 u_int32_t r_size;
553 u_int32_t r_reserved;
554 };
555
556 #define NXE_RING_RX 0
557 #define NXE_RING_RX_JUMBO 1
558 #define NXE_RING_RX_LRO 2
559 #define NXE_NRING 3
560
561 struct nxe_ctx {
562 u_int64_t ctx_cmd_consumer_addr;
563
564 struct nxe_ctx_ring ctx_cmd_ring;
565
566 struct nxe_ctx_ring ctx_rx_rings[NXE_NRING];
567
568 u_int64_t ctx_status_ring_addr;
569 u_int32_t ctx_status_ring_size;
570
571 u_int32_t ctx_id;
572 } __packed;
573
574 struct nxe_tx_desc {
575 u_int8_t tx_tcp_offset;
576 u_int8_t tx_ip_offset;
577 u_int16_t tx_flags;
578 #define NXE_TXD_F_OPCODE_TX (0x01 << 7)
579
580 u_int8_t tx_nbufs;
581 u_int16_t tx_length; /* XXX who makes a 24bit field? */
582 u_int8_t tx_length_hi;
583
584 u_int64_t tx_addr_2;
585
586 u_int16_t tx_id;
587 u_int16_t tx_mss;
588
589 u_int8_t tx_port;
590 u_int8_t tx_tso_hdr_len;
591 u_int16_t tx_ipsec_id;
592
593 u_int64_t tx_addr_3;
594
595 u_int64_t tx_addr_1;
596
597 u_int16_t tx_slen_1;
598 u_int16_t tx_slen_2;
599 u_int16_t tx_slen_3;
600 u_int16_t tx_slen_4;
601
602 u_int64_t tx_addr_4;
603
604 u_int64_t tx_reserved;
605 } __packed;
606 #define NXE_TXD_SEGS 4
607 #define NXE_TXD_DESCS 8
608 #define NXE_TXD_MAX_SEGS (NXE_TXD_SEGS * NXE_TXD_DESCS)
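
/*
 * A tx descriptor carries up to NXE_TXD_SEGS (4) DMA segments and
 * nxe_start() below chains up to NXE_TXD_DESCS (8) descriptors per packet,
 * which is where the NXE_TXD_MAX_SEGS limit used for the tx dmamaps in
 * nxe_pkt_alloc() comes from.
 */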
609
610 struct nxe_rx_desc {
611 u_int16_t rx_id;
612 u_int16_t rx_flags;
613 u_int32_t rx_len; /* packet length */
614 u_int64_t rx_addr;
615 } __packed;
616 #define NXE_RXD_MAX_SEGS 1
617
618 struct nxe_status_desc {
619 u_int8_t st_lro;
620 u_int8_t st_owner;
621 u_int16_t st_id;
622 u_int16_t st_len;
623 u_int16_t st_flags;
624 } __packed;
625
626 /*
627 * driver definitions
628 */
629
630 struct nxe_board {
631 u_int32_t brd_type;
632 u_int brd_mode;
633 };
634
635 struct nxe_dmamem {
636 bus_dmamap_t ndm_map;
637 bus_dma_segment_t ndm_seg;
638 size_t ndm_size;
639 caddr_t ndm_kva;
640 };
641 #define NXE_DMA_MAP(_ndm) ((_ndm)->ndm_map)
642 #define NXE_DMA_LEN(_ndm) ((_ndm)->ndm_size)
643 #define NXE_DMA_DVA(_ndm) ((_ndm)->ndm_map->dm_segs[0].ds_addr)
644 #define NXE_DMA_KVA(_ndm) ((void *)(_ndm)->ndm_kva)
645
646 struct nxe_pkt {
647 int pkt_id;
648 bus_dmamap_t pkt_dmap;
649 struct mbuf *pkt_m;
650 TAILQ_ENTRY(nxe_pkt) pkt_link;
651 };
652
653 struct nxe_pkt_list {
654 struct nxe_pkt *npl_pkts;
655 TAILQ_HEAD(, nxe_pkt) npl_free;
656 TAILQ_HEAD(, nxe_pkt) npl_used;
657 };
658
659 struct nxe_ring {
660 struct nxe_dmamem *nr_dmamem;
661 u_int8_t *nr_pos;
662
663 u_int nr_slot;
664 int nr_ready;
665
666 size_t nr_desclen;
667 u_int nr_nentries;
668 };
669
670 /*
671 * autoconf glue
672 */
673
674 struct nxe_softc {
675 struct device sc_dev;
676
677 bus_dma_tag_t sc_dmat;
678
679 bus_space_tag_t sc_memt;
680 bus_space_handle_t sc_memh;
681 bus_size_t sc_mems;
682 bus_space_handle_t sc_crbh;
683 bus_space_tag_t sc_dbt;
684 bus_space_handle_t sc_dbh;
685 bus_size_t sc_dbs;
686
687 void *sc_ih;
688
689 int sc_function;
690 int sc_port;
691 int sc_window;
692
693 const struct nxe_board *sc_board;
694 u_int sc_fw_major;
695 u_int sc_fw_minor;
696 u_int sc_fw_build;
697
698 struct arpcom sc_ac;
699 struct ifmedia sc_media;
700
701 struct nxe_pkt_list *sc_tx_pkts;
702 struct nxe_pkt_list *sc_rx_pkts;
703
704 /* allocations for the hw */
705 struct nxe_dmamem *sc_dummy_dma;
706 struct nxe_dmamem *sc_dummy_rx;
707
708 struct nxe_dmamem *sc_ctx;
709 u_int32_t *sc_cmd_consumer;
710 u_int32_t sc_cmd_consumer_cur;
711
712 struct nxe_ring *sc_cmd_ring;
713 struct nxe_ring *sc_rx_rings[NXE_NRING];
714 struct nxe_ring *sc_status_ring;
715
716 /* monitoring */
717 struct timeout sc_tick;
718 struct ksensor sc_sensor;
719 struct ksensordev sc_sensor_dev;
720
721 /* ioctl lock */
722 struct rwlock sc_lock;
723 };
724
725 int nxe_match(struct device *, void *, void *);
726 void nxe_attach(struct device *, struct device *, void *);
727 int nxe_intr(void *);
728
729 const struct cfattach nxe_ca = {
730 sizeof(struct nxe_softc),
731 nxe_match,
732 nxe_attach
733 };
734
735 struct cfdriver nxe_cd = {
736 NULL,
737 "nxe",
738 DV_IFNET
739 };
740
741 /* init code */
742 int nxe_pci_map(struct nxe_softc *,
743 struct pci_attach_args *);
744 void nxe_pci_unmap(struct nxe_softc *);
745
746 int nxe_board_info(struct nxe_softc *);
747 int nxe_user_info(struct nxe_softc *);
748 int nxe_init(struct nxe_softc *);
749 void nxe_uninit(struct nxe_softc *);
750 void nxe_mountroot(struct device *);
751
752 /* chip state */
753 void nxe_tick(void *);
754 void nxe_link_state(struct nxe_softc *);
755
756 /* interface operations */
757 int nxe_ioctl(struct ifnet *, u_long, caddr_t);
758 void nxe_start(struct ifnet *);
759 int nxe_complete(struct nxe_softc *);
760 void nxe_watchdog(struct ifnet *);
761
762 void nxe_rx_start(struct nxe_softc *);
763
764 void nxe_up(struct nxe_softc *);
765 void nxe_lladdr(struct nxe_softc *);
766 void nxe_iff(struct nxe_softc *);
767 void nxe_down(struct nxe_softc *);
768
769 int nxe_up_fw(struct nxe_softc *);
770
771 /* ifmedia operations */
772 int nxe_media_change(struct ifnet *);
773 void nxe_media_status(struct ifnet *, struct ifmediareq *);
774
775
776 /* ring handling */
777 struct nxe_ring *nxe_ring_alloc(struct nxe_softc *, size_t, u_int);
778 void nxe_ring_sync(struct nxe_softc *, struct nxe_ring *,
779 int);
780 void nxe_ring_free(struct nxe_softc *, struct nxe_ring *);
781 int nxe_ring_readable(struct nxe_ring *, int);
782 int nxe_ring_writeable(struct nxe_ring *, int);
783 void *nxe_ring_cur(struct nxe_softc *, struct nxe_ring *);
784 void *nxe_ring_next(struct nxe_softc *, struct nxe_ring *);
785
786 struct mbuf *nxe_load_pkt(struct nxe_softc *, bus_dmamap_t,
787 struct mbuf *);
788 struct mbuf *nxe_coalesce_m(struct mbuf *);
789
790 /* pkts */
791 struct nxe_pkt_list *nxe_pkt_alloc(struct nxe_softc *, u_int, int);
792 void nxe_pkt_free(struct nxe_softc *,
793 struct nxe_pkt_list *);
794 void nxe_pkt_put(struct nxe_pkt_list *, struct nxe_pkt *);
795 struct nxe_pkt *nxe_pkt_get(struct nxe_pkt_list *);
796 struct nxe_pkt *nxe_pkt_used(struct nxe_pkt_list *);
797
798
799 /* wrapper around dmaable memory allocations */
800 struct nxe_dmamem *nxe_dmamem_alloc(struct nxe_softc *, bus_size_t,
801 bus_size_t);
802 void nxe_dmamem_free(struct nxe_softc *,
803 struct nxe_dmamem *);
804
805 /* low level hardware access goo */
806 u_int32_t nxe_read(struct nxe_softc *, bus_size_t);
807 void nxe_write(struct nxe_softc *, bus_size_t, u_int32_t);
808 int nxe_wait(struct nxe_softc *, bus_size_t, u_int32_t,
809 u_int32_t, u_int);
810
811 void nxe_doorbell(struct nxe_softc *, u_int32_t);
812
813 int nxe_crb_set(struct nxe_softc *, int);
814 u_int32_t nxe_crb_read(struct nxe_softc *, bus_size_t);
815 void nxe_crb_write(struct nxe_softc *, bus_size_t,
816 u_int32_t);
817 int nxe_crb_wait(struct nxe_softc *, bus_size_t,
818 u_int32_t, u_int32_t, u_int);
819
820 int nxe_rom_lock(struct nxe_softc *);
821 void nxe_rom_unlock(struct nxe_softc *);
822 int nxe_rom_read(struct nxe_softc *, u_int32_t,
823 u_int32_t *);
824 int nxe_rom_read_region(struct nxe_softc *, u_int32_t,
825 void *, size_t);
826
827
828 /* misc bits */
829 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
830
831 /* let's go! */
832
833 const struct pci_matchid nxe_devices[] = {
834 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_10GXXR },
835 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_10GCX4 },
836 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_4GCU },
837 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_IMEZ },
838 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_HMEZ },
839 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_IMEZ_2 },
840 { PCI_VENDOR_NETXEN, PCI_PRODUCT_NETXEN_NXB_HMEZ_2 }
841 };
842
843 const struct nxe_board nxe_boards[] = {
844 { NXE_BRDTYPE_P2_SB35_4G, NXE_0_NIU_MODE_GBE },
845 { NXE_BRDTYPE_P2_SB31_10G, NXE_0_NIU_MODE_XGE },
846 { NXE_BRDTYPE_P2_SB31_2G, NXE_0_NIU_MODE_GBE },
847 { NXE_BRDTYPE_P2_SB31_10G_IMEZ, NXE_0_NIU_MODE_XGE },
848 { NXE_BRDTYPE_P2_SB31_10G_HMEZ, NXE_0_NIU_MODE_XGE },
849 { NXE_BRDTYPE_P2_SB31_10G_CX4, NXE_0_NIU_MODE_XGE }
850 };
851
852 int
853 nxe_match(struct device *parent, void *match, void *aux)
854 {
855 struct pci_attach_args *pa = aux;
856
857 if (PCI_CLASS(pa->pa_class) != PCI_CLASS_NETWORK)
858 return (0);
859
860 return (pci_matchbyid(pa, nxe_devices, nitems(nxe_devices)));
861 }
862
863 void
864 nxe_attach(struct device *parent, struct device *self, void *aux)
865 {
866 struct nxe_softc *sc = (struct nxe_softc *)self;
867 struct pci_attach_args *pa = aux;
868 pci_intr_handle_t ih;
869 struct ifnet *ifp;
870
871 sc->sc_dmat = pa->pa_dmat;
872 sc->sc_function = pa->pa_function;
873 sc->sc_window = -1;
874
875 rw_init(&sc->sc_lock, NULL);
876
877 if (nxe_pci_map(sc, pa) != 0) {
878 /* error already printed by nxe_pci_map() */
879 return;
880 }
881
882 nxe_crb_set(sc, 1);
883
884 if (nxe_board_info(sc) != 0) {
885 /* error already printed by nxe_board_info() */
886 goto unmap;
887 }
888
889 if (nxe_user_info(sc) != 0) {
890 /* error already printed by nxe_user_info() */
891 goto unmap;
892 }
893
894 if (nxe_init(sc) != 0) {
895 /* error already printed by nxe_init() */
896 goto unmap;
897 }
898
899 if (pci_intr_map(pa, &ih) != 0) {
900 printf(": unable to map interrupt\n");
901 goto uninit;
902 }
903 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
904 nxe_intr, sc, DEVNAME(sc));
905 if (sc->sc_ih == NULL) {
906 printf(": unable to establish interrupt\n");
907 goto uninit;
908 }
909
910 ifp = &sc->sc_ac.ac_if;
911 ifp->if_softc = sc;
912 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
913 ifp->if_capabilities = IFCAP_VLAN_MTU;
914 ifp->if_ioctl = nxe_ioctl;
915 ifp->if_start = nxe_start;
916 ifp->if_watchdog = nxe_watchdog;
917 ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN;
918 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
919 ifq_init_maxlen(&ifp->if_snd, 512); /* XXX */
920
921 ifmedia_init(&sc->sc_media, 0, nxe_media_change, nxe_media_status);
922 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
923 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
924
925 if_attach(ifp);
926 ether_ifattach(ifp);
927
928 printf(": %s firmware %d.%d.%d address %s\n",
929 pci_intr_string(pa->pa_pc, ih),
930 sc->sc_fw_major, sc->sc_fw_minor, sc->sc_fw_build,
931 ether_sprintf(sc->sc_ac.ac_enaddr));
932 return;
933
934 uninit:
935 nxe_uninit(sc);
936 unmap:
937 nxe_pci_unmap(sc);
938 }
939
940 int
941 nxe_pci_map(struct nxe_softc *sc, struct pci_attach_args *pa)
942 {
943 pcireg_t memtype;
944
945 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NXE_PCI_BAR_MEM);
946 if (pci_mapreg_map(pa, NXE_PCI_BAR_MEM, memtype, 0, &sc->sc_memt,
947 &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
948 printf(": unable to map host registers\n");
949 return (1);
950 }
951 if (sc->sc_mems != NXE_PCI_BAR_MEM_128MB) {
952 printf(": unexpected register map size\n");
953 goto unmap_mem;
954 }
955
956 /* set up the CRB window */
957 if (bus_space_subregion(sc->sc_memt, sc->sc_memh, NXE_MAP_CRB,
958 sc->sc_mems - NXE_MAP_CRB, &sc->sc_crbh) != 0) {
959 printf(": unable to create CRB window\n");
960 goto unmap_mem;
961 }
962
963 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NXE_PCI_BAR_DOORBELL);
964 if (pci_mapreg_map(pa, NXE_PCI_BAR_DOORBELL, memtype, 0, &sc->sc_dbt,
965 &sc->sc_dbh, NULL, &sc->sc_dbs, 0) != 0) {
966 printf(": unable to map doorbell registers\n");
967 /* bus_space(9) says I don't have to unmap subregions */
968 goto unmap_mem;
969 }
970
971 config_mountroot(&sc->sc_dev, nxe_mountroot);
972 return (0);
973
974 unmap_mem:
975 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
976 sc->sc_mems = 0;
977 return (1);
978 }
979
980 void
981 nxe_pci_unmap(struct nxe_softc *sc)
982 {
983 bus_space_unmap(sc->sc_dbt, sc->sc_dbh, sc->sc_dbs);
984 sc->sc_dbs = 0;
985 /* bus_space(9) says I don't have to unmap the crb subregion */
986 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
987 sc->sc_mems = 0;
988 }
989
990 int
991 nxe_intr(void *xsc)
992 {
993 struct nxe_softc *sc = xsc;
994 u_int32_t vector;
995
996 DASSERT(sc->sc_window == 1);
997
998 vector = nxe_crb_read(sc, NXE_1_SW_INT_VECTOR);
999 if (!ISSET(vector, NXE_ISR_MINE(sc->sc_function)))
1000 return (0);
1001
1002 nxe_crb_write(sc, NXE_1_SW_INT_VECTOR, 0x80 << sc->sc_function);
1003
1004 /* the interrupt is mine! we should do some work now */
1005
1006 return (1);
1007 }
1008
1009 int
1010 nxe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
1011 {
1012 struct nxe_softc *sc = ifp->if_softc;
1013 struct ifreq *ifr = (struct ifreq *)addr;
1014 int s, error = 0;
1015
1016 rw_enter_write(&sc->sc_lock);
1017 s = splnet();
1018
1019 timeout_del(&sc->sc_tick);
1020
1021 switch (cmd) {
1022 case SIOCSIFADDR:
1023 SET(ifp->if_flags, IFF_UP);
1024 /* FALLTHROUGH */
1025
1026 case SIOCSIFFLAGS:
1027 if (ISSET(ifp->if_flags, IFF_UP)) {
1028 if (ISSET(ifp->if_flags, IFF_RUNNING))
1029 error = ENETRESET;
1030 else
1031 nxe_up(sc);
1032 } else {
1033 if (ISSET(ifp->if_flags, IFF_RUNNING))
1034 nxe_down(sc);
1035 }
1036 break;
1037
1038 case SIOCGIFMEDIA:
1039 case SIOCSIFMEDIA:
1040 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1041 break;
1042
1043 default:
1044 error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
1045 }
1046
1047 if (error == ENETRESET) {
1048 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1049 nxe_crb_set(sc, 0);
1050 nxe_iff(sc);
1051 nxe_crb_set(sc, 1);
1052 }
1053 error = 0;
1054 }
1055
1056 nxe_tick(sc);
1057
1058 splx(s);
1059 rw_exit_write(&sc->sc_lock);
1060 return (error);
1061 }
1062
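/*
 * nxe_up() brings a port up: after nxe_up_fw() it allocates the tx/rx
 * packet lists, the shared context structure, and the cmd, status, and rx
 * rings; hands the context to the firmware via the NXE_1_SW_CONTEXT
 * registers; programs the MTU, lladdr, and rx filter through window 0; and
 * finally unmasks interrupts.
 */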
1063 void
1064 nxe_up(struct nxe_softc *sc)
1065 {
1066 struct ifnet *ifp = &sc->sc_ac.ac_if;
1067 static const u_int rx_ring_sizes[] = { 16384, 1024, 128 };
1068 struct {
1069 struct nxe_ctx ctx;
1070 u_int32_t cmd_consumer;
1071 } __packed *dmamem;
1072 struct nxe_ctx *ctx;
1073 struct nxe_ctx_ring *ring;
1074 struct nxe_ring *nr;
1075 u_int64_t dva;
1076 u_int32_t intr_scheme;
1077 int i;
1078
1079 if (nxe_up_fw(sc) != 0)
1080 return;
1081
1082 /* allocate pkt lists */
1083 sc->sc_tx_pkts = nxe_pkt_alloc(sc, 128, NXE_TXD_MAX_SEGS);
1084 if (sc->sc_tx_pkts == NULL)
1085 return;
1086 sc->sc_rx_pkts = nxe_pkt_alloc(sc, 128, NXE_RXD_MAX_SEGS);
1087 if (sc->sc_rx_pkts == NULL)
1088 goto free_tx_pkts;
1089
1090 /* allocate the context memory and the consumer field */
1091 sc->sc_ctx = nxe_dmamem_alloc(sc, sizeof(*dmamem), PAGE_SIZE);
1092 if (sc->sc_ctx == NULL)
1093 goto free_rx_pkts;
1094
1095 dmamem = NXE_DMA_KVA(sc->sc_ctx);
1096 dva = NXE_DMA_DVA(sc->sc_ctx);
1097
1098 ctx = &dmamem->ctx;
1099 ctx->ctx_cmd_consumer_addr = htole64(dva + sizeof(dmamem->ctx));
1100 ctx->ctx_id = htole32(sc->sc_function);
1101
1102 sc->sc_cmd_consumer = &dmamem->cmd_consumer;
1103 sc->sc_cmd_consumer_cur = 0;
1104
1105 /* allocate the cmd/tx ring */
1106 sc->sc_cmd_ring = nxe_ring_alloc(sc,
1107 sizeof(struct nxe_tx_desc), 1024 /* XXX */);
1108 if (sc->sc_cmd_ring == NULL)
1109 goto free_ctx;
1110
1111 ctx->ctx_cmd_ring.r_addr =
1112 htole64(NXE_DMA_DVA(sc->sc_cmd_ring->nr_dmamem));
1113 ctx->ctx_cmd_ring.r_size = htole32(sc->sc_cmd_ring->nr_nentries);
1114
1115 /* allocate the status ring */
1116 sc->sc_status_ring = nxe_ring_alloc(sc,
1117 sizeof(struct nxe_status_desc), 16384 /* XXX */);
1118 if (sc->sc_status_ring == NULL)
1119 goto free_cmd_ring;
1120
1121 ctx->ctx_status_ring_addr =
1122 htole64(NXE_DMA_DVA(sc->sc_status_ring->nr_dmamem));
1123 ctx->ctx_status_ring_size = htole32(sc->sc_status_ring->nr_nentries);
1124
1125 /* allocate something to point the jumbo and lro rings at */
1126 sc->sc_dummy_rx = nxe_dmamem_alloc(sc, NXE_MAX_PKTLEN, PAGE_SIZE);
1127 if (sc->sc_dummy_rx == NULL)
1128 goto free_status_ring;
1129
1130 /* allocate the rx rings */
1131 for (i = 0; i < NXE_NRING; i++) {
1132 ring = &ctx->ctx_rx_rings[i];
1133 nr = nxe_ring_alloc(sc, sizeof(struct nxe_rx_desc),
1134 rx_ring_sizes[i]);
1135 if (nr == NULL)
1136 goto free_rx_rings;
1137
1138 ring->r_addr = htole64(NXE_DMA_DVA(nr->nr_dmamem));
1139 ring->r_size = htole32(nr->nr_nentries);
1140
1141 sc->sc_rx_rings[i] = nr;
1142 nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_PREWRITE);
1143 }
1144
1145 /* nothing can possibly go wrong now */
1146 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_rx),
1147 0, NXE_DMA_LEN(sc->sc_dummy_rx), BUS_DMASYNC_PREREAD);
1148 nxe_ring_sync(sc, sc->sc_status_ring, BUS_DMASYNC_PREREAD);
1149 nxe_ring_sync(sc, sc->sc_cmd_ring, BUS_DMASYNC_PREWRITE);
1150 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1151 0, NXE_DMA_LEN(sc->sc_ctx),
1152 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1153
1154 nxe_crb_write(sc, NXE_1_SW_CONTEXT_ADDR_LO(sc->sc_function),
1155 (u_int32_t)dva);
1156 nxe_crb_write(sc, NXE_1_SW_CONTEXT_ADDR_HI(sc->sc_function),
1157 (u_int32_t)(dva >> 32));
1158 nxe_crb_write(sc, NXE_1_SW_CONTEXT(sc->sc_port),
1159 NXE_1_SW_CONTEXT_SIG(sc->sc_port));
1160
1161 nxe_crb_set(sc, 0);
1162 nxe_crb_write(sc, NXE_0_XG_MTU(sc->sc_function),
1163 MCLBYTES - ETHER_ALIGN);
1164 nxe_lladdr(sc);
1165 nxe_iff(sc);
1166 nxe_crb_set(sc, 1);
1167
1168 SET(ifp->if_flags, IFF_RUNNING);
1169 ifq_clr_oactive(&ifp->if_snd);
1170
1171 /* enable interrupts */
1172 intr_scheme = nxe_crb_read(sc, NXE_1_SW_NIC_CAP_FW);
1173 if (intr_scheme != NXE_1_SW_NIC_CAP_PORTINTR)
1174 nxe_write(sc, NXE_ISR_MASK, 0x77f);
1175 nxe_crb_write(sc, NXE_1_SW_INT_MASK(sc->sc_function), 0x1);
1176 if (intr_scheme != NXE_1_SW_NIC_CAP_PORTINTR)
1177 nxe_crb_write(sc, NXE_1_SW_INT_VECTOR, 0x0);
1178 nxe_write(sc, NXE_ISR_TARGET_MASK, 0xbff);
1179
1180 return;
1181
1182 free_rx_rings:
1183 while (i > 0) {
1184 i--;
1185 nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_POSTWRITE);
1186 nxe_ring_free(sc, sc->sc_rx_rings[i]);
1187 }
1188
1189 nxe_dmamem_free(sc, sc->sc_dummy_rx);
1190 free_status_ring:
1191 nxe_ring_free(sc, sc->sc_status_ring);
1192 free_cmd_ring:
1193 nxe_ring_free(sc, sc->sc_cmd_ring);
1194 free_ctx:
1195 nxe_dmamem_free(sc, sc->sc_ctx);
1196 free_rx_pkts:
1197 nxe_pkt_free(sc, sc->sc_rx_pkts);
1198 free_tx_pkts:
1199 nxe_pkt_free(sc, sc->sc_tx_pkts);
1200 }
1201
1202 int
1203 nxe_up_fw(struct nxe_softc *sc)
1204 {
1205 u_int32_t r;
1206
1207 r = nxe_crb_read(sc, NXE_1_SW_CMDPEG_STATE);
1208 if (r == NXE_1_SW_CMDPEG_STATE_ACK)
1209 return (0);
1210
1211 if (r != NXE_1_SW_CMDPEG_STATE_DONE)
1212 return (1);
1213
1214 nxe_crb_write(sc, NXE_1_SW_NIC_CAP_HOST, NXE_1_SW_NIC_CAP_PORTINTR);
1215 nxe_crb_write(sc, NXE_1_SW_MPORT_MODE, NXE_1_SW_MPORT_MODE_MULTI);
1216 nxe_crb_write(sc, NXE_1_SW_CMDPEG_STATE, NXE_1_SW_CMDPEG_STATE_ACK);
1217
1218 /* XXX busy wait in a process context is naughty */
1219 if (!nxe_crb_wait(sc, NXE_1_SW_STATUS_STATE(sc->sc_function),
1220 0xffffffff, NXE_1_SW_STATUS_STATE_READY, 1000))
1221 return (1);
1222
1223 return (0);
1224 }
1225
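/*
 * The MAC address is split across two window 0 registers: the first two
 * octets land in the top half of NXE_0_XG_MAC_LO and the remaining four
 * fill NXE_0_XG_MAC_HI, as laid out below.
 */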
1226 void
1227 nxe_lladdr(struct nxe_softc *sc)
1228 {
1229 u_int8_t *lladdr = sc->sc_ac.ac_enaddr;
1230
1231 DASSERT(sc->sc_window == 0);
1232
1233 nxe_crb_write(sc, NXE_0_XG_MAC_LO(sc->sc_port),
1234 (lladdr[0] << 16) | (lladdr[1] << 24));
1235 nxe_crb_write(sc, NXE_0_XG_MAC_HI(sc->sc_port),
1236 (lladdr[2] << 0) | (lladdr[3] << 8) |
1237 (lladdr[4] << 16) | (lladdr[5] << 24));
1238 }
1239
1240 void
1241 nxe_iff(struct nxe_softc *sc)
1242 {
1243 struct ifnet *ifp = &sc->sc_ac.ac_if;
1244 u_int32_t cfg1 = 0x1447; /* XXX */
1245
1246 DASSERT(sc->sc_window == 0);
1247
1248 CLR(ifp->if_flags, IFF_ALLMULTI);
1249
1250 if (ISSET(ifp->if_flags, IFF_PROMISC) || sc->sc_ac.ac_multicnt > 0) {
1251 SET(ifp->if_flags, IFF_ALLMULTI);
1252 if (ISSET(ifp->if_flags, IFF_PROMISC))
1253 cfg1 |= NXE_0_XG_CFG1_PROMISC;
1254 else
1255 cfg1 |= NXE_0_XG_CFG1_MULTICAST;
1256 }
1257
1258 nxe_crb_write(sc, NXE_0_XG_CFG0(sc->sc_port),
1259 NXE_0_XG_CFG0_TX_EN | NXE_0_XG_CFG0_RX_EN);
1260 nxe_crb_write(sc, NXE_0_XG_CFG1(sc->sc_port), cfg1);
1261 }
1262
1263 void
1264 nxe_down(struct nxe_softc *sc)
1265 {
1266 struct ifnet *ifp = &sc->sc_ac.ac_if;
1267 int i;
1268
1269 CLR(ifp->if_flags, IFF_RUNNING | IFF_ALLMULTI);
1270 ifq_clr_oactive(&ifp->if_snd);
1271
1272 /* XXX turn the chip off */
1273
1274 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1275 0, NXE_DMA_LEN(sc->sc_ctx),
1276 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1277 nxe_ring_sync(sc, sc->sc_cmd_ring, BUS_DMASYNC_POSTWRITE);
1278 nxe_ring_sync(sc, sc->sc_status_ring, BUS_DMASYNC_POSTREAD);
1279 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_rx),
1280 0, NXE_DMA_LEN(sc->sc_dummy_rx), BUS_DMASYNC_POSTREAD);
1281
1282 for (i = 0; i < NXE_NRING; i++) {
1283 nxe_ring_sync(sc, sc->sc_rx_rings[i], BUS_DMASYNC_POSTWRITE);
1284 nxe_ring_free(sc, sc->sc_rx_rings[i]);
1285 }
1286 nxe_dmamem_free(sc, sc->sc_dummy_rx);
1287 nxe_ring_free(sc, sc->sc_status_ring);
1288 nxe_ring_free(sc, sc->sc_cmd_ring);
1289 nxe_dmamem_free(sc, sc->sc_ctx);
1290 nxe_pkt_free(sc, sc->sc_rx_pkts);
1291 nxe_pkt_free(sc, sc->sc_tx_pkts);
1292 }
1293
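/*
 * nxe_start() dequeues mbufs from the send queue, DMA-loads them
 * (coalescing via nxe_coalesce_m() if a map runs out of segments), fills
 * one or more tx descriptors per packet, and then advances the cmd ring
 * producer register so the firmware picks up the work.
 */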
1294 void
1295 nxe_start(struct ifnet *ifp)
1296 {
1297 struct nxe_softc *sc = ifp->if_softc;
1298 struct nxe_ring *nr = sc->sc_cmd_ring;
1299 struct nxe_tx_desc *txd;
1300 struct nxe_pkt *pkt;
1301 struct mbuf *m;
1302 bus_dmamap_t dmap;
1303 bus_dma_segment_t *segs;
1304 int nsegs;
1305
1306 if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1307 ifq_is_oactive(&ifp->if_snd) ||
1308 ifq_empty(&ifp->if_snd))
1309 return;
1310
1311 if (nxe_ring_writeable(nr, sc->sc_cmd_consumer_cur) < NXE_TXD_DESCS) {
1312 ifq_set_oactive(&ifp->if_snd);
1313 return;
1314 }
1315
1316 nxe_ring_sync(sc, nr, BUS_DMASYNC_POSTWRITE);
1317 txd = nxe_ring_cur(sc, nr);
1318 bzero(txd, sizeof(struct nxe_tx_desc));
1319
1320 do {
1321 m = ifq_deq_begin(&ifp->if_snd);
1322 if (m == NULL)
1323 break;
1324
1325 pkt = nxe_pkt_get(sc->sc_tx_pkts);
1326 if (pkt == NULL) {
1327 ifq_deq_rollback(&ifp->if_snd, m);
1328 ifq_set_oactive(&ifp->if_snd);
1329 break;
1330 }
1331
1332 ifq_deq_commit(&ifp->if_snd, m);
1333
1334 dmap = pkt->pkt_dmap;
1335 m = nxe_load_pkt(sc, dmap, m);
1336 if (m == NULL) {
1337 nxe_pkt_put(sc->sc_tx_pkts, pkt);
1338 ifp->if_oerrors++;
1339 break;
1340 }
1341
1342 #if NBPFILTER > 0
1343 if (ifp->if_bpf)
1344 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1345 #endif
1346
1347 pkt->pkt_m = m;
1348
1349 txd->tx_flags = htole16(NXE_TXD_F_OPCODE_TX);
1350 txd->tx_nbufs = dmap->dm_nsegs;
1351 txd->tx_length = htole16(dmap->dm_mapsize);
1352 txd->tx_port = sc->sc_port;
1353
1354 segs = dmap->dm_segs;
1355 nsegs = dmap->dm_nsegs;
1356 do {
1357 switch ((nsegs > NXE_TXD_SEGS) ?
1358 NXE_TXD_SEGS : nsegs) {
1359 case 4:
1360 txd->tx_addr_4 = htole64(segs[3].ds_addr);
1361 txd->tx_slen_4 = htole32(segs[3].ds_len);
1362 case 3:
1363 txd->tx_addr_3 = htole64(segs[2].ds_addr);
1364 txd->tx_slen_3 = htole32(segs[2].ds_len);
1365 case 2:
1366 txd->tx_addr_2 = htole64(segs[1].ds_addr);
1367 txd->tx_slen_2 = htole32(segs[1].ds_len);
1368 case 1:
1369 txd->tx_addr_1 = htole64(segs[0].ds_addr);
1370 txd->tx_slen_1 = htole32(segs[0].ds_len);
1371 break;
1372 default:
1373 panic("%s: unexpected segments in tx map",
1374 DEVNAME(sc));
1375 }
1376
1377 nsegs -= NXE_TXD_SEGS;
1378 segs += NXE_TXD_SEGS;
1379
1380 pkt->pkt_id = nr->nr_slot;
1381
1382 txd = nxe_ring_next(sc, nr);
1383 bzero(txd, sizeof(struct nxe_tx_desc));
1384 } while (nsegs > 0);
1385
1386 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1387 BUS_DMASYNC_PREWRITE);
1388
1389 } while (nr->nr_ready >= NXE_TXD_DESCS);
1390
1391 nxe_ring_sync(sc, nr, BUS_DMASYNC_PREWRITE);
1392 nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), nr->nr_slot);
1393 }
1394
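/*
 * nxe_complete() reclaims transmitted packets: it reads the cmd ring
 * consumer index that the firmware writes back into host memory and frees
 * every tx packet whose slot has been consumed since the last pass,
 * clearing the oactive flag if anything was reclaimed.
 */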
1395 int
1396 nxe_complete(struct nxe_softc *sc)
1397 {
1398 struct nxe_pkt *pkt;
1399 int new_cons, cur_cons;
1400 int rv = 0;
1401
1402 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1403 0, NXE_DMA_LEN(sc->sc_ctx),
1404 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1405 new_cons = letoh32(*sc->sc_cmd_consumer);
1406 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_ctx),
1407 0, NXE_DMA_LEN(sc->sc_ctx),
1408 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1409
1410 cur_cons = sc->sc_cmd_consumer_cur;
1411 pkt = nxe_pkt_used(sc->sc_tx_pkts);
1412
1413 while (pkt != NULL && cur_cons != new_cons) {
1414 if (pkt->pkt_id == cur_cons) {
1415 bus_dmamap_sync(sc->sc_dmat, pkt->pkt_dmap,
1416 0, pkt->pkt_dmap->dm_mapsize,
1417 BUS_DMASYNC_POSTWRITE);
1418 bus_dmamap_unload(sc->sc_dmat, pkt->pkt_dmap);
1419
1420 m_freem(pkt->pkt_m);
1421
1422 nxe_pkt_put(sc->sc_tx_pkts, pkt);
1423
1424 pkt = nxe_pkt_used(sc->sc_tx_pkts);
1425 }
1426
1427 cur_cons++;
1428 cur_cons %= sc->sc_cmd_ring->nr_nentries;
1429
1430 rv = 1;
1431 }
1432
1433 if (rv == 1) {
1434 sc->sc_cmd_consumer_cur = cur_cons;
1435 ifq_clr_oactive(&sc->sc_ac.ac_if.if_snd);
1436 }
1437
1438 return (rv);
1439 }
1440
1441 struct mbuf *
1442 nxe_coalesce_m(struct mbuf *m)
1443 {
1444 struct mbuf *m0;
1445
1446 MGETHDR(m0, M_DONTWAIT, MT_DATA);
1447 if (m0 == NULL)
1448 goto err;
1449
1450 if (m->m_pkthdr.len > MHLEN) {
1451 MCLGET(m0, M_DONTWAIT);
1452 if (!(m0->m_flags & M_EXT)) {
1453 m_freem(m0);
1454 m0 = NULL;
1455 goto err;
1456 }
1457 }
1458
1459 m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
1460 m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
1461
1462 err:
1463 m_freem(m);
1464 return (m0);
1465 }
1466
1467 struct mbuf *
1468 nxe_load_pkt(struct nxe_softc *sc, bus_dmamap_t dmap, struct mbuf *m)
1469 {
1470 switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT)) {
1471 case 0:
1472 break;
1473
1474 case EFBIG:
1475 m = nxe_coalesce_m(m);
1476 if (m == NULL)
1477 break;
1478
1479 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
1480 BUS_DMA_NOWAIT) == 0)
1481 break;
1482
1483 /* we get here on error */
1484 /* FALLTHROUGH */
1485 default:
1486 m_freem(m);
1487 m = NULL;
1488 break;
1489 }
1490
1491 return (m);
1492 }
1493
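/*
 * nxe_rx_start() refills the normal rx ring: it pairs each free rx packet
 * with a fresh cluster mbuf, writes a descriptor pointing at it, and then
 * advertises the new producer index via both the software register and a
 * doorbell.
 */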
1494 void
1495 nxe_rx_start(struct nxe_softc *sc)
1496 {
1497 struct nxe_ring *nr = sc->sc_rx_rings[NXE_RING_RX];
1498 struct nxe_rx_desc *rxd;
1499 struct nxe_pkt *pkt;
1500 struct mbuf *m;
1501
1502 if (nxe_ring_writeable(nr, 0) == 0)
1503 return;
1504
1505 nxe_ring_sync(sc, nr, BUS_DMASYNC_POSTWRITE);
1506 rxd = nxe_ring_cur(sc, nr);
1507
1508 for (;;) {
1509 pkt = nxe_pkt_get(sc->sc_rx_pkts);
1510 if (pkt == NULL)
1511 goto done;
1512
1513 MGETHDR(m, M_DONTWAIT, MT_DATA);
1514 if (m == NULL)
1515 goto put_pkt;
1516
1517 MCLGET(m, M_DONTWAIT);
1518 if (!ISSET(m->m_flags, M_EXT))
1519 goto free_m;
1520
1521 m->m_data += ETHER_ALIGN;
1522 m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
1523
1524 if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->pkt_dmap, m,
1525 BUS_DMA_NOWAIT) != 0)
1526 goto free_m;
1527
1528 pkt->pkt_m = m;
1529
1530 bzero(rxd, sizeof(struct nxe_rx_desc));
1531 rxd->rx_len = htole32(m->m_len);
1532 rxd->rx_id = pkt->pkt_id;
1533 rxd->rx_addr = htole64(pkt->pkt_dmap->dm_segs[0].ds_addr);
1534
1535 bus_dmamap_sync(sc->sc_dmat, pkt->pkt_dmap, 0,
1536 pkt->pkt_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
1537
1538 rxd = nxe_ring_next(sc, nr);
1539
1540 if (nr->nr_ready == 0)
1541 goto done;
1542 }
1543
1544 free_m:
1545 m_freem(m);
1546 put_pkt:
1547 nxe_pkt_put(sc->sc_rx_pkts, pkt);
1548 done:
1549 nxe_ring_sync(sc, nr, BUS_DMASYNC_PREWRITE);
1550 nxe_crb_write(sc, NXE_1_SW_RX_PRODUCER(sc->sc_function), nr->nr_slot);
1551 nxe_doorbell(sc, NXE_DB_PEGID_RX | NXE_DB_PRIVID |
1552 NXE_DB_OPCODE_RX_PROD |
1553 NXE_DB_COUNT(nr->nr_slot) | NXE_DB_CTXID(sc->sc_function));
1554 }
1555
1556 void
1557 nxe_watchdog(struct ifnet *ifp)
1558 {
1559 /* do nothing */
1560 }
1561
1562 int
1563 nxe_media_change(struct ifnet *ifp)
1564 {
1565 /* ignore for now */
1566 return (0);
1567 }
1568
1569 void
1570 nxe_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1571 {
1572 struct nxe_softc *sc = ifp->if_softc;
1573
1574 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1575 imr->ifm_status = IFM_AVALID;
1576
1577 nxe_link_state(sc);
1578 if (LINK_STATE_IS_UP(ifp->if_link_state))
1579 imr->ifm_status |= IFM_ACTIVE;
1580 }
1581
1582 void
1583 nxe_link_state(struct nxe_softc *sc)
1584 {
1585 struct ifnet *ifp = &sc->sc_ac.ac_if;
1586 int link_state = LINK_STATE_DOWN;
1587 u_int32_t r;
1588
1589 DASSERT(sc->sc_window == 1);
1590
1591 r = nxe_crb_read(sc, NXE_1_SW_XG_STATE);
1592 if (NXE_1_SW_XG_STATE_PORT(r, sc->sc_function) & NXE_1_SW_XG_STATE_UP)
1593 link_state = LINK_STATE_UP;
1594
1595 if (ifp->if_link_state != link_state) {
1596 ifp->if_link_state = link_state;
1597 if_link_state_change(ifp);
1598 }
1599 }
1600
1601 int
1602 nxe_board_info(struct nxe_softc *sc)
1603 {
1604 struct nxe_info *ni;
1605 int rv = 1;
1606 int i;
1607
1608 ni = malloc(sizeof(struct nxe_info), M_TEMP, M_NOWAIT);
1609 if (ni == NULL) {
1610 printf(": unable to allocate temporary memory\n");
1611 return (1);
1612 }
1613
1614 if (nxe_rom_read_region(sc, NXE_FLASH_BRDCFG, ni,
1615 sizeof(struct nxe_info)) != 0) {
1616 printf(": unable to read board info\n");
1617 goto out;
1618 }
1619
1620 if (ni->ni_hdrver != NXE_INFO_HDRVER_1) {
1621 printf(": unexpected board info header version 0x%08x\n",
1622 ni->ni_hdrver);
1623 goto out;
1624 }
1625 if (ni->ni_magic != NXE_INFO_MAGIC) {
1626 printf(": board info magic is invalid\n");
1627 goto out;
1628 }
1629
1630 for (i = 0; i < nitems(nxe_boards); i++) {
1631 if (ni->ni_board_type == nxe_boards[i].brd_type) {
1632 sc->sc_board = &nxe_boards[i];
1633 break;
1634 }
1635 }
1636 if (sc->sc_board == NULL) {
1637 printf(": unknown board type %04x\n", ni->ni_board_type);
1638 goto out;
1639 }
1640
1641 rv = 0;
1642 out:
1643 free(ni, M_TEMP, 0);
1644 return (rv);
1645 }
1646
1647 int
1648 nxe_user_info(struct nxe_softc *sc)
1649 {
1650 struct nxe_userinfo *nu;
1651 u_int64_t lladdr;
1652 struct nxe_lladdr *la;
1653 int rv = 1;
1654
1655 nu = malloc(sizeof(struct nxe_userinfo), M_TEMP, M_NOWAIT);
1656 if (nu == NULL) {
1657 printf(": unable to allocate temp memory\n");
1658 return (1);
1659 }
1660 if (nxe_rom_read_region(sc, NXE_FLASH_USER, nu,
1661 sizeof(struct nxe_userinfo)) != 0) {
1662 printf(": unable to read user info\n");
1663 goto out;
1664 }
1665
1666 sc->sc_fw_major = nu->nu_imageinfo.nim_img_ver_major;
1667 sc->sc_fw_minor = nu->nu_imageinfo.nim_img_ver_minor;
1668 sc->sc_fw_build = letoh16(nu->nu_imageinfo.nim_img_ver_build);
1669
1670 if (sc->sc_fw_major > NXE_VERSION_MAJOR ||
1671 sc->sc_fw_major < NXE_VERSION_MAJOR ||
1672 sc->sc_fw_minor > NXE_VERSION_MINOR ||
1673 sc->sc_fw_minor < NXE_VERSION_MINOR) {
1674 printf(": firmware %d.%d.%d is unsupported by this driver\n",
1675 sc->sc_fw_major, sc->sc_fw_minor, sc->sc_fw_build);
1676 goto out;
1677 }
1678
1679 lladdr = swap64(nu->nu_lladdr[sc->sc_function][0]);
1680 la = (struct nxe_lladdr *)&lladdr;
1681 bcopy(la->lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1682
1683 rv = 0;
1684 out:
1685 free(nu, M_TEMP, 0);
1686 return (rv);
1687 }
1688
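/*
 * nxe_init() zeroes this function's cmd ring pointers and, on function 0
 * only, performs the one-time bring-up: it hands the firmware a dummy DMA
 * buffer and kicks the boot process via NXE_1_GLB_PEGTUNE. The firmware
 * takes a while to come up, so the wait for NXE_1_SW_CMDPEG_STATE_DONE is
 * deferred to nxe_mountroot().
 */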
1689 int
1690 nxe_init(struct nxe_softc *sc)
1691 {
1692 u_int64_t dva;
1693 u_int32_t r;
1694
1695 /* stop the chip from processing */
1696 nxe_crb_write(sc, NXE_1_SW_CMD_PRODUCER(sc->sc_function), 0);
1697 nxe_crb_write(sc, NXE_1_SW_CMD_CONSUMER(sc->sc_function), 0);
1698 nxe_crb_write(sc, NXE_1_SW_CMD_ADDR_HI, 0);
1699 nxe_crb_write(sc, NXE_1_SW_CMD_ADDR_LO, 0);
1700
1701 /*
1702 * if this is the first port on the device it needs some special
1703 * treatment to get things going.
1704 */
1705 if (sc->sc_function == 0) {
1706 /* init adapter offload */
1707 sc->sc_dummy_dma = nxe_dmamem_alloc(sc,
1708 NXE_1_SW_DUMMY_ADDR_LEN, PAGE_SIZE);
1709 if (sc->sc_dummy_dma == NULL) {
1710 printf(": unable to allocate dummy memory\n");
1711 return (1);
1712 }
1713
1714 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1715 0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_PREREAD);
1716
1717 dva = NXE_DMA_DVA(sc->sc_dummy_dma);
1718 nxe_crb_write(sc, NXE_1_SW_DUMMY_ADDR_HI, dva >> 32);
1719 nxe_crb_write(sc, NXE_1_SW_DUMMY_ADDR_LO, dva);
1720
1721 r = nxe_crb_read(sc, NXE_1_SW_BOOTLD_CONFIG);
1722 if (r == 0x55555555) {
1723 r = nxe_crb_read(sc, NXE_1_ROMUSB_SW_RESET);
1724 if (r != NXE_1_ROMUSB_SW_RESET_BOOT) {
1725 printf(": unexpected boot state\n");
1726 goto err;
1727 }
1728
1729 /* clear */
1730 nxe_crb_write(sc, NXE_1_SW_BOOTLD_CONFIG, 0);
1731 }
1732
1733 /* start the device up */
1734 nxe_crb_write(sc, NXE_1_SW_DRIVER_VER, NXE_VERSION);
1735 nxe_crb_write(sc, NXE_1_GLB_PEGTUNE, NXE_1_GLB_PEGTUNE_DONE);
1736
1737 /*
1738 * the firmware takes a long time to boot, so we'll check
1739 * it later on, and again when we want to bring a port up.
1740 */
1741 }
1742
1743 return (0);
1744
1745 err:
1746 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1747 0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_POSTREAD);
1748 nxe_dmamem_free(sc, sc->sc_dummy_dma);
1749 return (1);
1750 }
1751
1752 void
1753 nxe_uninit(struct nxe_softc *sc)
1754 {
1755 if (sc->sc_function == 0) {
1756 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(sc->sc_dummy_dma),
1757 0, NXE_DMA_LEN(sc->sc_dummy_dma), BUS_DMASYNC_POSTREAD);
1758 nxe_dmamem_free(sc, sc->sc_dummy_dma);
1759 }
1760 }
1761
1762 void
1763 nxe_mountroot(struct device *self)
1764 {
1765 struct nxe_softc *sc = (struct nxe_softc *)self;
1766
1767 DASSERT(sc->sc_window == 1);
1768
1769 if (!nxe_crb_wait(sc, NXE_1_SW_CMDPEG_STATE, 0xffffffff,
1770 NXE_1_SW_CMDPEG_STATE_DONE, 10000)) {
1771 printf("%s: firmware bootstrap failed, code 0x%08x\n",
1772 DEVNAME(sc), nxe_crb_read(sc, NXE_1_SW_CMDPEG_STATE));
1773 return;
1774 }
1775
1776 sc->sc_port = nxe_crb_read(sc, NXE_1_SW_V2P(sc->sc_function));
1777 if (sc->sc_port == 0x55555555)
1778 sc->sc_port = sc->sc_function;
1779
1780 nxe_crb_write(sc, NXE_1_SW_NIC_CAP_HOST, NXE_1_SW_NIC_CAP_PORTINTR);
1781 nxe_crb_write(sc, NXE_1_SW_MPORT_MODE, NXE_1_SW_MPORT_MODE_MULTI);
1782 nxe_crb_write(sc, NXE_1_SW_CMDPEG_STATE, NXE_1_SW_CMDPEG_STATE_ACK);
1783
1784 sc->sc_sensor.type = SENSOR_TEMP;
1785 strlcpy(sc->sc_sensor_dev.xname, DEVNAME(sc),
1786 sizeof(sc->sc_sensor_dev.xname));
1787 sensor_attach(&sc->sc_sensor_dev, &sc->sc_sensor);
1788 sensordev_install(&sc->sc_sensor_dev);
1789
1790 timeout_set(&sc->sc_tick, nxe_tick, sc);
1791 nxe_tick(sc);
1792 }
1793
1794 void
1795 nxe_tick(void *xsc)
1796 {
1797 struct nxe_softc *sc = xsc;
1798 u_int32_t temp;
1799 int window;
1800 int s;
1801
1802 s = splnet();
1803 window = nxe_crb_set(sc, 1);
1804 temp = nxe_crb_read(sc, NXE_1_SW_TEMP);
1805 nxe_link_state(sc);
1806 nxe_crb_set(sc, window);
1807 splx(s);
1808
1809 sc->sc_sensor.value = NXE_1_SW_TEMP_VAL(temp) * 1000000 + 273150000;
1810 sc->sc_sensor.flags = 0;
1811
1812 switch (NXE_1_SW_TEMP_STATE(temp)) {
1813 case NXE_1_SW_TEMP_STATE_NONE:
1814 sc->sc_sensor.status = SENSOR_S_UNSPEC;
1815 break;
1816 case NXE_1_SW_TEMP_STATE_OK:
1817 sc->sc_sensor.status = SENSOR_S_OK;
1818 break;
1819 case NXE_1_SW_TEMP_STATE_WARN:
1820 sc->sc_sensor.status = SENSOR_S_WARN;
1821 break;
1822 case NXE_1_SW_TEMP_STATE_CRIT:
1823 /* we should probably bring things down if this is true */
1824 sc->sc_sensor.status = SENSOR_S_CRIT;
1825 break;
1826 default:
1827 sc->sc_sensor.flags = SENSOR_FUNKNOWN;
1828 break;
1829 }
1830
1831 timeout_add_sec(&sc->sc_tick, 5);
1832 }
1833
1834
1835 struct nxe_ring *
1836 nxe_ring_alloc(struct nxe_softc *sc, size_t desclen, u_int nentries)
1837 {
1838 struct nxe_ring *nr;
1839
1840 nr = malloc(sizeof(struct nxe_ring), M_DEVBUF, M_WAITOK);
1841
1842 nr->nr_dmamem = nxe_dmamem_alloc(sc, desclen * nentries, PAGE_SIZE);
1843 if (nr->nr_dmamem == NULL) {
1844 free(nr, M_DEVBUF, 0);
1845 return (NULL);
1846 }
1847
1848 nr->nr_pos = NXE_DMA_KVA(nr->nr_dmamem);
1849 nr->nr_slot = 0;
1850 nr->nr_desclen = desclen;
1851 nr->nr_nentries = nentries;
1852
1853 return (nr);
1854 }
1855
1856 void
1857 nxe_ring_sync(struct nxe_softc *sc, struct nxe_ring *nr, int flags)
1858 {
1859 bus_dmamap_sync(sc->sc_dmat, NXE_DMA_MAP(nr->nr_dmamem),
1860 0, NXE_DMA_LEN(nr->nr_dmamem), flags);
1861 }
1862
1863 void
nxe_ring_free(struct nxe_softc * sc,struct nxe_ring * nr)1864 nxe_ring_free(struct nxe_softc *sc, struct nxe_ring *nr)
1865 {
1866 nxe_dmamem_free(sc, nr->nr_dmamem);
1867 free(nr, M_DEVBUF, 0);
1868 }
1869
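/*
 * nxe_ring_readable() and nxe_ring_writeable() compute how far the
 * current slot is from the index the hardware last reported (the
 * producer for completion rings, the consumer for command rings),
 * wrapping modulo the number of entries. The result is cached in
 * nr_ready and counted down by nxe_ring_next().
 */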
int
nxe_ring_readable(struct nxe_ring *nr, int producer)
{
	nr->nr_ready = producer - nr->nr_slot;
	if (nr->nr_ready < 0)
		nr->nr_ready += nr->nr_nentries;

	return (nr->nr_ready);
}

int
nxe_ring_writeable(struct nxe_ring *nr, int consumer)
{
	nr->nr_ready = consumer - nr->nr_slot;
	if (nr->nr_ready <= 0)
		nr->nr_ready += nr->nr_nentries;

	return (nr->nr_ready);
}

void *
nxe_ring_cur(struct nxe_softc *sc, struct nxe_ring *nr)
{
	return (nr->nr_pos);
}

void *
nxe_ring_next(struct nxe_softc *sc, struct nxe_ring *nr)
{
	if (++nr->nr_slot >= nr->nr_nentries) {
		nr->nr_slot = 0;
		nr->nr_pos = NXE_DMA_KVA(nr->nr_dmamem);
	} else
		nr->nr_pos += nr->nr_desclen;

	nr->nr_ready--;

	return (nr->nr_pos);
}

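/*
 * Packet list helpers. A list is a fixed pool of npkts packet
 * descriptors, each with a preallocated DMA map good for up to nsegs
 * segments, moved between a free and a used TAILQ by nxe_pkt_get()
 * and nxe_pkt_put().
 */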
struct nxe_pkt_list *
nxe_pkt_alloc(struct nxe_softc *sc, u_int npkts, int nsegs)
{
	struct nxe_pkt_list *npl;
	struct nxe_pkt *pkt;
	int i;

	npl = malloc(sizeof(*npl), M_DEVBUF, M_WAITOK | M_ZERO);
	pkt = mallocarray(npkts, sizeof(*pkt), M_DEVBUF, M_WAITOK | M_ZERO);

	npl->npl_pkts = pkt;
	TAILQ_INIT(&npl->npl_free);
	TAILQ_INIT(&npl->npl_used);
	for (i = 0; i < npkts; i++) {
		pkt = &npl->npl_pkts[i];

		pkt->pkt_id = i;
		if (bus_dmamap_create(sc->sc_dmat, NXE_MAX_PKTLEN, nsegs,
		    NXE_MAX_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->pkt_dmap) != 0) {
			nxe_pkt_free(sc, npl);
			return (NULL);
		}

		TAILQ_INSERT_TAIL(&npl->npl_free, pkt, pkt_link);
	}

	return (npl);
}

void
nxe_pkt_free(struct nxe_softc *sc, struct nxe_pkt_list *npl)
{
	struct nxe_pkt *pkt;

	while ((pkt = nxe_pkt_get(npl)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, pkt->pkt_dmap);

	free(npl->npl_pkts, M_DEVBUF, 0);
	free(npl, M_DEVBUF, sizeof *npl);
}

struct nxe_pkt *
nxe_pkt_get(struct nxe_pkt_list *npl)
{
	struct nxe_pkt *pkt;

	pkt = TAILQ_FIRST(&npl->npl_free);
	if (pkt != NULL) {
		TAILQ_REMOVE(&npl->npl_free, pkt, pkt_link);
		TAILQ_INSERT_TAIL(&npl->npl_used, pkt, pkt_link);
	}

	return (pkt);
}

void
nxe_pkt_put(struct nxe_pkt_list *npl, struct nxe_pkt *pkt)
{
	TAILQ_REMOVE(&npl->npl_used, pkt, pkt_link);
	TAILQ_INSERT_TAIL(&npl->npl_free, pkt, pkt_link);
}

struct nxe_pkt *
nxe_pkt_used(struct nxe_pkt_list *npl)
{
	return (TAILQ_FIRST(&npl->npl_used));
}

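/*
 * Allocate a single-segment chunk of DMA-safe memory and keep the
 * map, segment, kva, and size together in an nxe_dmamem descriptor.
 * The four bus_dma steps (map create, memory alloc, kva map, map
 * load) are unwound in reverse order if any of them fails.
 */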
struct nxe_dmamem *
nxe_dmamem_alloc(struct nxe_softc *sc, bus_size_t size, bus_size_t align)
{
	struct nxe_dmamem *ndm;
	int nsegs;

	ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
	ndm->ndm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
		goto ndmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &ndm->ndm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (ndm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
ndmfree:
	free(ndm, M_DEVBUF, sizeof *ndm);

	return (NULL);
}

void
nxe_dmamem_free(struct nxe_softc *sc, struct nxe_dmamem *ndm)
{
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
	free(ndm, M_DEVBUF, sizeof *ndm);
}

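/*
 * Raw register accessors for the memory BAR. Reads are preceded and
 * writes followed by a bus_space barrier so the accesses reach the
 * device in program order.
 */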
u_int32_t
nxe_read(struct nxe_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, r));
}

void
nxe_write(struct nxe_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, v);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

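/*
 * Poll a register until the bits in m match v. delay(1000) makes each
 * iteration roughly a millisecond, so the timeout argument is in
 * milliseconds. Returns 1 on success and 0 if the timeout expires.
 */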
int
nxe_wait(struct nxe_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
    u_int timeout)
{
	while ((nxe_read(sc, r) & m) != v) {
		if (timeout == 0)
			return (0);

		delay(1000);
		timeout--;
	}

	return (1);
}

void
nxe_doorbell(struct nxe_softc *sc, u_int32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_memh, NXE_DB, v);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, NXE_DB, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

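/*
 * The CRB register space is reached through a window that each PCI
 * function selects via its NXE_WIN_CRB register. nxe_crb_set()
 * switches the window only when it differs from the cached sc_window
 * and returns the previous setting so callers can restore it.
 */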
int
nxe_crb_set(struct nxe_softc *sc, int window)
{
	int oldwindow = sc->sc_window;
	u_int32_t r;

	if (sc->sc_window != window) {
		sc->sc_window = window;

		r = window ? NXE_WIN_CRB_1 : NXE_WIN_CRB_0;
		nxe_write(sc, NXE_WIN_CRB(sc->sc_function), r);

		if (nxe_read(sc, NXE_WIN_CRB(sc->sc_function)) != r)
			printf("%s: crb window hasn't moved\n", DEVNAME(sc));
	}

	return (oldwindow);
}

u_int32_t
nxe_crb_read(struct nxe_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_memt, sc->sc_crbh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_memt, sc->sc_crbh, r));
}

void
nxe_crb_write(struct nxe_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_crbh, r, v);
	bus_space_barrier(sc->sc_memt, sc->sc_crbh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

int
nxe_crb_wait(struct nxe_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
    u_int timeout)
{
	while ((nxe_crb_read(sc, r) & m) != v) {
		if (timeout == 0)
			return (0);

		delay(1000);
		timeout--;
	}

	return (1);
}

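/*
 * Access to the flash ROM is guarded by a hardware semaphore: polling
 * NXE_SEM_ROM_LOCK with nxe_wait() takes it, and reading
 * NXE_SEM_ROM_UNLOCK drops it again. nxe_rom_lock() returns 0 on
 * success and 1 if the semaphore could not be taken.
 */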
int
nxe_rom_lock(struct nxe_softc *sc)
{
	if (!nxe_wait(sc, NXE_SEM_ROM_LOCK, 0xffffffff,
	    NXE_SEM_DONE, 10000))
		return (1);
	nxe_crb_write(sc, NXE_1_SW_ROM_LOCK_ID, NXE_1_SW_ROM_LOCK_ID);

	return (0);
}

void
nxe_rom_unlock(struct nxe_softc *sc)
{
	nxe_read(sc, NXE_SEM_ROM_UNLOCK);
}

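/*
 * Read one 32 bit word from the flash: take the ROM semaphore, program
 * the address and byte counts, issue the read opcode, poll for
 * completion, and fetch the result from the read data register.
 * Returns 0 on success, 1 on failure.
 */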
int
nxe_rom_read(struct nxe_softc *sc, u_int32_t r, u_int32_t *v)
{
	int rv = 1;

	DASSERT(sc->sc_window == 1);

	if (nxe_rom_lock(sc) != 0)
		return (1);

	/* set the rom address */
	nxe_crb_write(sc, NXE_1_ROM_ADDR, r);

	/* set the xfer len */
	nxe_crb_write(sc, NXE_1_ROM_ABYTE_CNT, 3);
	delay(100); /* used to prevent bursting on the chipset */
	nxe_crb_write(sc, NXE_1_ROM_DBYTE_CNT, 0);

	/* set opcode and wait for completion */
	nxe_crb_write(sc, NXE_1_ROM_OPCODE, NXE_1_ROM_OPCODE_READ);
	if (!nxe_crb_wait(sc, NXE_1_ROMUSB_STATUS, NXE_1_ROMUSB_STATUS_DONE,
	    NXE_1_ROMUSB_STATUS_DONE, 100))
		goto err;

	/* reset counters */
	nxe_crb_write(sc, NXE_1_ROM_ABYTE_CNT, 0);
	delay(100);
	nxe_crb_write(sc, NXE_1_ROM_DBYTE_CNT, 0);

	*v = nxe_crb_read(sc, NXE_1_ROM_RDATA);

	rv = 0;
err:
	nxe_rom_unlock(sc);
	return (rv);
}

int
nxe_rom_read_region(struct nxe_softc *sc, u_int32_t r, void *buf,
    size_t buflen)
{
	u_int32_t *databuf = buf;
	int i;

#ifdef NXE_DEBUG
	if ((buflen % 4) != 0)
		panic("nxe_rom_read_region: buflen is wrong (%zu)", buflen);
#endif

	buflen = buflen / 4;
	for (i = 0; i < buflen; i++) {
		if (nxe_rom_read(sc, r, &databuf[i]) != 0)
			return (1);

		r += sizeof(u_int32_t);
	}

	return (0);
}
