/* 3Com 3C90xB/C EtherLink driver, by D.C. van Moolenbroek */

#include <minix/drivers.h>
#include <minix/netdriver.h>

#include <machine/pci.h>
#include <sys/mman.h>
#include <assert.h>

#include "3c90x.h"

#define VERBOSE		0	/* verbose debugging output */

#if VERBOSE
#define XLBC_DEBUG(x)	printf x
#else
#define XLBC_DEBUG(x)
#endif

static struct {
	int hook_id;		/* IRQ hook ID */
	uint8_t *base;		/* base address of memory-mapped registers */
	uint32_t size;		/* size of memory-mapped register area */
	uint16_t window;	/* currently active register window */
	uint16_t filter;	/* packet receipt filter flags */

	xlbc_pd_t *dpd_base;	/* TX descriptor array, virtual address */
	phys_bytes dpd_phys;	/* TX descriptor array, physical address */
	uint8_t *txb_base;	/* transmission buffer, virtual address */
	phys_bytes txb_phys;	/* transmission buffer, physical address */
	xlbc_pd_t *upd_base;	/* RX descriptor array, virtual address */
	phys_bytes upd_phys;	/* RX descriptor array, physical address */
	uint8_t *rxb_base;	/* receipt buffers, virtual address */
	phys_bytes rxb_phys;	/* receipt buffers, physical address */

	unsigned int dpd_tail;	/* index of tail TX descriptor */
	unsigned int dpd_used;	/* number of in-use TX descriptors */
	size_t txb_tail;	/* index of tail TX byte in buffer */
	size_t txb_used;	/* number of in-use TX buffer bytes */
	unsigned int upd_head;	/* index of head RX descriptor */
} state;

enum xlbc_link_type {
	XLBC_LINK_DOWN,
	XLBC_LINK_UP,
	XLBC_LINK_UP_T_HD,
	XLBC_LINK_UP_T_FD,
	XLBC_LINK_UP_TX_HD,
	XLBC_LINK_UP_TX_FD
};

#define XLBC_READ_8(off)	(*(volatile uint8_t *)(state.base + (off)))
#define XLBC_READ_16(off)	(*(volatile uint16_t *)(state.base + (off)))
#define XLBC_READ_32(off)	(*(volatile uint32_t *)(state.base + (off)))
#define XLBC_WRITE_8(off, val)	\
	(*(volatile uint8_t *)(state.base + (off)) = (val))
#define XLBC_WRITE_16(off, val)	\
	(*(volatile uint16_t *)(state.base + (off)) = (val))
#define XLBC_WRITE_32(off, val)	\
	(*(volatile uint32_t *)(state.base + (off)) = (val))
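
/*
 * The volatile casts above keep the compiler from caching, eliding, or
 * reordering these register accesses.  No explicit memory barriers surround
 * them; the driver appears to rely on the strong ordering of uncached MMIO
 * on x86, using __insn_barrier() only where descriptor memory shared with
 * the device is concerned.
 */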

static int xlbc_init(unsigned int, netdriver_addr_t *, uint32_t *,
	unsigned int *);
static void xlbc_stop(void);
static void xlbc_set_mode(unsigned int, const netdriver_addr_t *,
	unsigned int);
static ssize_t xlbc_recv(struct netdriver_data *, size_t);
static int xlbc_send(struct netdriver_data *, size_t);
static void xlbc_intr(unsigned int);
static void xlbc_tick(void);

static const struct netdriver xlbc_table = {
	.ndr_name	= "xl",
	.ndr_init	= xlbc_init,
	.ndr_stop	= xlbc_stop,
	.ndr_set_mode	= xlbc_set_mode,
	.ndr_recv	= xlbc_recv,
	.ndr_send	= xlbc_send,
	.ndr_intr	= xlbc_intr,
	.ndr_tick	= xlbc_tick
};

/*
 * Find a matching PCI device.
 */
static int
xlbc_probe(unsigned int skip)
{
	uint16_t vid, did;
	int devind;
#if VERBOSE
	const char *dname;
#endif

	pci_init();

	if (pci_first_dev(&devind, &vid, &did) <= 0)
		return -1;

	while (skip--) {
		if (pci_next_dev(&devind, &vid, &did) <= 0)
			return -1;
	}

#if VERBOSE
	dname = pci_dev_name(vid, did);
	XLBC_DEBUG(("%s: found %s (%04x:%04x) at %s\n", netdriver_name(),
		dname ? dname : "<unknown>", vid, did, pci_slot_name(devind)));
#endif

	pci_reserve(devind);

	return devind;
}

/*
 * Issue a command to the command register.
 */
static void
xlbc_issue_cmd(uint16_t cmd)
{

	assert(!(XLBC_READ_16(XLBC_STATUS_REG) & XLBC_STATUS_IN_PROGRESS));

	XLBC_WRITE_16(XLBC_CMD_REG, cmd);
}

/*
 * Wait for a command to be acknowledged.  Return TRUE iff the command
 * completed within the timeout period.
 */
static int
xlbc_wait_cmd(void)
{
	spin_t spin;

	/*
	 * The documentation implies that a timeout of 1ms is an upper bound
	 * for all commands.
	 */
	SPIN_FOR(&spin, XLBC_CMD_TIMEOUT) {
		if (!(XLBC_READ_16(XLBC_STATUS_REG) & XLBC_STATUS_IN_PROGRESS))
			return TRUE;
	}

	return FALSE;
}

/*
 * Reset the device to its initial state.  Return TRUE iff successful.
 */
static int
xlbc_reset(void)
{

	(void)xlbc_wait_cmd();

	xlbc_issue_cmd(XLBC_CMD_GLOBAL_RESET);

	/*
	 * It appears that the "command in progress" bit may be cleared before
	 * the reset has completed, resulting in strange behavior afterwards.
	 * Thus, we wait for the maximum reset time (1ms) regardless first, and
	 * only then start checking the command-in-progress bit.
	 */
	micro_delay(XLBC_RESET_DELAY);

	if (!xlbc_wait_cmd())
		return FALSE;

	state.window = 0;

	return TRUE;
}

/*
 * Select a register window.
 */
static void
xlbc_select_window(unsigned int window)
{

	if (state.window == window)
		return;

	xlbc_issue_cmd(XLBC_CMD_SELECT_WINDOW | window);

	state.window = window;
}

/*
 * Read a word from the EEPROM.  On failure, return a value with all bits set.
 */
static uint16_t
xlbc_read_eeprom(unsigned int word)
{
	spin_t spin;

	/* The B revision supports 64 EEPROM words only. */
	assert(!(word & ~XLBC_EEPROM_CMD_ADDR));

	xlbc_select_window(XLBC_EEPROM_WINDOW);

	assert(!(XLBC_READ_16(XLBC_EEPROM_CMD_REG) & XLBC_EEPROM_CMD_BUSY));

	XLBC_WRITE_16(XLBC_EEPROM_CMD_REG, XLBC_EEPROM_CMD_READ | word);

	/* The documented maximum delay for reads is 162us. */
	SPIN_FOR(&spin, XLBC_EEPROM_TIMEOUT) {
		if (!(XLBC_READ_16(XLBC_EEPROM_CMD_REG) &
		    XLBC_EEPROM_CMD_BUSY))
			return XLBC_READ_16(XLBC_EEPROM_DATA_REG);
	}

	return (uint16_t)-1;
}

/*
 * Obtain the preconfigured hardware address of the device.
 */
static void
xlbc_get_hwaddr(netdriver_addr_t * addr)
{
	uint16_t word[3];

	/* TODO: allow overriding through environment variables */

	word[0] = xlbc_read_eeprom(XLBC_EEPROM_WORD_OEM_ADDR0);
	word[1] = xlbc_read_eeprom(XLBC_EEPROM_WORD_OEM_ADDR1);
	word[2] = xlbc_read_eeprom(XLBC_EEPROM_WORD_OEM_ADDR2);

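	/*
	 * Each EEPROM word supplies two address bytes, high byte first; for
	 * example, a first word of 0x0060 would yield an address starting
	 * with 00:60 (several 3Com OUIs begin with these two bytes).
	 */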
	addr->na_addr[0] = word[0] >> 8;
	addr->na_addr[1] = word[0] & 0xff;
	addr->na_addr[2] = word[1] >> 8;
	addr->na_addr[3] = word[1] & 0xff;
	addr->na_addr[4] = word[2] >> 8;
	addr->na_addr[5] = word[2] & 0xff;

	XLBC_DEBUG(("%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    netdriver_name(),
	    addr->na_addr[0], addr->na_addr[1], addr->na_addr[2],
	    addr->na_addr[3], addr->na_addr[4], addr->na_addr[5]));
}

/*
 * Configure the device to use the given hardware address.
 */
static void
xlbc_set_hwaddr(netdriver_addr_t * addr)
{

	xlbc_select_window(XLBC_STATION_WINDOW);

	/* Set station address. */
	XLBC_WRITE_16(XLBC_STATION_ADDR0_REG,
	    addr->na_addr[0] | (addr->na_addr[1] << 8));
	XLBC_WRITE_16(XLBC_STATION_ADDR1_REG,
	    addr->na_addr[2] | (addr->na_addr[3] << 8));
	XLBC_WRITE_16(XLBC_STATION_ADDR2_REG,
	    addr->na_addr[4] | (addr->na_addr[5] << 8));

	/* Set station mask. */
	XLBC_WRITE_16(XLBC_STATION_MASK0_REG, 0);
	XLBC_WRITE_16(XLBC_STATION_MASK1_REG, 0);
	XLBC_WRITE_16(XLBC_STATION_MASK2_REG, 0);
}

/*
 * Perform one-time initialization of various settings.
 */
static void
xlbc_init_once(void)
{
	uint16_t word;
	uint32_t dword;

	/*
	 * Verify the presence of a 10BASE-T or 100BASE-TX port.  Those are the
	 * only port types that are supported and have been tested so far.
	 */
	xlbc_select_window(XLBC_MEDIA_OPT_WINDOW);

	word = XLBC_READ_16(XLBC_MEDIA_OPT_REG);
	if (!(word & (XLBC_MEDIA_OPT_BASE_TX | XLBC_MEDIA_OPT_10_BT)))
		panic("no 100BASE-TX or 10BASE-T port on device");

	/* Initialize the device's internal configuration. */
	xlbc_select_window(XLBC_CONFIG_WINDOW);

	word = XLBC_READ_16(XLBC_CONFIG_WORD1_REG);
	word = (word & ~XLBC_CONFIG_XCVR_MASK) | XLBC_CONFIG_XCVR_AUTO;
	XLBC_WRITE_16(XLBC_CONFIG_WORD1_REG, word);

	/* Disable alternate upload and download sequences. */
	dword = XLBC_READ_32(XLBC_DMA_CTRL_REG);
	dword |= XLBC_DMA_CTRL_UP_NOALT | XLBC_DMA_CTRL_DN_NOALT;
	XLBC_WRITE_32(XLBC_DMA_CTRL_REG, dword);

	/* Specify in which status events we are interested. */
	xlbc_issue_cmd(XLBC_CMD_IND_ENABLE | XLBC_STATUS_MASK);

	/* Enable statistics, including support for counters' upper bits. */
	xlbc_select_window(XLBC_NET_DIAG_WINDOW);

	word = XLBC_READ_16(XLBC_NET_DIAG_REG);
	XLBC_WRITE_16(XLBC_NET_DIAG_REG, word | XLBC_NET_DIAG_UPPER);

	xlbc_issue_cmd(XLBC_CMD_STATS_ENABLE);
}

/*
 * Allocate memory for DMA.
 */
static void
xlbc_alloc_dma(void)
{

	/* Packet descriptors require 8-byte alignment. */
	assert(!(sizeof(xlbc_pd_t) % 8));

	/*
	 * For packet transmission, we use one single circular buffer in which
	 * we store packet data.  We do not split packets in two when the
	 * buffer wraps; instead we waste the trailing bytes and move on to the
	 * start of the buffer.  This allows us to use a single fragment for
	 * each transmitted packet, thus keeping the descriptors small (16
	 * bytes).  The descriptors themselves are allocated as a separate
	 * array.  There is obviously room for improvement here, but the
	 * approach should be good enough.
	 */
	state.dpd_base = alloc_contig(XLBC_DPD_COUNT * sizeof(xlbc_pd_t),
	    AC_ALIGN4K, &state.dpd_phys);
	state.txb_base = alloc_contig(XLBC_TXB_SIZE, 0, &state.txb_phys);

	if (state.dpd_base == NULL || state.txb_base == NULL)
		panic("unable to allocate memory for packet transmission");

	/*
	 * For packet receipt, we have a number of pairs of buffers and
	 * corresponding descriptors.  Each buffer is large enough to contain
	 * an entire packet.  We avoid wasting memory by allocating the buffers
	 * in one go, at the cost of requiring a large contiguous area.  The
	 * descriptors are allocated as a separate array, thus matching the
	 * scheme for transmission in terms of allocation strategy.  Here, too,
	 * there is clear room for improvement at the cost of extra complexity.
	 */
	state.upd_base = alloc_contig(XLBC_UPD_COUNT * sizeof(xlbc_pd_t),
	    AC_ALIGN4K, &state.upd_phys);
	state.rxb_base = alloc_contig(XLBC_UPD_COUNT * XLBC_MAX_PKT_LEN, 0,
	    &state.rxb_phys);

	if (state.upd_base == NULL || state.rxb_base == NULL)
		panic("unable to allocate memory for packet receipt");
}

/*
 * Reset the transmitter.
 */
static void
xlbc_reset_tx(void)
{

	xlbc_issue_cmd(XLBC_CMD_TX_RESET);
	if (!xlbc_wait_cmd())
		panic("timeout trying to reset transmitter");

	state.dpd_tail = 0;
	state.dpd_used = 0;
	state.txb_tail = 0;
	state.txb_used = 0;

	xlbc_issue_cmd(XLBC_CMD_TX_ENABLE);
}

/*
 * Reset the receiver.
 */
static void
xlbc_reset_rx(void)
{
	unsigned int i;

	xlbc_issue_cmd(XLBC_CMD_RX_RESET);
	if (!xlbc_wait_cmd())
		panic("timeout trying to reset receiver");

	xlbc_issue_cmd(XLBC_CMD_SET_FILTER | state.filter);

	for (i = 0; i < XLBC_UPD_COUNT; i++) {
		state.upd_base[i].next = state.upd_phys +
		    ((i + 1) % XLBC_UPD_COUNT) * sizeof(xlbc_pd_t);
		state.upd_base[i].flags = 0;
		state.upd_base[i].addr = state.rxb_phys + i * XLBC_MAX_PKT_LEN;
		state.upd_base[i].len = XLBC_LEN_LAST | XLBC_MAX_PKT_LEN;
	}
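
	/*
	 * The modulo arithmetic above links the last descriptor back to the
	 * first, so the descriptors form a ring with no end.  A descriptor
	 * becomes reusable once the host has processed it and cleared its
	 * flags again (see xlbc_recv).
	 */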

	XLBC_WRITE_32(XLBC_UP_LIST_PTR_REG, state.upd_phys);

	state.upd_head = 0;

	__insn_barrier();

	xlbc_issue_cmd(XLBC_CMD_RX_ENABLE);
}

/*
 * Execute a MII read, write, or Z cycle.  Stop the clock, wait, start the
 * clock, optionally change direction and/or data bits, and wait again.
 */
static uint16_t
xlbc_mii_cycle(uint16_t val, uint16_t mask, uint16_t bits)
{

	val &= ~XLBC_PHYS_MGMT_CLK;
	XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

	/* All the delays should be 200ns minimum. */
	micro_delay(XLBC_MII_DELAY);

	/* The clock must be enabled separately from other bit updates. */
	val |= XLBC_PHYS_MGMT_CLK;
	XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

	if (mask != 0) {
		val = (val & ~mask) | bits;
		XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);
	}

	micro_delay(XLBC_MII_DELAY);

	return val;
}

/*
 * Read a MII register.
 */
static uint16_t
xlbc_mii_read(uint16_t phy, uint16_t reg)
{
	uint32_t dword;
	uint16_t val;
	int i;

	xlbc_select_window(XLBC_PHYS_MGMT_WINDOW);

	/* Set the direction to write. */
	val = XLBC_READ_16(XLBC_PHYS_MGMT_REG) | XLBC_PHYS_MGMT_DIR;

	XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

	/* Execute write cycles to submit the preamble: PR=1..1 (32 bits) */
	for (i = 0; i < 32; i++)
		val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
		    XLBC_PHYS_MGMT_DATA);

	/* Execute write cycles to submit the rest of the read frame. */
	/* ST=01 OP=10 PHYAD=aaaaa REGAD=rrrrr */
	dword = 0x1800 | (phy << 5) | reg;
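
	/*
	 * Layout check: 0x1800 sets bits 12 and 11 of this 14-bit frame, so
	 * bits 13..10 read 0110: ST=01 and OP=10 (read).  PHYAD occupies
	 * bits 9..5 and REGAD bits 4..0, matching the shifts above.
	 */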

	for (i = 13; i >= 0; i--)
		val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
		    ((dword >> i) & 1) ? XLBC_PHYS_MGMT_DATA : 0);

	/* Execute a Z cycle to set the direction to read. */
	val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DIR, 0);

	dword = 0;

	/* Receive one status bit and 16 actual data bits. */
	for (i = 16; i >= 0; i--) {
		(void)xlbc_mii_cycle(val, 0, 0);

		val = XLBC_READ_16(XLBC_PHYS_MGMT_REG);

		dword = (dword << 1) | !!(val & XLBC_PHYS_MGMT_DATA);

		micro_delay(XLBC_MII_DELAY);
	}

	/* Execute a Z cycle to terminate the read frame. */
	(void)xlbc_mii_cycle(val, 0, 0);

	/* If the status bit was set, the results are invalid. */
	if (dword & 0x10000)
		dword = 0xffff;

	return (uint16_t)dword;
}

/*
 * Write a MII register.
 */
static void
xlbc_mii_write(uint16_t phy, uint16_t reg, uint16_t data)
{
	uint32_t dword;
	uint16_t val;
	int i;

	xlbc_select_window(XLBC_PHYS_MGMT_WINDOW);

	/* Set the direction to write. */
	val = XLBC_READ_16(XLBC_PHYS_MGMT_REG) | XLBC_PHYS_MGMT_DIR;

	XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

	/* Execute write cycles to submit the preamble: PR=1..1 (32 bits) */
	for (i = 0; i < 32; i++)
		val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
		    XLBC_PHYS_MGMT_DATA);

	/* Execute write cycles to submit the rest of the write frame. */
	/* ST=01 OP=01 PHYAD=aaaaa REGAD=rrrrr TA=10 DATA=d..d (16 bits) */
	dword = 0x50020000 | (phy << 23) | (reg << 18) | data;
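
	/*
	 * Layout check: 0x50020000 has bits 30, 28, and 17 set, yielding
	 * ST=01 (bits 31..30), OP=01 (bits 29..28), and TA=10 (bits 17..16);
	 * PHYAD, REGAD, and DATA fill bits 27..23, 22..18, and 15..0.
	 */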

	for (i = 31; i >= 0; i--)
		val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
		    ((dword >> i) & 1) ? XLBC_PHYS_MGMT_DATA : 0);

	/* Execute a Z cycle to terminate the write frame. */
	(void)xlbc_mii_cycle(val, 0, 0);
}

/*
 * Return a human-readable description for the given link type.
 */
#if VERBOSE
static const char *
xlbc_get_link_name(enum xlbc_link_type link_type)
{

	switch (link_type) {
	case XLBC_LINK_DOWN:		return "down";
	case XLBC_LINK_UP:		return "up";
	case XLBC_LINK_UP_T_HD:		return "up (10Mbps, half duplex)";
	case XLBC_LINK_UP_T_FD:		return "up (10Mbps, full duplex)";
	case XLBC_LINK_UP_TX_HD:	return "up (100Mbps, half duplex)";
	case XLBC_LINK_UP_TX_FD:	return "up (100Mbps, full duplex)";
	default:			return "(unknown)";
	}
}
#endif /* VERBOSE */

/*
 * Determine the current link status, and return the resulting link type.
 */
static enum xlbc_link_type
xlbc_get_link_type(void)
{
	uint16_t status, control, mask;

	xlbc_select_window(XLBC_MEDIA_STS_WINDOW);

	if (!(XLBC_READ_16(XLBC_MEDIA_STS_REG) & XLBC_MEDIA_STS_LINK_DET))
		return XLBC_LINK_DOWN;

	status = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_STATUS);
	if (!(status & XLBC_MII_STATUS_EXTCAP))
		return XLBC_LINK_UP;
	if (!(status & XLBC_MII_STATUS_AUTONEG))
		return XLBC_LINK_UP;

	/* Wait for auto-negotiation to complete first. */
	if (!(status & XLBC_MII_STATUS_COMPLETE)) {
		control = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_CONTROL);
		control |= XLBC_MII_CONTROL_AUTONEG;
		xlbc_mii_write(XLBC_PHY_ADDR, XLBC_MII_CONTROL, control);

		SPIN_UNTIL(xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_STATUS) &
		    XLBC_MII_STATUS_COMPLETE, XLBC_AUTONEG_TIMEOUT);

		status = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_STATUS);
		if (!(status & XLBC_MII_STATUS_COMPLETE))
			return XLBC_LINK_UP;
	}

	/* The highest bit set in both registers is the selected link type. */
	mask = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_AUTONEG_ADV) &
	    xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_LP_ABILITY);

	if (mask & XLBC_MII_LINK_TX_FD)
		return XLBC_LINK_UP_TX_FD;
	if (mask & XLBC_MII_LINK_TX_HD)
		return XLBC_LINK_UP_TX_HD;
	if (mask & XLBC_MII_LINK_T_FD)
		return XLBC_LINK_UP_T_FD;
	if (mask & XLBC_MII_LINK_T_HD)
		return XLBC_LINK_UP_T_HD;

	return XLBC_LINK_UP;
}

/*
 * Set the duplex mode to full or half, based on the current link type.
 */
static void
xlbc_set_duplex(enum xlbc_link_type link)
{
	uint16_t word;
	int duplex;

	/*
	 * If the link is down, do not change modes.  In fact, the link may go
	 * down as a result of the reset that is part of changing the mode.
	 */
	if (link == XLBC_LINK_DOWN)
		return;

	/* See if the desired duplex mode differs from the current mode. */
	duplex = (link == XLBC_LINK_UP_T_FD || link == XLBC_LINK_UP_TX_FD);

	xlbc_select_window(XLBC_MAC_CTRL_WINDOW);

	word = XLBC_READ_16(XLBC_MAC_CTRL_REG);

	if (!!(word & XLBC_MAC_CTRL_ENA_FD) == duplex)
		return; /* already in the desired mode */

	/*
	 * Change duplex mode.  Unfortunately, that also means we need to
	 * reset the RX and TX engines.  Fortunately, this should happen only
	 * on a link change, so we're probably not doing much extra damage.
	 * TODO: recovery for packets currently on the transmission queue.
	 */
	XLBC_DEBUG(("%s: %s full-duplex mode\n", netdriver_name(),
	    duplex ? "setting" : "clearing"));

	XLBC_WRITE_16(XLBC_MAC_CTRL_REG, word ^ XLBC_MAC_CTRL_ENA_FD);

	xlbc_reset_rx();

	xlbc_reset_tx();
}

/*
 * The link status has changed.
 */
static void
xlbc_link_event(void)
{
	enum xlbc_link_type link_type;

	/*
	 * The 3c90xB is documented to require a read from the internal
	 * auto-negotiation expansion MII register in order to clear the link
	 * event interrupt.  The 3c90xC resets the link event interrupt as part
	 * of automatic interrupt acknowledgment.
	 */
	(void)xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_AUTONEG_EXP);

	link_type = xlbc_get_link_type();

#if VERBOSE
	XLBC_DEBUG(("%s: link %s\n", netdriver_name(),
	    xlbc_get_link_name(link_type)));
#endif

	xlbc_set_duplex(link_type);
}

/*
 * Initialize the device.
 */
static void
xlbc_init_hw(int devind, netdriver_addr_t * addr)
{
	uint32_t bar;
	uint16_t cr;
	int r, io, irq;

	/* Map in the device's memory-mapped registers. */
	if ((r = pci_get_bar(devind, PCI_BAR_2, &bar, &state.size, &io)) != OK)
		panic("unable to retrieve bar: %d", r);

	if (state.size < XLBC_MIN_REG_SIZE || io)
		panic("invalid register bar");

	state.base = vm_map_phys(SELF, (void *)bar, state.size);
	if (state.base == MAP_FAILED)
		panic("unable to map in registers");

	/* Reset the device to a known initial state. */
	if (!xlbc_reset())
		panic("unable to reset hardware");

	/* Now that the device is reset, enable bus mastering if needed. */
	cr = pci_attr_r8(devind, PCI_CR);
	if (!(cr & PCI_CR_MAST_EN))
		pci_attr_w8(devind, PCI_CR, cr | PCI_CR_MAST_EN);

	/* Obtain and apply the hardware address. */
	xlbc_get_hwaddr(addr);

	xlbc_set_hwaddr(addr);

	/* Perform various one-time initialization actions. */
	xlbc_init_once();

	/* Allocate memory for DMA. */
	xlbc_alloc_dma();

	/* Initialize the transmitter. */
	xlbc_reset_tx();

	/* Initialize the receiver. */
	state.filter = XLBC_FILTER_STATION;

	xlbc_reset_rx();

	/* Enable interrupts. */
	irq = pci_attr_r8(devind, PCI_ILR);
	state.hook_id = 0;

	if ((r = sys_irqsetpolicy(irq, 0, &state.hook_id)) != OK)
		panic("unable to register IRQ: %d", r);

	if ((r = sys_irqenable(&state.hook_id)) != OK)
		panic("unable to enable IRQ: %d", r);

	xlbc_issue_cmd(XLBC_CMD_INT_ENABLE | XLBC_STATUS_MASK);

	/*
	 * We will probably get a link event anyway, but trigger one now in
	 * case that does not happen.  The main purpose of this call is to
	 * set the right duplex mode.
	 */
	xlbc_link_event();
}

/*
 * Initialize the 3c90x driver and device.
 */
static int
xlbc_init(unsigned int instance, netdriver_addr_t * addr, uint32_t * caps,
	unsigned int * ticks)
{
	int devind;

	memset(&state, 0, sizeof(state));

	/* Try to find a recognized device. */
	if ((devind = xlbc_probe(instance)) < 0)
		return ENXIO;

	/* Initialize the device. */
	xlbc_init_hw(devind, addr);

	*caps = NDEV_CAP_MCAST | NDEV_CAP_BCAST;
	*ticks = sys_hz() / 10; /* update statistics 10x/sec */
	return OK;
}

/*
 * Stop the device.  The main purpose is to stop any ongoing and future DMA.
 */
static void
xlbc_stop(void)
{

	/* A full reset ought to do it. */
	(void)xlbc_reset();
}

/*
 * Set packet receipt mode.
 */
static void
xlbc_set_mode(unsigned int mode, const netdriver_addr_t * mcast_list __unused,
	unsigned int mcast_count __unused)
{

	state.filter = XLBC_FILTER_STATION;

	if (mode & (NDEV_MODE_MCAST_LIST | NDEV_MODE_MCAST_ALL))
		state.filter |= XLBC_FILTER_MULTI;
	if (mode & NDEV_MODE_BCAST)
		state.filter |= XLBC_FILTER_BROAD;
	if (mode & NDEV_MODE_PROMISC)
		state.filter |= XLBC_FILTER_PROMISC;

	xlbc_issue_cmd(XLBC_CMD_SET_FILTER | state.filter);
}

/*
 * Try to receive a packet.
 */
static ssize_t
xlbc_recv(struct netdriver_data * data, size_t max)
{
	uint32_t flags;
	uint8_t *ptr;
	unsigned int head;
	size_t len;

	head = state.upd_head;
	flags = *(volatile uint32_t *)&state.upd_base[head].flags;

	/*
	 * The documentation implies, but does not state, that UP_COMPLETE is
	 * set whenever UP_ERROR is.  We rely exclusively on UP_COMPLETE.
	 */
	if (!(flags & XLBC_UP_COMPLETE))
		return SUSPEND;

	if (flags & XLBC_UP_ERROR) {
		XLBC_DEBUG(("%s: received error\n", netdriver_name()));

		netdriver_stat_ierror(1);

		len = 0; /* immediately move on to the next descriptor */
	} else {
		len = flags & XLBC_UP_LEN;

		XLBC_DEBUG(("%s: received packet (size %zu)\n",
		    netdriver_name(), len));

		/* The device is not supposed to give us runt frames. */
		assert(len >= XLBC_MIN_PKT_LEN);

		/* Truncate large packets. */
		if (flags & XLBC_UP_OVERFLOW)
			len = XLBC_MAX_PKT_LEN;
		if (len > max)
			len = max;

		ptr = state.rxb_base + head * XLBC_MAX_PKT_LEN;

		netdriver_copyout(data, 0, ptr, len);
	}

	/* Mark the descriptor as ready for reuse. */
	*(volatile uint32_t *)&state.upd_base[head].flags = 0;

	/*
	 * At this point, the receive engine may have stalled as a result of
	 * filling up all descriptors.  Now that we have a free descriptor, we
	 * can restart it.  As per the documentation, we unstall blindly.
	 */
	xlbc_issue_cmd(XLBC_CMD_UP_UNSTALL);

	/* Advance to the next descriptor in our ring. */
	state.upd_head = (head + 1) % XLBC_UPD_COUNT;

	return len;
}

/*
 * Return how much padding (if any) must be prepended to a packet of the given
 * size so that it does not have to be split due to wrapping.  The given offset
 * is the starting point of the packet; this may be beyond the transmission
 * buffer size in the case that the current buffer contents already wrap.
 */
static size_t
xlbc_pad_tx(size_t off, size_t size)
{

	if (off < XLBC_TXB_SIZE && off + size >= XLBC_TXB_SIZE)
		return XLBC_TXB_SIZE - off;
	else
		return 0;
}
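
/*
 * Worked example (with a hypothetical XLBC_TXB_SIZE of 16384): a 1000-byte
 * packet starting at offset 16000 would cross the end of the buffer, so the
 * 384 trailing bytes are wasted as padding and the packet is stored at
 * offset 0 instead.  A packet starting at offset 17000 already lies past the
 * wrap point, so no padding is needed.
 */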

/*
 * Try to send a packet.
 */
static int
xlbc_send(struct netdriver_data * data, size_t size)
{
	size_t used, off, left;
	unsigned int head, last;
	uint32_t phys;

	/* We need a free transmission descriptor. */
	if (state.dpd_used == XLBC_DPD_COUNT)
		return SUSPEND;

	/*
	 * See if we can fit the packet in the circular transmission buffer.
	 * The packet must not be split into two parts when the buffer wraps.
	 */
	used = state.txb_used;
	used += xlbc_pad_tx(state.txb_tail + used, size);
	left = XLBC_TXB_SIZE - used;

	if (left < size)
		return SUSPEND;

	XLBC_DEBUG(("%s: transmitting packet (size %zu)\n",
	    netdriver_name(), size));

	/* Copy in the packet. */
	off = (state.txb_tail + used) % XLBC_TXB_SIZE;
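
	/*
	 * Note that 'used' already includes any padding computed above: when
	 * padding was added to avoid a wrap, state.txb_tail + used equals
	 * XLBC_TXB_SIZE exactly, so the packet is stored at offset zero.
	 */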

	netdriver_copyin(data, 0, &state.txb_base[off], size);

	/* Set up a descriptor for the packet. */
	head = (state.dpd_tail + state.dpd_used) % XLBC_DPD_COUNT;

	state.dpd_base[head].next = 0;
	state.dpd_base[head].flags = XLBC_DN_RNDUP_WORD | XLBC_DN_DN_INDICATE;
	state.dpd_base[head].addr = state.txb_phys + off;
	state.dpd_base[head].len = XLBC_LEN_LAST | size;

	phys = state.dpd_phys + head * sizeof(xlbc_pd_t);

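	/*
	 * Compiler barrier: make sure the descriptor stores above are not
	 * reordered past the writes below that make the descriptor visible
	 * to the device, either through the download list pointer register
	 * or through the previous descriptor's next field.
	 */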
	__insn_barrier();

	/* We need to stall only if other packets were already pending. */
	if (XLBC_READ_32(XLBC_DN_LIST_PTR_REG) != 0) {
		assert(state.dpd_used > 0);

		xlbc_issue_cmd(XLBC_CMD_DN_STALL);
		if (!xlbc_wait_cmd())
			panic("timeout trying to stall downloads");

		last = (state.dpd_tail + state.dpd_used - 1) % XLBC_DPD_COUNT;
		state.dpd_base[last].next = phys;
		/*
		 * Group interrupts a bit: clearing the indication bit on the
		 * previous descriptor trades fewer download-complete
		 * interrupts for slightly later reclamation of descriptors
		 * and buffer space.
		 */
		state.dpd_base[last].flags &= ~XLBC_DN_DN_INDICATE;

		if (XLBC_READ_32(XLBC_DN_LIST_PTR_REG) == 0)
			XLBC_WRITE_32(XLBC_DN_LIST_PTR_REG, phys);

		xlbc_issue_cmd(XLBC_CMD_DN_UNSTALL);
	} else
		XLBC_WRITE_32(XLBC_DN_LIST_PTR_REG, phys);

	/* Advance internal queue heads. */
	state.dpd_used++;

	state.txb_used = used + size;
	assert(state.txb_used <= XLBC_TXB_SIZE);

	return OK;
}

/*
 * One or more packets have been downloaded.  Free up the corresponding
 * descriptors for later reuse.
 */
static void
xlbc_advance_tx(void)
{
	uint32_t flags, len;

	while (state.dpd_used > 0) {
		flags = *(volatile uint32_t *)
		    &state.dpd_base[state.dpd_tail].flags;

		if (!(flags & XLBC_DN_DN_COMPLETE))
			break;

		XLBC_DEBUG(("%s: packet copied to transmitter\n",
		    netdriver_name()));

		len = state.dpd_base[state.dpd_tail].len & ~XLBC_LEN_LAST;

		state.dpd_tail = (state.dpd_tail + 1) % XLBC_DPD_COUNT;
		state.dpd_used--;

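		/*
		 * If wrap padding was inserted ahead of this packet at send
		 * time, account for it here as well, so that the tail skips
		 * the wasted trailing bytes of the buffer.
		 */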
		len += xlbc_pad_tx(state.txb_tail, len);
		assert(state.txb_used >= len);

		state.txb_tail = (state.txb_tail + len) % XLBC_TXB_SIZE;
		state.txb_used -= len;
	}
}

/*
 * A transmission error has occurred.  Restart, and if necessary even reset,
 * the transmitter.
 */
static void
xlbc_recover_tx(void)
{
	uint8_t status;
	int enable, reset;

	enable = reset = FALSE;

	while ((status = XLBC_READ_8(XLBC_TX_STATUS_REG)) &
	    XLBC_TX_STATUS_COMPLETE) {
		XLBC_DEBUG(("%s: transmission error (0x%02x)\n",
		    netdriver_name(), status));

		/* This is an internal (non-packet) error status. */
		if (status & XLBC_TX_STATUS_OVERFLOW)
			enable = TRUE;

		if (status & XLBC_TX_STATUS_MAX_COLL) {
			netdriver_stat_coll(1);
			enable = TRUE;
		}
		if (status &
		    (XLBC_TX_STATUS_UNDERRUN | XLBC_TX_STATUS_JABBER)) {
			netdriver_stat_oerror(1);
			reset = TRUE;
		}

		XLBC_WRITE_8(XLBC_TX_STATUS_REG, status);
	}

	if (reset) {
		/*
		 * Below is the documented Underrun Recovery procedure.  We use
		 * it for jabber errors as well, because there is no indication
		 * that another procedure should be followed for that case.
		 */
		xlbc_issue_cmd(XLBC_CMD_DN_STALL);
		if (!xlbc_wait_cmd())
			panic("download stall timeout during recovery");

		SPIN_UNTIL(!(XLBC_READ_32(XLBC_DMA_CTRL_REG) &
		    XLBC_DMA_CTRL_DN_INPROG), XLBC_CMD_TIMEOUT);

		xlbc_select_window(XLBC_MEDIA_STS_WINDOW);

		SPIN_UNTIL(!(XLBC_READ_16(XLBC_MEDIA_STS_REG) &
		    XLBC_MEDIA_STS_TX_INPROG), XLBC_CMD_TIMEOUT);

		xlbc_issue_cmd(XLBC_CMD_TX_RESET);
		if (!xlbc_wait_cmd())
			panic("transmitter reset timeout during recovery");

		xlbc_issue_cmd(XLBC_CMD_TX_ENABLE);

		XLBC_WRITE_32(XLBC_DN_LIST_PTR_REG,
		    state.dpd_phys + state.dpd_tail * sizeof(xlbc_pd_t));

		XLBC_DEBUG(("%s: performed recovery\n", netdriver_name()));
	} else if (enable)
		xlbc_issue_cmd(XLBC_CMD_TX_ENABLE);
}

/*
 * Update statistics.  We read all registers, not just the ones we are
 * interested in, so as to limit the number of useless statistics interrupts.
 */
static void
xlbc_update_stats(void)
{

	xlbc_select_window(XLBC_STATS_WINDOW);

	(void)XLBC_READ_8(XLBC_CARRIER_LOST_REG);
	(void)XLBC_READ_8(XLBC_SQE_ERR_REG);
	netdriver_stat_coll(XLBC_READ_8(XLBC_MULTI_COLL_REG));
	netdriver_stat_coll(XLBC_READ_8(XLBC_SINGLE_COLL_REG));
	netdriver_stat_coll(XLBC_READ_8(XLBC_LATE_COLL_REG));
	netdriver_stat_ierror(XLBC_READ_8(XLBC_RX_OVERRUNS_REG));
	(void)XLBC_READ_8(XLBC_FRAMES_DEFERRED_REG);

	(void)XLBC_READ_8(XLBC_UPPER_FRAMES_REG);
	(void)XLBC_READ_8(XLBC_FRAMES_XMIT_OK_REG);
	(void)XLBC_READ_8(XLBC_FRAMES_RCVD_OK_REG);

	(void)XLBC_READ_16(XLBC_BYTES_RCVD_OK_REG);
	(void)XLBC_READ_16(XLBC_BYTES_XMIT_OK_REG);

	xlbc_select_window(XLBC_SSD_STATS_WINDOW);

	(void)XLBC_READ_8(XLBC_BAD_SSD_REG);
}

/*
 * Process an interrupt.
 */
static void
xlbc_intr(unsigned int __unused mask)
{
	uint32_t val;
	int r;

	/*
	 * Get interrupt mask.  Acknowledge some interrupts, and disable all
	 * interrupts as automatic side effect.  The assumption is that any new
	 * events are stored as indications which are then translated into
	 * interrupts as soon as interrupts are reenabled, but this is not
	 * documented explicitly.
	 */
	val = XLBC_READ_16(XLBC_STATUS_AUTO_REG);

	XLBC_DEBUG(("%s: interrupt (0x%04x)\n", netdriver_name(), val));

	if (val & XLBC_STATUS_UP_COMPLETE)
		netdriver_recv();

	if (val & (XLBC_STATUS_DN_COMPLETE | XLBC_STATUS_TX_COMPLETE))
		xlbc_advance_tx();

	if (val & XLBC_STATUS_TX_COMPLETE)
		xlbc_recover_tx();

	if (val & XLBC_STATUS_HOST_ERROR) {
		/*
		 * A catastrophic host error has occurred.  Reset both the
		 * transmitter and the receiver.  This should be enough to
		 * clear the host error, but may be overkill in the cases where
		 * the error direction (TX or RX) can be clearly identified.
		 * Since this entire condition is effectively untestable, we
		 * do not even try to be smart about it.
		 */
		XLBC_DEBUG(("%s: host error, performing reset\n",
		    netdriver_name()));

		xlbc_reset_tx();

		xlbc_reset_rx();

		/* If this has not resolved the problem, restart the driver. */
		if (XLBC_READ_16(XLBC_STATUS_REG) & XLBC_STATUS_HOST_ERROR)
			panic("host error not cleared");
	}

	if (val & XLBC_STATUS_UPDATE_STATS)
		xlbc_update_stats();

	if (val & XLBC_STATUS_LINK_EVENT)
		xlbc_link_event();

	/* See if we should try to send more packets. */
	if (val & (XLBC_STATUS_DN_COMPLETE | XLBC_STATUS_TX_COMPLETE |
	    XLBC_STATUS_HOST_ERROR))
		netdriver_send();

	/* Reenable interrupts. */
	if ((r = sys_irqenable(&state.hook_id)) != OK)
		panic("unable to reenable IRQ: %d", r);

	xlbc_issue_cmd(XLBC_CMD_INT_ENABLE | XLBC_STATUS_MASK);
}

/*
 * Do regular processing.
 */
static void
xlbc_tick(void)
{

	xlbc_update_stats();
}

/*
 * The 3c90x ethernet driver.
 */
int
main(int argc, char ** argv)
{

	env_setargs(argc, argv);

	netdriver_task(&xlbc_table);

	return EXIT_SUCCESS;
}