xref: /openbsd/sys/dev/ic/fxp.c (revision 91f110e0)
1 /*	$OpenBSD: fxp.c,v 1.115 2013/12/28 03:34:59 deraadt Exp $	*/
2 /*	$NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, David Greenman
6  * All rights reserved.
7  *
8  * Modifications to support NetBSD:
9  * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice unmodified, this list of conditions, and the following
16  *    disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp
34  */
35 
36 /*
37  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
38  */
39 
40 #include "bpfilter.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/syslog.h>
49 #include <sys/timeout.h>
50 
51 #include <net/if.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 
56 #ifdef INET
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/ip.h>
60 #endif
61 
62 #if NBPFILTER > 0
63 #include <net/bpf.h>
64 #endif
65 
66 #include <sys/ioctl.h>
67 #include <sys/errno.h>
68 #include <sys/device.h>
69 
70 #include <netinet/if_ether.h>
71 
72 #include <machine/cpu.h>
73 #include <machine/bus.h>
74 #include <machine/intr.h>
75 
76 #include <dev/mii/miivar.h>
77 
78 #include <dev/ic/fxpreg.h>
79 #include <dev/ic/fxpvar.h>
80 
81 /*
82  * NOTE!  On the Alpha, we have an alignment constraint.  The
83  * card DMAs the packet immediately following the RFA.  However,
84  * the first thing in the packet is a 14-byte Ethernet header.
85  * This means that the packet is misaligned.  To compensate,
86  * we actually offset the RFA 2 bytes into the cluster.  This
87  * aligns the packet after the Ethernet header at a 32-bit
88  * boundary.  HOWEVER!  This means that the RFA is misaligned!
89  */
90 #define	RFA_ALIGNMENT_FUDGE	(2 + sizeof(bus_dmamap_t *))
91 
92 /*
93  * Inline function to copy a 16-bit aligned 32-bit quantity.
94  */
95 static __inline void fxp_lwcopy(volatile u_int32_t *,
96 	volatile u_int32_t *);
97 
98 static __inline void
99 fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
100 {
101 	volatile u_int16_t *a = (u_int16_t *)src;
102 	volatile u_int16_t *b = (u_int16_t *)dst;
103 
104 	b[0] = a[0];
105 	b[1] = a[1];
106 }
107 
108 /*
109  * Template for default configuration parameters.
110  * See struct fxp_cb_config for the bit definitions.
111  * Note, cb_command is filled in later.
112  */
/*
 * Byte-for-byte image of the default configure command parameters;
 * fxp_init()'s configure path copies this into the config CB.
 * The index comments give the byte offset within the parameter area.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command (filled in at use) */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 Byte count. */
	0x08,	/*  1 Fifo limit */
	0x00,	/*  2 Adaptive ifs */
	0x00,	/*  3 ctrl0 */
	0x00,	/*  4 rx_dma_bytecount */
	0x80,	/*  5 tx_dma_bytecount */
	0xb2,	/*  6 ctrl 1 */
	0x03,	/*  7 ctrl 2 */
	0x01,	/*  8 mediatype */
	0x00,	/*  9 void2 */
	0x26,	/* 10 ctrl3 */
	0x00,	/* 11 linear priority */
	0x60,	/* 12 interfrm_spacing */
	0x00,	/* 13 void31 */
	0xf2,	/* 14 void32 */
	0x48,	/* 15 promiscuous */
	0x00,	/* 16 void41 */
	0x40,	/* 17 void42 */
	0xf3,	/* 18 stripping */
	0x00,	/* 19 fdx_pin */
	0x3f,	/* 20 multi_ia */
	0x05	/* 21 mc_all */
};
140 
141 void fxp_eeprom_shiftin(struct fxp_softc *, int, int);
142 void fxp_eeprom_putword(struct fxp_softc *, int, u_int16_t);
143 void fxp_write_eeprom(struct fxp_softc *, u_short *, int, int);
144 int fxp_mediachange(struct ifnet *);
145 void fxp_mediastatus(struct ifnet *, struct ifmediareq *);
146 void fxp_scb_wait(struct fxp_softc *);
147 void fxp_start(struct ifnet *);
148 int fxp_ioctl(struct ifnet *, u_long, caddr_t);
149 void fxp_load_ucode(struct fxp_softc *);
150 void fxp_watchdog(struct ifnet *);
151 int fxp_add_rfabuf(struct fxp_softc *, struct mbuf *);
152 int fxp_mdi_read(struct device *, int, int);
153 void fxp_mdi_write(struct device *, int, int, int);
154 void fxp_autosize_eeprom(struct fxp_softc *);
155 void fxp_statchg(struct device *);
156 void fxp_read_eeprom(struct fxp_softc *, u_int16_t *,
157 				    int, int);
158 void fxp_stats_update(void *);
159 void fxp_mc_setup(struct fxp_softc *, int);
160 void fxp_scb_cmd(struct fxp_softc *, u_int16_t);
161 
162 /*
163  * Set initial transmit threshold at 64 (512 bytes). This is
164  * increased by 64 (512 bytes) at a time, to maximum of 192
165  * (1536 bytes), if an underrun occurs.
166  */
167 static int tx_threshold = 64;
168 
169 /*
170  * Interrupts coalescing code params
171  */
172 int fxp_int_delay = FXP_INT_DELAY;
173 int fxp_bundle_max = FXP_BUNDLE_MAX;
174 int fxp_min_size_mask = FXP_MIN_SIZE_MASK;
175 
176 /*
177  * TxCB list index mask. This is used to do list wrap-around.
178  */
179 #define FXP_TXCB_MASK	(FXP_NTXCB - 1)
180 
181 /*
182  * Maximum number of seconds that the receiver can be idle before we
183  * assume it's dead and attempt to reset it by reprogramming the
184  * multicast filter. This is part of a work-around for a bug in the
185  * NIC. See fxp_stats_update().
186  */
187 #define FXP_MAX_RX_IDLE	15
188 
189 /*
190  * Wait for the previous command to be accepted (but not necessarily
191  * completed).
192  */
193 void
194 fxp_scb_wait(struct fxp_softc *sc)
195 {
196 	int i = FXP_CMD_TMO;
197 
198 	while ((CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff) && --i)
199 		DELAY(2);
200 	if (i == 0)
201 		printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname);
202 }
203 
204 void
205 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
206 {
207 	u_int16_t reg;
208 	int x;
209 
210 	/*
211 	 * Shift in data.
212 	 */
213 	for (x = 1 << (length - 1); x; x >>= 1) {
214 		if (data & x)
215 			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
216 		else
217 			reg = FXP_EEPROM_EECS;
218 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
219 		DELAY(1);
220 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
221 		DELAY(1);
222 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
223 		DELAY(1);
224 	}
225 }
226 
/*
 * Write one 16-bit word to the serial EEPROM at the given word offset.
 * Sequence: enable writes, shift in the write opcode/address/data,
 * poll EEDO until the part signals completion, then disable writes.
 * NOTE(review): the 0x4 / 0x03<<(size-2) patterns look like the
 * standard 93Cxx EWEN/EWDS opcodes — confirm against the EEPROM
 * datasheet if modifying.
 */
void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up (EEDO goes high); give up after
	 * ~50ms worth of polls rather than hanging forever.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
270 
271 void
272 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
273 {
274 	int i;
275 
276 	for (i = 0; i < words; i++)
277 		fxp_eeprom_putword(sc, offset + i, data[i]);
278 }
279 
280 /*************************************************************
281  * Operating system-specific autoconfiguration glue
282  *************************************************************/
283 
/* Autoconf driver definition: "fxp" is a network interface device. */
struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};
287 
288 int
289 fxp_activate(struct device *self, int act)
290 {
291 	struct fxp_softc *sc = (struct fxp_softc *)self;
292 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
293 	int rv = 0;
294 
295 	switch (act) {
296 	case DVACT_SUSPEND:
297 		if (ifp->if_flags & IFF_RUNNING)
298 			fxp_stop(sc, 1, 0);
299 		rv = config_activate_children(self, act);
300 		break;
301 	case DVACT_WAKEUP:
302 		if (ifp->if_flags & IFF_UP)
303 			fxp_wakeup(sc);
304 		break;
305 	default:
306 		rv = config_activate_children(self, act);
307 		break;
308 	}
309 	return (rv);
310 }
311 
312 void
313 fxp_wakeup(struct fxp_softc *sc)
314 {
315 	int s = splnet();
316 
317 	/* force reload of the microcode */
318 	sc->sc_flags &= ~FXPF_UCODELOADED;
319 
320 	fxp_init(sc);
321 	splx(s);
322 }
323 
324 /*************************************************************
325  * End of operating system-specific autoconfiguration glue
326  *************************************************************/
327 
328 /*
329  * Do generic parts of attach.
330  */
331 int
332 fxp_attach(struct fxp_softc *sc, const char *intrstr)
333 {
334 	struct ifnet *ifp;
335 	struct mbuf *m;
336 	bus_dmamap_t rxmap;
337 	u_int16_t data;
338 	u_int8_t enaddr[6];
339 	int i, err;
340 
341 	/*
342 	 * Reset to a stable state.
343 	 */
344 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
345 	DELAY(10);
346 
347 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl),
348 	    PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg,
349 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO))
350 		goto fail;
351 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg,
352 	    sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl,
353 	    BUS_DMA_NOWAIT)) {
354 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
355 		goto fail;
356 	}
357 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl),
358 	    1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT,
359 	    &sc->tx_cb_map)) {
360 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
361 		    sizeof(struct fxp_ctrl));
362 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
363 		goto fail;
364 	}
365 	if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl,
366 	    sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) {
367 		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
368 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
369 		    sizeof(struct fxp_ctrl));
370 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
371 		goto fail;
372 	}
373 
374 	for (i = 0; i < FXP_NTXCB; i++) {
375 		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
376 		    FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) {
377 			printf("%s: unable to create tx dma map %d, error %d\n",
378 			    sc->sc_dev.dv_xname, i, err);
379 			goto fail;
380 		}
381 		sc->txs[i].tx_mbuf = NULL;
382 		sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i;
383 		sc->txs[i].tx_off = offsetof(struct fxp_ctrl, tx_cb[i]);
384 		sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK];
385 	}
386 
387 	/*
388 	 * Pre-allocate some receive buffers.
389 	 */
390 	sc->sc_rxfree = 0;
391 	for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
392 		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
393 		    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
394 			printf("%s: unable to create rx dma map %d, error %d\n",
395 			    sc->sc_dev.dv_xname, i, err);
396 			goto fail;
397 		}
398 		sc->rx_bufs++;
399 	}
400 	for (i = 0; i < FXP_NRFABUFS_MIN; i++)
401 		if (fxp_add_rfabuf(sc, NULL) != 0)
402 			goto fail;
403 
404 	/*
405 	 * Find out how large of an SEEPROM we have.
406 	 */
407 	fxp_autosize_eeprom(sc);
408 
409 	/*
410 	 * Get info about the primary PHY
411 	 */
412 	fxp_read_eeprom(sc, (u_int16_t *)&data, FXP_EEPROM_REG_PHY, 1);
413 	sc->phy_primary_addr = data & 0xff;
414 	sc->phy_primary_device = (data >> 8) & 0x3f;
415 	sc->phy_10Mbps_only = data >> 15;
416 
417 	/*
418 	 * Only 82558 and newer cards can do this.
419 	 */
420 	if (sc->sc_revision >= FXP_REV_82558_A4) {
421 		sc->sc_int_delay = fxp_int_delay;
422 		sc->sc_bundle_max = fxp_bundle_max;
423 		sc->sc_min_size_mask = fxp_min_size_mask;
424 	}
425 	/*
426 	 * Read MAC address.
427 	 */
428 	fxp_read_eeprom(sc, (u_int16_t *)enaddr, FXP_EEPROM_REG_MAC, 3);
429 
430 	ifp = &sc->sc_arpcom.ac_if;
431 	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
432 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
433 	ifp->if_softc = sc;
434 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
435 	ifp->if_ioctl = fxp_ioctl;
436 	ifp->if_start = fxp_start;
437 	ifp->if_watchdog = fxp_watchdog;
438 	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
439 	IFQ_SET_READY(&ifp->if_snd);
440 
441 	ifp->if_capabilities = IFCAP_VLAN_MTU;
442 
443 	printf(": %s, address %s\n", intrstr,
444 	    ether_sprintf(sc->sc_arpcom.ac_enaddr));
445 
446 	if (sc->sc_flags & FXPF_DISABLE_STANDBY) {
447 		fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_ID, 1);
448 		if (data & FXP_EEPROM_REG_ID_STB) {
449 			u_int16_t cksum;
450 
451 			printf("%s: Disabling dynamic standby mode in EEPROM",
452 			    sc->sc_dev.dv_xname);
453 			data &= ~FXP_EEPROM_REG_ID_STB;
454 			fxp_write_eeprom(sc, &data, FXP_EEPROM_REG_ID, 1);
455 			printf(", New ID 0x%x", data);
456 			cksum = 0;
457 			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
458 				fxp_read_eeprom(sc, &data, i, 1);
459 				cksum += data;
460 			}
461 			i = (1 << sc->eeprom_size) - 1;
462 			cksum = 0xBABA - cksum;
463 			fxp_read_eeprom(sc, &data, i, 1);
464 			fxp_write_eeprom(sc, &cksum, i, 1);
465 			printf(", cksum @ 0x%x: 0x%x -> 0x%x\n",
466 			    i, data, cksum);
467 		}
468 	}
469 
470 	/* Receiver lock-up workaround detection. */
471 	fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_COMPAT, 1);
472 	if ((data & (FXP_EEPROM_REG_COMPAT_MC10|FXP_EEPROM_REG_COMPAT_MC100))
473 	    != (FXP_EEPROM_REG_COMPAT_MC10|FXP_EEPROM_REG_COMPAT_MC100))
474 		sc->sc_flags |= FXPF_RECV_WORKAROUND;
475 
476 	/*
477 	 * Initialize our media structures and probe the MII.
478 	 */
479 	sc->sc_mii.mii_ifp = ifp;
480 	sc->sc_mii.mii_readreg = fxp_mdi_read;
481 	sc->sc_mii.mii_writereg = fxp_mdi_write;
482 	sc->sc_mii.mii_statchg = fxp_statchg;
483 	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange,
484 	    fxp_mediastatus);
485 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
486 	    MII_OFFSET_ANY, MIIF_NOISOLATE);
487 	/* If no phy found, just use auto mode */
488 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
489 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
490 		    0, NULL);
491 		printf("%s: no phy found, using manual mode\n",
492 		    sc->sc_dev.dv_xname);
493 	}
494 
495 	if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0))
496 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
497 	else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
498 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
499 	else
500 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
501 
502 	/*
503 	 * Attach the interface.
504 	 */
505 	if_attach(ifp);
506 	ether_ifattach(ifp);
507 
508 	/*
509 	 * Initialize timeout for statistics update.
510 	 */
511 	timeout_set(&sc->stats_update_to, fxp_stats_update, sc);
512 
513 	return (0);
514 
515  fail:
516 	printf("%s: Failed to malloc memory\n", sc->sc_dev.dv_xname);
517 	if (sc->tx_cb_map != NULL) {
518 		bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map);
519 		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
520 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
521 		    sizeof(struct fxp_cb_tx) * FXP_NTXCB);
522 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
523 	}
524 	m = sc->rfa_headm;
525 	while (m != NULL) {
526 		rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
527 		bus_dmamap_unload(sc->sc_dmat, rxmap);
528 		FXP_RXMAP_PUT(sc, rxmap);
529 		m = m_free(m);
530 	}
531 	return (ENOMEM);
532 }
533 
534 /*
535  * From NetBSD:
536  *
537  * Figure out EEPROM size.
538  *
539  * 559's can have either 64-word or 256-word EEPROMs, the 558
540  * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
541  * talks about the existence of 16 to 256 word EEPROMs.
542  *
543  * The only known sizes are 64 and 256, where the 256 version is used
544  * by CardBus cards to store CIS information.
545  *
546  * The address is shifted in msb-to-lsb, and after the last
547  * address-bit the EEPROM is supposed to output a `dummy zero' bit,
548  * after which follows the actual data. We try to detect this zero, by
549  * probing the data-out bit in the EEPROM control register just after
550  * having shifted in a bit. If the bit is zero, we assume we've
551  * shifted enough address bits. The data-out should be tri-state,
552  * before this, which should translate to a logical one.
553  *
554  * Other ways to do this would be to try to read a register with known
555  * contents with a varying number of address bits, but no such
556  * register seem to be available. The high bits of register 10 are 01
557  * on the 558 and 559, but apparently not on the 557.
558  *
559  * The Linux driver computes a checksum on the EEPROM data, but the
560  * value of this checksum is not very well documented.
561  */
/*
 * Probe the serial EEPROM's address width (see the long comment above
 * for the method) and record the number of address bits in
 * sc->eeprom_size.  An EEPROM with a 6-bit address holds 64 words; an
 * 8-bit one holds 256.
 */
void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(4);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(4);
	}
	/*
	 * Shift in address (all zero bits).
	 * Wait for the dummy zero following a correct address shift:
	 * EEDO dropping low means we have clocked in enough bits.
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(4);
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(4);
	}
	/* Deselect the EEPROM and remember how many address bits it takes. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(4);
	sc->eeprom_size = x;
}
603 
604 /*
605  * Read from the serial EEPROM. Basically, you manually shift in
606  * the read opcode (one bit at a time) and then shift in the address,
607  * and then you shift out the data (all of this one bit at a time).
608  * The word size is 16 bits, so you have to provide the address for
609  * every 16 bits of data.
610  */
/*
 * Read `words' consecutive 16-bit words from the serial EEPROM into
 * `data', starting at word offset `offset'.  For each word: assert
 * chip select, bit-bang the read opcode and address out on EEDI, then
 * clock the 16 data bits back in on EEDO, msb first.  The result is
 * byte-swapped from little-endian before being stored.
 */
void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset,
    int words)
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		/*
		 * Shift in address, using sc->eeprom_size address bits
		 * as determined by fxp_autosize_eeprom().
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		data[i] = letoh16(data[i]);
		/* Deselect between words. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(4);
	}
}
672 
673 /*
674  * Start packet transmission on the interface.
675  */
/*
 * Start packet transmission on the interface: drain the send queue
 * into the TxCB ring, then nudge the command unit.  Called with the
 * kernel's usual ifp->if_start contract.
 */
void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_txsw *txs = sc->sc_cbt_prod;
	struct fxp_cb_tx *txc;
	struct mbuf *m0, *m = NULL;
	int cnt = sc->sc_cbt_cnt, seg;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	while (1) {
		/* Leave two CBs spare (one becomes the suspend NOP below). */
		if (cnt >= (FXP_NTXCB - 2)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = txs->tx_next;

		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * If the chain doesn't fit the map (too many segments),
		 * copy it into a single fresh mbuf/cluster and retry.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
		    m0, BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
			    m, BUS_DMA_NOWAIT) != 0) {
				m_freem(m);
				break;
			}
		}

		/* Commit: dequeue, and swap in the copy if we made one. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
			m = NULL;
		}

		txs->tx_mbuf = m0;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		FXP_MBUF_SYNC(sc, txs->tx_map, BUS_DMASYNC_PREWRITE);

		/* Fill in the hardware TxCB from the loaded DMA segments. */
		txc = txs->tx_cb;
		txc->tbd_number = txs->tx_map->dm_nsegs;
		txc->cb_status = 0;
		txc->cb_command = htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF);
		txc->tx_threshold = tx_threshold;
		for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) {
			txc->tbd[seg].tb_addr =
			    htole32(txs->tx_map->dm_segs[seg].ds_addr);
			txc->tbd[seg].tb_size =
			    htole32(txs->tx_map->dm_segs[seg].ds_len);
		}
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		++cnt;
		sc->sc_cbt_prod = txs;
	}

	if (cnt != sc->sc_cbt_cnt) {
		/* We enqueued at least one. */
		ifp->if_timer = 5;

		/*
		 * Terminate the ring with an interrupting NOP CB that has
		 * the suspend bit set, then clear suspend/interrupt on the
		 * previous terminator so the CU runs through to the new one.
		 */
		txs = sc->sc_cbt_prod;
		txs = txs->tx_next;
		sc->sc_cbt_prod = txs;
		txs->tx_cb->cb_command =
		    htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sc->sc_cbt_prev->tx_cb->cb_command &=
		    htole16(~(FXP_CB_COMMAND_S | FXP_CB_COMMAND_I));
		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_cbt_prev = txs;

		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);

		sc->sc_cbt_cnt = cnt + 1;
	}
}
782 
783 /*
784  * Process interface interrupts.
785  */
/*
 * Interrupt handler.  Acks and services SCB status bits in a loop:
 * reclaims completed TxCBs, drains received frames from the RFA chain,
 * and restarts the receive unit after an RNR/SWI condition.  Returns
 * nonzero iff the interrupt was ours.
 */
int
fxp_intr(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int16_t statack;
	bus_dmamap_t rxmap;
	int claimed = 0;
	int rnr = 0;

	/*
	 * If the interface isn't running, don't try to
	 * service the interrupt.. just ack it and bail.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS);
		if (statack) {
			claimed = 1;
			CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
			    statack & FXP_SCB_STATACK_MASK);
		}
		return claimed;
	}

	while ((statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS)) &
	    FXP_SCB_STATACK_MASK) {
		claimed = 1;
		rnr = (statack & (FXP_SCB_STATACK_RNR |
		                  FXP_SCB_STATACK_SWI)) ? 1 : 0;
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
		    statack & FXP_SCB_STATACK_MASK);

		/*
		 * Free any finished transmit mbuf chains.  A CB is done
		 * when its complete bit is set; NOP CBs (the ring
		 * terminators written by fxp_start()) are skipped over too.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
			int txcnt = sc->sc_cbt_cnt;
			struct fxp_txsw *txs = sc->sc_cbt_cons;

			FXP_TXCB_SYNC(sc, txs,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			while ((txcnt > 0) &&
			   ((txs->tx_cb->cb_status & htole16(FXP_CB_STATUS_C)) ||
			   (txs->tx_cb->cb_command & htole16(FXP_CB_COMMAND_NOP)))) {
				if (txs->tx_mbuf != NULL) {
					FXP_MBUF_SYNC(sc, txs->tx_map,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat,
					    txs->tx_map);
					m_freem(txs->tx_mbuf);
					txs->tx_mbuf = NULL;
				}
				--txcnt;
				txs = txs->tx_next;
				FXP_TXCB_SYNC(sc, txs,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			}
			sc->sc_cbt_cnt = txcnt;
			/* Did we transmit any packets? */
			if (sc->sc_cbt_cons != txs)
				ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_timer = sc->sc_cbt_cnt ? 5 : 0;
			sc->sc_cbt_cons = txs;

			if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
				/*
				 * Try to start more packets transmitting.
				 */
				fxp_start(ifp);
			}
		}
		/*
		 * Process receiver interrupts. If a Receive Unit
		 * not ready (RNR) condition exists, get whatever
		 * packets we can and re-start the receiver.
		 *
		 * The RFA lives inside the cluster, offset by
		 * RFA_ALIGNMENT_FUDGE, and is potentially misaligned
		 * (see the comment at the top of the file) — hence the
		 * byte-pointer + offsetof() field accesses here.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
			       FXP_SCB_STATACK_SWI)) {
			struct mbuf *m;
			u_int8_t *rfap;
rcvloop:
			m = sc->rfa_headm;
			rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_sync(sc->sc_dmat, rxmap,
			    0, MCLBYTES, BUS_DMASYNC_POSTREAD |
			    BUS_DMASYNC_POSTWRITE);

			if (*(u_int16_t *)(rfap +
			    offsetof(struct fxp_rfa, rfa_status)) &
			    htole16(FXP_RFA_STATUS_C)) {
				if (*(u_int16_t *)(rfap +
				    offsetof(struct fxp_rfa, rfa_status)) &
				    htole16(FXP_RFA_STATUS_RNR))
					rnr = 1;

				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					u_int16_t total_len;

					total_len = htole16(*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    actual_size))) &
					    (MCLBYTES - 1);
					/* Drop runts and CRC errors. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					if (*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    rfa_status)) &
					    htole16(FXP_RFA_STATUS_CRC)) {
						m_freem(m);
						goto rcvloop;
					}

					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m,
						    BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */
					ether_input_mbuf(ifp, m);
				}
				goto rcvloop;
			}
		}
		/*
		 * Restart the receive unit at the head of the RFA chain
		 * if it stopped (RNR) or a software interrupt asked us to.
		 */
		if (rnr) {
			rxmap = *((bus_dmamap_t *)
			    sc->rfa_headm->m_ext.ext_buf);
			fxp_scb_wait(sc);
			CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    rxmap->dm_segs[0].ds_addr +
				    RFA_ALIGNMENT_FUDGE);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

		}
	}
	return (claimed);
}
943 
944 /*
945  * Update packet in/out/collision statistics. The i82557 doesn't
946  * allow you to access these counters without doing a fairly
947  * expensive DMA to get _all_ of the statistics it maintains, so
948  * we do this operation here only once per second. The statistics
949  * counters in the kernel are updated from the previous dump-stats
950  * DMA and then a new dump-stats DMA is started. The on-chip
951  * counters are zeroed when the DMA completes. If we can't start
952  * the DMA immediately, we don't wait - we just prepare to read
953  * them again next time.
954  */
void
fxp_stats_update(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_stats *sp = &sc->sc_ctrl->stats;
	int s;

	/* Pull in the counters the previous dump-stats DMA deposited. */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	ifp->if_opackets += letoh32(sp->tx_good);
	ifp->if_collisions += letoh32(sp->tx_total_collisions);
	/* Track receiver idle time for the lock-up workaround below. */
	if (sp->rx_good) {
		ifp->if_ipackets += letoh32(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else if (sc->sc_flags & FXPF_RECV_WORKAROUND)
		sc->rx_idle_secs++;
	ifp->if_ierrors +=
	    letoh32(sp->rx_crc_errors) +
	    letoh32(sp->rx_alignment_errors) +
	    letoh32(sp->rx_rnr_errors) +
	    letoh32(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += letoh32(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splnet();
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		/* fxp_init() reschedules this timeout itself, so return. */
		fxp_init(sc);
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (!(CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff)) {
		/*
		 * Start another stats dump.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	/* Tick the MII clock. */
	mii_tick(&sc->sc_mii);

	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}
1038 
/*
 * Detach the device: stop the hardware, tear down MII/media state,
 * and unhook the interface from the network stack.  Resources
 * allocated by fxp_attach() that can be released here are released.
 */
void
fxp_detach(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Get rid of our timeouts and mbufs */
	fxp_stop(sc, 1, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

#ifndef SMALL_KERNEL
	/* Microcode buffer was allocated by loadfirmware() in
	 * fxp_load_ucode(); release it if one was ever loaded. */
	if (sc->sc_ucodebuf)
		free(sc->sc_ucodebuf, M_DEVBUF);
#endif
}
1062 
1063 /*
1064  * Stop the interface. Cancels the statistics updater and resets
1065  * the interface.
1066  */
1067 void
1068 fxp_stop(struct fxp_softc *sc, int drain, int softonly)
1069 {
1070 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1071 	int i;
1072 
1073 	/*
1074 	 * Cancel stats updater.
1075 	 */
1076 	timeout_del(&sc->stats_update_to);
1077 
1078 	/*
1079 	 * Turn down interface (done early to avoid bad interactions
1080 	 * between panics, and the watchdog timer)
1081 	 */
1082 	ifp->if_timer = 0;
1083 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1084 
1085 	if (!softonly)
1086 		mii_down(&sc->sc_mii);
1087 
1088 	/*
1089 	 * Issue software reset.
1090 	 */
1091 	if (!softonly) {
1092 		CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1093 		DELAY(10);
1094 	}
1095 
1096 	/*
1097 	 * Release any xmit buffers.
1098 	 */
1099 	for (i = 0; i < FXP_NTXCB; i++) {
1100 		if (sc->txs[i].tx_mbuf != NULL) {
1101 			bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map);
1102 			m_freem(sc->txs[i].tx_mbuf);
1103 			sc->txs[i].tx_mbuf = NULL;
1104 		}
1105 	}
1106 	sc->sc_cbt_cnt = 0;
1107 
1108 	if (drain) {
1109 		bus_dmamap_t rxmap;
1110 		struct mbuf *m;
1111 
1112 		/*
1113 		 * Free all the receive buffers then reallocate/reinitialize
1114 		 */
1115 		m = sc->rfa_headm;
1116 		while (m != NULL) {
1117 			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
1118 			bus_dmamap_unload(sc->sc_dmat, rxmap);
1119 			FXP_RXMAP_PUT(sc, rxmap);
1120 			m = m_free(m);
1121 			sc->rx_bufs--;
1122 		}
1123 		sc->rfa_headm = NULL;
1124 		sc->rfa_tailm = NULL;
1125 		for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
1126 			if (fxp_add_rfabuf(sc, NULL) != 0) {
1127 				/*
1128 				 * This "can't happen" - we're at splnet()
1129 				 * and we just freed all the buffers we need
1130 				 * above.
1131 				 */
1132 				panic("fxp_stop: no buffers!");
1133 			}
1134 			sc->rx_bufs++;
1135 		}
1136 	}
1137 }
1138 
1139 /*
1140  * Watchdog/transmission transmit timeout handler. Called when a
1141  * transmission is started on the interface, but no interrupt is
1142  * received before the timeout. This usually indicates that the
1143  * card has wedged for some reason.
1144  */
1145 void
1146 fxp_watchdog(struct ifnet *ifp)
1147 {
1148 	struct fxp_softc *sc = ifp->if_softc;
1149 
1150 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1151 	ifp->if_oerrors++;
1152 
1153 	fxp_init(sc);
1154 }
1155 
1156 /*
1157  * Submit a command to the i82557.
1158  */
1159 void
1160 fxp_scb_cmd(struct fxp_softc *sc, u_int16_t cmd)
1161 {
1162 	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, cmd);
1163 }
1164 
/*
 * (Re)initialize the interface.  Must be called at splnet.
 *
 * Sequence: stop pending I/O, program the CU/RU base addresses, load
 * microcode (if any), compute multicast state, issue the CONFIG and
 * IAS (station address) commands, upload the multicast list, build
 * the TxCB ring, size and populate the receive buffer area, set the
 * media, and finally kick the receiver via a software interrupt and
 * restart the statistics updater.
 */
void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	bus_dmamap_t rxmap;
	int i, prm, save_bf, lrxen, allm, bufs;

	splassert(IPL_NET);

	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc, 0, 0);

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

#ifndef SMALL_KERNEL
	fxp_load_ucode(sc);
#endif
	/* Once through to set flags (doit == 0: no command issued yet) */
	fxp_mc_setup(sc, 0);

	/*
	 * In order to support receiving 802.1Q VLAN frames, we have to
	 * enable "save bad frames", since they are 4 bytes larger than
	 * the normal Ethernet maximum frame length. On i82558 and later,
	 * we have a better mechanism for this (the "long receive" bit).
	 */
	save_bf = 0;
	lrxen = 0;

	if (sc->sc_revision >= FXP_REV_82558_A4)
		lrxen = 1;
	else
		save_bf = 1;

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, stats));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	cbp = &sc->sc_ctrl->u.cfg;
	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
		sizeof(fxp_cb_config_template));

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

	/*
	 * The disabled block below documents the CONFIG byte layout as
	 * named bitfields; the live code pokes the same bits via the
	 * ctrlN/flag bytes of the template copied above.
	 */
#if 0
	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	0xffffffff;	/* (no) next command */
	cbp->byte_count =	22;		/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		save_bf ? 1 : prm; /* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->long_rx =		lrxen;	/* (enable) long packets */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		allm;
#else
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL);

	if (allm && !prm)
		cbp->mc_all |= 0x08;		/* accept all multicasts */
	else
		cbp->mc_all &= ~0x08;		/* reject all multicasts */

	if (prm) {
		cbp->promiscuous |= 1;		/* promiscuous mode */
		cbp->ctrl2 &= ~0x01;		/* save short packets */
		cbp->stripping &= ~0x01;	/* don't truncate rx packets */
	} else {
		cbp->promiscuous &= ~1;		/* no promiscuous mode */
		cbp->ctrl2 |= 0x01;		/* discard short packets */
		cbp->stripping |= 0x01;		/* truncate rx packets */
	}

	if (prm || save_bf)
		cbp->ctrl1 |= 0x80;		/* save bad frames */
	else
		cbp->ctrl1 &= ~0x80;		/* discard bad frames */

	if (sc->sc_flags & FXPF_MWI_ENABLE)
		cbp->ctrl0 |= 0x01;		/* enable PCI MWI command */

	if(!sc->phy_10Mbps_only)			/* interface mode */
		cbp->mediatype |= 0x01;
	else
		cbp->mediatype &= ~0x01;

	if(lrxen)			/* long packets */
		cbp->stripping |= 0x08;
	else
		cbp->stripping &= ~0x08;

	cbp->tx_dma_bytecount = 0; /* (no) tx DMA max, dma_dce = 0 ??? */
	cbp->ctrl1 |= 0x08;	/* ci_int = 1 */
	cbp->ctrl3 |= 0x08;	/* nsai */
	cbp->fifo_limit = 0x08; /* tx and rx fifo limit */
	cbp->fdx_pin |= 0x80;	/* Enable full duplex setting by pin */
#endif

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_CFG_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.cfg));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0 && i--);

	FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cbp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: config command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Now initialize the station address.
	 */
	cb_ias = &sc->sc_ctrl->u.ias;
	cb_ias->cb_status = htole16(0);
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = htole32(0xffffffff);
	bcopy(sc->sc_arpcom.ac_enaddr, (void *)cb_ias->macaddr,
	    sizeof(sc->sc_arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_IAS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.ias));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: IAS command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Again, this time really upload the multicast addresses */
	fxp_mc_setup(sc, 1);

	/*
	 * Initialize transmit control block (TxCB) list.
	 * Each TxCB starts as a NOP linked to the next slot, with its
	 * TBD array address pre-pointed at its own tbd[] storage.
	 */
	bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	txp = sc->sc_ctrl->tx_cb;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		txp[i].link_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK]));
		txp[i].tbd_array_addr =htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[i].tbd[0]));
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs;
	sc->sc_cbt_cnt = 1;
	sc->sc_ctrl->tx_cb[0].cb_command = htole16(FXP_CB_COMMAND_NOP |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map, 0,
	    sc->tx_cb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, tx_cb[0]));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 * Grow or shrink the rx buffer pool to the target size,
	 * releasing or creating dmamaps as needed.
	 */
	if (ifp->if_flags & IFF_UP)
		bufs = FXP_NRFABUFS_MAX;
	else
		bufs = FXP_NRFABUFS_MIN;
	if (sc->rx_bufs > bufs) {
		while (sc->rfa_headm != NULL && sc->rx_bufs-- > bufs) {
			rxmap = *((bus_dmamap_t *)sc->rfa_headm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			sc->rfa_headm = m_free(sc->rfa_headm);
		}
	} else if (sc->rx_bufs < bufs) {
		int err, tmp_rx_bufs = sc->rx_bufs;
		for (i = sc->rx_bufs; i < bufs; i++) {
			if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
				printf("%s: unable to create rx dma map %d, "
				  "error %d\n", sc->sc_dev.dv_xname, i, err);
				break;
			}
			sc->rx_bufs++;
		}
		for (i = tmp_rx_bufs; i < sc->rx_bufs; i++)
			if (fxp_add_rfabuf(sc, NULL) != 0)
				break;
	}
	fxp_scb_wait(sc);

	/*
	 * Set current media.
	 */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Request a software generated interrupt that will be used to
	 * (re)start the RU processing.  If we direct the chip to start
	 * receiving from the start of queue now, instead of letting the
	 * interrupt handler first process all received packets, we run
	 * the risk of having it overwrite mbuf clusters while they are
	 * being processed or after they have been returned to the pool.
	 */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND,
	    CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) |
	    FXP_SCB_INTRCNTL_REQUEST_SWI);

	/*
	 * Start stats updater.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}
1454 
1455 /*
1456  * Change media according to request.
1457  */
1458 int
1459 fxp_mediachange(struct ifnet *ifp)
1460 {
1461 	struct fxp_softc *sc = ifp->if_softc;
1462 	struct mii_data *mii = &sc->sc_mii;
1463 
1464 	if (mii->mii_instance) {
1465 		struct mii_softc *miisc;
1466 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1467 			mii_phy_reset(miisc);
1468 	}
1469 	mii_mediachg(&sc->sc_mii);
1470 	return (0);
1471 }
1472 
1473 /*
1474  * Notify the world which media we're using.
1475  */
1476 void
1477 fxp_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1478 {
1479 	struct fxp_softc *sc = ifp->if_softc;
1480 
1481 	mii_pollstat(&sc->sc_mii);
1482 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1483 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1484 }
1485 
1486 /*
1487  * Add a buffer to the end of the RFA buffer list.
1488  * Return 0 if successful, 1 for failure. A failure results in
1489  * adding the 'oldm' (if non-NULL) on to the end of the list -
1490  * tossing out its old contents and recycling it.
1491  * The RFA struct is stuck at the beginning of mbuf cluster and the
1492  * data pointer is fixed up to point just past it.
1493  */
1494 int
1495 fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm)
1496 {
1497 	u_int32_t v;
1498 	struct mbuf *m;
1499 	u_int8_t *rfap;
1500 	bus_dmamap_t rxmap = NULL;
1501 
1502 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1503 	if (m != NULL) {
1504 		MCLGET(m, M_DONTWAIT);
1505 		if ((m->m_flags & M_EXT) == 0) {
1506 			m_freem(m);
1507 			if (oldm == NULL)
1508 				return 1;
1509 			m = oldm;
1510 			m->m_data = m->m_ext.ext_buf;
1511 		}
1512 		if (oldm == NULL) {
1513 			rxmap = FXP_RXMAP_GET(sc);
1514 			*((bus_dmamap_t *)m->m_ext.ext_buf) = rxmap;
1515 			bus_dmamap_load(sc->sc_dmat, rxmap,
1516 			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1517 			    BUS_DMA_NOWAIT);
1518 		} else if (oldm == m)
1519 			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
1520 		else {
1521 			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
1522 			bus_dmamap_unload(sc->sc_dmat, rxmap);
1523 			bus_dmamap_load(sc->sc_dmat, rxmap,
1524 			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1525 			    BUS_DMA_NOWAIT);
1526 			*mtod(m, bus_dmamap_t *) = rxmap;
1527 		}
1528 	} else {
1529 		if (oldm == NULL)
1530 			return 1;
1531 		m = oldm;
1532 		m->m_data = m->m_ext.ext_buf;
1533 		rxmap = *mtod(m, bus_dmamap_t *);
1534 	}
1535 
1536 	/*
1537 	 * Move the data pointer up so that the incoming data packet
1538 	 * will be 32-bit aligned.
1539 	 */
1540 	m->m_data += RFA_ALIGNMENT_FUDGE;
1541 
1542 	/*
1543 	 * Get a pointer to the base of the mbuf cluster and move
1544 	 * data start past it.
1545 	 */
1546 	rfap = m->m_data;
1547 	m->m_data += sizeof(struct fxp_rfa);
1548 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) =
1549 	    htole16(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);
1550 
1551 	/*
1552 	 * Initialize the rest of the RFA.  Note that since the RFA
1553 	 * is misaligned, we cannot store values directly.  Instead,
1554 	 * we use an optimized, inline copy.
1555 	 */
1556 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0;
1557 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) =
1558 	    htole16(FXP_RFA_CONTROL_EL);
1559 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0;
1560 
1561 	v = -1;
1562 	fxp_lwcopy(&v,
1563 	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
1564 	fxp_lwcopy(&v,
1565 	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr)));
1566 
1567 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, MCLBYTES,
1568 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1569 
1570 	/*
1571 	 * If there are other buffers already on the list, attach this
1572 	 * one to the end by fixing up the tail to point to this one.
1573 	 */
1574 	if (sc->rfa_headm != NULL) {
1575 		sc->rfa_tailm->m_next = m;
1576 		v = htole32(rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
1577 		rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
1578 		fxp_lwcopy(&v,
1579 		    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
1580 		*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &=
1581 		    htole16((u_int16_t)~FXP_RFA_CONTROL_EL);
1582 		/* XXX we only need to sync the control struct */
1583 		bus_dmamap_sync(sc->sc_dmat,
1584 		    *((bus_dmamap_t *)sc->rfa_tailm->m_ext.ext_buf), 0,
1585 			MCLBYTES, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1586 	} else
1587 		sc->rfa_headm = m;
1588 
1589 	sc->rfa_tailm = m;
1590 
1591 	return (m == oldm);
1592 }
1593 
1594 int
1595 fxp_mdi_read(struct device *self, int phy, int reg)
1596 {
1597 	struct fxp_softc *sc = (struct fxp_softc *)self;
1598 	int count = FXP_CMD_TMO;
1599 	int value;
1600 
1601 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1602 	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1603 
1604 	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1605 	    && count--)
1606 		DELAY(10);
1607 
1608 	if (count <= 0)
1609 		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);
1610 
1611 	return (value & 0xffff);
1612 }
1613 
/*
 * MII status-change callback.  This driver takes no action when the
 * PHY status changes; the stub exists to fill the callback slot.
 */
void
fxp_statchg(struct device *self)
{
	/* Nothing to do. */
}
1619 
1620 void
1621 fxp_mdi_write(struct device *self, int phy, int reg, int value)
1622 {
1623 	struct fxp_softc *sc = (struct fxp_softc *)self;
1624 	int count = FXP_CMD_TMO;
1625 
1626 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1627 	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1628 	    (value & 0xffff));
1629 
1630 	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1631 	    count--)
1632 		DELAY(10);
1633 
1634 	if (count <= 0)
1635 		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
1636 }
1637 
/*
 * Handle interface ioctls: address assignment, flag changes, media
 * requests; everything else is delegated to ether_ioctl().  Runs at
 * splnet for the duration.
 */
int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fxp_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: reprogram via the ENETRESET
			 * path below to pick up flag changes. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc, 1, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* ENETRESET (from above or ether_ioctl) means "reprogram the
	 * hardware state"; a full fxp_init() accomplishes that. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			fxp_init(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1689 
1690 /*
1691  * Program the multicast filter.
1692  *
1693  * We have an artificial restriction that the multicast setup command
1694  * must be the first command in the chain, so we take steps to ensure
1695  * this. By requiring this, it allows us to keep up the performance of
1696  * the pre-initialized command ring (esp. link pointers) by not actually
1697  * inserting the mcsetup command in the ring - i.e. its link pointer
1698  * points to the TxCB ring, but the mcsetup descriptor itself is not part
1699  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
1700  * lead into the regular TxCB ring when it completes.
1701  *
1702  * This function must be called at splnet.
1703  */
1704 void
1705 fxp_mc_setup(struct fxp_softc *sc, int doit)
1706 {
1707 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1708 	struct arpcom *ac = &sc->sc_arpcom;
1709 	struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs;
1710 	struct ether_multistep step;
1711 	struct ether_multi *enm;
1712 	int i, nmcasts = 0;
1713 
1714 	splassert(IPL_NET);
1715 
1716 	ifp->if_flags &= ~IFF_ALLMULTI;
1717 
1718 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1719 	    ac->ac_multicnt >= MAXMCADDR) {
1720 		ifp->if_flags |= IFF_ALLMULTI;
1721 	} else {
1722 		ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
1723 		while (enm != NULL) {
1724 			bcopy(enm->enm_addrlo,
1725 			    (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
1726 
1727 			nmcasts++;
1728 
1729 			ETHER_NEXT_MULTI(step, enm);
1730 		}
1731 	}
1732 
1733 	if (doit == 0)
1734 		return;
1735 
1736 	/*
1737 	 * Initialize multicast setup descriptor.
1738 	 */
1739 	mcsp->cb_status = htole16(0);
1740 	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
1741 	mcsp->link_addr = htole32(-1);
1742 	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
1743 
1744 	/*
1745 	 * Wait until command unit is not active. This should never
1746 	 * be the case when nothing is queued, but make sure anyway.
1747 	 */
1748 	for (i = FXP_CMD_TMO; (CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
1749 	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE && i--; DELAY(1));
1750 
1751 	if ((CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
1752 	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE) {
1753 		printf("%s: timeout waiting for CU ready\n",
1754 		    sc->sc_dev.dv_xname);
1755 		return;
1756 	}
1757 
1758 	/*
1759 	 * Start the multicast setup command.
1760 	 */
1761 	fxp_scb_wait(sc);
1762 	FXP_MCS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1763 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
1764 	    offsetof(struct fxp_ctrl, u.mcs));
1765 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1766 
1767 	i = FXP_CMD_TMO;
1768 	do {
1769 		DELAY(1);
1770 		FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1771 	} while (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C)) && i--);
1772 
1773 	FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1774 	if (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C))) {
1775 		printf("%s: multicast command timeout\n", sc->sc_dev.dv_xname);
1776 		return;
1777 	}
1778 
1779 }
1780 
1781 #ifndef SMALL_KERNEL
1782 #include <dev/microcode/fxp/rcvbundl.h>
/*
 * Table mapping chip revisions to their CPU-saver microcode images.
 * The *_offset fields are dword indices into the loaded image where
 * fxp_load_ucode() patches in tunables; 0 means "no such slot".
 */
struct ucode {
	u_int16_t	revision;		/* chip revision this image fits */
	u_int16_t	int_delay_offset;	/* rx interrupt delay patch slot */
	u_int16_t	bundle_max_offset;	/* rx bundle max patch slot */
	u_int16_t	min_size_mask_offset;	/* min frame size mask patch slot */
	const char	*uname;			/* firmware image name */
} const ucode_table[] = {
	{ FXP_REV_82558_A4, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101a" },

	{ FXP_REV_82558_B0, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101b0" },

	{ FXP_REV_82559_A0, D101M_CPUSAVER_DWORD,
	  D101M_CPUSAVER_BUNDLE_MAX_DWORD, D101M_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101ma" },

	{ FXP_REV_82559S_A, D101S_CPUSAVER_DWORD,
	  D101S_CPUSAVER_BUNDLE_MAX_DWORD, D101S_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101s" },

	{ FXP_REV_82550, D102_B_CPUSAVER_DWORD,
	  D102_B_CPUSAVER_BUNDLE_MAX_DWORD, D102_B_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102" },

	{ FXP_REV_82550_C, D102_C_CPUSAVER_DWORD,
	  D102_C_CPUSAVER_BUNDLE_MAX_DWORD, D102_C_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102c" },

	{ FXP_REV_82551_F, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	{ FXP_REV_82551_10, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	/* Sentinel: revision 0 terminates the search. */
	{ 0, 0,
	  0, 0,
	  NULL }
};
1826 
/*
 * Load the CPU-saver microcode into the chip, if an image exists for
 * this revision.  The image is fetched once via loadfirmware() and
 * cached in sc_ucodebuf; tunables (interrupt delay, bundle max, min
 * size mask) are patched into the image at table-specified offsets
 * before the UCODE command is issued.  Sets FXPF_NOUCODE when no
 * image applies and FXPF_UCODELOADED on success.
 */
void
fxp_load_ucode(struct fxp_softc *sc)
{
	const struct ucode *uc;
	struct fxp_cb_ucode *cbp = &sc->sc_ctrl->u.code;
	int i, error;

	if (sc->sc_flags & FXPF_NOUCODE)
		return;

	for (uc = ucode_table; uc->revision != 0; uc++)
		if (sc->sc_revision == uc->revision)
			break;
	if (uc->revision == 0) {
		sc->sc_flags |= FXPF_NOUCODE;
		return;	/* no ucode for this chip is found */
	}

	if (sc->sc_ucodebuf)
		goto reloadit;

	if (sc->sc_revision == FXP_REV_82550_C) {
		u_int16_t data;

		/*
		 * 82550C without the server extensions
		 * locks up with the microcode patch.
		 */
		fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_COMPAT, 1);
		if ((data & FXP_EEPROM_REG_COMPAT_SRV) == 0) {
			sc->sc_flags |= FXPF_NOUCODE;
			return;
		}
	}

	error = loadfirmware(uc->uname, (u_char **)&sc->sc_ucodebuf,
	    &sc->sc_ucodelen);
	if (error) {
		printf("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, uc->uname);
		return;
	}

reloadit:
	/* Already resident in the chip from a previous init. */
	if (sc->sc_flags & FXPF_UCODELOADED)
		return;

	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE|FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < (sc->sc_ucodelen / sizeof(u_int32_t)); i++)
		cbp->ucode[i] = sc->sc_ucodebuf[i];

	/* Patch driver tunables into the image at the table offsets. */
	if (uc->int_delay_offset)
		*((u_int16_t *)&cbp->ucode[uc->int_delay_offset]) =
			htole16(sc->sc_int_delay + sc->sc_int_delay / 2);

	if (uc->bundle_max_offset)
		*((u_int16_t *)&cbp->ucode[uc->bundle_max_offset]) =
			htole16(sc->sc_bundle_max);

	if (uc->min_size_mask_offset)
		*((u_int16_t *)&cbp->ucode[uc->min_size_mask_offset]) =
			htole16(sc->sc_min_size_mask);

	FXP_UCODE_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr
	      + offsetof(struct fxp_ctrl, u.code));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(2);
		FXP_UCODE_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0) && --i);
	if (i == 0) {
		printf("%s: timeout loading microcode\n", sc->sc_dev.dv_xname);
		return;
	}
	sc->sc_flags |= FXPF_UCODELOADED;

#ifdef DEBUG
	printf("%s: microcode loaded, int_delay: %d usec",
	    sc->sc_dev.dv_xname, sc->sc_int_delay);

	if (uc->bundle_max_offset)
		printf(", bundle_max %d\n", sc->sc_bundle_max);
	else
		printf("\n");
#endif
}
1924 #endif /* SMALL_KERNEL */
1925