xref: /dragonfly/sys/dev/netif/vr/if_vr.c (revision 5dfd06ac)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_vr.c,v 1.26.2.13 2003/02/06 04:46:20 silby Exp $
33  * $DragonFly: src/sys/dev/netif/vr/if_vr.c,v 1.44 2006/12/22 23:26:22 swildner Exp $
34  */
35 
36 /*
37  * VIA Rhine fast ethernet PCI NIC driver
38  *
39  * Supports various network adapters based on the VIA Rhine
40  * and Rhine II PCI controllers, including the D-Link DFE530TX.
41  * Datasheets are available at http://www.via.com.tw.
42  *
43  * Written by Bill Paul <wpaul@ctr.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The VIA Rhine controllers are similar in some respects to the
50  * DEC tulip chips, except less complicated. The controller
51  * uses an MII bus and an external physical layer interface. The
52  * receiver has a one entry perfect filter and a 64-bit hash table
53  * multicast filter. Transmit and receive descriptors are similar
54  * to the tulip.
55  *
56  * The Rhine has a serious flaw in its transmit DMA mechanism:
57  * transmit buffers must be longword aligned. Unfortunately,
58  * FreeBSD doesn't guarantee that mbufs will be filled in starting
59  * at longword boundaries, so we have to do a buffer copy before
60  * transmission.
61  */
62 
63 #include "opt_polling.h"
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/sockio.h>
68 #include <sys/mbuf.h>
69 #include <sys/malloc.h>
70 #include <sys/kernel.h>
71 #include <sys/socket.h>
72 #include <sys/serialize.h>
73 #include <sys/bus.h>
74 #include <sys/rman.h>
75 #include <sys/thread2.h>
76 
77 #include <net/if.h>
78 #include <net/ifq_var.h>
79 #include <net/if_arp.h>
80 #include <net/ethernet.h>
81 #include <net/if_dl.h>
82 #include <net/if_media.h>
83 
84 #include <net/bpf.h>
85 
86 #include <vm/vm.h>              /* for vtophys */
87 #include <vm/pmap.h>            /* for vtophys */
88 
89 #include <dev/netif/mii_layer/mii.h>
90 #include <dev/netif/mii_layer/miivar.h>
91 
92 #include <bus/pci/pcidevs.h>
93 #include <bus/pci/pcireg.h>
94 #include <bus/pci/pcivar.h>
95 
96 #define VR_USEIOSPACE
97 
98 #include <dev/netif/vr/if_vrreg.h>
99 
100 /* "controller miibus0" required.  See GENERIC if you get errors here. */
101 #include "miibus_if.h"
102 
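/*
 * With VR_USESWSHIFT defined, PHY registers are read and written by
 * bit-banging the MII serial lines through VR_MIICMD (vr_mii_sync()
 * and vr_mii_send() below); with it undefined, as here, the chip's
 * built-in MII access registers (VR_MIIADDR/VR_MIIDATA) are used.
 */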
103 #undef VR_USESWSHIFT
104 
105 /*
106  * Various supported device vendors/types and their names.
107  */
108 static struct vr_type vr_devs[] = {
109 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
110 		"VIA VT3043 Rhine I 10/100BaseTX" },
111 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
112 		"VIA VT86C100A Rhine II 10/100BaseTX" },
113 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
114 		"VIA VT6102 Rhine II 10/100BaseTX" },
115 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
116 		"VIA VT6105 Rhine III 10/100BaseTX" },
117 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
118 		"VIA VT6105M Rhine III 10/100BaseTX" },
119 	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
120 		"Delta Electronics Rhine II 10/100BaseTX" },
121 	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
122 		"Addtron Technology Rhine II 10/100BaseTX" },
123 	{ 0, 0, NULL }
124 };
125 
126 static int	vr_probe(device_t);
127 static int	vr_attach(device_t);
128 static int	vr_detach(device_t);
129 
130 static int	vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
131 			  struct mbuf *);
132 static int	vr_encap(struct vr_softc *, int, struct mbuf * );
133 
134 static void	vr_rxeof(struct vr_softc *);
135 static void	vr_rxeoc(struct vr_softc *);
136 static void	vr_txeof(struct vr_softc *);
137 static void	vr_txeoc(struct vr_softc *);
138 static void	vr_tick(void *);
139 static void	vr_intr(void *);
140 static void	vr_start(struct ifnet *);
141 static int	vr_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
142 static void	vr_init(void *);
143 static void	vr_stop(struct vr_softc *);
144 static void	vr_watchdog(struct ifnet *);
145 static void	vr_shutdown(device_t);
146 static int	vr_ifmedia_upd(struct ifnet *);
147 static void	vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
148 
149 #ifdef VR_USESWSHIFT
150 static void	vr_mii_sync(struct vr_softc *);
151 static void	vr_mii_send(struct vr_softc *, uint32_t, int);
152 #endif
153 static int	vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
154 static int	vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
155 static int	vr_miibus_readreg(device_t, int, int);
156 static int	vr_miibus_writereg(device_t, int, int, int);
157 static void	vr_miibus_statchg(device_t);
158 
159 static void	vr_setcfg(struct vr_softc *, int);
160 static void	vr_setmulti(struct vr_softc *);
161 static void	vr_reset(struct vr_softc *);
162 static int	vr_list_rx_init(struct vr_softc *);
163 static int	vr_list_tx_init(struct vr_softc *);
164 #ifdef DEVICE_POLLING
165 static void	vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
166 #endif
167 
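/*
 * Map the registers through I/O space or memory space, depending on
 * whether VR_USEIOSPACE is defined above.
 */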
168 #ifdef VR_USEIOSPACE
169 #define VR_RES			SYS_RES_IOPORT
170 #define VR_RID			VR_PCI_LOIO
171 #else
172 #define VR_RES			SYS_RES_MEMORY
173 #define VR_RID			VR_PCI_LOMEM
174 #endif
175 
176 static device_method_t vr_methods[] = {
177 	/* Device interface */
178 	DEVMETHOD(device_probe,		vr_probe),
179 	DEVMETHOD(device_attach,	vr_attach),
180 	DEVMETHOD(device_detach, 	vr_detach),
181 	DEVMETHOD(device_shutdown,	vr_shutdown),
182 
183 	/* bus interface */
184 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
185 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
186 
187 	/* MII interface */
188 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
189 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
190 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
191 
192 	{ 0, 0 }
193 };
194 
195 static driver_t vr_driver = {
196 	"vr",
197 	vr_methods,
198 	sizeof(struct vr_softc)
199 };
200 
201 static devclass_t vr_devclass;
202 
203 DECLARE_DUMMY_MODULE(if_vr);
204 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
205 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
206 
207 #define VR_SETBIT(sc, reg, x)				\
208 	CSR_WRITE_1(sc, reg,				\
209 		CSR_READ_1(sc, reg) | (x))
210 
211 #define VR_CLRBIT(sc, reg, x)				\
212 	CSR_WRITE_1(sc, reg,				\
213 		CSR_READ_1(sc, reg) & ~(x))
214 
215 #define VR_SETBIT16(sc, reg, x)				\
216 	CSR_WRITE_2(sc, reg,				\
217 		CSR_READ_2(sc, reg) | (x))
218 
219 #define VR_CLRBIT16(sc, reg, x)				\
220 	CSR_WRITE_2(sc, reg,				\
221 		CSR_READ_2(sc, reg) & ~(x))
222 
223 #define VR_SETBIT32(sc, reg, x)				\
224 	CSR_WRITE_4(sc, reg,				\
225 		CSR_READ_4(sc, reg) | (x))
226 
227 #define VR_CLRBIT32(sc, reg, x)				\
228 	CSR_WRITE_4(sc, reg,				\
229 		CSR_READ_4(sc, reg) & ~(x))
230 
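/*
 * SIO_SET()/SIO_CLR() toggle individual MII serial lines in the
 * MIICMD register; they are used only by the software bit-bang
 * MII code.
 */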
231 #define SIO_SET(x)					\
232 	CSR_WRITE_1(sc, VR_MIICMD,			\
233 		CSR_READ_1(sc, VR_MIICMD) | (x))
234 
235 #define SIO_CLR(x)					\
236 	CSR_WRITE_1(sc, VR_MIICMD,			\
237 		CSR_READ_1(sc, VR_MIICMD) & ~(x))
238 
239 #ifdef VR_USESWSHIFT
240 /*
241  * Sync the PHYs by setting data bit and strobing the clock 32 times.
242  */
243 static void
244 vr_mii_sync(struct vr_softc *sc)
245 {
246 	int i;
247 
248 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
249 
250 	for (i = 0; i < 32; i++) {
251 		SIO_SET(VR_MIICMD_CLK);
252 		DELAY(1);
253 		SIO_CLR(VR_MIICMD_CLK);
254 		DELAY(1);
255 	}
256 }
257 
258 /*
259  * Clock a series of bits through the MII.
260  */
261 static void
262 vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
263 {
264 	int i;
265 
266 	SIO_CLR(VR_MIICMD_CLK);
267 
268 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
269 		if (bits & i)
270 			SIO_SET(VR_MIICMD_DATAIN);
271 		else
272 			SIO_CLR(VR_MIICMD_DATAIN);
273 		DELAY(1);
274 		SIO_CLR(VR_MIICMD_CLK);
275 		DELAY(1);
276 		SIO_SET(VR_MIICMD_CLK);
277 	}
278 }
279 #endif
280 
281 /*
282  * Read an PHY register through the MII.
283  */
284 static int
285 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
286 #ifdef VR_USESWSHIFT
287 {
288 	int i, ack;
289 
290 	/* Set up frame for RX. */
291 	frame->mii_stdelim = VR_MII_STARTDELIM;
292 	frame->mii_opcode = VR_MII_READOP;
293 	frame->mii_turnaround = 0;
294 	frame->mii_data = 0;
295 
296 	CSR_WRITE_1(sc, VR_MIICMD, 0);
297 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
298 
299 	/* Turn on data xmit. */
300 	SIO_SET(VR_MIICMD_DIR);
301 
302 	vr_mii_sync(sc);
303 
304 	/* Send command/address info. */
305 	vr_mii_send(sc, frame->mii_stdelim, 2);
306 	vr_mii_send(sc, frame->mii_opcode, 2);
307 	vr_mii_send(sc, frame->mii_phyaddr, 5);
308 	vr_mii_send(sc, frame->mii_regaddr, 5);
309 
310 	/* Idle bit. */
311 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
312 	DELAY(1);
313 	SIO_SET(VR_MIICMD_CLK);
314 	DELAY(1);
315 
316 	/* Turn off xmit. */
317 	SIO_CLR(VR_MIICMD_DIR);
318 
319 	/* Check for ack */
320 	SIO_CLR(VR_MIICMD_CLK);
321 	DELAY(1);
322 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
323 	SIO_SET(VR_MIICMD_CLK);
324 	DELAY(1);
325 
326 	/*
327 	 * Now try reading data bits. If the ack failed, we still
328 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
329 	 */
330 	if (ack) {
331 		for (i = 0; i < 16; i++) {
332 			SIO_CLR(VR_MIICMD_CLK);
333 			DELAY(1);
334 			SIO_SET(VR_MIICMD_CLK);
335 			DELAY(1);
336 		}
337 		goto fail;
338 	}
339 
340 	for (i = 0x8000; i; i >>= 1) {
341 		SIO_CLR(VR_MIICMD_CLK);
342 		DELAY(1);
343 		if (!ack) {
344 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
345 				frame->mii_data |= i;
346 			DELAY(1);
347 		}
348 		SIO_SET(VR_MIICMD_CLK);
349 		DELAY(1);
350 	}
351 
352 fail:
353 	SIO_CLR(VR_MIICMD_CLK);
354 	DELAY(1);
355 	SIO_SET(VR_MIICMD_CLK);
356 	DELAY(1);
357 
358 	if (ack)
359 		return(1);
360 	return(0);
361 }
362 #else
363 {
364 	int i;
365 
366 	/* Set the PHY address. */
367 	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
368 	    frame->mii_phyaddr);
369 
370 	/* Set the register address. */
371 	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
372 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
373 
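	/* Busy-wait (up to roughly 10ms) for the MII read to complete. */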
374 	for (i = 0; i < 10000; i++) {
375 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
376 			break;
377 		DELAY(1);
378 	}
379 	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);
380 
381 	return(0);
382 }
383 #endif
384 
385 
386 /*
387  * Write to a PHY register through the MII.
388  */
389 static int
390 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
391 #ifdef VR_USESWSHIFT
392 {
393 	CSR_WRITE_1(sc, VR_MIICMD, 0);
394 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
395 
396 	/* Set up frame for TX. */
397 	frame->mii_stdelim = VR_MII_STARTDELIM;
398 	frame->mii_opcode = VR_MII_WRITEOP;
399 	frame->mii_turnaround = VR_MII_TURNAROUND;
400 
401 	/* Turn on data output. */
402 	SIO_SET(VR_MIICMD_DIR);
403 
404 	vr_mii_sync(sc);
405 
406 	vr_mii_send(sc, frame->mii_stdelim, 2);
407 	vr_mii_send(sc, frame->mii_opcode, 2);
408 	vr_mii_send(sc, frame->mii_phyaddr, 5);
409 	vr_mii_send(sc, frame->mii_regaddr, 5);
410 	vr_mii_send(sc, frame->mii_turnaround, 2);
411 	vr_mii_send(sc, frame->mii_data, 16);
412 
413 	/* Idle bit. */
414 	SIO_SET(VR_MIICMD_CLK);
415 	DELAY(1);
416 	SIO_CLR(VR_MIICMD_CLK);
417 	DELAY(1);
418 
419 	/* Turn off xmit. */
420 	SIO_CLR(VR_MIICMD_DIR);
421 
422 	return(0);
423 }
424 #else
425 {
426 	int i;
427 
428 	/* Set the PHY address. */
429 	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
430 		    frame->mii_phyaddr);
431 
432 	/* Set the register address and data to write. */
433 	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
434 	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);
435 
436 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
437 
438 	for (i = 0; i < 10000; i++) {
439 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
440 			break;
441 		DELAY(1);
442 	}
443 	return(0);
444 }
445 #endif
446 
447 static int
448 vr_miibus_readreg(device_t dev, int phy, int reg)
449 {
450 	struct vr_mii_frame frame;
451 	struct vr_softc *sc;
452 
453 	sc = device_get_softc(dev);
454 
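	/*
	 * On the VT6102 only PHY address 1 is serviced, presumably
	 * because the integrated PHY lives there and other addresses
	 * return bogus data.
	 */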
455 	switch (sc->vr_revid) {
456 	case REV_ID_VT6102_APOLLO:
457 		if (phy != 1)
458 			return(0);
459 		break;
460 	default:
461 		break;
462 	}
463 
464 	bzero(&frame, sizeof(frame));
465 
466 	frame.mii_phyaddr = phy;
467 	frame.mii_regaddr = reg;
468 	vr_mii_readreg(sc, &frame);
469 
470 	return(frame.mii_data);
471 }
472 
473 static int
474 vr_miibus_writereg(device_t dev, int phy, int reg, int data)
475 {
476 	struct vr_mii_frame frame;
477 	struct vr_softc *sc;
478 
479 	sc = device_get_softc(dev);
480 
481 	switch (sc->vr_revid) {
482 	case REV_ID_VT6102_APOLLO:
483 		if (phy != 1)
484 			return 0;
485 		break;
486 	default:
487 		break;
488 	}
489 
490 	bzero(&frame, sizeof(frame));
491 
492 	frame.mii_phyaddr = phy;
493 	frame.mii_regaddr = reg;
494 	frame.mii_data = data;
495 
496 	vr_mii_writereg(sc, &frame);
497 
498 	return(0);
499 }
500 
501 static void
502 vr_miibus_statchg(device_t dev)
503 {
504 	struct mii_data *mii;
505 	struct vr_softc *sc;
506 
507 	sc = device_get_softc(dev);
508 	mii = device_get_softc(sc->vr_miibus);
509 	vr_setcfg(sc, mii->mii_media_active);
510 }
511 
512 /*
513  * Program the 64-bit multicast hash filter.
514  */
515 static void
516 vr_setmulti(struct vr_softc *sc)
517 {
518 	struct ifnet *ifp;
519 	uint32_t hashes[2] = { 0, 0 };
520 	struct ifmultiaddr *ifma;
521 	uint8_t rxfilt;
522 	int mcnt = 0;
523 
524 	ifp = &sc->arpcom.ac_if;
525 
526 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
527 
528 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
529 		rxfilt |= VR_RXCFG_RX_MULTI;
530 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
531 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
532 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
533 		return;
534 	}
535 
536 	/* First, zero out all the existing hash bits. */
537 	CSR_WRITE_4(sc, VR_MAR0, 0);
538 	CSR_WRITE_4(sc, VR_MAR1, 0);
539 
540 	/* Now program new ones. */
541 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
542 		int h;
543 
544 		if (ifma->ifma_addr->sa_family != AF_LINK)
545 			continue;
546 
547 		/* Use the upper 6 bits of the big-endian CRC as the hash index. */
548 		h = (ether_crc32_be(
549 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
550 			ETHER_ADDR_LEN) >> 26) & 0x0000003F;
551 		if (h < 32)
552 			hashes[0] |= (1 << h);
553 		else
554 			hashes[1] |= (1 << (h - 32));
555 		mcnt++;
556 	}
557 
558 	if (mcnt)
559 		rxfilt |= VR_RXCFG_RX_MULTI;
560 	else
561 		rxfilt &= ~VR_RXCFG_RX_MULTI;
562 
563 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
564 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
565 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
566 }
567 
568 /*
569  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
570  * netconfig register, we first have to put the transmit and/or receive
571  * logic in the idle state.
572  */
573 static void
574 vr_setcfg(struct vr_softc *sc, int media)
575 {
576 	int restart = 0;
577 
578 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
579 		restart = 1;
580 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
581 	}
582 
583 	if ((media & IFM_GMASK) == IFM_FDX)
584 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
585 	else
586 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
587 
588 	if (restart)
589 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
590 }
591 
592 static void
593 vr_reset(struct vr_softc *sc)
594 {
595 	int i;
596 
597 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
598 
599 	for (i = 0; i < VR_TIMEOUT; i++) {
600 		DELAY(10);
601 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
602 			break;
603 	}
604 	if (i == VR_TIMEOUT) {
605 		struct ifnet *ifp = &sc->arpcom.ac_if;
606 
607 		if (sc->vr_revid < REV_ID_VT3065_A) {
608 			if_printf(ifp, "reset never completed!\n");
609 		} else {
610 			/* Use newer force reset command */
611 			if_printf(ifp, "Using force reset command.\n");
612 			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
613 		}
614 	}
615 
616 	/* Wait a little while for the chip to get its brains in order. */
617 	DELAY(1000);
618 }
619 
620 /*
621  * Probe for a VIA Rhine chip. Check the PCI vendor and device
622  * IDs against our list and return a device name if we find a match.
623  */
624 static int
625 vr_probe(device_t dev)
626 {
627 	struct vr_type *t;
628 	uint16_t vid, did;
629 
630 	vid = pci_get_vendor(dev);
631 	did = pci_get_device(dev);
632 
633 	for (t = vr_devs; t->vr_name != NULL; ++t) {
634 		if (vid == t->vr_vid && did == t->vr_did) {
635 			device_set_desc(dev, t->vr_name);
636 			return(0);
637 		}
638 	}
639 
640 	return(ENXIO);
641 }
642 
643 /*
644  * Attach the interface. Allocate softc structures, do ifmedia
645  * setup and ethernet/BPF attach.
646  */
647 static int
648 vr_attach(device_t dev)
649 {
650 	int i;
651 	uint8_t eaddr[ETHER_ADDR_LEN];
652 	struct vr_softc *sc;
653 	struct ifnet *ifp;
654 	int error = 0, rid;
655 
656 	sc = device_get_softc(dev);
657 	callout_init(&sc->vr_stat_timer);
658 
659 	/*
660 	 * Handle power management nonsense.
661 	 */
662 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
663 		uint32_t iobase, membase, irq;
664 
665 		/* Save important PCI config data. */
666 		iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
667 		membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
668 		irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
669 
670 		/* Reset the power state. */
671 		device_printf(dev, "chip is in D%d power mode "
672 		    "-- setting to D0\n", pci_get_powerstate(dev));
673 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
674 
675 		/* Restore PCI config data. */
676 		pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
677 		pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
678 		pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
679 	}
680 
681 	pci_enable_busmaster(dev);
682 
683 	sc->vr_revid = pci_get_revid(dev);
684 
685 	rid = VR_RID;
686 	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);
687 
688 	if (sc->vr_res == NULL) {
689 		device_printf(dev, "couldn't map ports/memory\n");
690 		return ENXIO;
691 	}
692 
693 	sc->vr_btag = rman_get_bustag(sc->vr_res);
694 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
695 
696 	/* Allocate interrupt */
697 	rid = 0;
698 	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
699 					    RF_SHAREABLE | RF_ACTIVE);
700 
701 	if (sc->vr_irq == NULL) {
702 		device_printf(dev, "couldn't map interrupt\n");
703 		error = ENXIO;
704 		goto fail;
705 	}
706 
707 	/*
708 	 * Windows may put the chip in suspend mode when it
709 	 * shuts down. Be sure to kick it in the head to wake it
710 	 * up again.
711 	 */
712 	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
713 
714 	ifp = &sc->arpcom.ac_if;
715 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
716 
717 	/* Reset the adapter. */
718 	vr_reset(sc);
719 
720 	/*
721 	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
722 	 * initialization and disable AUTOPOLL.
723 	 */
724 	pci_write_config(dev, VR_PCI_MODE,
725 	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
726 	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
727 
728 	/*
729 	 * Get station address. The way the Rhine chips work,
730 	 * you're not allowed to directly access the EEPROM once
731 	 * they've been programmed a special way. Consequently,
732 	 * we need to read the node address from the PAR0 and PAR1
733 	 * registers.
734 	 */
735 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
736 	DELAY(200);
737 	for (i = 0; i < ETHER_ADDR_LEN; i++)
738 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
739 
740 	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
741 	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
742 
743 	if (sc->vr_ldata == NULL) {
744 		device_printf(dev, "no memory for list buffers!\n");
745 		error = ENXIO;
746 		goto fail;
747 	}
748 
749 	/* Initialize TX buffer */
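	/*
	 * Outgoing frames are copied into this contiguous, longword
	 * aligned bounce buffer by vr_encap(), which sidesteps the
	 * Rhine's TX alignment restriction noted at the top of this
	 * file.
	 */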
750 	sc->vr_cdata.vr_tx_buf = contigmalloc(VR_TX_BUF_SIZE, M_DEVBUF,
751 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
752 	if (sc->vr_cdata.vr_tx_buf == NULL) {
753 		device_printf(dev, "can't allocate tx buffer!\n");
754 		error = ENXIO;
755 		goto fail;
756 	}
757 
758 	/* Set various TX indexes to invalid value */
759 	sc->vr_cdata.vr_tx_free_idx = -1;
760 	sc->vr_cdata.vr_tx_tail_idx = -1;
761 	sc->vr_cdata.vr_tx_head_idx = -1;
762 
763 
764 	ifp->if_softc = sc;
765 	ifp->if_mtu = ETHERMTU;
766 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
767 	ifp->if_ioctl = vr_ioctl;
768 	ifp->if_start = vr_start;
769 #ifdef DEVICE_POLLING
770 	ifp->if_poll = vr_poll;
771 #endif
772 	ifp->if_watchdog = vr_watchdog;
773 	ifp->if_init = vr_init;
774 	ifp->if_baudrate = 10000000;
775 	ifq_set_maxlen(&ifp->if_snd, VR_TX_LIST_CNT - 1);
776 	ifq_set_ready(&ifp->if_snd);
777 
778 	/*
779 	 * Do MII setup.
780 	 */
781 	if (mii_phy_probe(dev, &sc->vr_miibus,
782 	    vr_ifmedia_upd, vr_ifmedia_sts)) {
783 		if_printf(ifp, "MII without any phy!\n");
784 		error = ENXIO;
785 		goto fail;
786 	}
787 
788 	/* Call MI attach routine. */
789 	ether_ifattach(ifp, eaddr, NULL);
790 
791 	error = bus_setup_intr(dev, sc->vr_irq, INTR_NETSAFE,
792 			       vr_intr, sc, &sc->vr_intrhand,
793 			       ifp->if_serializer);
794 
795 	if (error) {
796 		device_printf(dev, "couldn't set up irq\n");
797 		ether_ifdetach(ifp);
798 		goto fail;
799 	}
800 	return 0;
801 
802 fail:
803 	vr_detach(dev);
804 	return(error);
805 }
806 
807 static int
808 vr_detach(device_t dev)
809 {
810 	struct vr_softc *sc = device_get_softc(dev);
811 	struct ifnet *ifp = &sc->arpcom.ac_if;
812 
813 	if (device_is_attached(dev)) {
814 		lwkt_serialize_enter(ifp->if_serializer);
815 		vr_stop(sc);
816 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
817 		lwkt_serialize_exit(ifp->if_serializer);
818 
819 		ether_ifdetach(ifp);
820 	}
821 	if (sc->vr_miibus != NULL)
822 		device_delete_child(dev, sc->vr_miibus);
823 	bus_generic_detach(dev);
824 
825 	if (sc->vr_irq != NULL)
826 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
827 	if (sc->vr_res != NULL)
828 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
829 	if (sc->vr_ldata != NULL)
830 		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
831 	if (sc->vr_cdata.vr_tx_buf != NULL)
832 		contigfree(sc->vr_cdata.vr_tx_buf, VR_TX_BUF_SIZE, M_DEVBUF);
833 
834 	return(0);
835 }
836 
837 /*
838  * Initialize the transmit descriptors.
839  */
840 static int
841 vr_list_tx_init(struct vr_softc *sc)
842 {
843 	struct vr_chain_data *cd;
844 	struct vr_list_data *ld;
845 	struct vr_chain *tx_chain;
846 	int i;
847 
848 	cd = &sc->vr_cdata;
849 	ld = sc->vr_ldata;
850 	tx_chain = cd->vr_tx_chain;
851 
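	/*
	 * Link the TX slots into a software ring: vr_next_idx chains
	 * the slots, and the second pass below caches the physical
	 * addresses of each slot's descriptor and bounce buffer.
	 */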
852 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
853 		tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
854 		if (i == (VR_TX_LIST_CNT - 1))
855 			tx_chain[i].vr_next_idx = 0;
856 		else
857 			tx_chain[i].vr_next_idx = i + 1;
858 	}
859 
860 	for (i = 0; i < VR_TX_LIST_CNT; ++i) {
861 		void *tx_buf;
862 		int next_idx;
863 
864 		tx_buf = VR_TX_BUF(sc, i);
865 		next_idx = tx_chain[i].vr_next_idx;
866 
867 		tx_chain[i].vr_next_desc_paddr =
868 			vtophys(tx_chain[next_idx].vr_ptr);
869 		tx_chain[i].vr_buf_paddr = vtophys(tx_buf);
870 	}
871 
872 	cd->vr_tx_free_idx = 0;
873 	cd->vr_tx_tail_idx = cd->vr_tx_head_idx = -1;
874 
875 	return 0;
876 }
877 
878 
879 /*
880  * Initialize the RX descriptors and allocate mbufs for them. Note that
881  * we arrange the descriptors in a closed ring, so that the last descriptor
882  * points back to the first.
883  */
884 static int
885 vr_list_rx_init(struct vr_softc *sc)
886 {
887 	struct vr_chain_data *cd;
888 	struct vr_list_data *ld;
889 	int i, nexti;
890 
891 	cd = &sc->vr_cdata;
892 	ld = sc->vr_ldata;
893 
894 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
895 		cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i];
896 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
897 			return(ENOBUFS);
898 		if (i == (VR_RX_LIST_CNT - 1))
899 			nexti = 0;
900 		else
901 			nexti = i + 1;
902 		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
903 		ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[nexti]);
904 	}
905 
906 	cd->vr_rx_head = &cd->vr_rx_chain[0];
907 
908 	return(0);
909 }
910 
911 /*
912  * Initialize an RX descriptor and attach an MBUF cluster.
913  * Note: the length fields are only 11 bits wide, which means the
914  * largest size we can specify is 2047. This is important because
915  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
916  * overflow the field and make a mess.
917  */
918 static int
919 vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m)
920 {
921 	struct mbuf *m_new = NULL;
922 
923 	if (m == NULL) {
924 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
925 		if (m_new == NULL)
926 			return (ENOBUFS);
927 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
928 	} else {
929 		m_new = m;
930 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
931 		m_new->m_data = m_new->m_ext.ext_buf;
932 	}
933 
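	/*
	 * Trimming sizeof(uint64_t) bytes off the front keeps the
	 * usable buffer under the 11-bit length limit noted above
	 * while leaving the data pointer longword aligned.
	 */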
934 	m_adj(m_new, sizeof(uint64_t));
935 
936 	c->vr_mbuf = m_new;
937 	c->vr_ptr->vr_status = VR_RXSTAT;
938 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
939 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
940 
941 	return(0);
942 }
943 
944 /*
945  * A frame has been uploaded: pass the resulting mbuf chain up to
946  * the higher level protocols.
947  */
948 static void
949 vr_rxeof(struct vr_softc *sc)
950 {
951         struct mbuf *m;
952         struct ifnet *ifp;
953 	struct vr_chain_onefrag *cur_rx;
954 	int total_len = 0;
955 	uint32_t rxstat;
956 
957 	ifp = &sc->arpcom.ac_if;
958 
959 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
960 							VR_RXSTAT_OWN)) {
961 		struct mbuf *m0 = NULL;
962 
963 		cur_rx = sc->vr_cdata.vr_rx_head;
964 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
965 		m = cur_rx->vr_mbuf;
966 
967 		/*
968 		 * If an error occurs, update stats, clear the
969 		 * status word and leave the mbuf cluster in place:
970 		 * it should simply get re-used next time this descriptor
971 	 	 * comes up in the ring.
972 		 */
973 		if (rxstat & VR_RXSTAT_RXERR) {
974 			ifp->if_ierrors++;
975 			if_printf(ifp, "rx error (%02x):", rxstat & 0x000000ff);
976 			if (rxstat & VR_RXSTAT_CRCERR)
977 				kprintf(" crc error");
978 			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
979 				kprintf(" frame alignment error");
980 			if (rxstat & VR_RXSTAT_FIFOOFLOW)
981 				kprintf(" FIFO overflow");
982 			if (rxstat & VR_RXSTAT_GIANT)
983 				kprintf(" received giant packet");
984 			if (rxstat & VR_RXSTAT_RUNT)
985 				kprintf(" received runt packet");
986 			if (rxstat & VR_RXSTAT_BUSERR)
987 				kprintf(" system bus error");
988 			if (rxstat & VR_RXSTAT_BUFFERR)
989 				kprintf(" rx buffer error");
990 			kprintf("\n");
991 			vr_newbuf(sc, cur_rx, m);
992 			continue;
993 		}
994 
995 		/* No errors; receive the packet. */
996 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
997 
998 		/*
999 		 * XXX The VIA Rhine chip includes the CRC with every
1000 		 * received frame, and there's no way to turn this
1001 		 * behavior off (at least, I can't find anything in
1002 	 	 * the manual that explains how to do it) so we have
1003 		 * to trim off the CRC manually.
1004 		 */
1005 		total_len -= ETHER_CRC_LEN;
1006 
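		/*
		 * Copy the frame into a fresh mbuf chain.  Copying
		 * ETHER_ALIGN extra leading bytes and trimming them
		 * off again below leaves the Ethernet header at a
		 * 2-byte offset, so the IP header that follows ends
		 * up longword aligned.
		 */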
1007 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1008 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1009 		vr_newbuf(sc, cur_rx, m);
1010 		if (m0 == NULL) {
1011 			ifp->if_ierrors++;
1012 			continue;
1013 		}
1014 		m_adj(m0, ETHER_ALIGN);
1015 		m = m0;
1016 
1017 		ifp->if_ipackets++;
1018 		ifp->if_input(ifp, m);
1019 	}
1020 }
1021 
1022 static void
1023 vr_rxeoc(struct vr_softc *sc)
1024 {
1025 	struct ifnet *ifp;
1026 	int i;
1027 
1028 	ifp = &sc->arpcom.ac_if;
1029 
1030 	ifp->if_ierrors++;
1031 
1032 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1033 	DELAY(10000);
1034 
1035 	/* Wait for receiver to stop */
1036 	for (i = 0x400;
1037 	     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1038 	     i--)
1039 		;	/* Wait for receiver to stop */
1040 
1041 	if (i == 0) {
1042 		if_printf(ifp, "rx shutdown error!\n");
1043 		sc->vr_flags |= VR_F_RESTART;
1044 		return;
1045 	}
1046 
1047 	vr_rxeof(sc);
1048 
1049 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1050 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1051 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1052 }
1053 
1054 /*
1055  * A frame was downloaded to the chip. It's safe for us to clean up
1056  * the list buffers.
1057  */
1058 static void
1059 vr_txeof(struct vr_softc *sc)
1060 {
1061 	struct vr_chain_data *cd;
1062 	struct vr_chain *tx_chain;
1063 	struct ifnet *ifp;
1064 
1065 	ifp = &sc->arpcom.ac_if;
1066 	cd = &sc->vr_cdata;
1067 
1068 	/* Reset the timeout timer; vr_txeoc() will clear it. */
1069 	ifp->if_timer = 5;
1070 
1071 	/* Sanity check. */
1072 	if (cd->vr_tx_head_idx == -1)
1073 		return;
1074 
1075 	tx_chain = cd->vr_tx_chain;
1076 
1077 	/*
1078 	 * Go through our tx list and free mbufs for those
1079 	 * frames that have been transmitted.
1080 	 */
1081 	while(tx_chain[cd->vr_tx_head_idx].vr_buf != NULL) {
1082 		struct vr_chain *cur_tx;
1083 		uint32_t txstat;
1084 		int i;
1085 
1086 		cur_tx = &tx_chain[cd->vr_tx_head_idx];
1087 		txstat = cur_tx->vr_ptr->vr_status;
1088 
1089 		if ((txstat & VR_TXSTAT_ABRT) ||
1090 		    (txstat & VR_TXSTAT_UDF)) {
1091 			for (i = 0x400;
1092 			     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
1093 			     i--)
1094 				;	/* Wait for chip to shutdown */
1095 			if (i == 0) {
1096 				if_printf(ifp, "tx shutdown timeout\n");
1097 				sc->vr_flags |= VR_F_RESTART;
1098 				break;
1099 			}
1100 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1101 			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
1102 			break;
1103 		}
1104 
1105 		if (txstat & VR_TXSTAT_OWN)
1106 			break;
1107 
1108 		if (txstat & VR_TXSTAT_ERRSUM) {
1109 			ifp->if_oerrors++;
1110 			if (txstat & VR_TXSTAT_DEFER)
1111 				ifp->if_collisions++;
1112 			if (txstat & VR_TXSTAT_LATECOLL)
1113 				ifp->if_collisions++;
1114 		}
1115 
1116 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1117 
1118 		ifp->if_opackets++;
1119 		cur_tx->vr_buf = NULL;
1120 
1121 		if (cd->vr_tx_head_idx == cd->vr_tx_tail_idx) {
1122 			cd->vr_tx_head_idx = -1;
1123 			cd->vr_tx_tail_idx = -1;
1124 			break;
1125 		}
1126 
1127 		cd->vr_tx_head_idx = cur_tx->vr_next_idx;
1128 	}
1129 }
1130 
1131 /*
1132  * TX 'end of channel' interrupt handler.
1133  */
1134 static void
1135 vr_txeoc(struct vr_softc *sc)
1136 {
1137 	struct ifnet *ifp;
1138 
1139 	ifp = &sc->arpcom.ac_if;
1140 
1141 	if (sc->vr_cdata.vr_tx_head_idx == -1) {
1142 		ifp->if_flags &= ~IFF_OACTIVE;
1143 		sc->vr_cdata.vr_tx_tail_idx = -1;
1144 		ifp->if_timer = 0;
1145 	}
1146 }
1147 
1148 static void
1149 vr_tick(void *xsc)
1150 {
1151 	struct vr_softc *sc = xsc;
1152 	struct ifnet *ifp = &sc->arpcom.ac_if;
1153 	struct mii_data *mii;
1154 
1155 	lwkt_serialize_enter(ifp->if_serializer);
1156 
1157 	if (sc->vr_flags & VR_F_RESTART) {
1158 		if_printf(&sc->arpcom.ac_if, "restarting\n");
1159 		vr_stop(sc);
1160 		vr_reset(sc);
1161 		vr_init(sc);
1162 		sc->vr_flags &= ~VR_F_RESTART;
1163 	}
1164 
1165 	mii = device_get_softc(sc->vr_miibus);
1166 	mii_tick(mii);
1167 
1168 	callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
1169 
1170 	lwkt_serialize_exit(ifp->if_serializer);
1171 }
1172 
1173 static void
1174 vr_intr(void *arg)
1175 {
1176 	struct vr_softc *sc;
1177 	struct ifnet *ifp;
1178 	uint16_t status;
1179 
1180 	sc = arg;
1181 	ifp = &sc->arpcom.ac_if;
1182 
1183 	/* Suppress unwanted interrupts. */
1184 	if (!(ifp->if_flags & IFF_UP)) {
1185 		vr_stop(sc);
1186 		return;
1187 	}
1188 
1189 	/* Disable interrupts. */
1190 	if ((ifp->if_flags & IFF_POLLING) == 0)
1191 		CSR_WRITE_2(sc, VR_IMR, 0x0000);
1192 
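	/*
	 * Service and acknowledge interrupt causes until none of the
	 * sources in VR_INTRS remain asserted.
	 */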
1193 	for (;;) {
1194 		status = CSR_READ_2(sc, VR_ISR);
1195 		if (status)
1196 			CSR_WRITE_2(sc, VR_ISR, status);
1197 
1198 		if ((status & VR_INTRS) == 0)
1199 			break;
1200 
1201 		if (status & VR_ISR_RX_OK)
1202 			vr_rxeof(sc);
1203 
1204 		if (status & VR_ISR_RX_DROPPED) {
1205 			if_printf(ifp, "rx packet lost\n");
1206 			ifp->if_ierrors++;
1207 		}
1208 
1209 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1210 		    (status & VR_ISR_RX_OFLOW)) {
1211 			if_printf(ifp, "receive error (%04x)", status);
1212 			if (status & VR_ISR_RX_NOBUF)
1213 				kprintf(" no buffers");
1214 			if (status & VR_ISR_RX_OFLOW)
1215 				kprintf(" overflow");
1216 			if (status & VR_ISR_RX_DROPPED)
1217 				kprintf(" packet lost");
1218 			kprintf("\n");
1219 			vr_rxeoc(sc);
1220 		}
1221 
1222 		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1223 			vr_reset(sc);
1224 			vr_init(sc);
1225 			break;
1226 		}
1227 
1228 		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1229 		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1230 			vr_txeof(sc);
1231 			if ((status & VR_ISR_UDFI) ||
1232 			    (status & VR_ISR_TX_ABRT2) ||
1233 			    (status & VR_ISR_TX_ABRT)) {
1234 				ifp->if_oerrors++;
1235 				if (sc->vr_cdata.vr_tx_head_idx != -1) {
1236 					VR_SETBIT16(sc, VR_COMMAND,
1237 						    VR_CMD_TX_ON);
1238 					VR_SETBIT16(sc, VR_COMMAND,
1239 						    VR_CMD_TX_GO);
1240 				}
1241 			} else {
1242 				vr_txeoc(sc);
1243 			}
1244 		}
1245 
1246 	}
1247 
1248 	/* Re-enable interrupts. */
1249 	if ((ifp->if_flags & IFF_POLLING) == 0)
1250 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1251 
1252 	if (!ifq_is_empty(&ifp->if_snd))
1253 		vr_start(ifp);
1254 }
1255 
1256 /*
1257  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1258  * pointers to the fragment pointers.
1259  */
1260 static int
1261 vr_encap(struct vr_softc *sc, int chain_idx, struct mbuf *m_head)
1262 {
1263 	struct vr_chain *c;
1264 	struct vr_desc *f;
1265 	caddr_t tx_buf;
1266 	int len;
1267 
1268 	KASSERT(chain_idx >= 0 && chain_idx < VR_TX_LIST_CNT,
1269 		("%s: chain idx(%d) out of range 0-%d",
1270 		 sc->arpcom.ac_if.if_xname, chain_idx, VR_TX_LIST_CNT));
1271 
1272 	/*
1273 	 * The VIA Rhine wants packet buffers to be longword
1274 	 * aligned, but very often our mbufs aren't. Rather than
1275 	 * waste time trying to decide when to copy and when not
1276 	 * to copy, just do it all the time.
1277 	 */
1278 	tx_buf = VR_TX_BUF(sc, chain_idx);
1279 	m_copydata(m_head, 0, m_head->m_pkthdr.len, tx_buf);
1280 	len = m_head->m_pkthdr.len;
1281 
1282 	/*
1283 	 * The Rhine chip doesn't auto-pad, so we have to make
1284 	 * sure to pad short frames out to the minimum frame length
1285 	 * ourselves.
1286 	 */
1287 	if (len < VR_MIN_FRAMELEN) {
1288 		bzero(tx_buf + len, VR_MIN_FRAMELEN - len);
1289 		len = VR_MIN_FRAMELEN;
1290 	}
1291 
1292 	c = &sc->vr_cdata.vr_tx_chain[chain_idx];
1293 	c->vr_buf = tx_buf;
1294 
1295 	f = c->vr_ptr;
1296 	f->vr_data = c->vr_buf_paddr;
1297 	f->vr_ctl = len;
1298 	f->vr_ctl |= (VR_TXCTL_TLINK | VR_TXCTL_FIRSTFRAG);
1299 	f->vr_ctl |= (VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
1300 	f->vr_status = 0;
1301 	f->vr_next = c->vr_next_desc_paddr;
1302 
1303 	return(0);
1304 }
1305 
1306 /*
1307  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1308  * to the mbuf data regions directly in the transmit lists. We also save a
1309  * copy of the pointers since the transmit list fragment pointers are
1310  * physical addresses.
1311  */
1312 static void
1313 vr_start(struct ifnet *ifp)
1314 {
1315 	struct vr_softc *sc;
1316 	struct vr_chain_data *cd;
1317 	struct vr_chain *tx_chain;
1318 	int cur_tx_idx, start_tx_idx, prev_tx_idx;
1319 
1320 	if (ifp->if_flags & IFF_OACTIVE)
1321 		return;
1322 
1323 	sc = ifp->if_softc;
1324 	cd = &sc->vr_cdata;
1325 	tx_chain = cd->vr_tx_chain;
1326 
1327 	start_tx_idx = cd->vr_tx_free_idx;
1328 	cur_tx_idx = prev_tx_idx = -1;
1329 
1330 	/* Check for an available queue slot. If there are none, punt. */
1331 	if (tx_chain[start_tx_idx].vr_buf != NULL) {
1332 		ifp->if_flags |= IFF_OACTIVE;
1333 		return;
1334 	}
1335 
1336 	while(tx_chain[cd->vr_tx_free_idx].vr_buf == NULL) {
1337 		struct mbuf *m_head;
1338 		struct vr_chain *cur_tx;
1339 
1340 		m_head = ifq_poll(&ifp->if_snd);
1341 		if (m_head == NULL)
1342 			break;
1343 
1344 		/* Pick a descriptor off the free list. */
1345 		cur_tx_idx = cd->vr_tx_free_idx;
1346 		cur_tx = &tx_chain[cur_tx_idx];
1347 
1348 		/* Pack the data into the descriptor. */
1349 		if (vr_encap(sc, cur_tx_idx, m_head)) {
1350 			ifp->if_flags |= IFF_OACTIVE;
1351 			cur_tx_idx = prev_tx_idx;
1352 			break;
1353 		}
1354 
1355 		ifq_dequeue(&ifp->if_snd, m_head);
1356 
1357 		/* XXX */
1358 		if (cur_tx_idx != start_tx_idx)
1359 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1360 
1361 		BPF_MTAP(ifp, m_head);
1362 		m_freem(m_head);
1363 
1364 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1365 		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1366 
1367 		/* Everything went OK; bump up the free index. */
1368 		prev_tx_idx = cur_tx_idx;
1369 		cd->vr_tx_free_idx = cur_tx->vr_next_idx;
1370 	}
1371 
1372 	/* If there are no frames queued, bail. */
1373 	if (cur_tx_idx == -1)
1374 		return;
1375 
1376 	sc->vr_cdata.vr_tx_tail_idx = cur_tx_idx;
1377 
1378 	if (sc->vr_cdata.vr_tx_head_idx == -1)
1379 		sc->vr_cdata.vr_tx_head_idx = start_tx_idx;
1380 
1381 	/*
1382 	 * Set a timeout in case the chip goes out to lunch.
1383 	 */
1384 	ifp->if_timer = 5;
1385 }
1386 
1387 static void
1388 vr_init(void *xsc)
1389 {
1390 	struct vr_softc *sc = xsc;
1391 	struct ifnet *ifp = &sc->arpcom.ac_if;
1392 	struct mii_data *mii;
1393 	int i;
1394 
1395 	mii = device_get_softc(sc->vr_miibus);
1396 
1397 	/* Cancel pending I/O and free all RX/TX buffers. */
1398 	vr_stop(sc);
1399 	vr_reset(sc);
1400 
1401 	/* Set our station address. */
1402 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1403 		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1404 
1405 	/* Set DMA size. */
1406 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
1407 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
1408 
1409 	/*
1410 	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
1411 	 * so we must set both.
1412 	 */
1413 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
1414 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
1415 
1416 	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
1417 	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);
1418 
1419 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1420 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
1421 
1422 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1423 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1424 
1425 	/* Init circular RX list. */
1426 	if (vr_list_rx_init(sc) == ENOBUFS) {
1427 		vr_stop(sc);
1428 		if_printf(ifp, "initialization failed: no memory for rx buffers\n");
1429 		return;
1430 	}
1431 
1432 	/* Init tx descriptors. */
1433 	vr_list_tx_init(sc);
1434 
1435 	/* If we want promiscuous mode, set the allframes bit. */
1436 	if (ifp->if_flags & IFF_PROMISC)
1437 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1438 	else
1439 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1440 
1441 	/* Set capture broadcast bit to capture broadcast frames. */
1442 	if (ifp->if_flags & IFF_BROADCAST)
1443 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1444 	else
1445 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1446 
1447 	/*
1448 	 * Program the multicast filter, if necessary.
1449 	 */
1450 	vr_setmulti(sc);
1451 
1452 	/*
1453 	 * Load the address of the RX list.
1454 	 */
1455 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1456 
1457 	/* Enable receiver and transmitter. */
1458 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1459 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1460 				    VR_CMD_RX_GO);
1461 
1462 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1463 
1464 	/*
1465 	 * Enable interrupts, unless we are polling.
1466 	 */
1467 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1468 	if ((ifp->if_flags & IFF_POLLING) == 0)
1469 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1470 
1471 	mii_mediachg(mii);
1472 
1473 	ifp->if_flags |= IFF_RUNNING;
1474 	ifp->if_flags &= ~IFF_OACTIVE;
1475 
1476 	callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
1477 }
1478 
1479 /*
1480  * Set media options.
1481  */
1482 static int
1483 vr_ifmedia_upd(struct ifnet *ifp)
1484 {
1485 	struct vr_softc *sc;
1486 
1487 	sc = ifp->if_softc;
1488 
1489 	if (ifp->if_flags & IFF_UP)
1490 		vr_init(sc);
1491 
1492 	return(0);
1493 }
1494 
1495 /*
1496  * Report current media status.
1497  */
1498 static void
1499 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1500 {
1501 	struct vr_softc *sc;
1502 	struct mii_data *mii;
1503 
1504 	sc = ifp->if_softc;
1505 	mii = device_get_softc(sc->vr_miibus);
1506 	mii_pollstat(mii);
1507 	ifmr->ifm_active = mii->mii_media_active;
1508 	ifmr->ifm_status = mii->mii_media_status;
1509 }
1510 
1511 static int
1512 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
1513 {
1514 	struct vr_softc *sc = ifp->if_softc;
1515 	struct ifreq *ifr = (struct ifreq *) data;
1516 	struct mii_data *mii;
1517 	int error = 0;
1518 
1519 	switch(command) {
1520 	case SIOCSIFFLAGS:
1521 		if (ifp->if_flags & IFF_UP) {
1522 			vr_init(sc);
1523 		} else {
1524 			if (ifp->if_flags & IFF_RUNNING)
1525 				vr_stop(sc);
1526 		}
1527 		error = 0;
1528 		break;
1529 	case SIOCADDMULTI:
1530 	case SIOCDELMULTI:
1531 		vr_setmulti(sc);
1532 		error = 0;
1533 		break;
1534 	case SIOCGIFMEDIA:
1535 	case SIOCSIFMEDIA:
1536 		mii = device_get_softc(sc->vr_miibus);
1537 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1538 		break;
1539 	default:
1540 		error = ether_ioctl(ifp, command, data);
1541 		break;
1542 	}
1543 	return(error);
1544 }
1545 
1546 #ifdef DEVICE_POLLING
1547 
1548 static void
1549 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1550 {
1551 	struct vr_softc *sc = ifp->if_softc;
1552 
1553 	switch(cmd) {
1554 	case POLL_REGISTER:
1555 		/* disable interrupts */
1556 		CSR_WRITE_2(sc, VR_IMR, 0x0000);
1557 		break;
1558 	case POLL_DEREGISTER:
1559 		/* enable interrupts */
1560 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1561 		break;
1562 	default:
1563 		vr_intr(sc);
1564 		break;
1565 	}
1566 }
1567 #endif
1568 
1569 static void
1570 vr_watchdog(struct ifnet *ifp)
1571 {
1572 	struct vr_softc *sc;
1573 
1574 	sc = ifp->if_softc;
1575 
1576 	ifp->if_oerrors++;
1577 	if_printf(ifp, "watchdog timeout\n");
1578 
1579 #ifdef DEVICE_POLLING
1580 	if (++sc->vr_wdogerrors == 1 && (ifp->if_flags & IFF_POLLING) == 0) {
1581 		if_printf(ifp, "ints don't seem to be working, "
1582 			"emergency switch to polling\n");
1583 		emergency_poll_enable("if_vr");
1584 		ether_poll_register(ifp);	/* XXX illegal */
1585 	} else
1586 #endif
1587 	{
1588 		vr_stop(sc);
1589 		vr_reset(sc);
1590 		vr_init(sc);
1591 	}
1592 
1593 	if (!ifq_is_empty(&ifp->if_snd))
1594 		vr_start(ifp);
1595 }
1596 
1597 /*
1598  * Stop the adapter and free any mbufs allocated to the
1599  * RX and TX lists.
1600  */
1601 static void
1602 vr_stop(struct vr_softc *sc)
1603 {
1604 	int i;
1605 	struct ifnet *ifp;
1606 
1607 	ifp = &sc->arpcom.ac_if;
1608 	ifp->if_timer = 0;
1609 
1610 	callout_stop(&sc->vr_stat_timer);
1611 
1612 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1613 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1614 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1615 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1616 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1617 
1618 	/*
1619 	 * Free data in the RX lists.
1620 	 */
1621 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1622 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1623 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1624 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1625 		}
1626 	}
1627 	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));
1628 
1629 	/*
1630 	 * Reset the TX list buffer pointers.
1631 	 */
1632 	for (i = 0; i < VR_TX_LIST_CNT; i++)
1633 		sc->vr_cdata.vr_tx_chain[i].vr_buf = NULL;
1634 
1635 	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
1636 
1637 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1638 }
1639 
1640 /*
1641  * Stop all chip I/O so that the kernel's probe routines don't
1642  * get confused by errant DMAs when rebooting.
1643  */
1644 static void
1645 vr_shutdown(device_t dev)
1646 {
1647 	struct vr_softc *sc;
1648 
1649 	sc = device_get_softc(dev);
1650 
1651 	vr_stop(sc);
1652 }
1653