xref: /dragonfly/sys/dev/netif/vr/if_vr.c (revision 19fe1c42)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_vr.c,v 1.26.2.13 2003/02/06 04:46:20 silby Exp $
33  * $DragonFly: src/sys/dev/netif/vr/if_vr.c,v 1.48 2008/08/17 04:32:35 sephe Exp $
34  */
35 
36 /*
37  * VIA Rhine fast ethernet PCI NIC driver
38  *
39  * Supports various network adapters based on the VIA Rhine
40  * and Rhine II PCI controllers, including the D-Link DFE530TX.
41  * Datasheets are available at http://www.via.com.tw.
42  *
43  * Written by Bill Paul <wpaul@ctr.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The VIA Rhine controllers are similar in some respects to the
50  * the DEC tulip chips, except less complicated. The controller
51  * uses an MII bus and an external physical layer interface. The
52  * receiver has a one entry perfect filter and a 64-bit hash table
53  * multicast filter. Transmit and receive descriptors are similar
54  * to the tulip.
55  *
56  * The Rhine has a serious flaw in its transmit DMA mechanism:
57  * transmit buffers must be longword aligned. Unfortunately,
58  * FreeBSD doesn't guarantee that mbufs will be filled in starting
59  * at longword boundaries, so we have to do a buffer copy before
60  * transmission.
61  */
62 
63 #include "opt_polling.h"
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/sockio.h>
68 #include <sys/mbuf.h>
69 #include <sys/malloc.h>
70 #include <sys/kernel.h>
71 #include <sys/socket.h>
72 #include <sys/serialize.h>
73 #include <sys/bus.h>
74 #include <sys/rman.h>
75 #include <sys/thread2.h>
76 #include <sys/interrupt.h>
77 
78 #include <net/if.h>
79 #include <net/ifq_var.h>
80 #include <net/if_arp.h>
81 #include <net/ethernet.h>
82 #include <net/if_dl.h>
83 #include <net/if_media.h>
84 
85 #include <net/bpf.h>
86 
87 #include <vm/vm.h>              /* for vtophys */
88 #include <vm/pmap.h>            /* for vtophys */
89 
90 #include <dev/netif/mii_layer/mii.h>
91 #include <dev/netif/mii_layer/miivar.h>
92 
93 #include <bus/pci/pcidevs.h>
94 #include <bus/pci/pcireg.h>
95 #include <bus/pci/pcivar.h>
96 
97 #define VR_USEIOSPACE
98 
99 #include <dev/netif/vr/if_vrreg.h>
100 
101 /* "controller miibus0" required.  See GENERIC if you get errors here. */
102 #include "miibus_if.h"
103 
104 #undef VR_USESWSHIFT
105 
106 /*
107  * Various supported device vendors/types and their names.
108  */
109 static struct vr_type vr_devs[] = {
110 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
111 		"VIA VT3043 Rhine I 10/100BaseTX" },
112 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
113 		"VIA VT86C100A Rhine II 10/100BaseTX" },
114 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
115 		"VIA VT6102 Rhine II 10/100BaseTX" },
116 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
117 		"VIA VT6105 Rhine III 10/100BaseTX" },
118 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
119 		"VIA VT6105M Rhine III 10/100BaseTX" },
120 	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
121 		"Delta Electronics Rhine II 10/100BaseTX" },
122 	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
123 		"Addtron Technology Rhine II 10/100BaseTX" },
124 	{ 0, 0, NULL }
125 };
126 
127 static int	vr_probe(device_t);
128 static int	vr_attach(device_t);
129 static int	vr_detach(device_t);
130 
131 static int	vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
132 			  struct mbuf *);
133 static int	vr_encap(struct vr_softc *, int, struct mbuf *);
134 
135 static void	vr_rxeof(struct vr_softc *);
136 static void	vr_rxeoc(struct vr_softc *);
137 static void	vr_txeof(struct vr_softc *);
138 static void	vr_txeoc(struct vr_softc *);
139 static void	vr_tick(void *);
140 static void	vr_intr(void *);
141 static void	vr_start(struct ifnet *);
142 static int	vr_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
143 static void	vr_init(void *);
144 static void	vr_stop(struct vr_softc *);
145 static void	vr_watchdog(struct ifnet *);
146 static void	vr_shutdown(device_t);
147 static int	vr_ifmedia_upd(struct ifnet *);
148 static void	vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
149 
150 #ifdef VR_USESWSHIFT
151 static void	vr_mii_sync(struct vr_softc *);
152 static void	vr_mii_send(struct vr_softc *, uint32_t, int);
153 #endif
154 static int	vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
155 static int	vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
156 static int	vr_miibus_readreg(device_t, int, int);
157 static int	vr_miibus_writereg(device_t, int, int, int);
158 static void	vr_miibus_statchg(device_t);
159 
160 static void	vr_setcfg(struct vr_softc *, int);
161 static void	vr_setmulti(struct vr_softc *);
162 static void	vr_reset(struct vr_softc *);
163 static int	vr_list_rx_init(struct vr_softc *);
164 static int	vr_list_tx_init(struct vr_softc *);
165 #ifdef DEVICE_POLLING
166 static void	vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
167 #endif
168 
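/*
 * Register access: with VR_USEIOSPACE defined the chip is mapped through
 * its I/O port BAR, otherwise through its memory-mapped BAR; VR_RID picks
 * the matching PCI base address register.
 */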
169 #ifdef VR_USEIOSPACE
170 #define VR_RES			SYS_RES_IOPORT
171 #define VR_RID			VR_PCI_LOIO
172 #else
173 #define VR_RES			SYS_RES_MEMORY
174 #define VR_RID			VR_PCI_LOMEM
175 #endif
176 
177 static device_method_t vr_methods[] = {
178 	/* Device interface */
179 	DEVMETHOD(device_probe,		vr_probe),
180 	DEVMETHOD(device_attach,	vr_attach),
181 	DEVMETHOD(device_detach, 	vr_detach),
182 	DEVMETHOD(device_shutdown,	vr_shutdown),
183 
184 	/* bus interface */
185 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
186 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
187 
188 	/* MII interface */
189 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
190 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
191 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
192 
193 	{ 0, 0 }
194 };
195 
196 static driver_t vr_driver = {
197 	"vr",
198 	vr_methods,
199 	sizeof(struct vr_softc)
200 };
201 
202 static devclass_t vr_devclass;
203 
204 DECLARE_DUMMY_MODULE(if_vr);
205 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
206 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
207 
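/*
 * Read-modify-write helpers: each macro reads a CSR of the given width,
 * ORs in or masks out the requested bits and writes the result back.
 * SIO_SET()/SIO_CLR() do the same for the MII command register and are
 * used by the bit-banged MII code under VR_USESWSHIFT.
 */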
208 #define VR_SETBIT(sc, reg, x)				\
209 	CSR_WRITE_1(sc, reg,				\
210 		CSR_READ_1(sc, reg) | (x))
211 
212 #define VR_CLRBIT(sc, reg, x)				\
213 	CSR_WRITE_1(sc, reg,				\
214 		CSR_READ_1(sc, reg) & ~(x))
215 
216 #define VR_SETBIT16(sc, reg, x)				\
217 	CSR_WRITE_2(sc, reg,				\
218 		CSR_READ_2(sc, reg) | (x))
219 
220 #define VR_CLRBIT16(sc, reg, x)				\
221 	CSR_WRITE_2(sc, reg,				\
222 		CSR_READ_2(sc, reg) & ~(x))
223 
224 #define VR_SETBIT32(sc, reg, x)				\
225 	CSR_WRITE_4(sc, reg,				\
226 		CSR_READ_4(sc, reg) | (x))
227 
228 #define VR_CLRBIT32(sc, reg, x)				\
229 	CSR_WRITE_4(sc, reg,				\
230 		CSR_READ_4(sc, reg) & ~(x))
231 
232 #define SIO_SET(x)					\
233 	CSR_WRITE_1(sc, VR_MIICMD,			\
234 		CSR_READ_1(sc, VR_MIICMD) | (x))
235 
236 #define SIO_CLR(x)					\
237 	CSR_WRITE_1(sc, VR_MIICMD,			\
238 		CSR_READ_1(sc, VR_MIICMD) & ~(x))
239 
240 #ifdef VR_USESWSHIFT
241 /*
242  * Sync the PHYs by setting data bit and strobing the clock 32 times.
243  */
244 static void
245 vr_mii_sync(struct vr_softc *sc)
246 {
247 	int i;
248 
249 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
250 
251 	for (i = 0; i < 32; i++) {
252 		SIO_SET(VR_MIICMD_CLK);
253 		DELAY(1);
254 		SIO_CLR(VR_MIICMD_CLK);
255 		DELAY(1);
256 	}
257 }
258 
259 /*
260  * Clock a series of bits through the MII.
261  */
262 static void
263 vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
264 {
265 	int i;
266 
267 	SIO_CLR(VR_MIICMD_CLK);
268 
269 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
270 		if (bits & i)
271 			SIO_SET(VR_MIICMD_DATAIN);
272 		else
273 			SIO_CLR(VR_MIICMD_DATAIN);
274 		DELAY(1);
275 		SIO_CLR(VR_MIICMD_CLK);
276 		DELAY(1);
277 		SIO_SET(VR_MIICMD_CLK);
278 	}
279 }
280 #endif
281 
282 /*
283  * Read a PHY register through the MII.
284  */
285 static int
286 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
287 #ifdef VR_USESWSHIFT
288 {
289 	int i, ack;
290 
291 	/* Set up frame for RX. */
292 	frame->mii_stdelim = VR_MII_STARTDELIM;
293 	frame->mii_opcode = VR_MII_READOP;
294 	frame->mii_turnaround = 0;
295 	frame->mii_data = 0;
296 
297 	CSR_WRITE_1(sc, VR_MIICMD, 0);
298 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
299 
300 	/* Turn on data xmit. */
301 	SIO_SET(VR_MIICMD_DIR);
302 
303 	vr_mii_sync(sc);
304 
305 	/* Send command/address info. */
306 	vr_mii_send(sc, frame->mii_stdelim, 2);
307 	vr_mii_send(sc, frame->mii_opcode, 2);
308 	vr_mii_send(sc, frame->mii_phyaddr, 5);
309 	vr_mii_send(sc, frame->mii_regaddr, 5);
310 
311 	/* Idle bit. */
312 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
313 	DELAY(1);
314 	SIO_SET(VR_MIICMD_CLK);
315 	DELAY(1);
316 
317 	/* Turn off xmit. */
318 	SIO_CLR(VR_MIICMD_DIR);
319 
320 	/* Check for ack */
321 	SIO_CLR(VR_MIICMD_CLK);
322 	DELAY(1);
323 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
324 	SIO_SET(VR_MIICMD_CLK);
325 	DELAY(1);
326 
327 	/*
328 	 * Now try reading data bits. If the ack failed, we still
329 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
330 	 */
331 	if (ack) {
332 		for(i = 0; i < 16; i++) {
333 			SIO_CLR(VR_MIICMD_CLK);
334 			DELAY(1);
335 			SIO_SET(VR_MIICMD_CLK);
336 			DELAY(1);
337 		}
338 		goto fail;
339 	}
340 
341 	for (i = 0x8000; i; i >>= 1) {
342 		SIO_CLR(VR_MIICMD_CLK);
343 		DELAY(1);
344 		if (!ack) {
345 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
346 				frame->mii_data |= i;
347 			DELAY(1);
348 		}
349 		SIO_SET(VR_MIICMD_CLK);
350 		DELAY(1);
351 	}
352 
353 fail:
354 	SIO_CLR(VR_MIICMD_CLK);
355 	DELAY(1);
356 	SIO_SET(VR_MIICMD_CLK);
357 	DELAY(1);
358 
359 	if (ack)
360 		return(1);
361 	return(0);
362 }
363 #else
364 {
365 	int i;
366 
367 	/* Set the PHY address. */
368 	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
369 	    frame->mii_phyaddr);
370 
371 	/* Set the register address. */
372 	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
373 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
374 
375 	for (i = 0; i < 10000; i++) {
376 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
377 			break;
378 		DELAY(1);
379 	}
380 	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);
381 
382 	return(0);
383 }
384 #endif
385 
386 
387 /*
388  * Write to a PHY register through the MII.
389  */
390 static int
391 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
392 #ifdef VR_USESWSHIFT
393 {
394 	CSR_WRITE_1(sc, VR_MIICMD, 0);
395 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
396 
397 	/* Set up frame for TX. */
398 	frame->mii_stdelim = VR_MII_STARTDELIM;
399 	frame->mii_opcode = VR_MII_WRITEOP;
400 	frame->mii_turnaround = VR_MII_TURNAROUND;
401 
402 	/* Turn on data output. */
403 	SIO_SET(VR_MIICMD_DIR);
404 
405 	vr_mii_sync(sc);
406 
407 	vr_mii_send(sc, frame->mii_stdelim, 2);
408 	vr_mii_send(sc, frame->mii_opcode, 2);
409 	vr_mii_send(sc, frame->mii_phyaddr, 5);
410 	vr_mii_send(sc, frame->mii_regaddr, 5);
411 	vr_mii_send(sc, frame->mii_turnaround, 2);
412 	vr_mii_send(sc, frame->mii_data, 16);
413 
414 	/* Idle bit. */
415 	SIO_SET(VR_MIICMD_CLK);
416 	DELAY(1);
417 	SIO_CLR(VR_MIICMD_CLK);
418 	DELAY(1);
419 
420 	/* Turn off xmit. */
421 	SIO_CLR(VR_MIICMD_DIR);
422 
423 	return(0);
424 }
425 #else
426 {
427 	int i;
428 
429 	/* Set the PHY address. */
430 	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
431 		    frame->mii_phyaddr);
432 
433 	/* Set the register address and data to write. */
434 	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
435 	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);
436 
437 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
438 
439 	for (i = 0; i < 10000; i++) {
440 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
441 			break;
442 		DELAY(1);
443 	}
444 	return(0);
445 }
446 #endif
447 
448 static int
449 vr_miibus_readreg(device_t dev, int phy, int reg)
450 {
451 	struct vr_mii_frame frame;
452 	struct vr_softc *sc;
453 
454 	sc = device_get_softc(dev);
455 
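	/*
	 * The VT6102 uses only the PHY at address 1; ignore reads aimed at
	 * any other address so the MII bus probe does not see ghost PHYs.
	 */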
456 	switch (sc->vr_revid) {
457 	case REV_ID_VT6102_APOLLO:
458 		if (phy != 1)
459 			return(0);
460 		break;
461 	default:
462 		break;
463 	}
464 
465 	bzero(&frame, sizeof(frame));
466 
467 	frame.mii_phyaddr = phy;
468 	frame.mii_regaddr = reg;
469 	vr_mii_readreg(sc, &frame);
470 
471 	return(frame.mii_data);
472 }
473 
474 static int
475 vr_miibus_writereg(device_t dev, int phy, int reg, int data)
476 {
477 	struct vr_mii_frame frame;
478 	struct vr_softc *sc;
479 
480 	sc = device_get_softc(dev);
481 
482 	switch (sc->vr_revid) {
483 	case REV_ID_VT6102_APOLLO:
484 		if (phy != 1)
485 			return 0;
486 		break;
487 	default:
488 		break;
489 	}
490 
491 	bzero(&frame, sizeof(frame));
492 
493 	frame.mii_phyaddr = phy;
494 	frame.mii_regaddr = reg;
495 	frame.mii_data = data;
496 
497 	vr_mii_writereg(sc, &frame);
498 
499 	return(0);
500 }
501 
502 static void
503 vr_miibus_statchg(device_t dev)
504 {
505 	struct mii_data *mii;
506 	struct vr_softc *sc;
507 
508 	sc = device_get_softc(dev);
509 	mii = device_get_softc(sc->vr_miibus);
510 	vr_setcfg(sc, mii->mii_media_active);
511 }
512 
513 /*
514  * Program the 64-bit multicast hash filter.
515  */
516 static void
517 vr_setmulti(struct vr_softc *sc)
518 {
519 	struct ifnet *ifp;
520 	uint32_t hashes[2] = { 0, 0 };
521 	struct ifmultiaddr *ifma;
522 	uint8_t rxfilt;
523 	int mcnt = 0;
524 
525 	ifp = &sc->arpcom.ac_if;
526 
527 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
528 
529 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
530 		rxfilt |= VR_RXCFG_RX_MULTI;
531 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
532 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
533 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
534 		return;
535 	}
536 
537 	/* First, zero out all the existing hash bits. */
538 	CSR_WRITE_4(sc, VR_MAR0, 0);
539 	CSR_WRITE_4(sc, VR_MAR1, 0);
540 
541 	/* Now program new ones. */
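	/*
	 * Hash scheme used below: take the big-endian CRC32 of each address,
	 * use its top 6 bits as a bucket number 0-63 and set that bit in the
	 * 64-bit MAR0/MAR1 filter (buckets 0-31 in MAR0, 32-63 in MAR1).
	 * For example, bucket 37 sets bit 5 of MAR1.
	 */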
542 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
543 		int h;
544 
545 		if (ifma->ifma_addr->sa_family != AF_LINK)
546 			continue;
547 
548 		/* use the lower 6 bits */
549 		h = (ether_crc32_be(
550 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
551 			ETHER_ADDR_LEN) >> 26) & 0x0000003F;
552 		if (h < 32)
553 			hashes[0] |= (1 << h);
554 		else
555 			hashes[1] |= (1 << (h - 32));
556 		mcnt++;
557 	}
558 
559 	if (mcnt)
560 		rxfilt |= VR_RXCFG_RX_MULTI;
561 	else
562 		rxfilt &= ~VR_RXCFG_RX_MULTI;
563 
564 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
565 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
566 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
567 }
568 
569 /*
570  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
571  * netconfig register, we first have to put the transmit and/or receive
572  * logic in the idle state.
573  */
574 static void
575 vr_setcfg(struct vr_softc *sc, int media)
576 {
577 	int restart = 0;
578 
579 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
580 		restart = 1;
581 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
582 	}
583 
584 	if ((media & IFM_GMASK) == IFM_FDX)
585 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
586 	else
587 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
588 
589 	if (restart)
590 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
591 }
592 
593 static void
594 vr_reset(struct vr_softc *sc)
595 {
596 	int i;
597 
598 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
599 
600 	for (i = 0; i < VR_TIMEOUT; i++) {
601 		DELAY(10);
602 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
603 			break;
604 	}
605 	if (i == VR_TIMEOUT) {
606 		struct ifnet *ifp = &sc->arpcom.ac_if;
607 
608 		if (sc->vr_revid < REV_ID_VT3065_A) {
609 			if_printf(ifp, "reset never completed!\n");
610 		} else {
611 			/* Use newer force reset command */
612 			if_printf(ifp, "Using force reset command.\n");
613 			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
614 		}
615 	}
616 
617 	/* Wait a little while for the chip to get its brains in order. */
618 	DELAY(1000);
619 }
620 
621 /*
622  * Probe for a VIA Rhine chip. Check the PCI vendor and device
623  * IDs against our list and return a device name if we find a match.
624  */
625 static int
626 vr_probe(device_t dev)
627 {
628 	struct vr_type *t;
629 	uint16_t vid, did;
630 
631 	vid = pci_get_vendor(dev);
632 	did = pci_get_device(dev);
633 
634 	for (t = vr_devs; t->vr_name != NULL; ++t) {
635 		if (vid == t->vr_vid && did == t->vr_did) {
636 			device_set_desc(dev, t->vr_name);
637 			return(0);
638 		}
639 	}
640 
641 	return(ENXIO);
642 }
643 
644 /*
645  * Attach the interface. Allocate softc structures, do ifmedia
646  * setup and ethernet/BPF attach.
647  */
648 static int
649 vr_attach(device_t dev)
650 {
651 	int i;
652 	uint8_t eaddr[ETHER_ADDR_LEN];
653 	struct vr_softc *sc;
654 	struct ifnet *ifp;
655 	int error = 0, rid;
656 
657 	sc = device_get_softc(dev);
658 	callout_init(&sc->vr_stat_timer);
659 
660 	/*
661 	 * Handle power management nonsense.
662 	 */
663 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
664 		uint32_t iobase, membase, irq;
665 
666 		/* Save important PCI config data. */
667 		iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
668 		membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
669 		irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
670 
671 		/* Reset the power state. */
672 		device_printf(dev, "chip is in D%d power mode "
673 		    "-- setting to D0\n", pci_get_powerstate(dev));
674 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
675 
676 		/* Restore PCI config data. */
677 		pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
678 		pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
679 		pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
680 	}
681 
682 	pci_enable_busmaster(dev);
683 
684 	sc->vr_revid = pci_get_revid(dev);
685 
686 	rid = VR_RID;
687 	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);
688 
689 	if (sc->vr_res == NULL) {
690 		device_printf(dev, "couldn't map ports/memory\n");
691 		return ENXIO;
692 	}
693 
694 	sc->vr_btag = rman_get_bustag(sc->vr_res);
695 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
696 
697 	/* Allocate interrupt */
698 	rid = 0;
699 	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
700 					    RF_SHAREABLE | RF_ACTIVE);
701 
702 	if (sc->vr_irq == NULL) {
703 		device_printf(dev, "couldn't map interrupt\n");
704 		error = ENXIO;
705 		goto fail;
706 	}
707 
708 	/*
709 	 * Windows may put the chip in suspend mode when it
710 	 * shuts down. Be sure to kick it in the head to wake it
711 	 * up again.
712 	 */
713 	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
714 
715 	ifp = &sc->arpcom.ac_if;
716 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
717 
718 	/* Reset the adapter. */
719 	vr_reset(sc);
720 
721 	/*
722 	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
723 	 * initialization and disable AUTOPOLL.
724 	 */
725 	pci_write_config(dev, VR_PCI_MODE,
726 	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
727 	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
728 
729 	/*
730 	 * Get station address. The way the Rhine chips work,
731 	 * you're not allowed to directly access the EEPROM once
732 	 * they've been programmed a special way. Consequently,
733 	 * we need to read the node address from the PAR0 and PAR1
734 	 * registers.
735 	 */
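	/*
	 * VR_EECSR_LOAD asks the chip to reload the EEPROM contents into
	 * those registers; the short delay below gives the autoload time
	 * to complete before we read them.
	 */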
736 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
737 	DELAY(200);
738 	for (i = 0; i < ETHER_ADDR_LEN; i++)
739 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
740 
741 	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
742 	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
743 
744 	if (sc->vr_ldata == NULL) {
745 		device_printf(dev, "no memory for list buffers!\n");
746 		error = ENXIO;
747 		goto fail;
748 	}
749 
750 	/* Initialize TX buffer */
751 	sc->vr_cdata.vr_tx_buf = contigmalloc(VR_TX_BUF_SIZE, M_DEVBUF,
752 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
753 	if (sc->vr_cdata.vr_tx_buf == NULL) {
754 		device_printf(dev, "can't allocate tx buffer!\n");
755 		error = ENXIO;
756 		goto fail;
757 	}
758 
759 	/* Set various TX indexes to invalid value */
760 	sc->vr_cdata.vr_tx_free_idx = -1;
761 	sc->vr_cdata.vr_tx_tail_idx = -1;
762 	sc->vr_cdata.vr_tx_head_idx = -1;
763 
764 
765 	ifp->if_softc = sc;
766 	ifp->if_mtu = ETHERMTU;
767 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
768 	ifp->if_ioctl = vr_ioctl;
769 	ifp->if_start = vr_start;
770 #ifdef DEVICE_POLLING
771 	ifp->if_poll = vr_poll;
772 #endif
773 	ifp->if_watchdog = vr_watchdog;
774 	ifp->if_init = vr_init;
775 	ifp->if_baudrate = 10000000;
776 	ifq_set_maxlen(&ifp->if_snd, VR_TX_LIST_CNT - 1);
777 	ifq_set_ready(&ifp->if_snd);
778 
779 	/*
780 	 * Do MII setup.
781 	 */
782 	if (mii_phy_probe(dev, &sc->vr_miibus,
783 	    vr_ifmedia_upd, vr_ifmedia_sts)) {
784 		if_printf(ifp, "MII without any phy!\n");
785 		error = ENXIO;
786 		goto fail;
787 	}
788 
789 	/* Call MI attach routine. */
790 	ether_ifattach(ifp, eaddr, NULL);
791 
792 	error = bus_setup_intr(dev, sc->vr_irq, INTR_MPSAFE,
793 			       vr_intr, sc, &sc->vr_intrhand,
794 			       ifp->if_serializer);
795 	if (error) {
796 		device_printf(dev, "couldn't set up irq\n");
797 		ether_ifdetach(ifp);
798 		goto fail;
799 	}
800 
801 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->vr_irq));
802 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
803 
804 	return 0;
805 
806 fail:
807 	vr_detach(dev);
808 	return(error);
809 }
810 
811 static int
812 vr_detach(device_t dev)
813 {
814 	struct vr_softc *sc = device_get_softc(dev);
815 	struct ifnet *ifp = &sc->arpcom.ac_if;
816 
817 	if (device_is_attached(dev)) {
818 		lwkt_serialize_enter(ifp->if_serializer);
819 		vr_stop(sc);
820 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
821 		lwkt_serialize_exit(ifp->if_serializer);
822 
823 		ether_ifdetach(ifp);
824 	}
825 	if (sc->vr_miibus != NULL)
826 		device_delete_child(dev, sc->vr_miibus);
827 	bus_generic_detach(dev);
828 
829 	if (sc->vr_irq != NULL)
830 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
831 	if (sc->vr_res != NULL)
832 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
833 	if (sc->vr_ldata != NULL)
834 		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
835 	if (sc->vr_cdata.vr_tx_buf != NULL)
836 		contigfree(sc->vr_cdata.vr_tx_buf, VR_TX_BUF_SIZE, M_DEVBUF);
837 
838 	return(0);
839 }
840 
841 /*
842  * Initialize the transmit descriptors.
843  */
844 static int
845 vr_list_tx_init(struct vr_softc *sc)
846 {
847 	struct vr_chain_data *cd;
848 	struct vr_list_data *ld;
849 	struct vr_chain *tx_chain;
850 	int i;
851 
852 	cd = &sc->vr_cdata;
853 	ld = sc->vr_ldata;
854 	tx_chain = cd->vr_tx_chain;
855 
856 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
857 		tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
858 		if (i == (VR_TX_LIST_CNT - 1))
859 			tx_chain[i].vr_next_idx = 0;
860 		else
861 			tx_chain[i].vr_next_idx = i + 1;
862 	}
863 
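	/*
	 * Precompute the physical address of each descriptor's successor and
	 * of its slice of the contiguous TX bounce buffer, so vr_encap()
	 * never has to call vtophys() in the transmit path.
	 */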
864 	for (i = 0; i < VR_TX_LIST_CNT; ++i) {
865 		void *tx_buf;
866 		int next_idx;
867 
868 		tx_buf = VR_TX_BUF(sc, i);
869 		next_idx = tx_chain[i].vr_next_idx;
870 
871 		tx_chain[i].vr_next_desc_paddr =
872 			vtophys(tx_chain[next_idx].vr_ptr);
873 		tx_chain[i].vr_buf_paddr = vtophys(tx_buf);
874 	}
875 
876 	cd->vr_tx_free_idx = 0;
877 	cd->vr_tx_tail_idx = cd->vr_tx_head_idx = -1;
878 
879 	return 0;
880 }
881 
882 
883 /*
884  * Initialize the RX descriptors and allocate mbufs for them. Note that
885  * we arrange the descriptors in a closed ring, so that the last descriptor
886  * points back to the first.
887  */
888 static int
889 vr_list_rx_init(struct vr_softc *sc)
890 {
891 	struct vr_chain_data *cd;
892 	struct vr_list_data *ld;
893 	int i, nexti;
894 
895 	cd = &sc->vr_cdata;
896 	ld = sc->vr_ldata;
897 
898 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
899 		cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i];
900 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
901 			return(ENOBUFS);
902 		if (i == (VR_RX_LIST_CNT - 1))
903 			nexti = 0;
904 		else
905 			nexti = i + 1;
906 		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
907 		ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[nexti]);
908 	}
909 
910 	cd->vr_rx_head = &cd->vr_rx_chain[0];
911 
912 	return(0);
913 }
914 
915 /*
916  * Initialize an RX descriptor and attach an MBUF cluster.
917  * Note: the length fields are only 11 bits wide, which means the
918  * largest size we can specify is 2047. This is important because
919  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
920  * overflow the field and make a mess.
921  */
922 static int
923 vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m)
924 {
925 	struct mbuf *m_new = NULL;
926 
927 	if (m == NULL) {
928 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
929 		if (m_new == NULL)
930 			return (ENOBUFS);
931 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
932 	} else {
933 		m_new = m;
934 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
935 		m_new->m_data = m_new->m_ext.ext_buf;
936 	}
937 
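	/*
	 * Trim a few bytes off the front of the cluster, presumably to keep
	 * the usable length under the 11-bit limit noted above (the control
	 * word below advertises VR_RXLEN bytes) and to preserve the buffer's
	 * natural alignment.
	 */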
938 	m_adj(m_new, sizeof(uint64_t));
939 
940 	c->vr_mbuf = m_new;
941 	c->vr_ptr->vr_status = VR_RXSTAT;
942 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
943 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
944 
945 	return(0);
946 }
947 
948 /*
949  * A frame has been uploaded: pass the resulting mbuf chain up to
950  * the higher level protocols.
951  */
952 static void
953 vr_rxeof(struct vr_softc *sc)
954 {
955         struct mbuf *m;
956         struct ifnet *ifp;
957 	struct vr_chain_onefrag *cur_rx;
958 	int total_len = 0;
959 	uint32_t rxstat;
960 
961 	ifp = &sc->arpcom.ac_if;
962 
963 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
964 							VR_RXSTAT_OWN)) {
965 		struct mbuf *m0 = NULL;
966 
967 		cur_rx = sc->vr_cdata.vr_rx_head;
968 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
969 		m = cur_rx->vr_mbuf;
970 
971 		/*
972 		 * If an error occurs, update stats, clear the
973 		 * status word and leave the mbuf cluster in place:
974 		 * it should simply get re-used next time this descriptor
975 	 	 * comes up in the ring.
976 		 */
977 		if (rxstat & VR_RXSTAT_RXERR) {
978 			ifp->if_ierrors++;
979 			if_printf(ifp, "rx error (%02x):", rxstat & 0x000000ff);
980 			if (rxstat & VR_RXSTAT_CRCERR)
981 				kprintf(" crc error");
982 			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
983 				kprintf(" frame alignment error\n");
984 			if (rxstat & VR_RXSTAT_FIFOOFLOW)
985 				kprintf(" FIFO overflow");
986 			if (rxstat & VR_RXSTAT_GIANT)
987 				kprintf(" received giant packet");
988 			if (rxstat & VR_RXSTAT_RUNT)
989 				kprintf(" received runt packet");
990 			if (rxstat & VR_RXSTAT_BUSERR)
991 				kprintf(" system bus error");
992 			if (rxstat & VR_RXSTAT_BUFFERR)
993 				kprintf("rx buffer error");
994 			kprintf("\n");
995 			vr_newbuf(sc, cur_rx, m);
996 			continue;
997 		}
998 
999 		/* No errors; receive the packet. */
1000 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1001 
1002 		/*
1003 		 * XXX The VIA Rhine chip includes the CRC with every
1004 		 * received frame, and there's no way to turn this
1005 		 * behavior off (at least, I can't find anything in
1006 	 	 * the manual that explains how to do it) so we have
1007 		 * to trim off the CRC manually.
1008 		 */
1009 		total_len -= ETHER_CRC_LEN;
1010 
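		/*
		 * Copy the frame into a fresh mbuf chain with m_devget();
		 * grabbing ETHER_ALIGN extra bytes up front and trimming them
		 * off again below leaves the IP header longword aligned in
		 * the new chain, while the old cluster stays in the RX ring.
		 */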
1011 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1012 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1013 		vr_newbuf(sc, cur_rx, m);
1014 		if (m0 == NULL) {
1015 			ifp->if_ierrors++;
1016 			continue;
1017 		}
1018 		m_adj(m0, ETHER_ALIGN);
1019 		m = m0;
1020 
1021 		ifp->if_ipackets++;
1022 		ifp->if_input(ifp, m);
1023 	}
1024 }
1025 
1026 static void
1027 vr_rxeoc(struct vr_softc *sc)
1028 {
1029 	struct ifnet *ifp;
1030 	int i;
1031 
1032 	ifp = &sc->arpcom.ac_if;
1033 
1034 	ifp->if_ierrors++;
1035 
1036 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1037 	DELAY(10000);
1038 
1039 	/* Wait for receiver to stop */
1040 	for (i = 0x400;
1041 	     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1042 	     i--)
1043 		;
1044 
1045 	if (i == 0) {
1046 		if_printf(ifp, "rx shutdown error!\n");
1047 		sc->vr_flags |= VR_F_RESTART;
1048 		return;
1049 	}
1050 
1051 	vr_rxeof(sc);
1052 
1053 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1054 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1055 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1056 }
1057 
1058 /*
1059  * A frame was downloaded to the chip. It's safe for us to clean up
1060  * the list buffers.
1061  */
1062 static void
1063 vr_txeof(struct vr_softc *sc)
1064 {
1065 	struct vr_chain_data *cd;
1066 	struct vr_chain *tx_chain;
1067 	struct ifnet *ifp;
1068 
1069 	ifp = &sc->arpcom.ac_if;
1070 	cd = &sc->vr_cdata;
1071 
1072 	/* Reset the timeout timer; vr_txeoc() will clear it. */
1073 	ifp->if_timer = 5;
1074 
1075 	/* Sanity check. */
1076 	if (cd->vr_tx_head_idx == -1)
1077 		return;
1078 
1079 	tx_chain = cd->vr_tx_chain;
1080 
1081 	/*
1082 	 * Go through our tx list and free mbufs for those
1083 	 * frames that have been transmitted.
1084 	 */
1085 	while(tx_chain[cd->vr_tx_head_idx].vr_buf != NULL) {
1086 		struct vr_chain *cur_tx;
1087 		uint32_t txstat;
1088 		int i;
1089 
1090 		cur_tx = &tx_chain[cd->vr_tx_head_idx];
1091 		txstat = cur_tx->vr_ptr->vr_status;
1092 
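		/*
		 * On a transmit abort or FIFO underflow the transmitter must
		 * be idle before the descriptor can be handed back to the
		 * chip, so spin briefly waiting for it to stop and schedule
		 * a full restart if it never does.
		 */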
1093 		if ((txstat & VR_TXSTAT_ABRT) ||
1094 		    (txstat & VR_TXSTAT_UDF)) {
1095 			for (i = 0x400;
1096 			     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
1097 			     i--)
1098 				;	/* Wait for chip to shutdown */
1099 			if (i == 0) {
1100 				if_printf(ifp, "tx shutdown timeout\n");
1101 				sc->vr_flags |= VR_F_RESTART;
1102 				break;
1103 			}
1104 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1105 			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
1106 			break;
1107 		}
1108 
1109 		if (txstat & VR_TXSTAT_OWN)
1110 			break;
1111 
1112 		if (txstat & VR_TXSTAT_ERRSUM) {
1113 			ifp->if_oerrors++;
1114 			if (txstat & VR_TXSTAT_DEFER)
1115 				ifp->if_collisions++;
1116 			if (txstat & VR_TXSTAT_LATECOLL)
1117 				ifp->if_collisions++;
1118 		}
1119 
1120 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1121 
1122 		ifp->if_opackets++;
1123 		cur_tx->vr_buf = NULL;
1124 
1125 		if (cd->vr_tx_head_idx == cd->vr_tx_tail_idx) {
1126 			cd->vr_tx_head_idx = -1;
1127 			cd->vr_tx_tail_idx = -1;
1128 			break;
1129 		}
1130 
1131 		cd->vr_tx_head_idx = cur_tx->vr_next_idx;
1132 	}
1133 }
1134 
1135 /*
1136  * TX 'end of channel' interrupt handler.
1137  */
1138 static void
1139 vr_txeoc(struct vr_softc *sc)
1140 {
1141 	struct ifnet *ifp;
1142 
1143 	ifp = &sc->arpcom.ac_if;
1144 
1145 	if (sc->vr_cdata.vr_tx_head_idx == -1) {
1146 		ifp->if_flags &= ~IFF_OACTIVE;
1147 		sc->vr_cdata.vr_tx_tail_idx = -1;
1148 		ifp->if_timer = 0;
1149 	}
1150 }
1151 
1152 static void
1153 vr_tick(void *xsc)
1154 {
1155 	struct vr_softc *sc = xsc;
1156 	struct ifnet *ifp = &sc->arpcom.ac_if;
1157 	struct mii_data *mii;
1158 
1159 	lwkt_serialize_enter(ifp->if_serializer);
1160 
1161 	if (sc->vr_flags & VR_F_RESTART) {
1162 		if_printf(&sc->arpcom.ac_if, "restarting\n");
1163 		vr_stop(sc);
1164 		vr_reset(sc);
1165 		vr_init(sc);
1166 		sc->vr_flags &= ~VR_F_RESTART;
1167 	}
1168 
1169 	mii = device_get_softc(sc->vr_miibus);
1170 	mii_tick(mii);
1171 
1172 	callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
1173 
1174 	lwkt_serialize_exit(ifp->if_serializer);
1175 }
1176 
1177 static void
1178 vr_intr(void *arg)
1179 {
1180 	struct vr_softc *sc;
1181 	struct ifnet *ifp;
1182 	uint16_t status;
1183 
1184 	sc = arg;
1185 	ifp = &sc->arpcom.ac_if;
1186 
1187 	/* Suppress unwanted interrupts. */
1188 	if (!(ifp->if_flags & IFF_UP)) {
1189 		vr_stop(sc);
1190 		return;
1191 	}
1192 
1193 	/* Disable interrupts. */
1194 	if ((ifp->if_flags & IFF_POLLING) == 0)
1195 		CSR_WRITE_2(sc, VR_IMR, 0x0000);
1196 
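	/*
	 * Service events until the status register reads back clean; each
	 * pass acknowledges the bits it observed by writing them back to
	 * VR_ISR, which clears them.
	 */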
1197 	for (;;) {
1198 		status = CSR_READ_2(sc, VR_ISR);
1199 		if (status)
1200 			CSR_WRITE_2(sc, VR_ISR, status);
1201 
1202 		if ((status & VR_INTRS) == 0)
1203 			break;
1204 
1205 		if (status & VR_ISR_RX_OK)
1206 			vr_rxeof(sc);
1207 
1208 		if (status & VR_ISR_RX_DROPPED) {
1209 			if_printf(ifp, "rx packet lost\n");
1210 			ifp->if_ierrors++;
1211 		}
1212 
1213 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_DROPPED) ||
1214 		    (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) {
1215 			if_printf(ifp, "receive error (%04x)", status);
1216 			if (status & VR_ISR_RX_NOBUF)
1217 				kprintf(" no buffers");
1218 			if (status & VR_ISR_RX_OFLOW)
1219 				kprintf(" overflow");
1220 			if (status & VR_ISR_RX_DROPPED)
1221 				kprintf(" packet lost");
1222 			kprintf("\n");
1223 			vr_rxeoc(sc);
1224 		}
1225 
1226 		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1227 			vr_reset(sc);
1228 			vr_init(sc);
1229 			break;
1230 		}
1231 
1232 		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1233 		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1234 			vr_txeof(sc);
1235 			if ((status & VR_ISR_UDFI) ||
1236 			    (status & VR_ISR_TX_ABRT2) ||
1237 			    (status & VR_ISR_TX_ABRT)) {
1238 				ifp->if_oerrors++;
1239 				if (sc->vr_cdata.vr_tx_head_idx != -1) {
1240 					VR_SETBIT16(sc, VR_COMMAND,
1241 						    VR_CMD_TX_ON);
1242 					VR_SETBIT16(sc, VR_COMMAND,
1243 						    VR_CMD_TX_GO);
1244 				}
1245 			} else {
1246 				vr_txeoc(sc);
1247 			}
1248 		}
1249 
1250 	}
1251 
1252 	/* Re-enable interrupts. */
1253 	if ((ifp->if_flags & IFF_POLLING) == 0)
1254 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1255 
1256 	if (!ifq_is_empty(&ifp->if_snd))
1257 		if_devstart(ifp);
1258 }
1259 
1260 /*
1261  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1262  * pointers to the fragment pointers.
1263  */
1264 static int
1265 vr_encap(struct vr_softc *sc, int chain_idx, struct mbuf *m_head)
1266 {
1267 	struct vr_chain *c;
1268 	struct vr_desc *f;
1269 	caddr_t tx_buf;
1270 	int len;
1271 
1272 	KASSERT(chain_idx >= 0 && chain_idx < VR_TX_LIST_CNT,
1273 		("%s: chain idx(%d) out of range 0-%d",
1274 		 sc->arpcom.ac_if.if_xname, chain_idx, VR_TX_LIST_CNT));
1275 
1276 	/*
1277 	 * The VIA Rhine wants packet buffers to be longword
1278 	 * aligned, but very often our mbufs aren't. Rather than
1279 	 * waste time trying to decide when to copy and when not
1280 	 * to copy, just do it all the time.
1281 	 */
1282 	tx_buf = VR_TX_BUF(sc, chain_idx);
1283 	m_copydata(m_head, 0, m_head->m_pkthdr.len, tx_buf);
1284 	len = m_head->m_pkthdr.len;
1285 
1286 	/*
1287 	 * The Rhine chip doesn't auto-pad, so we have to make
1288 	 * sure to pad short frames out to the minimum frame length
1289 	 * ourselves.
1290 	 */
1291 	if (len < VR_MIN_FRAMELEN) {
1292 		bzero(tx_buf + len, VR_MIN_FRAMELEN - len);
1293 		len = VR_MIN_FRAMELEN;
1294  	}
1295 
1296 	c = &sc->vr_cdata.vr_tx_chain[chain_idx];
1297 	c->vr_buf = tx_buf;
1298 
1299 	f = c->vr_ptr;
1300 	f->vr_data = c->vr_buf_paddr;
1301 	f->vr_ctl = len;
1302 	f->vr_ctl |= (VR_TXCTL_TLINK | VR_TXCTL_FIRSTFRAG);
1303 	f->vr_ctl |= (VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
1304 	f->vr_status = 0;
1305 	f->vr_next = c->vr_next_desc_paddr;
1306 
1307 	return(0);
1308 }
1309 
1310 /*
1311  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1312  * to the mbuf data regions directly in the transmit lists. We also save a
1313  * copy of the pointers since the transmit list fragment pointers are
1314  * physical addresses.
1315  */
1316 static void
1317 vr_start(struct ifnet *ifp)
1318 {
1319 	struct vr_softc *sc;
1320 	struct vr_chain_data *cd;
1321 	struct vr_chain *tx_chain;
1322 	int cur_tx_idx, start_tx_idx, prev_tx_idx;
1323 
1324 	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
1325 		return;
1326 
1327 	sc = ifp->if_softc;
1328 	cd = &sc->vr_cdata;
1329 	tx_chain = cd->vr_tx_chain;
1330 
1331 	start_tx_idx = cd->vr_tx_free_idx;
1332 	cur_tx_idx = prev_tx_idx = -1;
1333 
1334 	/* Check for an available queue slot. If there are none, punt. */
1335 	if (tx_chain[start_tx_idx].vr_buf != NULL) {
1336 		ifp->if_flags |= IFF_OACTIVE;
1337 		return;
1338 	}
1339 
1340 	while (tx_chain[cd->vr_tx_free_idx].vr_buf == NULL) {
1341 		struct mbuf *m_head;
1342 		struct vr_chain *cur_tx;
1343 
1344 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1345 		if (m_head == NULL)
1346 			break;
1347 
1348 		/* Pick a descriptor off the free list. */
1349 		cur_tx_idx = cd->vr_tx_free_idx;
1350 		cur_tx = &tx_chain[cur_tx_idx];
1351 
1352 		/* Pack the data into the descriptor. */
1353 		if (vr_encap(sc, cur_tx_idx, m_head)) {
1354 			ifp->if_flags |= IFF_OACTIVE;
1355 			cur_tx_idx = prev_tx_idx;
1356 			break;
1357 		}
1358 
1359 		/* XXX */
1360 		if (cur_tx_idx != start_tx_idx)
1361 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1362 
1363 		BPF_MTAP(ifp, m_head);
1364 		m_freem(m_head);
1365 
1366 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1367 		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1368 
1369 		/* Iff everything went OK, we bump up free index. */
1370 		prev_tx_idx = cur_tx_idx;
1371 		cd->vr_tx_free_idx = cur_tx->vr_next_idx;
1372 	}
1373 
1374 	/* If there are no frames queued, bail. */
1375 	if (cur_tx_idx == -1)
1376 		return;
1377 
1378 	sc->vr_cdata.vr_tx_tail_idx = cur_tx_idx;
1379 
1380 	if (sc->vr_cdata.vr_tx_head_idx == -1)
1381 		sc->vr_cdata.vr_tx_head_idx = start_tx_idx;
1382 
1383 	/*
1384 	 * Set a timeout in case the chip goes out to lunch.
1385 	 */
1386 	ifp->if_timer = 5;
1387 }
1388 
1389 static void
1390 vr_init(void *xsc)
1391 {
1392 	struct vr_softc *sc = xsc;
1393 	struct ifnet *ifp = &sc->arpcom.ac_if;
1394 	struct mii_data *mii;
1395 	int i;
1396 
1397 	mii = device_get_softc(sc->vr_miibus);
1398 
1399 	/* Cancel pending I/O and free all RX/TX buffers. */
1400 	vr_stop(sc);
1401 	vr_reset(sc);
1402 
1403 	/* Set our station address. */
1404 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1405 		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1406 
1407 	/* Set DMA size. */
1408 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
1409 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
1410 
1411 	/*
1412 	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
1413 	 * so we must set both.
1414 	 */
1415 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
1416 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
1417 
1418 	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
1419 	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);
1420 
1421 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1422 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
1423 
1424 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1425 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1426 
1427 	/* Init circular RX list. */
1428 	if (vr_list_rx_init(sc) == ENOBUFS) {
1429 		vr_stop(sc);
1430 		if_printf(ifp, "initialization failed: no memory for rx buffers\n");
1431 		return;
1432 	}
1433 
1434 	/* Init tx descriptors. */
1435 	vr_list_tx_init(sc);
1436 
1437 	/* If we want promiscuous mode, set the allframes bit. */
1438 	if (ifp->if_flags & IFF_PROMISC)
1439 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1440 	else
1441 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1442 
1443 	/* Set capture broadcast bit to capture broadcast frames. */
1444 	if (ifp->if_flags & IFF_BROADCAST)
1445 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1446 	else
1447 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1448 
1449 	/*
1450 	 * Program the multicast filter, if necessary.
1451 	 */
1452 	vr_setmulti(sc);
1453 
1454 	/*
1455 	 * Load the address of the RX list.
1456 	 */
1457 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1458 
1459 	/* Enable receiver and transmitter. */
1460 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1461 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1462 				    VR_CMD_RX_GO);
1463 
1464 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1465 
1466 	/*
1467 	 * Enable interrupts, unless we are polling.
1468 	 */
1469 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1470 #ifdef DEVICE_POLLING
1471 	if ((ifp->if_flags & IFF_POLLING) == 0)
1472 #endif
1473 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1474 
1475 	mii_mediachg(mii);
1476 
1477 	ifp->if_flags |= IFF_RUNNING;
1478 	ifp->if_flags &= ~IFF_OACTIVE;
1479 
1480 	callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
1481 }
1482 
1483 /*
1484  * Set media options.
1485  */
1486 static int
1487 vr_ifmedia_upd(struct ifnet *ifp)
1488 {
1489 	struct vr_softc *sc;
1490 
1491 	sc = ifp->if_softc;
1492 
1493 	if (ifp->if_flags & IFF_UP)
1494 		vr_init(sc);
1495 
1496 	return(0);
1497 }
1498 
1499 /*
1500  * Report current media status.
1501  */
1502 static void
1503 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1504 {
1505 	struct vr_softc *sc;
1506 	struct mii_data *mii;
1507 
1508 	sc = ifp->if_softc;
1509 	mii = device_get_softc(sc->vr_miibus);
1510 	mii_pollstat(mii);
1511 	ifmr->ifm_active = mii->mii_media_active;
1512 	ifmr->ifm_status = mii->mii_media_status;
1513 }
1514 
1515 static int
1516 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
1517 {
1518 	struct vr_softc *sc = ifp->if_softc;
1519 	struct ifreq *ifr = (struct ifreq *) data;
1520 	struct mii_data *mii;
1521 	int error = 0;
1522 
1523 	switch(command) {
1524 	case SIOCSIFFLAGS:
1525 		if (ifp->if_flags & IFF_UP) {
1526 			vr_init(sc);
1527 		} else {
1528 			if (ifp->if_flags & IFF_RUNNING)
1529 				vr_stop(sc);
1530 		}
1531 		error = 0;
1532 		break;
1533 	case SIOCADDMULTI:
1534 	case SIOCDELMULTI:
1535 		vr_setmulti(sc);
1536 		error = 0;
1537 		break;
1538 	case SIOCGIFMEDIA:
1539 	case SIOCSIFMEDIA:
1540 		mii = device_get_softc(sc->vr_miibus);
1541 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1542 		break;
1543 	default:
1544 		error = ether_ioctl(ifp, command, data);
1545 		break;
1546 	}
1547 	return(error);
1548 }
1549 
1550 #ifdef DEVICE_POLLING
1551 
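/*
 * Polling entry point.  While polling is registered the interrupt mask is
 * held at zero and the usual interrupt work is driven from the polling
 * loop instead (the default case simply calls vr_intr()).
 */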
1552 static void
1553 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1554 {
1555 	struct vr_softc *sc = ifp->if_softc;
1556 
1557 	switch(cmd) {
1558 	case POLL_REGISTER:
1559 		/* disable interrupts */
1560 		CSR_WRITE_2(sc, VR_IMR, 0x0000);
1561 		break;
1562 	case POLL_DEREGISTER:
1563 		/* enable interrupts */
1564 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1565 		break;
1566 	default:
1567 		vr_intr(sc);
1568 		break;
1569 	}
1570 }
1571 #endif
1572 
1573 static void
1574 vr_watchdog(struct ifnet *ifp)
1575 {
1576 	struct vr_softc *sc;
1577 
1578 	sc = ifp->if_softc;
1579 
1580 	ifp->if_oerrors++;
1581 	if_printf(ifp, "watchdog timeout\n");
1582 
1583 	vr_stop(sc);
1584 	vr_reset(sc);
1585 	vr_init(sc);
1586 
1587 	if (!ifq_is_empty(&ifp->if_snd))
1588 		if_devstart(ifp);
1589 }
1590 
1591 /*
1592  * Stop the adapter and free any mbufs allocated to the
1593  * RX and TX lists.
1594  */
1595 static void
1596 vr_stop(struct vr_softc *sc)
1597 {
1598 	int i;
1599 	struct ifnet *ifp;
1600 
1601 	ifp = &sc->arpcom.ac_if;
1602 	ifp->if_timer = 0;
1603 
1604 	callout_stop(&sc->vr_stat_timer);
1605 
1606 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1607 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1608 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1609 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1610 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1611 
1612 	/*
1613 	 * Free data in the RX lists.
1614 	 */
1615 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1616 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1617 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1618 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1619 		}
1620 	}
1621 	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));
1622 
1623 	/*
1624 	 * Reset the TX list buffer pointers.
1625 	 */
1626 	for (i = 0; i < VR_TX_LIST_CNT; i++)
1627 		sc->vr_cdata.vr_tx_chain[i].vr_buf = NULL;
1628 
1629 	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
1630 
1631 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1632 }
1633 
1634 /*
1635  * Stop all chip I/O so that the kernel's probe routines don't
1636  * get confused by errant DMAs when rebooting.
1637  */
1638 static void
1639 vr_shutdown(device_t dev)
1640 {
1641 	struct vr_softc *sc;
1642 
1643 	sc = device_get_softc(dev);
1644 
1645 	vr_stop(sc);
1646 }
1647