xref: /dragonfly/sys/dev/netif/vr/if_vr.c (revision c6f73aab)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_vr.c,v 1.26.2.13 2003/02/06 04:46:20 silby Exp $
33  */
34 
35 /*
36  * VIA Rhine fast ethernet PCI NIC driver
37  *
38  * Supports various network adapters based on the VIA Rhine
39  * and Rhine II PCI controllers, including the D-Link DFE530TX.
40  * Datasheets are available at http://www.via.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
48  * The VIA Rhine controllers are similar in some respects to the
49  * DEC tulip chips, except less complicated. The controller
50  * uses an MII bus and an external physical layer interface. The
51  * receiver has a one entry perfect filter and a 64-bit hash table
52  * multicast filter. Transmit and receive descriptors are similar
53  * to the tulip.
54  *
55  * The Rhine has a serious flaw in its transmit DMA mechanism:
56  * transmit buffers must be longword aligned. Unfortunately,
57  * FreeBSD doesn't guarantee that mbufs will be filled in starting
58  * at longword boundaries, so we have to do a buffer copy before
59  * transmission.
60  */
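
/*
 * In this driver that copy happens in vr_encap(): every TX descriptor has a
 * pre-allocated, longword-aligned bounce buffer (VR_TX_BUF()), the outgoing
 * frame is m_copydata()'d into it, and frames shorter than VR_MIN_FRAMELEN
 * are zero-padded because the chip does not auto-pad.
 */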
61 
62 #include "opt_ifpoll.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71 #include <sys/serialize.h>
72 #include <sys/bus.h>
73 #include <sys/rman.h>
74 #include <sys/thread2.h>
75 #include <sys/interrupt.h>
76 
77 #include <net/if.h>
78 #include <net/ifq_var.h>
79 #include <net/if_arp.h>
80 #include <net/ethernet.h>
81 #include <net/if_dl.h>
82 #include <net/if_media.h>
83 #include <net/if_poll.h>
84 
85 #include <net/bpf.h>
86 
87 #include <vm/vm.h>              /* for vtophys */
88 #include <vm/pmap.h>            /* for vtophys */
89 
90 #include <dev/netif/mii_layer/mii.h>
91 #include <dev/netif/mii_layer/miivar.h>
92 
93 #include "pcidevs.h"
94 #include <bus/pci/pcireg.h>
95 #include <bus/pci/pcivar.h>
96 
97 #define VR_USEIOSPACE
98 
99 #include <dev/netif/vr/if_vrreg.h>
100 
101 /* "controller miibus0" required.  See GENERIC if you get errors here. */
102 #include "miibus_if.h"
103 
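/*
 * When defined, VR_USESWSHIFT selects the software (bit-banged) MII access
 * path in vr_mii_readreg()/vr_mii_writereg(); with it undefined, the chip's
 * built-in MII read/write engine is used instead.
 */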
104 #undef VR_USESWSHIFT
105 
106 /*
107  * Various supported device vendors/types and their names.
108  */
109 static struct vr_type vr_devs[] = {
110 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
111 		"VIA VT3043 Rhine I 10/100BaseTX" },
112 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
113 		"VIA VT86C100A Rhine II 10/100BaseTX" },
114 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
115 		"VIA VT6102 Rhine II 10/100BaseTX" },
116 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
117 		"VIA VT6105 Rhine III 10/100BaseTX" },
118 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
119 		"VIA VT6105M Rhine III 10/100BaseTX" },
120 	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
121 		"Delta Electronics Rhine II 10/100BaseTX" },
122 	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
123 		"Addtron Technology Rhine II 10/100BaseTX" },
124 	{ 0, 0, NULL }
125 };
126 
127 static int	vr_probe(device_t);
128 static int	vr_attach(device_t);
129 static int	vr_detach(device_t);
130 
131 static int	vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
132 			  struct mbuf *);
133 static int	vr_encap(struct vr_softc *, int, struct mbuf *);
134 
135 static void	vr_rxeof(struct vr_softc *);
136 static void	vr_rxeoc(struct vr_softc *);
137 static void	vr_txeof(struct vr_softc *);
138 static void	vr_txeoc(struct vr_softc *);
139 static void	vr_tick(void *);
140 static void	vr_intr(void *);
141 static void	vr_start(struct ifnet *, struct ifaltq_subque *);
142 static int	vr_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
143 static void	vr_init(void *);
144 static void	vr_stop(struct vr_softc *);
145 static void	vr_watchdog(struct ifnet *);
146 static void	vr_shutdown(device_t);
147 static int	vr_ifmedia_upd(struct ifnet *);
148 static void	vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
149 
150 #ifdef VR_USESWSHIFT
151 static void	vr_mii_sync(struct vr_softc *);
152 static void	vr_mii_send(struct vr_softc *, uint32_t, int);
153 #endif
154 static int	vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
155 static int	vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
156 static int	vr_miibus_readreg(device_t, int, int);
157 static int	vr_miibus_writereg(device_t, int, int, int);
158 static void	vr_miibus_statchg(device_t);
159 
160 static void	vr_setcfg(struct vr_softc *, int);
161 static void	vr_setmulti(struct vr_softc *);
162 static void	vr_reset(struct vr_softc *);
163 static int	vr_list_rx_init(struct vr_softc *);
164 static int	vr_list_tx_init(struct vr_softc *);
165 #ifdef IFPOLL_ENABLE
166 static void	vr_npoll(struct ifnet *, struct ifpoll_info *);
167 static void	vr_npoll_compat(struct ifnet *, void *, int);
168 #endif
169 
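/*
 * VR_USEIOSPACE (defined above) makes the driver map its registers through
 * the PCI I/O BAR (VR_PCI_LOIO); leaving it undefined would select the
 * memory-mapped BAR (VR_PCI_LOMEM) instead.
 */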
170 #ifdef VR_USEIOSPACE
171 #define VR_RES			SYS_RES_IOPORT
172 #define VR_RID			VR_PCI_LOIO
173 #else
174 #define VR_RES			SYS_RES_MEMORY
175 #define VR_RID			VR_PCI_LOMEM
176 #endif
177 
178 static device_method_t vr_methods[] = {
179 	/* Device interface */
180 	DEVMETHOD(device_probe,		vr_probe),
181 	DEVMETHOD(device_attach,	vr_attach),
182 	DEVMETHOD(device_detach, 	vr_detach),
183 	DEVMETHOD(device_shutdown,	vr_shutdown),
184 
185 	/* bus interface */
186 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
187 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
188 
189 	/* MII interface */
190 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
191 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
192 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
193 
194 	DEVMETHOD_END
195 };
196 
197 static driver_t vr_driver = {
198 	"vr",
199 	vr_methods,
200 	sizeof(struct vr_softc)
201 };
202 
203 static devclass_t vr_devclass;
204 
205 DECLARE_DUMMY_MODULE(if_vr);
206 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, NULL, NULL);
207 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, NULL, NULL);
208 
209 #define VR_SETBIT(sc, reg, x)				\
210 	CSR_WRITE_1(sc, reg,				\
211 		CSR_READ_1(sc, reg) | (x))
212 
213 #define VR_CLRBIT(sc, reg, x)				\
214 	CSR_WRITE_1(sc, reg,				\
215 		CSR_READ_1(sc, reg) & ~(x))
216 
217 #define VR_SETBIT16(sc, reg, x)				\
218 	CSR_WRITE_2(sc, reg,				\
219 		CSR_READ_2(sc, reg) | (x))
220 
221 #define VR_CLRBIT16(sc, reg, x)				\
222 	CSR_WRITE_2(sc, reg,				\
223 		CSR_READ_2(sc, reg) & ~(x))
224 
225 #define VR_SETBIT32(sc, reg, x)				\
226 	CSR_WRITE_4(sc, reg,				\
227 		CSR_READ_4(sc, reg) | (x))
228 
229 #define VR_CLRBIT32(sc, reg, x)				\
230 	CSR_WRITE_4(sc, reg,				\
231 		CSR_READ_4(sc, reg) & ~(x))
232 
233 #define SIO_SET(x)					\
234 	CSR_WRITE_1(sc, VR_MIICMD,			\
235 		CSR_READ_1(sc, VR_MIICMD) | (x))
236 
237 #define SIO_CLR(x)					\
238 	CSR_WRITE_1(sc, VR_MIICMD,			\
239 		CSR_READ_1(sc, VR_MIICMD) & ~(x))
240 
241 #ifdef VR_USESWSHIFT
242 /*
243  * Sync the PHYs by setting data bit and strobing the clock 32 times.
244  */
245 static void
246 vr_mii_sync(struct vr_softc *sc)
247 {
248 	int i;
249 
250 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
251 
252 	for (i = 0; i < 32; i++) {
253 		SIO_SET(VR_MIICMD_CLK);
254 		DELAY(1);
255 		SIO_CLR(VR_MIICMD_CLK);
256 		DELAY(1);
257 	}
258 }
259 
260 /*
261  * Clock a series of bits through the MII.
262  */
263 static void
264 vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
265 {
266 	int i;
267 
268 	SIO_CLR(VR_MIICMD_CLK);
269 
270 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
271 		if (bits & i)
272 			SIO_SET(VR_MIICMD_DATAIN);
273 		else
274 			SIO_CLR(VR_MIICMD_DATAIN);
275 		DELAY(1);
276 		SIO_CLR(VR_MIICMD_CLK);
277 		DELAY(1);
278 		SIO_SET(VR_MIICMD_CLK);
279 	}
280 }
281 #endif
282 
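/*
 * When VR_USESWSHIFT is in effect, vr_mii_readreg()/vr_mii_writereg() below
 * bit-bang what appears to be a standard IEEE 802.3 clause 22 management
 * frame: a preamble of 32 ones (vr_mii_sync()), a 2-bit start delimiter, a
 * 2-bit read/write opcode, the 5-bit PHY address, the 5-bit register
 * address, a turnaround, and then 16 data bits, all shifted through
 * VR_MIICMD one bit per clock.
 */
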
283 /*
284  * Read a PHY register through the MII.
285  */
286 static int
287 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
288 #ifdef VR_USESWSHIFT
289 {
290 	int i, ack;
291 
292 	/* Set up frame for RX. */
293 	frame->mii_stdelim = VR_MII_STARTDELIM;
294 	frame->mii_opcode = VR_MII_READOP;
295 	frame->mii_turnaround = 0;
296 	frame->mii_data = 0;
297 
298 	CSR_WRITE_1(sc, VR_MIICMD, 0);
299 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
300 
301 	/* Turn on data xmit. */
302 	SIO_SET(VR_MIICMD_DIR);
303 
304 	vr_mii_sync(sc);
305 
306 	/* Send command/address info. */
307 	vr_mii_send(sc, frame->mii_stdelim, 2);
308 	vr_mii_send(sc, frame->mii_opcode, 2);
309 	vr_mii_send(sc, frame->mii_phyaddr, 5);
310 	vr_mii_send(sc, frame->mii_regaddr, 5);
311 
312 	/* Idle bit. */
313 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
314 	DELAY(1);
315 	SIO_SET(VR_MIICMD_CLK);
316 	DELAY(1);
317 
318 	/* Turn off xmit. */
319 	SIO_CLR(VR_MIICMD_DIR);
320 
321 	/* Check for ack */
322 	SIO_CLR(VR_MIICMD_CLK);
323 	DELAY(1);
324 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
325 	SIO_SET(VR_MIICMD_CLK);
326 	DELAY(1);
327 
328 	/*
329 	 * Now try reading data bits. If the ack failed, we still
330 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
331 	 */
332 	if (ack) {
333 		for(i = 0; i < 16; i++) {
334 			SIO_CLR(VR_MIICMD_CLK);
335 			DELAY(1);
336 			SIO_SET(VR_MIICMD_CLK);
337 			DELAY(1);
338 		}
339 		goto fail;
340 	}
341 
342 	for (i = 0x8000; i; i >>= 1) {
343 		SIO_CLR(VR_MIICMD_CLK);
344 		DELAY(1);
345 		if (!ack) {
346 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
347 				frame->mii_data |= i;
348 			DELAY(1);
349 		}
350 		SIO_SET(VR_MIICMD_CLK);
351 		DELAY(1);
352 	}
353 
354 fail:
355 	SIO_CLR(VR_MIICMD_CLK);
356 	DELAY(1);
357 	SIO_SET(VR_MIICMD_CLK);
358 	DELAY(1);
359 
360 	if (ack)
361 		return(1);
362 	return(0);
363 }
364 #else
365 {
366 	int i;
367 
368 	/* Set the PHY address. */
369 	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
370 	    frame->mii_phyaddr);
371 
372 	/* Set the register address. */
373 	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
374 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
375 
376 	for (i = 0; i < 10000; i++) {
377 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
378 			break;
379 		DELAY(1);
380 	}
381 	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);
382 
383 	return(0);
384 }
385 #endif
386 
387 
388 /*
389  * Write to a PHY register through the MII.
390  */
391 static int
392 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
393 #ifdef VR_USESWSHIFT
394 {
395 	CSR_WRITE_1(sc, VR_MIICMD, 0);
396 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
397 
398 	/* Set up frame for TX. */
399 	frame->mii_stdelim = VR_MII_STARTDELIM;
400 	frame->mii_opcode = VR_MII_WRITEOP;
401 	frame->mii_turnaround = VR_MII_TURNAROUND;
402 
403 	/* Turn on data output. */
404 	SIO_SET(VR_MIICMD_DIR);
405 
406 	vr_mii_sync(sc);
407 
408 	vr_mii_send(sc, frame->mii_stdelim, 2);
409 	vr_mii_send(sc, frame->mii_opcode, 2);
410 	vr_mii_send(sc, frame->mii_phyaddr, 5);
411 	vr_mii_send(sc, frame->mii_regaddr, 5);
412 	vr_mii_send(sc, frame->mii_turnaround, 2);
413 	vr_mii_send(sc, frame->mii_data, 16);
414 
415 	/* Idle bit. */
416 	SIO_SET(VR_MIICMD_CLK);
417 	DELAY(1);
418 	SIO_CLR(VR_MIICMD_CLK);
419 	DELAY(1);
420 
421 	/* Turn off xmit. */
422 	SIO_CLR(VR_MIICMD_DIR);
423 
424 	return(0);
425 }
426 #else
427 {
428 	int i;
429 
430 	/* Set the PHY address. */
431 	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
432 		    frame->mii_phyaddr);
433 
434 	/* Set the register address and data to write. */
435 	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
436 	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);
437 
438 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
439 
440 	for (i = 0; i < 10000; i++) {
441 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
442 			break;
443 		DELAY(1);
444 	}
445 	return(0);
446 }
447 #endif
448 
449 static int
450 vr_miibus_readreg(device_t dev, int phy, int reg)
451 {
452 	struct vr_mii_frame frame;
453 	struct vr_softc *sc;
454 
455 	sc = device_get_softc(dev);
456 
457 	switch (sc->vr_revid) {
458 	case REV_ID_VT6102_APOLLO:
459 		if (phy != 1)
460 			return(0);
461 		break;
462 	default:
463 		break;
464 	}
465 
466 	bzero(&frame, sizeof(frame));
467 
468 	frame.mii_phyaddr = phy;
469 	frame.mii_regaddr = reg;
470 	vr_mii_readreg(sc, &frame);
471 
472 	return(frame.mii_data);
473 }
474 
475 static int
476 vr_miibus_writereg(device_t dev, int phy, int reg, int data)
477 {
478 	struct vr_mii_frame frame;
479 	struct vr_softc *sc;
480 
481 	sc = device_get_softc(dev);
482 
483 	switch (sc->vr_revid) {
484 	case REV_ID_VT6102_APOLLO:
485 		if (phy != 1)
486 			return 0;
487 		break;
488 	default:
489 		break;
490 	}
491 
492 	bzero(&frame, sizeof(frame));
493 
494 	frame.mii_phyaddr = phy;
495 	frame.mii_regaddr = reg;
496 	frame.mii_data = data;
497 
498 	vr_mii_writereg(sc, &frame);
499 
500 	return(0);
501 }
502 
503 static void
504 vr_miibus_statchg(device_t dev)
505 {
506 	struct mii_data *mii;
507 	struct vr_softc *sc;
508 
509 	sc = device_get_softc(dev);
510 	mii = device_get_softc(sc->vr_miibus);
511 	vr_setcfg(sc, mii->mii_media_active);
512 }
513 
514 /*
515  * Program the 64-bit multicast hash filter.
516  */
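/*
 * Hash sketch (mirroring the code below): take the big-endian CRC-32 of the
 * multicast address and use its top 6 bits as a bit index into the 64-bit
 * table split across MAR0/MAR1, roughly:
 *
 *	bit = ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;
 *	if (bit < 32)
 *		MAR0 |= 1 << bit;
 *	else
 *		MAR1 |= 1 << (bit - 32);
 */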
517 static void
518 vr_setmulti(struct vr_softc *sc)
519 {
520 	struct ifnet *ifp;
521 	uint32_t hashes[2] = { 0, 0 };
522 	struct ifmultiaddr *ifma;
523 	uint8_t rxfilt;
524 	int mcnt = 0;
525 
526 	ifp = &sc->arpcom.ac_if;
527 
528 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
529 
530 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
531 		rxfilt |= VR_RXCFG_RX_MULTI;
532 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
533 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
534 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
535 		return;
536 	}
537 
538 	/* First, zero out all the existing hash bits. */
539 	CSR_WRITE_4(sc, VR_MAR0, 0);
540 	CSR_WRITE_4(sc, VR_MAR1, 0);
541 
542 	/* Now program new ones. */
543 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
544 		int h;
545 
546 		if (ifma->ifma_addr->sa_family != AF_LINK)
547 			continue;
548 
549 		/* Use the top 6 bits of the big-endian CRC as the hash index. */
550 		h = (ether_crc32_be(
551 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
552 			ETHER_ADDR_LEN) >> 26) & 0x0000003F;
553 		if (h < 32)
554 			hashes[0] |= (1 << h);
555 		else
556 			hashes[1] |= (1 << (h - 32));
557 		mcnt++;
558 	}
559 
560 	if (mcnt)
561 		rxfilt |= VR_RXCFG_RX_MULTI;
562 	else
563 		rxfilt &= ~VR_RXCFG_RX_MULTI;
564 
565 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
566 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
567 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
568 }
569 
570 /*
571  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
572  * netconfig register, we first have to put the transmit and/or receive
573  * logic in the idle state.
574  */
575 static void
576 vr_setcfg(struct vr_softc *sc, int media)
577 {
578 	int restart = 0;
579 
580 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
581 		restart = 1;
582 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
583 	}
584 
585 	if ((media & IFM_GMASK) == IFM_FDX)
586 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
587 	else
588 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
589 
590 	if (restart)
591 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
592 }
593 
594 static void
595 vr_reset(struct vr_softc *sc)
596 {
597 	int i;
598 
599 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
600 
601 	for (i = 0; i < VR_TIMEOUT; i++) {
602 		DELAY(10);
603 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
604 			break;
605 	}
606 	if (i == VR_TIMEOUT) {
607 		struct ifnet *ifp = &sc->arpcom.ac_if;
608 
609 		if (sc->vr_revid < REV_ID_VT3065_A) {
610 			if_printf(ifp, "reset never completed!\n");
611 		} else {
612 			/* Use newer force reset command */
613 			if_printf(ifp, "Using force reset command.\n");
614 			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
615 		}
616 	}
617 
618 	/* Wait a little while for the chip to get its brains in order. */
619 	DELAY(1000);
620 }
621 
622 /*
623  * Probe for a VIA Rhine chip. Check the PCI vendor and device
624  * IDs against our list and return a device name if we find a match.
625  */
626 static int
627 vr_probe(device_t dev)
628 {
629 	struct vr_type *t;
630 	uint16_t vid, did;
631 
632 	vid = pci_get_vendor(dev);
633 	did = pci_get_device(dev);
634 
635 	for (t = vr_devs; t->vr_name != NULL; ++t) {
636 		if (vid == t->vr_vid && did == t->vr_did) {
637 			device_set_desc(dev, t->vr_name);
638 			return(0);
639 		}
640 	}
641 
642 	return(ENXIO);
643 }
644 
645 /*
646  * Attach the interface. Allocate softc structures, do ifmedia
647  * setup and ethernet/BPF attach.
648  */
649 static int
650 vr_attach(device_t dev)
651 {
652 	int i;
653 	uint8_t eaddr[ETHER_ADDR_LEN];
654 	struct vr_softc *sc;
655 	struct ifnet *ifp;
656 	int error = 0, rid;
657 
658 	sc = device_get_softc(dev);
659 	callout_init(&sc->vr_stat_timer);
660 
661 	/*
662 	 * Handle power management nonsense.
663 	 */
664 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
665 		uint32_t iobase, membase, irq;
666 
667 		/* Save important PCI config data. */
668 		iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
669 		membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
670 		irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
671 
672 		/* Reset the power state. */
673 		device_printf(dev, "chip is in D%d power mode "
674 		"-- setting to D0\n", pci_get_powerstate(dev));
675 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
676 
677 		/* Restore PCI config data. */
678 		pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
679 		pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
680 		pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
681 	}
682 
683 	pci_enable_busmaster(dev);
684 
685 	sc->vr_revid = pci_get_revid(dev);
686 
687 	rid = VR_RID;
688 	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);
689 
690 	if (sc->vr_res == NULL) {
691 		device_printf(dev, "couldn't map ports/memory\n");
692 		return ENXIO;
693 	}
694 
695 	sc->vr_btag = rman_get_bustag(sc->vr_res);
696 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
697 
698 	/* Allocate interrupt */
699 	rid = 0;
700 	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
701 					    RF_SHAREABLE | RF_ACTIVE);
702 
703 	if (sc->vr_irq == NULL) {
704 		device_printf(dev, "couldn't map interrupt\n");
705 		error = ENXIO;
706 		goto fail;
707 	}
708 
709 	/*
710 	 * Windows may put the chip in suspend mode when it
711 	 * shuts down. Be sure to kick it in the head to wake it
712 	 * up again.
713 	 */
714 	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
715 
716 	ifp = &sc->arpcom.ac_if;
717 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
718 
719 	/* Reset the adapter. */
720 	vr_reset(sc);
721 
722 	/*
723 	 * Turn on bit 2 (MIION) in PCI configuration register 0x53 during
724 	 * initialization and disable AUTOPOLL.
725 	 */
726 	pci_write_config(dev, VR_PCI_MODE,
727 	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
728 	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
729 
730 	/*
731 	 * Get station address. The way the Rhine chips work,
732 	 * you're not allowed to directly access the EEPROM once
733 	 * they've been programmed a special way. Consequently,
734 	 * we need to read the node address from the PAR0 and PAR1
735 	 * registers.
736 	 */
737 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
738 	DELAY(200);
739 	for (i = 0; i < ETHER_ADDR_LEN; i++)
740 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
741 
742 	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
743 	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
744 
745 	if (sc->vr_ldata == NULL) {
746 		device_printf(dev, "no memory for list buffers!\n");
747 		error = ENXIO;
748 		goto fail;
749 	}
750 
751 	/* Initialize TX buffer */
752 	sc->vr_cdata.vr_tx_buf = contigmalloc(VR_TX_BUF_SIZE, M_DEVBUF,
753 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
754 	if (sc->vr_cdata.vr_tx_buf == NULL) {
755 		device_printf(dev, "can't allocate tx buffer!\n");
756 		error = ENXIO;
757 		goto fail;
758 	}
759 
760 	/* Set various TX indexes to invalid value */
761 	sc->vr_cdata.vr_tx_free_idx = -1;
762 	sc->vr_cdata.vr_tx_tail_idx = -1;
763 	sc->vr_cdata.vr_tx_head_idx = -1;
764 
765 
766 	ifp->if_softc = sc;
767 	ifp->if_mtu = ETHERMTU;
768 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
769 	ifp->if_ioctl = vr_ioctl;
770 	ifp->if_start = vr_start;
771 #ifdef IFPOLL_ENABLE
772 	ifp->if_npoll = vr_npoll;
773 #endif
774 	ifp->if_watchdog = vr_watchdog;
775 	ifp->if_init = vr_init;
776 	ifp->if_baudrate = 10000000;
777 	ifq_set_maxlen(&ifp->if_snd, VR_TX_LIST_CNT - 1);
778 	ifq_set_ready(&ifp->if_snd);
779 
780 	/*
781 	 * Do MII setup.
782 	 */
783 	if (mii_phy_probe(dev, &sc->vr_miibus,
784 	    vr_ifmedia_upd, vr_ifmedia_sts)) {
785 		if_printf(ifp, "MII without any phy!\n");
786 		error = ENXIO;
787 		goto fail;
788 	}
789 
790 	/* Call MI attach routine. */
791 	ether_ifattach(ifp, eaddr, NULL);
792 
793 	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vr_irq));
794 
795 #ifdef IFPOLL_ENABLE
796 	ifpoll_compat_setup(&sc->vr_npoll, NULL, NULL, device_get_unit(dev),
797 	    ifp->if_serializer);
798 #endif
799 
800 	error = bus_setup_intr(dev, sc->vr_irq, INTR_MPSAFE,
801 			       vr_intr, sc, &sc->vr_intrhand,
802 			       ifp->if_serializer);
803 	if (error) {
804 		device_printf(dev, "couldn't set up irq\n");
805 		ether_ifdetach(ifp);
806 		goto fail;
807 	}
808 
809 	return 0;
810 
811 fail:
812 	vr_detach(dev);
813 	return(error);
814 }
815 
816 static int
817 vr_detach(device_t dev)
818 {
819 	struct vr_softc *sc = device_get_softc(dev);
820 	struct ifnet *ifp = &sc->arpcom.ac_if;
821 
822 	if (device_is_attached(dev)) {
823 		lwkt_serialize_enter(ifp->if_serializer);
824 		vr_stop(sc);
825 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
826 		lwkt_serialize_exit(ifp->if_serializer);
827 
828 		ether_ifdetach(ifp);
829 	}
830 	if (sc->vr_miibus != NULL)
831 		device_delete_child(dev, sc->vr_miibus);
832 	bus_generic_detach(dev);
833 
834 	if (sc->vr_irq != NULL)
835 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
836 	if (sc->vr_res != NULL)
837 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
838 	if (sc->vr_ldata != NULL)
839 		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
840 	if (sc->vr_cdata.vr_tx_buf != NULL)
841 		contigfree(sc->vr_cdata.vr_tx_buf, VR_TX_BUF_SIZE, M_DEVBUF);
842 
843 	return(0);
844 }
845 
846 /*
847  * Initialize the transmit descriptors.
848  */
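/*
 * A sketch of how the TX ring is tracked in software: vr_tx_free_idx is the
 * next descriptor vr_start() may fill, vr_tx_head_idx is the oldest
 * descriptor not yet reclaimed by vr_txeof() (-1 when the ring is empty),
 * and vr_tx_tail_idx is the most recently queued descriptor.
 */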
849 static int
850 vr_list_tx_init(struct vr_softc *sc)
851 {
852 	struct vr_chain_data *cd;
853 	struct vr_list_data *ld;
854 	struct vr_chain *tx_chain;
855 	int i;
856 
857 	cd = &sc->vr_cdata;
858 	ld = sc->vr_ldata;
859 	tx_chain = cd->vr_tx_chain;
860 
861 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
862 		tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
863 		if (i == (VR_TX_LIST_CNT - 1))
864 			tx_chain[i].vr_next_idx = 0;
865 		else
866 			tx_chain[i].vr_next_idx = i + 1;
867 	}
868 
869 	for (i = 0; i < VR_TX_LIST_CNT; ++i) {
870 		void *tx_buf;
871 		int next_idx;
872 
873 		tx_buf = VR_TX_BUF(sc, i);
874 		next_idx = tx_chain[i].vr_next_idx;
875 
876 		tx_chain[i].vr_next_desc_paddr =
877 			vtophys(tx_chain[next_idx].vr_ptr);
878 		tx_chain[i].vr_buf_paddr = vtophys(tx_buf);
879 	}
880 
881 	cd->vr_tx_free_idx = 0;
882 	cd->vr_tx_tail_idx = cd->vr_tx_head_idx = -1;
883 
884 	return 0;
885 }
886 
887 
888 /*
889  * Initialize the RX descriptors and allocate mbufs for them. Note that
890  * we arrange the descriptors in a closed ring, so that the last descriptor
891  * points back to the first.
892  */
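/*
 * The resulting ring looks roughly like this (VR_RX_LIST_CNT descriptors,
 * each with one mbuf cluster attached):
 *
 *	vr_rx_list[0] -> vr_rx_list[1] -> ... -> vr_rx_list[N-1]
 *	      ^                                        |
 *	      +----------------------------------------+
 *
 * vr_next holds the physical address of the next descriptor, and the chip
 * owns a descriptor for as long as VR_RXSTAT_OWN is set in its status word.
 */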
893 static int
894 vr_list_rx_init(struct vr_softc *sc)
895 {
896 	struct vr_chain_data *cd;
897 	struct vr_list_data *ld;
898 	int i, nexti;
899 
900 	cd = &sc->vr_cdata;
901 	ld = sc->vr_ldata;
902 
903 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
904 		cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i];
905 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
906 			return(ENOBUFS);
907 		if (i == (VR_RX_LIST_CNT - 1))
908 			nexti = 0;
909 		else
910 			nexti = i + 1;
911 		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
912 		ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[nexti]);
913 	}
914 
915 	cd->vr_rx_head = &cd->vr_rx_chain[0];
916 
917 	return(0);
918 }
919 
920 /*
921  * Initialize an RX descriptor and attach an MBUF cluster.
922  * Note: the length fields are only 11 bits wide, which means the
923  * largest size we can specify is 2047. This is important because
924  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
925  * overflow the field and make a mess.
926  */
927 static int
928 vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m)
929 {
930 	struct mbuf *m_new = NULL;
931 
932 	if (m == NULL) {
933 		m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
934 		if (m_new == NULL)
935 			return (ENOBUFS);
936 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
937 	} else {
938 		m_new = m;
939 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
940 		m_new->m_data = m_new->m_ext.ext_buf;
941 	}
942 
943 	m_adj(m_new, sizeof(uint64_t));
944 
945 	c->vr_mbuf = m_new;
946 	c->vr_ptr->vr_status = VR_RXSTAT;
947 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
948 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
949 
950 	return(0);
951 }
952 
953 /*
954  * A frame has been uploaded: pass the resulting mbuf chain up to
955  * the higher level protocols.
956  */
957 static void
958 vr_rxeof(struct vr_softc *sc)
959 	struct mbuf *m;
960 	struct ifnet *ifp;
961         struct ifnet *ifp;
962 	struct vr_chain_onefrag *cur_rx;
963 	int total_len = 0;
964 	uint32_t rxstat;
965 
966 	ifp = &sc->arpcom.ac_if;
967 
968 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
969 							VR_RXSTAT_OWN)) {
970 		struct mbuf *m0 = NULL;
971 
972 		cur_rx = sc->vr_cdata.vr_rx_head;
973 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
974 		m = cur_rx->vr_mbuf;
975 
976 		/*
977 		 * If an error occurs, update stats, clear the
978 		 * status word and leave the mbuf cluster in place:
979 		 * it should simply get re-used next time this descriptor
980 	 	 * comes up in the ring.
981 		 */
982 		if (rxstat & VR_RXSTAT_RXERR) {
983 			IFNET_STAT_INC(ifp, ierrors, 1);
984 			if_printf(ifp, "rx error (%02x):", rxstat & 0x000000ff);
985 			if (rxstat & VR_RXSTAT_CRCERR)
986 				kprintf(" crc error");
987 			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
988 				kprintf(" frame alignment error");
989 			if (rxstat & VR_RXSTAT_FIFOOFLOW)
990 				kprintf(" FIFO overflow");
991 			if (rxstat & VR_RXSTAT_GIANT)
992 				kprintf(" received giant packet");
993 			if (rxstat & VR_RXSTAT_RUNT)
994 				kprintf(" received runt packet");
995 			if (rxstat & VR_RXSTAT_BUSERR)
996 				kprintf(" system bus error");
997 			if (rxstat & VR_RXSTAT_BUFFERR)
998 				kprintf(" rx buffer error");
999 			kprintf("\n");
1000 			vr_newbuf(sc, cur_rx, m);
1001 			continue;
1002 		}
1003 
1004 		/* No errors; receive the packet. */
1005 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1006 
1007 		/*
1008 		 * XXX The VIA Rhine chip includes the CRC with every
1009 		 * received frame, and there's no way to turn this
1010 		 * behavior off (at least, I can't find anything in
1011 	 	 * the manual that explains how to do it) so we have
1012 		 * to trim off the CRC manually.
1013 		 */
1014 		total_len -= ETHER_CRC_LEN;
1015 
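		/*
		 * Copy the frame into a fresh mbuf chain.  Starting the copy
		 * ETHER_ALIGN bytes early and trimming those bytes off again
		 * below leaves the payload at a 2-byte offset, which keeps
		 * the IP header 32-bit aligned.  The old cluster is recycled
		 * straight back into the ring by vr_newbuf().
		 */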
1016 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1017 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1018 		vr_newbuf(sc, cur_rx, m);
1019 		if (m0 == NULL) {
1020 			IFNET_STAT_INC(ifp, ierrors, 1);
1021 			continue;
1022 		}
1023 		m_adj(m0, ETHER_ALIGN);
1024 		m = m0;
1025 
1026 		IFNET_STAT_INC(ifp, ipackets, 1);
1027 		ifp->if_input(ifp, m, NULL, -1);
1028 	}
1029 }
1030 
1031 static void
1032 vr_rxeoc(struct vr_softc *sc)
1033 {
1034 	struct ifnet *ifp;
1035 	int i;
1036 
1037 	ifp = &sc->arpcom.ac_if;
1038 
1039 	IFNET_STAT_INC(ifp, ierrors, 1);
1040 
1041 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1042 	DELAY(10000);
1043 
1045 	for (i = 0x400;
1046 	     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1047 	     i--)
1048 		;	/* Wait for receiver to stop */
1049 
1050 	if (i == 0) {
1051 		if_printf(ifp, "rx shutdown error!\n");
1052 		sc->vr_flags |= VR_F_RESTART;
1053 		return;
1054 	}
1055 
1056 	vr_rxeof(sc);
1057 
1058 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1059 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1060 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1061 }
1062 
1063 /*
1064  * A frame was downloaded to the chip. It's safe for us to clean up
1065  * the list buffers.
1066  */
1067 static void
1068 vr_txeof(struct vr_softc *sc)
1069 {
1070 	struct vr_chain_data *cd;
1071 	struct vr_chain *tx_chain;
1072 	struct ifnet *ifp;
1073 
1074 	ifp = &sc->arpcom.ac_if;
1075 	cd = &sc->vr_cdata;
1076 
1077 	/* Reset the timeout timer; vr_txeoc() will clear it. */
1078 	ifp->if_timer = 5;
1079 
1080 	/* Sanity check. */
1081 	if (cd->vr_tx_head_idx == -1)
1082 		return;
1083 
1084 	tx_chain = cd->vr_tx_chain;
1085 
1086 	/*
1087 	 * Go through our tx list and free mbufs for those
1088 	 * frames that have been transmitted.
1089 	 */
1090 	while(tx_chain[cd->vr_tx_head_idx].vr_buf != NULL) {
1091 		struct vr_chain *cur_tx;
1092 		uint32_t txstat;
1093 		int i;
1094 
1095 		cur_tx = &tx_chain[cd->vr_tx_head_idx];
1096 		txstat = cur_tx->vr_ptr->vr_status;
1097 
1098 		if ((txstat & VR_TXSTAT_ABRT) ||
1099 		    (txstat & VR_TXSTAT_UDF)) {
1100 			for (i = 0x400;
1101 			     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
1102 			     i--)
1103 				;	/* Wait for chip to shutdown */
1104 			if (i == 0) {
1105 				if_printf(ifp, "tx shutdown timeout\n");
1106 				sc->vr_flags |= VR_F_RESTART;
1107 				break;
1108 			}
1109 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1110 			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
1111 			break;
1112 		}
1113 
1114 		if (txstat & VR_TXSTAT_OWN)
1115 			break;
1116 
1117 		if (txstat & VR_TXSTAT_ERRSUM) {
1118 			IFNET_STAT_INC(ifp, oerrors, 1);
1119 			if (txstat & VR_TXSTAT_DEFER)
1120 				IFNET_STAT_INC(ifp, collisions, 1);
1121 			if (txstat & VR_TXSTAT_LATECOLL)
1122 				IFNET_STAT_INC(ifp, collisions, 1);
1123 		}
1124 
1125 		IFNET_STAT_INC(ifp, collisions,
1126 		    (txstat & VR_TXSTAT_COLLCNT) >> 3);
1127 
1128 		IFNET_STAT_INC(ifp, opackets, 1);
1129 		cur_tx->vr_buf = NULL;
1130 
1131 		if (cd->vr_tx_head_idx == cd->vr_tx_tail_idx) {
1132 			cd->vr_tx_head_idx = -1;
1133 			cd->vr_tx_tail_idx = -1;
1134 			break;
1135 		}
1136 
1137 		cd->vr_tx_head_idx = cur_tx->vr_next_idx;
1138 	}
1139 }
1140 
1141 /*
1142  * TX 'end of channel' interrupt handler.
1143  */
1144 static void
1145 vr_txeoc(struct vr_softc *sc)
1146 {
1147 	struct ifnet *ifp;
1148 
1149 	ifp = &sc->arpcom.ac_if;
1150 
1151 	if (sc->vr_cdata.vr_tx_head_idx == -1) {
1152 		ifq_clr_oactive(&ifp->if_snd);
1153 		sc->vr_cdata.vr_tx_tail_idx = -1;
1154 		ifp->if_timer = 0;
1155 	}
1156 }
1157 
1158 static void
1159 vr_tick(void *xsc)
1160 {
1161 	struct vr_softc *sc = xsc;
1162 	struct ifnet *ifp = &sc->arpcom.ac_if;
1163 	struct mii_data *mii;
1164 
1165 	lwkt_serialize_enter(ifp->if_serializer);
1166 
1167 	if (sc->vr_flags & VR_F_RESTART) {
1168 		if_printf(&sc->arpcom.ac_if, "restarting\n");
1169 		vr_stop(sc);
1170 		vr_reset(sc);
1171 		vr_init(sc);
1172 		sc->vr_flags &= ~VR_F_RESTART;
1173 	}
1174 
1175 	mii = device_get_softc(sc->vr_miibus);
1176 	mii_tick(mii);
1177 
1178 	callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
1179 
1180 	lwkt_serialize_exit(ifp->if_serializer);
1181 }
1182 
1183 static void
1184 vr_intr(void *arg)
1185 {
1186 	struct vr_softc *sc;
1187 	struct ifnet *ifp;
1188 	uint16_t status;
1189 
1190 	sc = arg;
1191 	ifp = &sc->arpcom.ac_if;
1192 
1193 	/* Suppress unwanted interrupts. */
1194 	if (!(ifp->if_flags & IFF_UP)) {
1195 		vr_stop(sc);
1196 		return;
1197 	}
1198 
1199 	/* Disable interrupts. */
1200 	if ((ifp->if_flags & IFF_NPOLLING) == 0)
1201 		CSR_WRITE_2(sc, VR_IMR, 0x0000);
1202 
1203 	for (;;) {
1204 		status = CSR_READ_2(sc, VR_ISR);
1205 		if (status)
1206 			CSR_WRITE_2(sc, VR_ISR, status);
1207 
1208 		if ((status & VR_INTRS) == 0)
1209 			break;
1210 
1211 		if (status & VR_ISR_RX_OK)
1212 			vr_rxeof(sc);
1213 
1214 		if (status & VR_ISR_RX_DROPPED) {
1215 			if_printf(ifp, "rx packet lost\n");
1216 			IFNET_STAT_INC(ifp, ierrors, 1);
1217 		}
1218 
1219 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1220 		    (status & VR_ISR_RX_OFLOW)) {
1221 			if_printf(ifp, "receive error (%04x)", status);
1222 			if (status & VR_ISR_RX_NOBUF)
1223 				kprintf(" no buffers");
1224 			if (status & VR_ISR_RX_OFLOW)
1225 				kprintf(" overflow");
1226 			if (status & VR_ISR_RX_DROPPED)
1227 				kprintf(" packet lost");
1228 			kprintf("\n");
1229 			vr_rxeoc(sc);
1230 		}
1231 
1232 		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1233 			vr_reset(sc);
1234 			vr_init(sc);
1235 			break;
1236 		}
1237 
1238 		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1239 		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1240 			vr_txeof(sc);
1241 			if ((status & VR_ISR_UDFI) ||
1242 			    (status & VR_ISR_TX_ABRT2) ||
1243 			    (status & VR_ISR_TX_ABRT)) {
1244 				IFNET_STAT_INC(ifp, oerrors, 1);
1245 				if (sc->vr_cdata.vr_tx_head_idx != -1) {
1246 					VR_SETBIT16(sc, VR_COMMAND,
1247 						    VR_CMD_TX_ON);
1248 					VR_SETBIT16(sc, VR_COMMAND,
1249 						    VR_CMD_TX_GO);
1250 				}
1251 			} else {
1252 				vr_txeoc(sc);
1253 			}
1254 		}
1255 
1256 	}
1257 
1258 	/* Re-enable interrupts. */
1259 	if ((ifp->if_flags & IFF_NPOLLING) == 0)
1260 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1261 
1262 	if (!ifq_is_empty(&ifp->if_snd))
1263 		if_devstart(ifp);
1264 }
1265 
1266 /*
1267  * Encapsulate an mbuf chain in a single TX descriptor: copy the frame into
1268  * the descriptor's aligned bounce buffer and fill in the control word.
1269  */
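/*
 * Each frame occupies exactly one descriptor: VR_TXCTL_FIRSTFRAG and
 * VR_TXCTL_LASTFRAG are both set on the same descriptor, VR_TXCTL_TLINK
 * keeps the chip walking the ring, and VR_TXCTL_FINT requests a TX
 * interrupt for the frame.
 */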
1270 static int
1271 vr_encap(struct vr_softc *sc, int chain_idx, struct mbuf *m_head)
1272 {
1273 	struct vr_chain *c;
1274 	struct vr_desc *f;
1275 	caddr_t tx_buf;
1276 	int len;
1277 
1278 	KASSERT(chain_idx >= 0 && chain_idx < VR_TX_LIST_CNT,
1279 		("%s: chain idx(%d) out of range 0-%d",
1280 		 sc->arpcom.ac_if.if_xname, chain_idx, VR_TX_LIST_CNT));
1281 
1282 	/*
1283 	 * The VIA Rhine wants packet buffers to be longword
1284 	 * aligned, but very often our mbufs aren't. Rather than
1285 	 * waste time trying to decide when to copy and when not
1286 	 * to copy, just do it all the time.
1287 	 */
1288 	tx_buf = VR_TX_BUF(sc, chain_idx);
1289 	m_copydata(m_head, 0, m_head->m_pkthdr.len, tx_buf);
1290 	len = m_head->m_pkthdr.len;
1291 
1292 	/*
1293 	 * The Rhine chip doesn't auto-pad, so we have to make
1294 	 * sure to pad short frames out to the minimum frame length
1295 	 * ourselves.
1296 	 */
1297 	if (len < VR_MIN_FRAMELEN) {
1298 		bzero(tx_buf + len, VR_MIN_FRAMELEN - len);
1299 		len = VR_MIN_FRAMELEN;
1300  	}
1301 
1302 	c = &sc->vr_cdata.vr_tx_chain[chain_idx];
1303 	c->vr_buf = tx_buf;
1304 
1305 	f = c->vr_ptr;
1306 	f->vr_data = c->vr_buf_paddr;
1307 	f->vr_ctl = len;
1308 	f->vr_ctl |= (VR_TXCTL_TLINK | VR_TXCTL_FIRSTFRAG);
1309 	f->vr_ctl |= (VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
1310 	f->vr_status = 0;
1311 	f->vr_next = c->vr_next_desc_paddr;
1312 
1313 	return(0);
1314 }
1315 
1316 /*
1317  * Main transmit routine. The Rhine requires longword-aligned transmit
1318  * buffers, so each outgoing frame is copied into a pre-allocated, aligned
1319  * TX bounce buffer by vr_encap() before the corresponding descriptor is
1320  * handed to the chip.
1321  */
1322 static void
1323 vr_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1324 {
1325 	struct vr_softc *sc;
1326 	struct vr_chain_data *cd;
1327 	struct vr_chain *tx_chain;
1328 	int cur_tx_idx, start_tx_idx, prev_tx_idx;
1329 
1330 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1331 
1332 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1333 		return;
1334 
1335 	sc = ifp->if_softc;
1336 	cd = &sc->vr_cdata;
1337 	tx_chain = cd->vr_tx_chain;
1338 
1339 	start_tx_idx = cd->vr_tx_free_idx;
1340 	cur_tx_idx = prev_tx_idx = -1;
1341 
1342 	/* Check for an available queue slot. If there are none, punt. */
1343 	if (tx_chain[start_tx_idx].vr_buf != NULL) {
1344 		ifq_set_oactive(&ifp->if_snd);
1345 		return;
1346 	}
1347 
1348 	while (tx_chain[cd->vr_tx_free_idx].vr_buf == NULL) {
1349 		struct mbuf *m_head;
1350 		struct vr_chain *cur_tx;
1351 
1352 		m_head = ifq_dequeue(&ifp->if_snd);
1353 		if (m_head == NULL)
1354 			break;
1355 
1356 		/* Pick a descriptor off the free list. */
1357 		cur_tx_idx = cd->vr_tx_free_idx;
1358 		cur_tx = &tx_chain[cur_tx_idx];
1359 
1360 		/* Pack the data into the descriptor. */
1361 		if (vr_encap(sc, cur_tx_idx, m_head)) {
1362 			ifq_set_oactive(&ifp->if_snd);
1363 			cur_tx_idx = prev_tx_idx;
1364 			break;
1365 		}
1366 
1367 		/* XXX */
1368 		if (cur_tx_idx != start_tx_idx)
1369 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1370 
1371 		BPF_MTAP(ifp, m_head);
1372 		m_freem(m_head);
1373 
1374 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1375 		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1376 
1377 		/* Iff everything went OK, we bump up free index. */
1378 		prev_tx_idx = cur_tx_idx;
1379 		cd->vr_tx_free_idx = cur_tx->vr_next_idx;
1380 	}
1381 
1382 	/* If there are no frames queued, bail. */
1383 	if (cur_tx_idx == -1)
1384 		return;
1385 
1386 	sc->vr_cdata.vr_tx_tail_idx = cur_tx_idx;
1387 
1388 	if (sc->vr_cdata.vr_tx_head_idx == -1)
1389 		sc->vr_cdata.vr_tx_head_idx = start_tx_idx;
1390 
1391 	/*
1392 	 * Set a timeout in case the chip goes out to lunch.
1393 	 */
1394 	ifp->if_timer = 5;
1395 }
1396 
1397 static void
1398 vr_init(void *xsc)
1399 {
1400 	struct vr_softc *sc = xsc;
1401 	struct ifnet *ifp = &sc->arpcom.ac_if;
1402 	struct mii_data *mii;
1403 	int i;
1404 
1405 	mii = device_get_softc(sc->vr_miibus);
1406 
1407 	/* Cancel pending I/O and free all RX/TX buffers. */
1408 	vr_stop(sc);
1409 	vr_reset(sc);
1410 
1411 	/* Set our station address. */
1412 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1413 		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1414 
1415 	/* Set DMA size. */
1416 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
1417 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
1418 
1419 	/*
1420 	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
1421 	 * so we must set both.
1422 	 */
1423 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
1424 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
1425 
1426 	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
1427 	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);
1428 
1429 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1430 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
1431 
1432 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1433 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1434 
1435 	/* Init circular RX list. */
1436 	if (vr_list_rx_init(sc) == ENOBUFS) {
1437 		vr_stop(sc);
1438 		if_printf(ifp, "initialization failed: no memory for rx buffers\n");
1439 		return;
1440 	}
1441 
1442 	/* Init tx descriptors. */
1443 	vr_list_tx_init(sc);
1444 
1445 	/* If we want promiscuous mode, set the allframes bit. */
1446 	if (ifp->if_flags & IFF_PROMISC)
1447 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1448 	else
1449 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1450 
1451 	/* Set capture broadcast bit to capture broadcast frames. */
1452 	if (ifp->if_flags & IFF_BROADCAST)
1453 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1454 	else
1455 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1456 
1457 	/*
1458 	 * Program the multicast filter, if necessary.
1459 	 */
1460 	vr_setmulti(sc);
1461 
1462 	/*
1463 	 * Load the address of the RX list.
1464 	 */
1465 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1466 
1467 	/* Enable receiver and transmitter. */
1468 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1469 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1470 				    VR_CMD_RX_GO);
1471 
1472 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1473 
1474 	/*
1475 	 * Enable interrupts, unless we are polling.
1476 	 */
1477 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1478 #ifdef IFPOLL_ENABLE
1479 	if ((ifp->if_flags & IFF_NPOLLING) == 0)
1480 #endif
1481 		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1482 
1483 	mii_mediachg(mii);
1484 
1485 	ifp->if_flags |= IFF_RUNNING;
1486 	ifq_clr_oactive(&ifp->if_snd);
1487 
1488 	callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
1489 }
1490 
1491 /*
1492  * Set media options.
1493  */
1494 static int
1495 vr_ifmedia_upd(struct ifnet *ifp)
1496 {
1497 	struct vr_softc *sc;
1498 
1499 	sc = ifp->if_softc;
1500 
1501 	if (ifp->if_flags & IFF_UP)
1502 		vr_init(sc);
1503 
1504 	return(0);
1505 }
1506 
1507 /*
1508  * Report current media status.
1509  */
1510 static void
1511 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1512 {
1513 	struct vr_softc *sc;
1514 	struct mii_data *mii;
1515 
1516 	sc = ifp->if_softc;
1517 	mii = device_get_softc(sc->vr_miibus);
1518 	mii_pollstat(mii);
1519 	ifmr->ifm_active = mii->mii_media_active;
1520 	ifmr->ifm_status = mii->mii_media_status;
1521 }
1522 
1523 static int
1524 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
1525 {
1526 	struct vr_softc *sc = ifp->if_softc;
1527 	struct ifreq *ifr = (struct ifreq *) data;
1528 	struct mii_data *mii;
1529 	int error = 0;
1530 
1531 	switch(command) {
1532 	case SIOCSIFFLAGS:
1533 		if (ifp->if_flags & IFF_UP) {
1534 			vr_init(sc);
1535 		} else {
1536 			if (ifp->if_flags & IFF_RUNNING)
1537 				vr_stop(sc);
1538 		}
1539 		error = 0;
1540 		break;
1541 	case SIOCADDMULTI:
1542 	case SIOCDELMULTI:
1543 		vr_setmulti(sc);
1544 		error = 0;
1545 		break;
1546 	case SIOCGIFMEDIA:
1547 	case SIOCSIFMEDIA:
1548 		mii = device_get_softc(sc->vr_miibus);
1549 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1550 		break;
1551 	default:
1552 		error = ether_ioctl(ifp, command, data);
1553 		break;
1554 	}
1555 	return(error);
1556 }
1557 
1558 #ifdef IFPOLL_ENABLE
1559 
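/*
 * Polling support: vr_npoll() registers vr_npoll_compat() as the RX poll
 * handler on the configured CPU and masks the chip's interrupts while
 * IFF_NPOLLING is active; vr_npoll_compat() just runs the normal interrupt
 * handler from the polling context.
 */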
1560 static void
1561 vr_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused)
1562 {
1563 	struct vr_softc *sc = ifp->if_softc;
1564 
1565 	ASSERT_SERIALIZED(ifp->if_serializer);
1566 	vr_intr(sc);
1567 }
1568 
1569 static void
1570 vr_npoll(struct ifnet *ifp, struct ifpoll_info *info)
1571 {
1572 	struct vr_softc *sc = ifp->if_softc;
1573 
1574 	ASSERT_SERIALIZED(ifp->if_serializer);
1575 
1576 	if (info != NULL) {
1577 		int cpuid = sc->vr_npoll.ifpc_cpuid;
1578 
1579 		info->ifpi_rx[cpuid].poll_func = vr_npoll_compat;
1580 		info->ifpi_rx[cpuid].arg = NULL;
1581 		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
1582 
1583 		if (ifp->if_flags & IFF_RUNNING) {
1584 			/* disable interrupts */
1585 			CSR_WRITE_2(sc, VR_IMR, 0x0000);
1586 		}
1587 		ifq_set_cpuid(&ifp->if_snd, cpuid);
1588 	} else {
1589 		if (ifp->if_flags & IFF_RUNNING) {
1590 			/* enable interrupts */
1591 			CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1592 		}
1593 		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vr_irq));
1594 	}
1595 }
1596 
1597 #endif	/* IFPOLL_ENABLE */
1598 
1599 static void
1600 vr_watchdog(struct ifnet *ifp)
1601 {
1602 	struct vr_softc *sc;
1603 
1604 	sc = ifp->if_softc;
1605 
1606 	IFNET_STAT_INC(ifp, oerrors, 1);
1607 	if_printf(ifp, "watchdog timeout\n");
1608 
1609 	vr_stop(sc);
1610 	vr_reset(sc);
1611 	vr_init(sc);
1612 
1613 	if (!ifq_is_empty(&ifp->if_snd))
1614 		if_devstart(ifp);
1615 }
1616 
1617 /*
1618  * Stop the adapter and free any mbufs allocated to the
1619  * RX and TX lists.
1620  */
1621 static void
1622 vr_stop(struct vr_softc *sc)
1623 {
1624 	int i;
1625 	struct ifnet *ifp;
1626 
1627 	ifp = &sc->arpcom.ac_if;
1628 	ifp->if_timer = 0;
1629 
1630 	callout_stop(&sc->vr_stat_timer);
1631 
1632 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1633 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1634 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1635 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1636 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1637 
1638 	/*
1639 	 * Free data in the RX lists.
1640 	 */
1641 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1642 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1643 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1644 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1645 		}
1646 	}
1647 	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));
1648 
1649 	/*
1650 	 * Reset the TX list buffer pointers.
1651 	 */
1652 	for (i = 0; i < VR_TX_LIST_CNT; i++)
1653 		sc->vr_cdata.vr_tx_chain[i].vr_buf = NULL;
1654 
1655 	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
1656 
1657 	ifp->if_flags &= ~IFF_RUNNING;
1658 	ifq_clr_oactive(&ifp->if_snd);
1659 }
1660 
1661 /*
1662  * Stop all chip I/O so that the kernel's probe routines don't
1663  * get confused by errant DMAs when rebooting.
1664  */
1665 static void
1666 vr_shutdown(device_t dev)
1667 {
1668 	struct vr_softc *sc;
1669 
1670 	sc = device_get_softc(dev);
1671 
1672 	vr_stop(sc);
1673 }
1674