xref: /openbsd/sys/dev/pci/if_vge.c (revision 0f9891f1)
1*0f9891f1Sjsg /*	$OpenBSD: if_vge.c,v 1.78 2024/05/24 06:02:57 jsg Exp $	*/
280c668ffSpvalchev /*	$FreeBSD: if_vge.c,v 1.3 2004/09/11 22:13:25 wpaul Exp $	*/
380c668ffSpvalchev /*
480c668ffSpvalchev  * Copyright (c) 2004
580c668ffSpvalchev  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
680c668ffSpvalchev  *
780c668ffSpvalchev  * Redistribution and use in source and binary forms, with or without
880c668ffSpvalchev  * modification, are permitted provided that the following conditions
980c668ffSpvalchev  * are met:
1080c668ffSpvalchev  * 1. Redistributions of source code must retain the above copyright
1180c668ffSpvalchev  *    notice, this list of conditions and the following disclaimer.
1280c668ffSpvalchev  * 2. Redistributions in binary form must reproduce the above copyright
1380c668ffSpvalchev  *    notice, this list of conditions and the following disclaimer in the
1480c668ffSpvalchev  *    documentation and/or other materials provided with the distribution.
1580c668ffSpvalchev  * 3. All advertising materials mentioning features or use of this software
1680c668ffSpvalchev  *    must display the following acknowledgement:
1780c668ffSpvalchev  *	This product includes software developed by Bill Paul.
1880c668ffSpvalchev  * 4. Neither the name of the author nor the names of any co-contributors
1980c668ffSpvalchev  *    may be used to endorse or promote products derived from this software
2080c668ffSpvalchev  *    without specific prior written permission.
2180c668ffSpvalchev  *
2280c668ffSpvalchev  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
2380c668ffSpvalchev  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2480c668ffSpvalchev  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2580c668ffSpvalchev  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
2680c668ffSpvalchev  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2780c668ffSpvalchev  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2880c668ffSpvalchev  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2980c668ffSpvalchev  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3080c668ffSpvalchev  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3180c668ffSpvalchev  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
3280c668ffSpvalchev  * THE POSSIBILITY OF SUCH DAMAGE.
3380c668ffSpvalchev  */
3480c668ffSpvalchev 
3580c668ffSpvalchev /*
3680c668ffSpvalchev  * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
3780c668ffSpvalchev  *
3880c668ffSpvalchev  * Written by Bill Paul <wpaul@windriver.com>
3980c668ffSpvalchev  * Senior Networking Software Engineer
4080c668ffSpvalchev  * Wind River Systems
4180c668ffSpvalchev  *
4280c668ffSpvalchev  * Ported to OpenBSD by Peter Valchev <pvalchev@openbsd.org>
4380c668ffSpvalchev  */
4480c668ffSpvalchev 
4580c668ffSpvalchev /*
46e959411aStom  * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
4780c668ffSpvalchev  * combines a tri-speed ethernet MAC and PHY, with the following
4880c668ffSpvalchev  * features:
4980c668ffSpvalchev  *
5080c668ffSpvalchev  *	o Jumbo frame support up to 16K
5180c668ffSpvalchev  *	o Transmit and receive flow control
5280c668ffSpvalchev  *	o IPv4 checksum offload
5380c668ffSpvalchev  *	o VLAN tag insertion and stripping
5480c668ffSpvalchev  *	o TCP large send
5580c668ffSpvalchev  *	o 64-bit multicast hash table filter
5680c668ffSpvalchev  *	o 64 entry CAM filter
5780c668ffSpvalchev  *	o 16K RX FIFO and 48K TX FIFO memory
5880c668ffSpvalchev  *	o Interrupt moderation
5980c668ffSpvalchev  *
6080c668ffSpvalchev  * The VT6122 supports up to four transmit DMA queues. The descriptors
6180c668ffSpvalchev  * in the transmit ring can address up to 7 data fragments; frames which
6280c668ffSpvalchev  * span more than 7 data buffers must be coalesced, but in general the
6380c668ffSpvalchev  * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
6480c668ffSpvalchev  * long. The receive descriptors address only a single buffer.
6580c668ffSpvalchev  *
6680c668ffSpvalchev  * There are two peculiar design issues with the VT6122. One is that
6780c668ffSpvalchev  * receive data buffers must be aligned on a 32-bit boundary. This is
6880c668ffSpvalchev  * not a problem where the VT6122 is used as a LOM device in x86-based
6980c668ffSpvalchev  * systems, but on architectures that generate unaligned access traps, we
7080c668ffSpvalchev  * have to do some copying.
7180c668ffSpvalchev  *
7280c668ffSpvalchev  * The other issue has to do with the way 64-bit addresses are handled.
7380c668ffSpvalchev  * The DMA descriptors only allow you to specify 48 bits of addressing
7480c668ffSpvalchev  * information. The remaining 16 bits are specified using one of the
7580c668ffSpvalchev  * I/O registers. If you only have a 32-bit system, then this isn't
7680c668ffSpvalchev  * an issue, but if you have a 64-bit system and more than 4GB of
7780c668ffSpvalchev  * memory, you have to make sure your network data buffers reside
7880c668ffSpvalchev  * in the same 48-bit 'segment.'
7980c668ffSpvalchev  *
8080c668ffSpvalchev  * Special thanks to Ryan Fu at VIA Networking for providing documentation
8180c668ffSpvalchev  * and sample NICs for testing.
8280c668ffSpvalchev  */
8380c668ffSpvalchev 
8480c668ffSpvalchev #include "bpfilter.h"
85772b0c86Sbrad #include "vlan.h"
8680c668ffSpvalchev 
8780c668ffSpvalchev #include <sys/param.h>
8880c668ffSpvalchev #include <sys/endian.h>
8980c668ffSpvalchev #include <sys/systm.h>
9080c668ffSpvalchev #include <sys/sockio.h>
9180c668ffSpvalchev #include <sys/mbuf.h>
9280c668ffSpvalchev #include <sys/device.h>
9306a9f16dSbrad #include <sys/timeout.h>
9480c668ffSpvalchev 
9580c668ffSpvalchev #include <net/if.h>
9680c668ffSpvalchev #include <net/if_media.h>
9780c668ffSpvalchev 
9880c668ffSpvalchev #include <netinet/in.h>
9980c668ffSpvalchev #include <netinet/if_ether.h>
10080c668ffSpvalchev 
10180c668ffSpvalchev #if NBPFILTER > 0
10280c668ffSpvalchev #include <net/bpf.h>
10380c668ffSpvalchev #endif
10480c668ffSpvalchev 
10580c668ffSpvalchev #include <dev/mii/miivar.h>
10680c668ffSpvalchev 
10780c668ffSpvalchev #include <dev/pci/pcireg.h>
10880c668ffSpvalchev #include <dev/pci/pcivar.h>
10980c668ffSpvalchev #include <dev/pci/pcidevs.h>
11080c668ffSpvalchev 
11180c668ffSpvalchev #include <dev/pci/if_vgereg.h>
11280c668ffSpvalchev #include <dev/pci/if_vgevar.h>
11380c668ffSpvalchev 
11480c668ffSpvalchev int vge_probe		(struct device *, void *, void *);
11580c668ffSpvalchev void vge_attach		(struct device *, struct device *, void *);
116d8ad6bb2Skettenis int vge_detach		(struct device *, int);
11780c668ffSpvalchev 
11880c668ffSpvalchev int vge_encap		(struct vge_softc *, struct mbuf *, int);
11980c668ffSpvalchev 
12080c668ffSpvalchev int vge_allocmem		(struct vge_softc *);
121d8ad6bb2Skettenis void vge_freemem	(struct vge_softc *);
12280c668ffSpvalchev int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
12380c668ffSpvalchev int vge_rx_list_init	(struct vge_softc *);
12480c668ffSpvalchev int vge_tx_list_init	(struct vge_softc *);
12580c668ffSpvalchev void vge_rxeof		(struct vge_softc *);
12680c668ffSpvalchev void vge_txeof		(struct vge_softc *);
12780c668ffSpvalchev int vge_intr		(void *);
12880c668ffSpvalchev void vge_tick		(void *);
12980c668ffSpvalchev void vge_start		(struct ifnet *);
13080c668ffSpvalchev int vge_ioctl		(struct ifnet *, u_long, caddr_t);
13180c668ffSpvalchev int vge_init		(struct ifnet *);
13280c668ffSpvalchev void vge_stop		(struct vge_softc *);
13380c668ffSpvalchev void vge_watchdog	(struct ifnet *);
13480c668ffSpvalchev int vge_ifmedia_upd	(struct ifnet *);
13580c668ffSpvalchev void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
13680c668ffSpvalchev 
137eda47238Sbrad #ifdef VGE_EEPROM
13880c668ffSpvalchev void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
139eda47238Sbrad #endif
14080c668ffSpvalchev void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);
14180c668ffSpvalchev 
14280c668ffSpvalchev void vge_miipoll_start	(struct vge_softc *);
14380c668ffSpvalchev void vge_miipoll_stop	(struct vge_softc *);
14480c668ffSpvalchev int vge_miibus_readreg	(struct device *, int, int);
14580c668ffSpvalchev void vge_miibus_writereg (struct device *, int, int, int);
14680c668ffSpvalchev void vge_miibus_statchg	(struct device *);
14780c668ffSpvalchev 
14880c668ffSpvalchev void vge_cam_clear	(struct vge_softc *);
14980c668ffSpvalchev int vge_cam_set		(struct vge_softc *, uint8_t *);
1508ec68754Sbrad void vge_iff		(struct vge_softc *);
15180c668ffSpvalchev void vge_reset		(struct vge_softc *);
15280c668ffSpvalchev 
/* Autoconf attachment glue: probe/attach/detach entry points. */
const struct cfattach vge_ca = {
	sizeof(struct vge_softc), vge_probe, vge_attach, vge_detach
};
15680c668ffSpvalchev 
/* Device class: network interface driver named "vge". */
struct cfdriver vge_cd = {
	NULL, "vge", DV_IFNET
};
16080c668ffSpvalchev 
16180c668ffSpvalchev #define VGE_PCI_LOIO             0x10
16280c668ffSpvalchev #define VGE_PCI_LOMEM            0x14
16380c668ffSpvalchev 
16480c668ffSpvalchev int vge_debug = 0;
16580c668ffSpvalchev #define DPRINTF(x)	if (vge_debug) printf x
16680c668ffSpvalchev #define DPRINTFN(n, x)	if (vge_debug >= (n)) printf x
16780c668ffSpvalchev 
/* PCI vendor/product IDs of supported chips. */
const struct pci_matchid vge_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X },
};
17180c668ffSpvalchev 
172eda47238Sbrad #ifdef VGE_EEPROM
17380c668ffSpvalchev /*
17480c668ffSpvalchev  * Read a word of data stored in the EEPROM at address 'addr.'
17580c668ffSpvalchev  */
17680c668ffSpvalchev void
vge_eeprom_getword(struct vge_softc * sc,int addr,u_int16_t * dest)17780c668ffSpvalchev vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest)
17880c668ffSpvalchev {
179f0f154b9Sbrad 	int			i;
18080c668ffSpvalchev 	u_int16_t		word = 0;
18180c668ffSpvalchev 
18280c668ffSpvalchev 	/*
18380c668ffSpvalchev 	 * Enter EEPROM embedded programming mode. In order to
18480c668ffSpvalchev 	 * access the EEPROM at all, we first have to set the
18580c668ffSpvalchev 	 * EELOAD bit in the CHIPCFG2 register.
18680c668ffSpvalchev 	 */
18780c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
18880c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
18980c668ffSpvalchev 
19080c668ffSpvalchev 	/* Select the address of the word we want to read */
19180c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_EEADDR, addr);
19280c668ffSpvalchev 
19380c668ffSpvalchev 	/* Issue read command */
19480c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
19580c668ffSpvalchev 
19680c668ffSpvalchev 	/* Wait for the done bit to be set. */
19780c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
19880c668ffSpvalchev 		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
19980c668ffSpvalchev 			break;
20080c668ffSpvalchev 	}
20180c668ffSpvalchev 
20280c668ffSpvalchev 	if (i == VGE_TIMEOUT) {
20380c668ffSpvalchev 		printf("%s: EEPROM read timed out\n", sc->vge_dev.dv_xname);
20480c668ffSpvalchev 		*dest = 0;
20580c668ffSpvalchev 		return;
20680c668ffSpvalchev 	}
20780c668ffSpvalchev 
20880c668ffSpvalchev 	/* Read the result */
20980c668ffSpvalchev 	word = CSR_READ_2(sc, VGE_EERDDAT);
21080c668ffSpvalchev 
21180c668ffSpvalchev 	/* Turn off EEPROM access mode. */
21280c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
21380c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
21480c668ffSpvalchev 
21580c668ffSpvalchev 	*dest = word;
21680c668ffSpvalchev }
217eda47238Sbrad #endif
21880c668ffSpvalchev 
21980c668ffSpvalchev /*
22080c668ffSpvalchev  * Read a sequence of words from the EEPROM.
22180c668ffSpvalchev  */
22280c668ffSpvalchev void
vge_read_eeprom(struct vge_softc * sc,caddr_t dest,int off,int cnt,int swap)22380c668ffSpvalchev vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt,
22480c668ffSpvalchev     int swap)
22580c668ffSpvalchev {
22680c668ffSpvalchev 	int			i;
227eda47238Sbrad #ifdef VGE_EEPROM
22880c668ffSpvalchev 	u_int16_t		word = 0, *ptr;
22980c668ffSpvalchev 
23080c668ffSpvalchev 	for (i = 0; i < cnt; i++) {
23180c668ffSpvalchev 		vge_eeprom_getword(sc, off + i, &word);
23280c668ffSpvalchev 		ptr = (u_int16_t *)(dest + (i * 2));
23380c668ffSpvalchev 		if (swap)
23480c668ffSpvalchev 			*ptr = ntohs(word);
23580c668ffSpvalchev 		else
23680c668ffSpvalchev 			*ptr = word;
23780c668ffSpvalchev 	}
238eda47238Sbrad #else
239eda47238Sbrad 	for (i = 0; i < ETHER_ADDR_LEN; i++)
240eda47238Sbrad 		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
241eda47238Sbrad #endif
24280c668ffSpvalchev }
24380c668ffSpvalchev 
24480c668ffSpvalchev void
vge_miipoll_stop(struct vge_softc * sc)24580c668ffSpvalchev vge_miipoll_stop(struct vge_softc *sc)
24680c668ffSpvalchev {
24780c668ffSpvalchev 	int			i;
24880c668ffSpvalchev 
24980c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_MIICMD, 0);
25080c668ffSpvalchev 
25180c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
25280c668ffSpvalchev 		DELAY(1);
25380c668ffSpvalchev 		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
25480c668ffSpvalchev 			break;
25580c668ffSpvalchev 	}
25680c668ffSpvalchev 
25780c668ffSpvalchev 	if (i == VGE_TIMEOUT)
25880c668ffSpvalchev 		printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname);
25980c668ffSpvalchev }
26080c668ffSpvalchev 
26180c668ffSpvalchev void
vge_miipoll_start(struct vge_softc * sc)26280c668ffSpvalchev vge_miipoll_start(struct vge_softc *sc)
26380c668ffSpvalchev {
26480c668ffSpvalchev 	int			i;
26580c668ffSpvalchev 
26680c668ffSpvalchev 	/* First, make sure we're idle. */
26780c668ffSpvalchev 
26880c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_MIICMD, 0);
26980c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
27080c668ffSpvalchev 
27180c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
27280c668ffSpvalchev 		DELAY(1);
27380c668ffSpvalchev 		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
27480c668ffSpvalchev 			break;
27580c668ffSpvalchev 	}
27680c668ffSpvalchev 
27780c668ffSpvalchev 	if (i == VGE_TIMEOUT) {
27880c668ffSpvalchev 		printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname);
27980c668ffSpvalchev 		return;
28080c668ffSpvalchev 	}
28180c668ffSpvalchev 
28280c668ffSpvalchev 	/* Now enable auto poll mode. */
28380c668ffSpvalchev 
28480c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
28580c668ffSpvalchev 
28680c668ffSpvalchev 	/* And make sure it started. */
28780c668ffSpvalchev 
28880c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
28980c668ffSpvalchev 		DELAY(1);
29080c668ffSpvalchev 		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
29180c668ffSpvalchev 			break;
29280c668ffSpvalchev 	}
29380c668ffSpvalchev 
29480c668ffSpvalchev 	if (i == VGE_TIMEOUT)
29580c668ffSpvalchev 		printf("%s: failed to start MII autopoll\n", sc->vge_dev.dv_xname);
29680c668ffSpvalchev }
29780c668ffSpvalchev 
29880c668ffSpvalchev int
vge_miibus_readreg(struct device * dev,int phy,int reg)29980c668ffSpvalchev vge_miibus_readreg(struct device *dev, int phy, int reg)
30080c668ffSpvalchev {
30180c668ffSpvalchev 	struct vge_softc	*sc = (struct vge_softc *)dev;
30280c668ffSpvalchev 	int			i, s;
30380c668ffSpvalchev 	u_int16_t		rval = 0;
30480c668ffSpvalchev 
30580c668ffSpvalchev 	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
30680c668ffSpvalchev 		return(0);
30780c668ffSpvalchev 
308e811f458Sbrad 	s = splnet();
30980c668ffSpvalchev 
31080c668ffSpvalchev 	vge_miipoll_stop(sc);
31180c668ffSpvalchev 
31280c668ffSpvalchev 	/* Specify the register we want to read. */
31380c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
31480c668ffSpvalchev 
31580c668ffSpvalchev 	/* Issue read command. */
31680c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
31780c668ffSpvalchev 
31880c668ffSpvalchev 	/* Wait for the read command bit to self-clear. */
31980c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
32080c668ffSpvalchev 		DELAY(1);
32180c668ffSpvalchev 		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
32280c668ffSpvalchev 			break;
32380c668ffSpvalchev 	}
32480c668ffSpvalchev 
32580c668ffSpvalchev 	if (i == VGE_TIMEOUT)
32680c668ffSpvalchev 		printf("%s: MII read timed out\n", sc->vge_dev.dv_xname);
32780c668ffSpvalchev 	else
32880c668ffSpvalchev 		rval = CSR_READ_2(sc, VGE_MIIDATA);
32980c668ffSpvalchev 
33080c668ffSpvalchev 	vge_miipoll_start(sc);
33180c668ffSpvalchev 	splx(s);
33280c668ffSpvalchev 
33380c668ffSpvalchev 	return (rval);
33480c668ffSpvalchev }
33580c668ffSpvalchev 
33680c668ffSpvalchev void
vge_miibus_writereg(struct device * dev,int phy,int reg,int data)33780c668ffSpvalchev vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
33880c668ffSpvalchev {
33980c668ffSpvalchev 	struct vge_softc	*sc = (struct vge_softc *)dev;
34080c668ffSpvalchev 	int			i, s;
34180c668ffSpvalchev 
34280c668ffSpvalchev 	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
34380c668ffSpvalchev 		return;
34480c668ffSpvalchev 
345e811f458Sbrad 	s = splnet();
34680c668ffSpvalchev 	vge_miipoll_stop(sc);
34780c668ffSpvalchev 
34880c668ffSpvalchev 	/* Specify the register we want to write. */
34980c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
35080c668ffSpvalchev 
35180c668ffSpvalchev 	/* Specify the data we want to write. */
35280c668ffSpvalchev 	CSR_WRITE_2(sc, VGE_MIIDATA, data);
35380c668ffSpvalchev 
35480c668ffSpvalchev 	/* Issue write command. */
35580c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
35680c668ffSpvalchev 
35780c668ffSpvalchev 	/* Wait for the write command bit to self-clear. */
35880c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
35980c668ffSpvalchev 		DELAY(1);
36080c668ffSpvalchev 		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
36180c668ffSpvalchev 			break;
36280c668ffSpvalchev 	}
36380c668ffSpvalchev 
36480c668ffSpvalchev 	if (i == VGE_TIMEOUT) {
36580c668ffSpvalchev 		printf("%s: MII write timed out\n", sc->vge_dev.dv_xname);
36680c668ffSpvalchev 	}
36780c668ffSpvalchev 
36880c668ffSpvalchev 	vge_miipoll_start(sc);
36980c668ffSpvalchev 	splx(s);
37080c668ffSpvalchev }
37180c668ffSpvalchev 
37280c668ffSpvalchev void
vge_cam_clear(struct vge_softc * sc)37380c668ffSpvalchev vge_cam_clear(struct vge_softc *sc)
37480c668ffSpvalchev {
37580c668ffSpvalchev 	int			i;
37680c668ffSpvalchev 
37780c668ffSpvalchev 	/*
37880c668ffSpvalchev 	 * Turn off all the mask bits. This tells the chip
37980c668ffSpvalchev 	 * that none of the entries in the CAM filter are valid.
38080c668ffSpvalchev 	 * desired entries will be enabled as we fill the filter in.
38180c668ffSpvalchev 	 */
38280c668ffSpvalchev 
38380c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
38480c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
38580c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
38680c668ffSpvalchev 	for (i = 0; i < 8; i++)
38780c668ffSpvalchev 		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
38880c668ffSpvalchev 
38980c668ffSpvalchev 	/* Clear the VLAN filter too. */
39080c668ffSpvalchev 
39180c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
39280c668ffSpvalchev 	for (i = 0; i < 8; i++)
39380c668ffSpvalchev 		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
39480c668ffSpvalchev 
39580c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
39680c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
39780c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
39880c668ffSpvalchev 
39980c668ffSpvalchev 	sc->vge_camidx = 0;
40080c668ffSpvalchev }
40180c668ffSpvalchev 
40280c668ffSpvalchev int
vge_cam_set(struct vge_softc * sc,uint8_t * addr)40380c668ffSpvalchev vge_cam_set(struct vge_softc *sc, uint8_t *addr)
40480c668ffSpvalchev {
40580c668ffSpvalchev 	int			i, error = 0;
40680c668ffSpvalchev 
40780c668ffSpvalchev 	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
40880c668ffSpvalchev 		return(ENOSPC);
40980c668ffSpvalchev 
41080c668ffSpvalchev 	/* Select the CAM data page. */
41180c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
41280c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
41380c668ffSpvalchev 
41480c668ffSpvalchev 	/* Set the filter entry we want to update and enable writing. */
41580c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
41680c668ffSpvalchev 
41780c668ffSpvalchev 	/* Write the address to the CAM registers */
41880c668ffSpvalchev 	for (i = 0; i < ETHER_ADDR_LEN; i++)
41980c668ffSpvalchev 		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
42080c668ffSpvalchev 
42180c668ffSpvalchev 	/* Issue a write command. */
42280c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
42380c668ffSpvalchev 
42480c668ffSpvalchev 	/* Wake for it to clear. */
42580c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
42680c668ffSpvalchev 		DELAY(1);
42780c668ffSpvalchev 		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
42880c668ffSpvalchev 			break;
42980c668ffSpvalchev 	}
43080c668ffSpvalchev 
43180c668ffSpvalchev 	if (i == VGE_TIMEOUT) {
43280c668ffSpvalchev 		printf("%s: setting CAM filter failed\n", sc->vge_dev.dv_xname);
43380c668ffSpvalchev 		error = EIO;
43480c668ffSpvalchev 		goto fail;
43580c668ffSpvalchev 	}
43680c668ffSpvalchev 
43780c668ffSpvalchev 	/* Select the CAM mask page. */
43880c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
43980c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
44080c668ffSpvalchev 
44180c668ffSpvalchev 	/* Set the mask bit that enables this filter. */
44280c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
44380c668ffSpvalchev 	    1<<(sc->vge_camidx & 7));
44480c668ffSpvalchev 
44580c668ffSpvalchev 	sc->vge_camidx++;
44680c668ffSpvalchev 
44780c668ffSpvalchev fail:
44880c668ffSpvalchev 	/* Turn off access to CAM. */
44980c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
45080c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
45180c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
45280c668ffSpvalchev 
45380c668ffSpvalchev 	return (error);
45480c668ffSpvalchev }
45580c668ffSpvalchev 
45680c668ffSpvalchev /*
4578ec68754Sbrad  * We use the 64-entry CAM filter for perfect filtering.
4588ec68754Sbrad  * If there's more than 64 multicast addresses, we use the
4598ec68754Sbrad  * hash filter instead.
46080c668ffSpvalchev  */
46180c668ffSpvalchev void
vge_iff(struct vge_softc * sc)4628ec68754Sbrad vge_iff(struct vge_softc *sc)
46380c668ffSpvalchev {
46480c668ffSpvalchev 	struct arpcom		*ac = &sc->arpcom;
46580c668ffSpvalchev 	struct ifnet		*ifp = &ac->ac_if;
46680c668ffSpvalchev 	struct ether_multi	*enm;
46780c668ffSpvalchev 	struct ether_multistep	step;
4688ec68754Sbrad 	u_int32_t		h = 0, hashes[2];
4698ec68754Sbrad 	u_int8_t		rxctl;
47037938799Sbrad 	int			error;
47180c668ffSpvalchev 
47280c668ffSpvalchev 	vge_cam_clear(sc);
4738ec68754Sbrad 	rxctl = CSR_READ_1(sc, VGE_RXCTL);
4748ec68754Sbrad 	rxctl &= ~(VGE_RXCTL_RX_BCAST | VGE_RXCTL_RX_MCAST |
4758ec68754Sbrad 	    VGE_RXCTL_RX_PROMISC | VGE_RXCTL_RX_UCAST);
4768ec68754Sbrad 	bzero(hashes, sizeof(hashes));
477cc796f38Sbrad 	ifp->if_flags &= ~IFF_ALLMULTI;
47880c668ffSpvalchev 
47980c668ffSpvalchev 	/*
4808ec68754Sbrad 	 * Always accept broadcast frames.
4818ec68754Sbrad 	 * Always accept frames destined to our station address.
48280c668ffSpvalchev 	 */
4838ec68754Sbrad 	rxctl |= VGE_RXCTL_RX_BCAST | VGE_RXCTL_RX_UCAST;
48480c668ffSpvalchev 
4858ec68754Sbrad 	if ((ifp->if_flags & IFF_PROMISC) == 0)
4868ec68754Sbrad 		rxctl |= VGE_RXCTL_RX_MCAST;
4878ec68754Sbrad 
4888ec68754Sbrad 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
4898ec68754Sbrad 		ifp->if_flags |= IFF_ALLMULTI;
4908ec68754Sbrad 		if (ifp->if_flags & IFF_PROMISC)
4918ec68754Sbrad 			rxctl |= VGE_RXCTL_RX_PROMISC;
4928ec68754Sbrad 		hashes[0] = hashes[1] = 0xFFFFFFFF;
4938ec68754Sbrad 	} else if (ac->ac_multicnt > VGE_CAM_MAXADDRS) {
49480c668ffSpvalchev 		ETHER_FIRST_MULTI(step, ac, enm);
49580c668ffSpvalchev 		while (enm != NULL) {
4968ec68754Sbrad 			h = ether_crc32_be(enm->enm_addrlo,
4978ec68754Sbrad 			    ETHER_ADDR_LEN) >> 26;
498cc796f38Sbrad 
4998ec68754Sbrad 			hashes[h >> 5] |= 1 << (h & 0x1f);
5008ec68754Sbrad 
5018ec68754Sbrad 			ETHER_NEXT_MULTI(step, enm);
5028ec68754Sbrad 		}
5038ec68754Sbrad 	} else {
5048ec68754Sbrad 		ETHER_FIRST_MULTI(step, ac, enm);
5058ec68754Sbrad 		while (enm != NULL) {
50637938799Sbrad 			error = vge_cam_set(sc, enm->enm_addrlo);
50737938799Sbrad 			if (error)
50880c668ffSpvalchev 				break;
509cc796f38Sbrad 
51037938799Sbrad 			ETHER_NEXT_MULTI(step, enm);
51137938799Sbrad 		}
51280c668ffSpvalchev 	}
51380c668ffSpvalchev 
51480c668ffSpvalchev 	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
51580c668ffSpvalchev 	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
5168ec68754Sbrad 	CSR_WRITE_1(sc, VGE_RXCTL, rxctl);
51780c668ffSpvalchev }
51880c668ffSpvalchev 
51980c668ffSpvalchev void
vge_reset(struct vge_softc * sc)52080c668ffSpvalchev vge_reset(struct vge_softc *sc)
52180c668ffSpvalchev {
522f0f154b9Sbrad 	int			i;
52380c668ffSpvalchev 
52480c668ffSpvalchev 	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
52580c668ffSpvalchev 
52680c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
52780c668ffSpvalchev 		DELAY(5);
52880c668ffSpvalchev 		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
52980c668ffSpvalchev 			break;
53080c668ffSpvalchev 	}
53180c668ffSpvalchev 
53280c668ffSpvalchev 	if (i == VGE_TIMEOUT) {
53380c668ffSpvalchev 		printf("%s: soft reset timed out", sc->vge_dev.dv_xname);
53480c668ffSpvalchev 		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
53580c668ffSpvalchev 		DELAY(2000);
53680c668ffSpvalchev 	}
53780c668ffSpvalchev 
53880c668ffSpvalchev 	DELAY(5000);
53980c668ffSpvalchev 
54080c668ffSpvalchev 	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
54180c668ffSpvalchev 
54280c668ffSpvalchev 	for (i = 0; i < VGE_TIMEOUT; i++) {
54380c668ffSpvalchev 		DELAY(5);
54480c668ffSpvalchev 		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
54580c668ffSpvalchev 			break;
54680c668ffSpvalchev 	}
54780c668ffSpvalchev 
54880c668ffSpvalchev 	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
54980c668ffSpvalchev }
55080c668ffSpvalchev 
55180c668ffSpvalchev /*
55280c668ffSpvalchev  * Probe for a VIA gigabit chip. Check the PCI vendor and device
55380c668ffSpvalchev  * IDs against our list and return a device name if we find a match.
55480c668ffSpvalchev  */
int
vge_probe(struct device *dev, void *match, void *aux)
{
	/* Match the PCI attach args against our vendor/product table. */
	return (pci_matchbyid((struct pci_attach_args *)aux, vge_devices,
	    nitems(vge_devices)));
}
56180c668ffSpvalchev 
56280c668ffSpvalchev /*
56380c668ffSpvalchev  * Allocate memory for RX/TX rings
56480c668ffSpvalchev  */
56580c668ffSpvalchev int
vge_allocmem(struct vge_softc * sc)56680c668ffSpvalchev vge_allocmem(struct vge_softc *sc)
56780c668ffSpvalchev {
56880c668ffSpvalchev 	int			nseg, rseg;
56980c668ffSpvalchev 	int			i, error;
57080c668ffSpvalchev 
57180c668ffSpvalchev 	nseg = 32;
57280c668ffSpvalchev 
57380c668ffSpvalchev 	/* Allocate DMA'able memory for the TX ring */
57480c668ffSpvalchev 
57580c668ffSpvalchev 	error = bus_dmamap_create(sc->sc_dmat, VGE_TX_LIST_SZ, 1,
57680c668ffSpvalchev 	    VGE_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
57780c668ffSpvalchev 	    &sc->vge_ldata.vge_tx_list_map);
57880c668ffSpvalchev 	if (error)
57980c668ffSpvalchev 		return (ENOMEM);
58080c668ffSpvalchev 	error = bus_dmamem_alloc(sc->sc_dmat, VGE_TX_LIST_SZ,
58180c668ffSpvalchev 	    ETHER_ALIGN, 0,
58280c668ffSpvalchev 	    &sc->vge_ldata.vge_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
58380c668ffSpvalchev 	if (error) {
58480c668ffSpvalchev 		printf("%s: can't alloc TX list\n", sc->vge_dev.dv_xname);
58580c668ffSpvalchev 		return (ENOMEM);
58680c668ffSpvalchev 	}
58780c668ffSpvalchev 
58880c668ffSpvalchev 	/* Load the map for the TX ring. */
58980c668ffSpvalchev 	error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,
59080c668ffSpvalchev 	     1, VGE_TX_LIST_SZ,
59180c668ffSpvalchev 	     (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
59280c668ffSpvalchev 	memset(sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
59380c668ffSpvalchev 	if (error) {
59480c668ffSpvalchev 		printf("%s: can't map TX dma buffers\n",
59580c668ffSpvalchev 		    sc->vge_dev.dv_xname);
59680c668ffSpvalchev 		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg);
59780c668ffSpvalchev 		return (ENOMEM);
59880c668ffSpvalchev 	}
59980c668ffSpvalchev 
60080c668ffSpvalchev 	error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map,
60180c668ffSpvalchev 	    sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
60280c668ffSpvalchev 	if (error) {
60380c668ffSpvalchev 		printf("%s: can't load TX dma map\n", sc->vge_dev.dv_xname);
60480c668ffSpvalchev 		bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
60580c668ffSpvalchev 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list,
60680c668ffSpvalchev 		    VGE_TX_LIST_SZ);
60780c668ffSpvalchev 		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg);
60880c668ffSpvalchev 		return (ENOMEM);
60980c668ffSpvalchev 	}
61080c668ffSpvalchev 
61180c668ffSpvalchev 	/* Create DMA maps for TX buffers */
61280c668ffSpvalchev 
61380c668ffSpvalchev 	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
6140bd04349Sdlg 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg,
6150bd04349Sdlg 		    VGE_TX_FRAGS, MCLBYTES, 0, BUS_DMA_ALLOCNOW,
61680c668ffSpvalchev 		    &sc->vge_ldata.vge_tx_dmamap[i]);
61780c668ffSpvalchev 		if (error) {
61880c668ffSpvalchev 			printf("%s: can't create DMA map for TX\n",
61980c668ffSpvalchev 			    sc->vge_dev.dv_xname);
62080c668ffSpvalchev 			return (ENOMEM);
62180c668ffSpvalchev 		}
62280c668ffSpvalchev 	}
62380c668ffSpvalchev 
62480c668ffSpvalchev 	/* Allocate DMA'able memory for the RX ring */
62580c668ffSpvalchev 
62680c668ffSpvalchev 	error = bus_dmamap_create(sc->sc_dmat, VGE_RX_LIST_SZ, 1,
62780c668ffSpvalchev 	    VGE_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
62880c668ffSpvalchev 	    &sc->vge_ldata.vge_rx_list_map);
62980c668ffSpvalchev 	if (error)
63080c668ffSpvalchev 		return (ENOMEM);
63180c668ffSpvalchev 	error = bus_dmamem_alloc(sc->sc_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
63280c668ffSpvalchev 	    0, &sc->vge_ldata.vge_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
63380c668ffSpvalchev 	if (error) {
63480c668ffSpvalchev 		printf("%s: can't alloc RX list\n", sc->vge_dev.dv_xname);
63580c668ffSpvalchev 		return (ENOMEM);
63680c668ffSpvalchev 	}
63780c668ffSpvalchev 
63880c668ffSpvalchev 	/* Load the map for the RX ring. */
63980c668ffSpvalchev 
64080c668ffSpvalchev 	error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,
64180c668ffSpvalchev 	     1, VGE_RX_LIST_SZ,
64280c668ffSpvalchev 	     (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
64380c668ffSpvalchev 	memset(sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
64480c668ffSpvalchev 	if (error) {
64580c668ffSpvalchev 		printf("%s: can't map RX dma buffers\n",
64680c668ffSpvalchev 		    sc->vge_dev.dv_xname);
64780c668ffSpvalchev 		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg);
64880c668ffSpvalchev 		return (ENOMEM);
64980c668ffSpvalchev 	}
65080c668ffSpvalchev 	error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map,
65180c668ffSpvalchev 	    sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
65280c668ffSpvalchev 	if (error) {
65380c668ffSpvalchev 		printf("%s: can't load RX dma map\n", sc->vge_dev.dv_xname);
65480c668ffSpvalchev 		bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
65580c668ffSpvalchev 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list,
65680c668ffSpvalchev 		    VGE_RX_LIST_SZ);
65780c668ffSpvalchev 		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg);
65880c668ffSpvalchev 		return (ENOMEM);
65980c668ffSpvalchev 	}
66080c668ffSpvalchev 
66180c668ffSpvalchev 	/* Create DMA maps for RX buffers */
66280c668ffSpvalchev 
66380c668ffSpvalchev 	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
66480c668ffSpvalchev 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
66580c668ffSpvalchev 		    MCLBYTES, 0, BUS_DMA_ALLOCNOW,
66680c668ffSpvalchev 		    &sc->vge_ldata.vge_rx_dmamap[i]);
66780c668ffSpvalchev 		if (error) {
66880c668ffSpvalchev 			printf("%s: can't create DMA map for RX\n",
66980c668ffSpvalchev 			    sc->vge_dev.dv_xname);
67080c668ffSpvalchev 			return (ENOMEM);
67180c668ffSpvalchev 		}
67280c668ffSpvalchev 	}
67380c668ffSpvalchev 
67480c668ffSpvalchev 	return (0);
67580c668ffSpvalchev }
67680c668ffSpvalchev 
677d8ad6bb2Skettenis void
vge_freemem(struct vge_softc * sc)678d8ad6bb2Skettenis vge_freemem(struct vge_softc *sc)
679d8ad6bb2Skettenis {
680d8ad6bb2Skettenis 	int i;
681d8ad6bb2Skettenis 
682d8ad6bb2Skettenis 	for (i = 0; i < VGE_RX_DESC_CNT; i++)
683d8ad6bb2Skettenis 		bus_dmamap_destroy(sc->sc_dmat,
684d8ad6bb2Skettenis 		    sc->vge_ldata.vge_rx_dmamap[i]);
685d8ad6bb2Skettenis 
686d8ad6bb2Skettenis 	bus_dmamap_unload(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
687d8ad6bb2Skettenis 	bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
688d8ad6bb2Skettenis 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list,
689d8ad6bb2Skettenis 	    VGE_RX_LIST_SZ);
690d8ad6bb2Skettenis 	bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, 1);
691d8ad6bb2Skettenis 
692d8ad6bb2Skettenis 	for (i = 0; i < VGE_TX_DESC_CNT; i++)
693d8ad6bb2Skettenis 		bus_dmamap_destroy(sc->sc_dmat,
694d8ad6bb2Skettenis 		    sc->vge_ldata.vge_tx_dmamap[i]);
695d8ad6bb2Skettenis 
696d8ad6bb2Skettenis 	bus_dmamap_unload(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
697d8ad6bb2Skettenis 	bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
698d8ad6bb2Skettenis 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list,
699d8ad6bb2Skettenis 	    VGE_TX_LIST_SZ);
700d8ad6bb2Skettenis 	bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, 1);
701d8ad6bb2Skettenis }
702d8ad6bb2Skettenis 
70380c668ffSpvalchev /*
70480c668ffSpvalchev  * Attach the interface. Allocate softc structures, do ifmedia
70580c668ffSpvalchev  * setup and ethernet/BPF attach.
70680c668ffSpvalchev  */
void
vge_attach(struct device *parent, struct device *self, void *aux)
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vge_softc	*sc = (struct vge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct ifnet		*ifp;
	int			error = 0;

	/*
	 * Map control/status registers: try memory space first,
	 * fall back to i/o space if that fails.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->vge_btag, &sc->vge_bhandle, NULL, &sc->vge_bsize, 0)) {
		if (pci_mapreg_map(pa, VGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
		    &sc->vge_btag, &sc->vge_bhandle, NULL, &sc->vge_bsize, 0)) {
			printf(": can't map mem or i/o space\n");
			return;
		}
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc,
	    sc->vge_dev.dv_xname);
	if (sc->vge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		return;
	}
	printf(": %s", intrstr);

	/* Stash the DMA and PCI tags; vge_detach() needs sc_pc. */
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 1);

	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	printf(", address %s\n",
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Set up the TX/RX descriptor rings and their DMA maps. */
	error = vge_allocmem(sc);

	/*
	 * NOTE(review): if vge_allocmem() fails, the interrupt handler
	 * established above is left in place -- confirm this is the
	 * intended behavior for a failed attach.
	 */
	if (error)
		return;

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
#ifdef VGE_JUMBO
	ifp->if_hardmtu = VGE_JUMBO_MTU;
#endif
	ifq_init_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);

	/* Advertise hardware IPv4/TCP/UDP checksum offload. */
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
				IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->vge_dev.dv_xname, IFNAMSIZ);

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    vge_ifmedia_upd, vge_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: install a manual media entry anyway. */
		printf("%s: no PHY found!\n", sc->vge_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Periodic tick used by vge_tick() for MII/link polling. */
	timeout_set(&sc->timer_handle, vge_tick, sc);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}
81480c668ffSpvalchev 
/*
 * Detach the interface: undo everything vge_attach() did, in
 * reverse dependency order.
 */
int
vge_detach(struct device *self, int flags)
{
	struct vge_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Stop interrupt delivery before tearing anything down. */
	pci_intr_disestablish(sc->sc_pc, sc->vge_intrhand);

	/* Halt the chip so no DMA is in flight when memory is freed. */
	vge_stop(sc);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	/* Remove from the network stack before releasing resources. */
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free descriptor rings and DMA maps. */
	vge_freemem(sc);

	bus_space_unmap(sc->vge_btag, sc->vge_bhandle, sc->vge_bsize);
	return (0);
}
839d8ad6bb2Skettenis 
840d8ad6bb2Skettenis int
vge_newbuf(struct vge_softc * sc,int idx,struct mbuf * m)84180c668ffSpvalchev vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
84280c668ffSpvalchev {
84380c668ffSpvalchev 	struct mbuf		*m_new = NULL;
84480c668ffSpvalchev 	struct vge_rx_desc	*r;
84580c668ffSpvalchev 	bus_dmamap_t		rxmap = sc->vge_ldata.vge_rx_dmamap[idx];
84680c668ffSpvalchev 	int			i;
84780c668ffSpvalchev 
84880c668ffSpvalchev 	if (m == NULL) {
84980c668ffSpvalchev 		/* Allocate a new mbuf */
85080c668ffSpvalchev 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
85180c668ffSpvalchev 		if (m_new == NULL)
85280c668ffSpvalchev 			return (ENOBUFS);
85380c668ffSpvalchev 
85480c668ffSpvalchev 		/* Allocate a cluster */
85580c668ffSpvalchev 		MCLGET(m_new, M_DONTWAIT);
85680c668ffSpvalchev 		if (!(m_new->m_flags & M_EXT)) {
85780c668ffSpvalchev 			m_freem(m_new);
85880c668ffSpvalchev 			return (ENOBUFS);
85980c668ffSpvalchev 		}
86099bd4cd2Skettenis 
86199bd4cd2Skettenis 		m = m_new;
86280c668ffSpvalchev 	} else
86399bd4cd2Skettenis 		m->m_data = m->m_ext.ext_buf;
86480c668ffSpvalchev 
86599bd4cd2Skettenis 	m->m_len = m->m_pkthdr.len = MCLBYTES;
86680c668ffSpvalchev 	/* Fix-up alignment so payload is doubleword-aligned */
86799bd4cd2Skettenis 	/* XXX m_adj(m, ETHER_ALIGN); */
86880c668ffSpvalchev 
86999bd4cd2Skettenis 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
87080c668ffSpvalchev 		return (ENOBUFS);
87180c668ffSpvalchev 
87280c668ffSpvalchev 	if (rxmap->dm_nsegs > 1)
87380c668ffSpvalchev 		goto out;
87480c668ffSpvalchev 
87580c668ffSpvalchev 	/* Map the segments into RX descriptors */
87680c668ffSpvalchev 	r = &sc->vge_ldata.vge_rx_list[idx];
87780c668ffSpvalchev 
87880c668ffSpvalchev 	if (letoh32(r->vge_sts) & VGE_RDSTS_OWN) {
87980c668ffSpvalchev 		printf("%s: tried to map a busy RX descriptor\n",
88080c668ffSpvalchev 		    sc->vge_dev.dv_xname);
88180c668ffSpvalchev 		goto out;
88280c668ffSpvalchev 	}
88380c668ffSpvalchev 	r->vge_buflen = htole16(VGE_BUFLEN(rxmap->dm_segs[0].ds_len) | VGE_RXDESC_I);
88480c668ffSpvalchev 	r->vge_addrlo = htole32(VGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
88580c668ffSpvalchev 	r->vge_addrhi = htole16(VGE_ADDR_HI(rxmap->dm_segs[0].ds_addr) & 0xFFFF);
886bd17711bSpvalchev 	r->vge_sts = htole32(0);
887bd17711bSpvalchev 	r->vge_ctl = htole32(0);
88880c668ffSpvalchev 
88980c668ffSpvalchev 	/*
89080c668ffSpvalchev 	 * Note: the manual fails to document the fact that for
89180c668ffSpvalchev 	 * proper operation, the driver needs to replenish the RX
89280c668ffSpvalchev 	 * DMA ring 4 descriptors at a time (rather than one at a
89380c668ffSpvalchev 	 * time, like most chips). We can allocate the new buffers
89480c668ffSpvalchev 	 * but we should not set the OWN bits until we're ready
89580c668ffSpvalchev 	 * to hand back 4 of them in one shot.
89680c668ffSpvalchev 	 */
89780c668ffSpvalchev #define VGE_RXCHUNK 4
89880c668ffSpvalchev 	sc->vge_rx_consumed++;
89980c668ffSpvalchev 	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
90080c668ffSpvalchev 		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
90180c668ffSpvalchev 			sc->vge_ldata.vge_rx_list[i].vge_sts |=
90280c668ffSpvalchev 			    htole32(VGE_RDSTS_OWN);
90380c668ffSpvalchev 		sc->vge_rx_consumed = 0;
90480c668ffSpvalchev 	}
90580c668ffSpvalchev 
90699bd4cd2Skettenis 	sc->vge_ldata.vge_rx_mbuf[idx] = m;
90780c668ffSpvalchev 
90880c668ffSpvalchev 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
90980c668ffSpvalchev 	    rxmap->dm_mapsize, BUS_DMASYNC_PREREAD);
91080c668ffSpvalchev 
91180c668ffSpvalchev 	return (0);
91280c668ffSpvalchev out:
91380c668ffSpvalchev 	DPRINTF(("vge_newbuf: out of memory\n"));
91480c668ffSpvalchev 	if (m_new != NULL)
91580c668ffSpvalchev 		m_freem(m_new);
91680c668ffSpvalchev 	return (ENOMEM);
91780c668ffSpvalchev }
91880c668ffSpvalchev 
91980c668ffSpvalchev int
vge_tx_list_init(struct vge_softc * sc)92080c668ffSpvalchev vge_tx_list_init(struct vge_softc *sc)
92180c668ffSpvalchev {
922a768fe4bStedu 	bzero(sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
923a768fe4bStedu 	bzero(&sc->vge_ldata.vge_tx_mbuf,
92480c668ffSpvalchev 	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
92580c668ffSpvalchev 
92680c668ffSpvalchev 	bus_dmamap_sync(sc->sc_dmat,
92780c668ffSpvalchev 	    sc->vge_ldata.vge_tx_list_map, 0,
92880c668ffSpvalchev 	    sc->vge_ldata.vge_tx_list_map->dm_mapsize,
92980c668ffSpvalchev 	    BUS_DMASYNC_PREWRITE);
93080c668ffSpvalchev 	sc->vge_ldata.vge_tx_prodidx = 0;
93180c668ffSpvalchev 	sc->vge_ldata.vge_tx_considx = 0;
93280c668ffSpvalchev 	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
93380c668ffSpvalchev 
93480c668ffSpvalchev 	return (0);
93580c668ffSpvalchev }
93680c668ffSpvalchev 
93780c668ffSpvalchev /* Init RX descriptors and allocate mbufs with vge_newbuf()
93880c668ffSpvalchev  * A ring is used, and last descriptor points to first. */
93980c668ffSpvalchev int
vge_rx_list_init(struct vge_softc * sc)94080c668ffSpvalchev vge_rx_list_init(struct vge_softc *sc)
94180c668ffSpvalchev {
94280c668ffSpvalchev 	int			i;
94380c668ffSpvalchev 
944a768fe4bStedu 	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
945a768fe4bStedu 	bzero(&sc->vge_ldata.vge_rx_mbuf,
94680c668ffSpvalchev 	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
94780c668ffSpvalchev 
94880c668ffSpvalchev 	sc->vge_rx_consumed = 0;
94980c668ffSpvalchev 
95080c668ffSpvalchev 	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
95180c668ffSpvalchev 		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
95280c668ffSpvalchev 			return (ENOBUFS);
95380c668ffSpvalchev 	}
95480c668ffSpvalchev 
95580c668ffSpvalchev 	/* Flush the RX descriptors */
95680c668ffSpvalchev 
95780c668ffSpvalchev 	bus_dmamap_sync(sc->sc_dmat,
95880c668ffSpvalchev 	    sc->vge_ldata.vge_rx_list_map,
95980c668ffSpvalchev 	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
96080c668ffSpvalchev 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
96180c668ffSpvalchev 
96280c668ffSpvalchev 	sc->vge_ldata.vge_rx_prodidx = 0;
96380c668ffSpvalchev 	sc->vge_rx_consumed = 0;
96480c668ffSpvalchev 	sc->vge_head = sc->vge_tail = NULL;
96580c668ffSpvalchev 
96680c668ffSpvalchev 	return (0);
96780c668ffSpvalchev }
96880c668ffSpvalchev 
96980c668ffSpvalchev /*
97080c668ffSpvalchev  * RX handler. We support the reception of jumbo frames that have
97180c668ffSpvalchev  * been fragmented across multiple 2K mbuf cluster buffers.
97280c668ffSpvalchev  */
void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	int			lim = 0;
	struct vge_rx_desc	*cur_rx;
	u_int32_t		rxstat, rxctl;

	ifp = &sc->arpcom.ac_if;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	/* Walk the ring until we hit a descriptor the chip still owns. */
	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = letoh32(cur_rx->vge_sts);
		rxctl = letoh32(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			DPRINTF(("vge_rxeof: SOF\n"));
			/* Full cluster consumed; chain onto vge_head. */
			m->m_len = MCLBYTES;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			/* Recycle the current mbuf back into the ring. */
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}

			/*
			 * Copy the frame out so the original cluster
			 * can be handed straight back to the ring.
			 */
			m0 = m_devget(mtod(m, char *),
			    total_len - ETHER_CRC_LEN, ETHER_ALIGN);
			vge_newbuf(sc, i, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;

			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		/* Last fragment of a multi-fragment frame: finish chain. */
		if (sc->vge_head != NULL) {
			m->m_len = total_len % MCLBYTES;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef __STRICT_ALIGNMENT
		/* Shift payload so the IP header is 4-byte aligned. */
		bcopy(m->m_data, m->m_data + ETHER_ALIGN, total_len);
		m->m_data += ETHER_ALIGN;
#endif
		/* Do RX checksumming */

		/* Check IP header checksum */
		if ((rxctl & VGE_RDCTL_IPPKT) &&
		    (rxctl & VGE_RDCTL_IPCSUMOK))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum */
		if ((rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT)) &&
		    (rxctl & VGE_RDCTL_PROTOCSUMOK))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		/* Propagate a hardware-extracted VLAN tag, if any. */
		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_pkthdr.ether_vtag = swap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* Never process more than one full ring per call. */
		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	if_input(ifp, &ml);

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	/* Presumably tells the chip how many descriptors were returned. */
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}
114980c668ffSpvalchev 
115080c668ffSpvalchev void
vge_txeof(struct vge_softc * sc)115180c668ffSpvalchev vge_txeof(struct vge_softc *sc)
115280c668ffSpvalchev {
115380c668ffSpvalchev 	struct ifnet		*ifp;
115480c668ffSpvalchev 	u_int32_t		txstat;
115580c668ffSpvalchev 	int			idx;
115680c668ffSpvalchev 
115780c668ffSpvalchev 	ifp = &sc->arpcom.ac_if;
115880c668ffSpvalchev 	idx = sc->vge_ldata.vge_tx_considx;
115980c668ffSpvalchev 
116080c668ffSpvalchev 	/* Invalidate the TX descriptor list */
116180c668ffSpvalchev 
116280c668ffSpvalchev 	bus_dmamap_sync(sc->sc_dmat,
116380c668ffSpvalchev 	    sc->vge_ldata.vge_tx_list_map,
116480c668ffSpvalchev 	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
116580c668ffSpvalchev 	    BUS_DMASYNC_POSTREAD);
116680c668ffSpvalchev 
116780c668ffSpvalchev 	/* Transmitted frames can be now free'd from the TX list */
116880c668ffSpvalchev 	while (idx != sc->vge_ldata.vge_tx_prodidx) {
116980c668ffSpvalchev 		txstat = letoh32(sc->vge_ldata.vge_tx_list[idx].vge_sts);
117080c668ffSpvalchev 		if (txstat & VGE_TDSTS_OWN)
117180c668ffSpvalchev 			break;
117280c668ffSpvalchev 
117380c668ffSpvalchev 		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
117480c668ffSpvalchev 		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
117580c668ffSpvalchev 		bus_dmamap_unload(sc->sc_dmat,
117680c668ffSpvalchev 		    sc->vge_ldata.vge_tx_dmamap[idx]);
117780c668ffSpvalchev 		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
117880c668ffSpvalchev 			ifp->if_collisions++;
117980c668ffSpvalchev 		if (txstat & VGE_TDSTS_TXERR)
118080c668ffSpvalchev 			ifp->if_oerrors++;
118180c668ffSpvalchev 
118280c668ffSpvalchev 		sc->vge_ldata.vge_tx_free++;
118380c668ffSpvalchev 		VGE_TX_DESC_INC(idx);
118480c668ffSpvalchev 	}
118580c668ffSpvalchev 
118680c668ffSpvalchev 	/* No changes made to the TX ring, so no flush needed */
118780c668ffSpvalchev 
118880c668ffSpvalchev 	if (idx != sc->vge_ldata.vge_tx_considx) {
118980c668ffSpvalchev 		sc->vge_ldata.vge_tx_considx = idx;
1190de6cd8fbSdlg 		ifq_clr_oactive(&ifp->if_snd);
119180c668ffSpvalchev 		ifp->if_timer = 0;
119280c668ffSpvalchev 	}
119380c668ffSpvalchev 
119480c668ffSpvalchev 	/*
119580c668ffSpvalchev 	 * If not all descriptors have been released reaped yet,
119680c668ffSpvalchev 	 * reload the timer so that we will eventually get another
119780c668ffSpvalchev 	 * interrupt that will cause us to re-enter this routine.
119880c668ffSpvalchev 	 * This is done in case the transmitter has gone idle.
119980c668ffSpvalchev 	 */
120080c668ffSpvalchev 	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
120180c668ffSpvalchev 		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
120280c668ffSpvalchev }
120380c668ffSpvalchev 
120480c668ffSpvalchev void
vge_tick(void * xsc)120580c668ffSpvalchev vge_tick(void *xsc)
120680c668ffSpvalchev {
120780c668ffSpvalchev 	struct vge_softc	*sc = xsc;
120880c668ffSpvalchev 	struct ifnet		*ifp = &sc->arpcom.ac_if;
120980c668ffSpvalchev 	struct mii_data		*mii = &sc->sc_mii;
121080c668ffSpvalchev 	int s;
121180c668ffSpvalchev 
1212e811f458Sbrad 	s = splnet();
121380c668ffSpvalchev 
121480c668ffSpvalchev 	mii_tick(mii);
121580c668ffSpvalchev 
121680c668ffSpvalchev 	if (sc->vge_link) {
12179941fc53Scanacar 		if (!(mii->mii_media_status & IFM_ACTIVE)) {
121880c668ffSpvalchev 			sc->vge_link = 0;
1219eda47238Sbrad 			ifp->if_link_state = LINK_STATE_DOWN;
12204d5bd2c0Sbrad 			if_link_state_change(ifp);
12219941fc53Scanacar 		}
122280c668ffSpvalchev 	} else {
122380c668ffSpvalchev 		if (mii->mii_media_status & IFM_ACTIVE &&
122480c668ffSpvalchev 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
122580c668ffSpvalchev 			sc->vge_link = 1;
1226812d50c0Sreyk 			if (mii->mii_media_status & IFM_FDX)
1227812d50c0Sreyk 				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1228812d50c0Sreyk 			else
1229c955b60aSbrad 				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
12304d5bd2c0Sbrad 			if_link_state_change(ifp);
12310cae21bdSpatrick 			if (!ifq_empty(&ifp->if_snd))
123280c668ffSpvalchev 				vge_start(ifp);
123380c668ffSpvalchev 		}
123480c668ffSpvalchev 	}
1235e15e1c8cSblambert 	timeout_add_sec(&sc->timer_handle, 1);
123680c668ffSpvalchev 	splx(s);
123780c668ffSpvalchev }
123880c668ffSpvalchev 
/*
 * Interrupt handler.  Returns nonzero if the interrupt was ours.
 */
int
vge_intr(void *arg)
{
	struct vge_softc	*sc = arg;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	ifp = &sc->arpcom.ac_if;

	/* Nothing to do while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return 0;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	/* Loop until the chip reports no more pending causes. */
	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		DPRINTFN(3, ("vge_intr: status=%#x\n", status));

		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		/* Acknowledge the latched status bits. */
		if (status) {
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		claimed = 1;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		/* RX overflow / no-descriptor: drain and restart the queue. */
		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			DPRINTFN(2, ("vge_intr: RX error, recovering\n"));
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		/* DMA stall: full reinit is the recovery path. */
		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			DPRINTFN(2, ("DMA_STALL\n"));
			vge_init(ifp);
		}

		/* Link change: run the tick handler immediately. */
		if (status & VGE_ISR_LINKSTS) {
			timeout_del(&sc->timer_handle);
			vge_tick(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	/* Try to push out any frames queued while we were busy. */
	if (!ifq_empty(&ifp->if_snd))
		vge_start(ifp);

	return (claimed);
}
130480c668ffSpvalchev 
130580c668ffSpvalchev /*
130680c668ffSpvalchev  * Encapsulate an mbuf chain into the TX ring by combining it w/
130780c668ffSpvalchev  * the descriptors.
130880c668ffSpvalchev  */
130980c668ffSpvalchev int
vge_encap(struct vge_softc * sc,struct mbuf * m_head,int idx)131080c668ffSpvalchev vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
131180c668ffSpvalchev {
131280c668ffSpvalchev 	bus_dmamap_t		txmap;
131380c668ffSpvalchev 	struct vge_tx_desc	*d = NULL;
131480c668ffSpvalchev 	struct vge_tx_frag	*f;
131580c668ffSpvalchev 	int			error, frag;
1316a340f9c4Sbrad 	u_int32_t		vge_flags;
13170bd04349Sdlg 	unsigned int		len;
1318a340f9c4Sbrad 
1319a340f9c4Sbrad 	vge_flags = 0;
1320a340f9c4Sbrad 
1321a340f9c4Sbrad 	if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1322a340f9c4Sbrad 		vge_flags |= VGE_TDCTL_IPCSUM;
13231911f71fShenning 	if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1324a340f9c4Sbrad 		vge_flags |= VGE_TDCTL_TCPCSUM;
13251911f71fShenning 	if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1326a340f9c4Sbrad 		vge_flags |= VGE_TDCTL_UDPCSUM;
132780c668ffSpvalchev 
132880c668ffSpvalchev 	txmap = sc->vge_ldata.vge_tx_dmamap[idx];
132980c668ffSpvalchev 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap,
133080c668ffSpvalchev 	    m_head, BUS_DMA_NOWAIT);
13310bd04349Sdlg 	switch (error) {
13320bd04349Sdlg 	case 0:
13330bd04349Sdlg 		break;
13340bd04349Sdlg 	case EFBIG: /* mbuf chain is too fragmented */
13350bd04349Sdlg 		if ((error = m_defrag(m_head, M_DONTWAIT)) == 0 &&
13360bd04349Sdlg 		    (error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head,
13370bd04349Sdlg 		    BUS_DMA_NOWAIT)) == 0)
13380bd04349Sdlg 			break;
13390bd04349Sdlg 	default:
13400bd04349Sdlg 		return (error);
134180c668ffSpvalchev         }
134280c668ffSpvalchev 
134380c668ffSpvalchev 	d = &sc->vge_ldata.vge_tx_list[idx];
134480c668ffSpvalchev 	/* If owned by chip, fail */
134580c668ffSpvalchev 	if (letoh32(d->vge_sts) & VGE_TDSTS_OWN)
134680c668ffSpvalchev 		return (ENOBUFS);
134780c668ffSpvalchev 
134880c668ffSpvalchev 	for (frag = 0; frag < txmap->dm_nsegs; frag++) {
134980c668ffSpvalchev 		f = &d->vge_frag[frag];
135080c668ffSpvalchev 		f->vge_buflen = htole16(VGE_BUFLEN(txmap->dm_segs[frag].ds_len));
135180c668ffSpvalchev 		f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[frag].ds_addr));
135280c668ffSpvalchev 		f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[frag].ds_addr) & 0xFFFF);
135380c668ffSpvalchev 	}
135480c668ffSpvalchev 
135580c668ffSpvalchev 	/* This chip does not do auto-padding */
135680c668ffSpvalchev 	if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
135780c668ffSpvalchev 		f = &d->vge_frag[frag];
135880c668ffSpvalchev 
135980c668ffSpvalchev 		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
136080c668ffSpvalchev 		    m_head->m_pkthdr.len));
136180c668ffSpvalchev 		f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[0].ds_addr));
136280c668ffSpvalchev 		f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[0].ds_addr) & 0xFFFF);
13630bd04349Sdlg 		len = VGE_MIN_FRAMELEN;
136480c668ffSpvalchev 		frag++;
13650bd04349Sdlg 	} else
13660bd04349Sdlg 		len = m_head->m_pkthdr.len;
13670bd04349Sdlg 
136880c668ffSpvalchev 	/* For some reason, we need to tell the card fragment + 1 */
136980c668ffSpvalchev 	frag++;
137080c668ffSpvalchev 
137180c668ffSpvalchev 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
137280c668ffSpvalchev 	    BUS_DMASYNC_PREWRITE);
137380c668ffSpvalchev 
13740bd04349Sdlg 	d->vge_sts = htole32(len << 16);
1375a340f9c4Sbrad 	d->vge_ctl = htole32(vge_flags|(frag << 28) | VGE_TD_LS_NORM);
137680c668ffSpvalchev 
13770bd04349Sdlg 	if (len > ETHERMTU + ETHER_HDR_LEN)
1378bd17711bSpvalchev 		d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
137980c668ffSpvalchev 
1380772b0c86Sbrad #if NVLAN > 0
1381772b0c86Sbrad 	/* Set up hardware VLAN tagging. */
1382772b0c86Sbrad 	if (m_head->m_flags & M_VLANTAG) {
1383772b0c86Sbrad 		d->vge_ctl |= htole32(m_head->m_pkthdr.ether_vtag |
1384772b0c86Sbrad 		    VGE_TDCTL_VTAG);
1385772b0c86Sbrad 	}
1386772b0c86Sbrad #endif
1387772b0c86Sbrad 
138880c668ffSpvalchev 	sc->vge_ldata.vge_tx_dmamap[idx] = txmap;
138980c668ffSpvalchev 	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
139080c668ffSpvalchev 	sc->vge_ldata.vge_tx_free--;
139180c668ffSpvalchev 	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
139280c668ffSpvalchev 
139380c668ffSpvalchev 	idx++;
139480c668ffSpvalchev 	return (0);
139580c668ffSpvalchev }
139680c668ffSpvalchev 
/*
 * Main transmit routine.  Dequeues packets from the interface send
 * queue, encapsulates them into TX descriptors, and kicks the chip.
 * Stalls the queue (oactive) when it runs out of free descriptors.
 */
void
vge_start(struct ifnet *ifp)
{
	struct vge_softc	*sc;
	struct mbuf		*m_head = NULL;
	int			idx, pidx = 0;

	sc = ifp->if_softc;

	/* Nothing to do without link, or while the ring is stalled. */
	if (!sc->vge_link || ifq_is_oactive(&ifp->if_snd))
		return;

	if (ifq_empty(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	/* pidx trails idx: the previous descriptor gets the Q (chain) bit. */
	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	for (;;) {
		/* Slot still holds an mbuf: no free descriptors, stall. */
		if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* On encap failure the packet is dropped, not requeued. */
		if (vge_encap(sc, m_head, idx)) {
			m_freem(m_head);
			ifp->if_oerrors++;
			continue;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Chain the previous descriptor to the one just filled. */
		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);
	}

	/* Nothing was queued this pass. */
	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
148580c668ffSpvalchev 
/*
 * Stop, reset and (re)initialize the adapter: rebuild the RX/TX
 * descriptor rings, program the station address, receive filter,
 * DMA and flow-control parameters, then start the MAC and enable
 * interrupts.  Returns 0 on success, ENOBUFS if ring setup fails.
 */
int
vge_init(struct ifnet *ifp)
{
	struct vge_softc	*sc = ifp->if_softc;
	int			i;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/* Initialize RX descriptors list */
	if (vge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}
	/* Initialize TX descriptors */
	if (vge_tx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for TX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set receive FIFO threshold */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) {
		/*
		 * Allow transmission and reception of VLAN tagged
		 * frames.
		 */
		CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_VTAGOPT);
		CSR_SETBIT_1(sc, VGE_RXCFG, VGE_VTAG_OPT2);
	}

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_GIANT);

	/* Program promiscuous mode and multicast filters. */
	vge_iff(sc);

	/* Initialize pause timer. */
	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
	/*
	 * Initialize flow control parameters.
	 *  TX XON high threshold : 48
	 *  TX pause low threshold : 24
	 *  Disable half-duplex flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 * NOTE(review): the value written is 400, which at 1 usec
	 * resolution would be 400 usecs -- confirm which is intended.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, 0);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	/* Restore BMCR state */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Link state is re-detected by the periodic tick. */
	sc->vge_link = 0;

	if (!timeout_pending(&sc->timer_handle))
		timeout_add_sec(&sc->timer_handle, 1);

	return (0);
}
164580c668ffSpvalchev 
164680c668ffSpvalchev /*
164780c668ffSpvalchev  * Set media options.
164880c668ffSpvalchev  */
164980c668ffSpvalchev int
vge_ifmedia_upd(struct ifnet * ifp)165080c668ffSpvalchev vge_ifmedia_upd(struct ifnet *ifp)
165180c668ffSpvalchev {
165280c668ffSpvalchev 	struct vge_softc *sc = ifp->if_softc;
165380c668ffSpvalchev 
165480c668ffSpvalchev 	return (mii_mediachg(&sc->sc_mii));
165580c668ffSpvalchev }
165680c668ffSpvalchev 
165780c668ffSpvalchev /*
165880c668ffSpvalchev  * Report current media status.
165980c668ffSpvalchev  */
166080c668ffSpvalchev void
vge_ifmedia_sts(struct ifnet * ifp,struct ifmediareq * ifmr)166180c668ffSpvalchev vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
166280c668ffSpvalchev {
166380c668ffSpvalchev 	struct vge_softc *sc = ifp->if_softc;
166480c668ffSpvalchev 
166580c668ffSpvalchev 	mii_pollstat(&sc->sc_mii);
166680c668ffSpvalchev 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
166780c668ffSpvalchev 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
166880c668ffSpvalchev }
166980c668ffSpvalchev 
/*
 * MII status-change callback: propagate the selected/negotiated
 * media mode into the MAC's DIAGCTL register and update 802.3x
 * flow control to match what was negotiated.
 */
void
vge_miibus_statchg(struct device *dev)
{
	struct vge_softc	*sc = (struct vge_softc *)dev;
	struct mii_data		*mii;
	struct ifmedia_entry	*ife;

	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %llx\n",
		    sc->vge_dev.dv_xname, IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	/*
	 * 802.3x flow control: clear both pause-enable bits (CRC2
	 * write), then re-enable TX and/or RX pause (CRS2 writes)
	 * according to the active media's negotiated pause options.
	 */
	CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
	    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_TXFLOWCTL_ENABLE);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_RXFLOWCTL_ENABLE);
}
172580c668ffSpvalchev 
/*
 * Handle interface ioctl requests.  Runs at splnet().  Media ioctls
 * go to the ifmedia/MII layer, everything else falls through to
 * ether_ioctl().  An ENETRESET result means only the receive filter
 * needs reprogramming, which vge_iff() does without a full reinit.
 */
int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vge_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: only the filter may change. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
177280c668ffSpvalchev 
177380c668ffSpvalchev void
vge_watchdog(struct ifnet * ifp)177480c668ffSpvalchev vge_watchdog(struct ifnet *ifp)
177580c668ffSpvalchev {
177680c668ffSpvalchev 	struct vge_softc *sc = ifp->if_softc;
177780c668ffSpvalchev 	int s;
177880c668ffSpvalchev 
177980c668ffSpvalchev 	s = splnet();
178080c668ffSpvalchev 	printf("%s: watchdog timeout\n", sc->vge_dev.dv_xname);
178180c668ffSpvalchev 	ifp->if_oerrors++;
178280c668ffSpvalchev 
178380c668ffSpvalchev 	vge_txeof(sc);
178480c668ffSpvalchev 	vge_rxeof(sc);
178580c668ffSpvalchev 
178680c668ffSpvalchev 	vge_init(ifp);
178780c668ffSpvalchev 
178880c668ffSpvalchev 	splx(s);
178980c668ffSpvalchev }
179080c668ffSpvalchev 
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vge_stop(struct vge_softc *sc)
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	/* Disarm the TX watchdog and the periodic tick timeout. */
	ifp->if_timer = 0;

	timeout_del(&sc->timer_handle);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Mask interrupts, stop the MAC, and quiesce both DMA queues. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Discard any partially assembled RX mbuf chain. */
	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}
1841