xref: /openbsd/sys/dev/pci/if_lge.c (revision 4bdff4be)
1 /*	$OpenBSD: if_lge.c,v 1.80 2023/11/10 15:51:20 bluhm Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $
35  */
36 
37 /*
38  * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
39  * documentation not available, but ask me nicely.
40  *
41  * Written by Bill Paul <william.paul@windriver.com>
42  * Wind River Systems
43  */
44 
45 /*
46  * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
47  * It's a 64-bit PCI part that supports TCP/IP checksum offload,
48  * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
49  * are three supported methods for data transfer between host and
50  * NIC: programmed I/O, traditional scatter/gather DMA and Packet
51  * Propulsion Technology (tm) DMA. The latter mechanism is a form
52  * of double buffer DMA where the packet data is copied to a
 * pre-allocated DMA buffer whose physical address has been loaded
54  * into a table at device initialization time. The rationale is that
55  * the virtual to physical address translation needed for normal
56  * scatter/gather DMA is more expensive than the data copy needed
57  * for double buffering. This may be true in Windows NT and the like,
58  * but it isn't true for us, at least on the x86 arch. This driver
59  * uses the scatter/gather I/O method for both TX and RX.
60  *
61  * The LXT1001 only supports TCP/IP checksum offload on receive.
62  * Also, the VLAN tagging is done using a 16-entry table which allows
63  * the chip to perform hardware filtering based on VLAN tags. Sadly,
64  * our vlan support doesn't currently play well with this kind of
65  * hardware support.
66  *
67  * Special thanks to:
68  * - Jeff James at Intel, for arranging to have the LXT1001 manual
69  *   released (at long last)
70  * - Beny Chen at D-Link, for actually sending it to me
71  * - Brad Short and Keith Alexis at SMC, for sending me sample
72  *   SMC9462SX and SMC9462TX adapters for testing
73  * - Paul Saab at Y!, for not killing me (though it remains to be seen
74  *   if in fact he did me much of a favor)
75  */
76 
77 #include "bpfilter.h"
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/sockio.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/kernel.h>
85 #include <sys/device.h>
86 #include <sys/socket.h>
87 
88 #include <net/if.h>
89 #include <net/if_media.h>
90 
91 #include <netinet/in.h>
92 #include <netinet/if_ether.h>
93 
94 #if NBPFILTER > 0
95 #include <net/bpf.h>
96 #endif
97 
98 #include <uvm/uvm_extern.h>              /* for vtophys */
99 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
100 
101 #include <dev/pci/pcireg.h>
102 #include <dev/pci/pcivar.h>
103 #include <dev/pci/pcidevs.h>
104 
105 #include <dev/mii/mii.h>
106 #include <dev/mii/miivar.h>
107 
108 #define LGE_USEIOSPACE
109 
110 #include <dev/pci/if_lgereg.h>
111 
int lge_probe(struct device *, void *, void *);
void lge_attach(struct device *, struct device *, void *);

/* Autoconf glue for the "lge" device. */
const struct cfattach lge_ca = {
	sizeof(struct lge_softc), lge_probe, lge_attach
};

struct cfdriver lge_cd = {
	NULL, "lge", DV_IFNET
};

/* Ring/buffer management and data path. */
int lge_newbuf(struct lge_softc *, struct lge_rx_desc *,
			     struct mbuf *);
int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
void lge_rxeof(struct lge_softc *, int);
void lge_txeof(struct lge_softc *);
int lge_intr(void *);
void lge_tick(void *);
void lge_start(struct ifnet *);
int lge_ioctl(struct ifnet *, u_long, caddr_t);
void lge_init(void *);
void lge_stop(struct lge_softc *);
void lge_watchdog(struct ifnet *);
int lge_ifmedia_upd(struct ifnet *);
void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* EEPROM access. */
void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);

/* MII bus callbacks. */
int lge_miibus_readreg(struct device *, int, int);
void lge_miibus_writereg(struct device *, int, int, int);
void lge_miibus_statchg(struct device *);

void lge_setmulti(struct lge_softc *);
void lge_reset(struct lge_softc *);
int lge_list_rx_init(struct lge_softc *);
int lge_list_tx_init(struct lge_softc *);

#ifdef LGE_DEBUG
#define DPRINTF(x)	if (lgedebug) printf x
#define DPRINTFN(n,x)	if (lgedebug >= (n)) printf x
int	lgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* PCI vendor/product IDs this driver matches. */
const struct pci_matchid lge_devices[] = {
	{ PCI_VENDOR_LEVEL1, PCI_PRODUCT_LEVEL1_LXT1001 }
};

/* Read-modify-write helpers for 32-bit CSRs. */
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MEAR register. */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)
176 
177 /*
178  * Read a word of data stored in the EEPROM at address 'addr.'
179  */
180 void
181 lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest)
182 {
183 	int			i;
184 	u_int32_t		val;
185 
186 	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
187 	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));
188 
189 	for (i = 0; i < LGE_TIMEOUT; i++)
190 		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
191 			break;
192 
193 	if (i == LGE_TIMEOUT) {
194 		printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname);
195 		return;
196 	}
197 
198 	val = CSR_READ_4(sc, LGE_EEDATA);
199 
200 	if (addr & 1)
201 		*dest = (val >> 16) & 0xFFFF;
202 	else
203 		*dest = val & 0xFFFF;
204 }
205 
206 /*
207  * Read a sequence of words from the EEPROM.
208  */
209 void
210 lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off,
211     int cnt, int swap)
212 {
213 	int			i;
214 	u_int16_t		word = 0, *ptr;
215 
216 	for (i = 0; i < cnt; i++) {
217 		lge_eeprom_getword(sc, off + i, &word);
218 		ptr = (u_int16_t *)(dest + (i * 2));
219 		if (swap)
220 			*ptr = ntohs(word);
221 		else
222 			*ptr = word;
223 	}
224 }
225 
226 int
227 lge_miibus_readreg(struct device *dev, int phy, int reg)
228 {
229 	struct lge_softc	*sc = (struct lge_softc *)dev;
230 	int			i;
231 
232 	/*
233 	 * If we have a non-PCS PHY, pretend that the internal
234 	 * autoneg stuff at PHY address 0 isn't there so that
235 	 * the miibus code will find only the GMII PHY.
236 	 */
237 	if (sc->lge_pcs == 0 && phy == 0)
238 		return (0);
239 
240 	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);
241 
242 	for (i = 0; i < LGE_TIMEOUT; i++)
243 		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
244 			break;
245 
246 	if (i == LGE_TIMEOUT) {
247 		printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname);
248 		return (0);
249 	}
250 
251 	return (CSR_READ_4(sc, LGE_GMIICTL) >> 16);
252 }
253 
254 void
255 lge_miibus_writereg(struct device *dev, int phy, int reg, int data)
256 {
257 	struct lge_softc	*sc = (struct lge_softc *)dev;
258 	int			i;
259 
260 	CSR_WRITE_4(sc, LGE_GMIICTL,
261 	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);
262 
263 	for (i = 0; i < LGE_TIMEOUT; i++)
264 		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
265 			break;
266 
267 	if (i == LGE_TIMEOUT) {
268 		printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname);
269 	}
270 }
271 
272 void
273 lge_miibus_statchg(struct device *dev)
274 {
275 	struct lge_softc	*sc = (struct lge_softc *)dev;
276 	struct mii_data		*mii = &sc->lge_mii;
277 
278 	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
279 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
280 	case IFM_1000_T:
281 	case IFM_1000_SX:
282 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
283 		break;
284 	case IFM_100_TX:
285 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
286 		break;
287 	case IFM_10_T:
288 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
289 		break;
290 	default:
291 		/*
292 		 * Choose something, even if it's wrong. Clearing
293 		 * all the bits will hose autoneg on the internal
294 		 * PHY.
295 		 */
296 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
297 		break;
298 	}
299 
300 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
301 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
302 	} else {
303 		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
304 	}
305 }
306 
307 void
308 lge_setmulti(struct lge_softc *sc)
309 {
310 	struct arpcom		*ac = &sc->arpcom;
311 	struct ifnet		*ifp = &ac->ac_if;
312 	struct ether_multi      *enm;
313 	struct ether_multistep  step;
314 	u_int32_t		h = 0, hashes[2] = { 0, 0 };
315 
316 	/* Make sure multicast hash table is enabled. */
317 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);
318 
319 	if (ac->ac_multirangecnt > 0)
320 		ifp->if_flags |= IFF_ALLMULTI;
321 
322 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
323 		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
324 		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
325 		return;
326 	}
327 
328 	/* first, zot all the existing hash bits */
329 	CSR_WRITE_4(sc, LGE_MAR0, 0);
330 	CSR_WRITE_4(sc, LGE_MAR1, 0);
331 
332 	/* now program new ones */
333 	ETHER_FIRST_MULTI(step, ac, enm);
334 	while (enm != NULL) {
335 		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
336 		    0x0000003F;
337 		if (h < 32)
338 			hashes[0] |= (1 << h);
339 		else
340 			hashes[1] |= (1 << (h - 32));
341 		ETHER_NEXT_MULTI(step, enm);
342 	}
343 
344 	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
345 	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
346 }
347 
348 void
349 lge_reset(struct lge_softc *sc)
350 {
351 	int			i;
352 
353 	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);
354 
355 	for (i = 0; i < LGE_TIMEOUT; i++) {
356 		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
357 			break;
358 	}
359 
360 	if (i == LGE_TIMEOUT)
361 		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);
362 
363 	/* Wait a little while for the chip to get its brains in order. */
364 	DELAY(1000);
365 }
366 
/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
lge_probe(struct device *parent, void *match, void *aux)
{
	/* Match purely against the lge_devices vendor/product table. */
	return (pci_matchbyid((struct pci_attach_args *)aux, lge_devices,
	    nitems(lge_devices)));
}
377 
378 /*
379  * Attach the interface. Allocate softc structures, do ifmedia
380  * setup and ethernet/BPF attach.
381  */
382 void
383 lge_attach(struct device *parent, struct device *self, void *aux)
384 {
385 	struct lge_softc	*sc = (struct lge_softc *)self;
386 	struct pci_attach_args	*pa = aux;
387 	pci_chipset_tag_t	pc = pa->pa_pc;
388 	pci_intr_handle_t	ih;
389 	const char		*intrstr = NULL;
390 	bus_size_t		size;
391 	bus_dma_segment_t	seg;
392 	bus_dmamap_t		dmamap;
393 	int			rseg;
394 	u_char			eaddr[ETHER_ADDR_LEN];
395 #ifndef LGE_USEIOSPACE
396 	pcireg_t		memtype;
397 #endif
398 	struct ifnet		*ifp;
399 	caddr_t			kva;
400 
401 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
402 
403 	/*
404 	 * Map control/status registers.
405 	 */
406 	DPRINTFN(5, ("Map control/status regs\n"));
407 
408 	DPRINTFN(5, ("pci_mapreg_map\n"));
409 #ifdef LGE_USEIOSPACE
410 	if (pci_mapreg_map(pa, LGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
411 	    &sc->lge_btag, &sc->lge_bhandle, NULL, &size, 0)) {
412 		printf(": can't map i/o space\n");
413 		return;
414 	}
415 #else
416 	memtype = pci_mapreg_type(pc, pa->pa_tag, LGE_PCI_LOMEM);
417 	if (pci_mapreg_map(pa, LGE_PCI_LOMEM, memtype, 0, &sc->lge_btag,
418 	    &sc->lge_bhandle, NULL, &size, 0)) {
419 		printf(": can't map mem space\n");
420 		return;
421 	}
422 #endif
423 
424 	DPRINTFN(5, ("pci_intr_map\n"));
425 	if (pci_intr_map(pa, &ih)) {
426 		printf(": couldn't map interrupt\n");
427 		goto fail_1;
428 	}
429 
430 	DPRINTFN(5, ("pci_intr_string\n"));
431 	intrstr = pci_intr_string(pc, ih);
432 	DPRINTFN(5, ("pci_intr_establish\n"));
433 	sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc,
434 					      sc->sc_dv.dv_xname);
435 	if (sc->lge_intrhand == NULL) {
436 		printf(": couldn't establish interrupt");
437 		if (intrstr != NULL)
438 			printf(" at %s", intrstr);
439 		printf("\n");
440 		goto fail_1;
441 	}
442 	printf(": %s", intrstr);
443 
444 	/* Reset the adapter. */
445 	DPRINTFN(5, ("lge_reset\n"));
446 	lge_reset(sc);
447 
448 	/*
449 	 * Get station address from the EEPROM.
450 	 */
451 	DPRINTFN(5, ("lge_read_eeprom\n"));
452 	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
453 	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
454 	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);
455 
456 	/*
457 	 * A Level 1 chip was detected. Inform the world.
458 	 */
459 	printf(", address %s\n", ether_sprintf(eaddr));
460 
461 	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
462 
463 	sc->sc_dmatag = pa->pa_dmat;
464 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
465 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data),
466 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
467 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
468 		goto fail_2;
469 	}
470 	DPRINTFN(5, ("bus_dmamem_map\n"));
471 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
472 			   sizeof(struct lge_list_data), &kva,
473 			   BUS_DMA_NOWAIT)) {
474 		printf("%s: can't map dma buffers (%zd bytes)\n",
475 		       sc->sc_dv.dv_xname, sizeof(struct lge_list_data));
476 		goto fail_3;
477 	}
478 	DPRINTFN(5, ("bus_dmamap_create\n"));
479 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1,
480 			      sizeof(struct lge_list_data), 0,
481 			      BUS_DMA_NOWAIT, &dmamap)) {
482 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
483 		goto fail_4;
484 	}
485 	DPRINTFN(5, ("bus_dmamap_load\n"));
486 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
487 			    sizeof(struct lge_list_data), NULL,
488 			    BUS_DMA_NOWAIT)) {
489 		goto fail_5;
490 	}
491 
492 	DPRINTFN(5, ("bzero\n"));
493 	sc->lge_ldata = (struct lge_list_data *)kva;
494 
495 	ifp = &sc->arpcom.ac_if;
496 	ifp->if_softc = sc;
497 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
498 	ifp->if_ioctl = lge_ioctl;
499 	ifp->if_start = lge_start;
500 	ifp->if_watchdog = lge_watchdog;
501 	ifp->if_hardmtu = LGE_JUMBO_MTU;
502 	ifq_init_maxlen(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
503 	DPRINTFN(5, ("bcopy\n"));
504 	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
505 
506 	ifp->if_capabilities = IFCAP_VLAN_MTU;
507 
508 	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
509 		sc->lge_pcs = 1;
510 	else
511 		sc->lge_pcs = 0;
512 
513 	/*
514 	 * Do MII setup.
515 	 */
516 	DPRINTFN(5, ("mii setup\n"));
517 	sc->lge_mii.mii_ifp = ifp;
518 	sc->lge_mii.mii_readreg = lge_miibus_readreg;
519 	sc->lge_mii.mii_writereg = lge_miibus_writereg;
520 	sc->lge_mii.mii_statchg = lge_miibus_statchg;
521 	ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd,
522 		     lge_ifmedia_sts);
523 	mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY,
524 		   MII_OFFSET_ANY, 0);
525 
526 	if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) {
527 		printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
528 		ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL,
529 			    0, NULL);
530 		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL);
531 	} else {
532 		DPRINTFN(5, ("ifmedia_set\n"));
533 		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO);
534 	}
535 
536 	/*
537 	 * Call MI attach routine.
538 	 */
539 	DPRINTFN(5, ("if_attach\n"));
540 	if_attach(ifp);
541 	DPRINTFN(5, ("ether_ifattach\n"));
542 	ether_ifattach(ifp);
543 	DPRINTFN(5, ("timeout_set\n"));
544 	timeout_set(&sc->lge_timeout, lge_tick, sc);
545 	timeout_add_sec(&sc->lge_timeout, 1);
546 	return;
547 
548 fail_5:
549 	bus_dmamap_destroy(sc->sc_dmatag, dmamap);
550 
551 fail_4:
552 	bus_dmamem_unmap(sc->sc_dmatag, kva,
553 	    sizeof(struct lge_list_data));
554 
555 fail_3:
556 	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
557 
558 fail_2:
559 	pci_intr_disestablish(pc, sc->lge_intrhand);
560 
561 fail_1:
562 	bus_space_unmap(sc->lge_btag, sc->lge_bhandle, size);
563 }
564 
565 /*
566  * Initialize the transmit descriptors.
567  */
568 int
569 lge_list_tx_init(struct lge_softc *sc)
570 {
571 	struct lge_list_data	*ld;
572 	struct lge_ring_data	*cd;
573 	int			i;
574 
575 	cd = &sc->lge_cdata;
576 	ld = sc->lge_ldata;
577 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
578 		ld->lge_tx_list[i].lge_mbuf = NULL;
579 		ld->lge_tx_list[i].lge_ctl = 0;
580 	}
581 
582 	cd->lge_tx_prod = cd->lge_tx_cons = 0;
583 
584 	return (0);
585 }
586 
587 
588 /*
589  * Initialize the RX descriptors and allocate mbufs for them. Note that
590  * we arrange the descriptors in a closed ring, so that the last descriptor
591  * points back to the first.
592  */
593 int
594 lge_list_rx_init(struct lge_softc *sc)
595 {
596 	struct lge_list_data	*ld;
597 	struct lge_ring_data	*cd;
598 	int			i;
599 
600 	ld = sc->lge_ldata;
601 	cd = &sc->lge_cdata;
602 
603 	cd->lge_rx_prod = cd->lge_rx_cons = 0;
604 
605 	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
606 
607 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
608 		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
609 			break;
610 		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
611 			return (ENOBUFS);
612 	}
613 
614 	/* Clear possible 'rx command queue empty' interrupt. */
615 	CSR_READ_4(sc, LGE_ISR);
616 
617 	return (0);
618 }
619 
/*
 * Initialize a RX descriptor and attach a MBUF cluster.
 * If 'm' is NULL a fresh cluster is allocated; otherwise 'm' is
 * recycled in place. Returns ENOBUFS only when allocation fails.
 */
int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		/* Grab a jumbo-sized cluster; fail softly if none available. */
		m_new = MCLGETL(NULL, M_DONTWAIT, LGE_JLEN);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	*/
	m_adj(m_new, ETHER_ALIGN);

	/* Fill in the descriptor: one fragment, interrupt on completion. */
	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = VTOPHYS(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, VTOPHYS(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return (0);
}
673 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols. 'cnt' is the number of completed RX
 * DMA operations reported by the interrupt status.
 */
void
lge_rxeof(struct lge_softc *sc, int cnt)
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		/* Snapshot the descriptor, detach its mbuf, advance. */
		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		/*
		 * Try to hand the ring a fresh cluster; if that fails,
		 * copy the packet out with m_devget() and recycle the
		 * old cluster so the ring stays populated.
		 */
		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP) {
			if (!(rxsts & LGE_RXSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISTCP) {
			if (!(rxsts & LGE_RXSTS_TCPCSUMERR))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISUDP) {
			if (!(rxsts & LGE_RXSTS_UDPCSUMERR))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		ml_enqueue(&ml, m);
	}

	/* Hand the whole batch to the network stack in one call. */
	if_input(ifp, &ml);

	sc->lge_cdata.lge_rx_cons = i;
}
752 
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
lge_txeof(struct lge_softc *sc)
{
	struct lge_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx, txdone;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted. The TXDMADONE register
	 * reports how many descriptors the chip has completed.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		/* Release the transmitted mbuf and reset the descriptor. */
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* If anything was reaped, the ring has room again: unstall TX. */
	if (cur_tx != NULL)
		ifq_clr_oactive(&ifp->if_snd);
}
796 
797 void
798 lge_tick(void *xsc)
799 {
800 	struct lge_softc	*sc = xsc;
801 	struct mii_data		*mii = &sc->lge_mii;
802 	struct ifnet		*ifp = &sc->arpcom.ac_if;
803 	int			s;
804 
805 	s = splnet();
806 
807 	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
808 	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
809 	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
810 	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
811 
812 	if (!sc->lge_link) {
813 		mii_tick(mii);
814 		if (mii->mii_media_status & IFM_ACTIVE &&
815 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
816 			sc->lge_link++;
817 			if (!ifq_empty(&ifp->if_snd))
818 				lge_start(ifp);
819 		}
820 	}
821 
822 	timeout_add_sec(&sc->lge_timeout, 1);
823 
824 	splx(s);
825 }
826 
/*
 * Interrupt handler: drain the interrupt status register and
 * dispatch TX/RX completion, ring-empty and PHY events. Returns
 * non-zero when the interrupt was ours.
 */
int
lge_intr(void *arg)
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return (0);
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		/* The DMACNT field tells us how many frames completed. */
		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		/* RX command FIFO ran dry: reinitialize the whole chip. */
		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_init(sc);

		/* PHY event: mark link down and re-poll it via lge_tick(). */
		if (status & LGE_ISR_PHY_INTR) {
			sc->lge_link = 0;
			timeout_del(&sc->lge_timeout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	/* Kick TX for anything that queued up while we were busy. */
	if (!ifq_empty(&ifp->if_snd))
		lge_start(ifp);

	return (claimed);
}
881 
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers. '*txidx' is advanced to the next
 * free slot on success.
 */
int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
 	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
 	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	/*
	 * NOTE(review): nothing bounds 'frag' against the size of the
	 * descriptor's lge_frags[] array — presumably chains handed to
	 * us never exceed it; verify against the descriptor layout in
	 * if_lgereg.h.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = VTOPHYS(mtod(m, vaddr_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	/* The loop above always runs to completion, so m is NULL here. */
	if (m != NULL)
		return (ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, VTOPHYS(cur_tx));

	return (0);
}
926 
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
lge_start(struct ifnet *ifp)
{
	struct lge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			pkts = 0;

	sc = ifp->if_softc;

	/* Don't bother queueing anything until we have link. */
	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	/* Fill descriptors until the ring or the chip's TX FIFO is full. */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Encap failed: put the packet back and stall the queue. */
		if (lge_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
989 
/*
 * Stop, reset and (re)initialize the chip: program the station
 * address, set up the RX/TX rings, program the mode/filter bits,
 * then enable the receiver, transmitter and interrupts.
 */
void
lge_init(void *xsc)
{
	struct lge_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		       "memory for rx buffers\n", sc->sc_dv.dv_xname);
		lge_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/*
	 * Set initial value for MODE1 register.
	 * NOTE(review): the LGE_MODE1_SETRST bits appear to select
	 * whether the accompanying mode bits are set or cleared by
	 * the write (writes below without them look like clears) —
	 * confirm against if_lgereg.h.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_add_sec(&sc->lge_timeout, 1);
}
1115 
1116 /*
1117  * Set media options.
1118  */
1119 int
1120 lge_ifmedia_upd(struct ifnet *ifp)
1121 {
1122 	struct lge_softc	*sc = ifp->if_softc;
1123 	struct mii_data		*mii = &sc->lge_mii;
1124 
1125 	sc->lge_link = 0;
1126 	if (mii->mii_instance) {
1127 		struct mii_softc *miisc;
1128 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1129 			mii_phy_reset(miisc);
1130 	}
1131 	mii_mediachg(mii);
1132 
1133 	return (0);
1134 }
1135 
1136 /*
1137  * Report current media status.
1138  */
1139 void
1140 lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1141 {
1142 	struct lge_softc	*sc = ifp->if_softc;
1143 	struct mii_data		*mii = &sc->lge_mii;
1144 
1145 	mii_pollstat(mii);
1146 	ifmr->ifm_active = mii->mii_media_active;
1147 	ifmr->ifm_status = mii->mii_media_status;
1148 }
1149 
/*
 * Handle interface ioctls: address assignment, up/down and flag
 * changes, media selection, and multicast filter updates.
 * Runs with network interrupts blocked (splnet).
 */
int
lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct lge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			lge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If we are already running, handle promiscuous
			 * and ALLMULTI transitions in hardware without a
			 * full reinit.  MODE1 is a set/reset style
			 * register: writing a bit together with
			 * LGE_MODE1_SETRST_CTL1 sets it, writing the bit
			 * alone clears it (same pattern as lge_init()).
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode was just enabled. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Promiscuous mode was just disabled. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->lge_if_flags) & IFF_ALLMULTI) {
				/* ALLMULTI toggled: reload the filter. */
				lge_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		/* Remember the flags so we can detect transitions above. */
		sc->lge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media queries/changes go to the MII layer. */
		mii = &sc->lge_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/*
	 * ENETRESET (presumably from a multicast list change handled
	 * by ether_ioctl()) just means "reprogram the RX filter".
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			lge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1215 
1216 void
1217 lge_watchdog(struct ifnet *ifp)
1218 {
1219 	struct lge_softc	*sc;
1220 
1221 	sc = ifp->if_softc;
1222 
1223 	ifp->if_oerrors++;
1224 	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);
1225 
1226 	lge_stop(sc);
1227 	lge_reset(sc);
1228 	lge_init(sc);
1229 
1230 	if (!ifq_empty(&ifp->if_snd))
1231 		lge_start(ifp);
1232 }
1233 
1234 /*
1235  * Stop the adapter and free any mbufs allocated to the
1236  * RX and TX lists.
1237  */
1238 void
1239 lge_stop(struct lge_softc *sc)
1240 {
1241 	int			i;
1242 	struct ifnet		*ifp;
1243 
1244 	ifp = &sc->arpcom.ac_if;
1245 	ifp->if_timer = 0;
1246 	timeout_del(&sc->lge_timeout);
1247 
1248 	ifp->if_flags &= ~IFF_RUNNING;
1249 	ifq_clr_oactive(&ifp->if_snd);
1250 
1251 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);
1252 
1253 	/* Disable receiver and transmitter. */
1254 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
1255 	sc->lge_link = 0;
1256 
1257 	/*
1258 	 * Free data in the RX lists.
1259 	 */
1260 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
1261 		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
1262 			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
1263 			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
1264 		}
1265 	}
1266 	bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list));
1267 
1268 	/*
1269 	 * Free the TX list buffers.
1270 	 */
1271 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
1272 		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
1273 			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
1274 			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
1275 		}
1276 	}
1277 
1278 	bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list));
1279 }
1280