xref: /openbsd/sys/dev/pci/if_lge.c (revision 5eec54d3)
1 /*	$OpenBSD: if_lge.c,v 1.82 2024/09/06 10:54:08 jsg Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $
35  */
36 
37 /*
38  * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
39  * documentation not available, but ask me nicely.
40  *
41  * Written by Bill Paul <william.paul@windriver.com>
42  * Wind River Systems
43  */
44 
45 /*
46  * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
47  * It's a 64-bit PCI part that supports TCP/IP checksum offload,
48  * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
49  * are three supported methods for data transfer between host and
50  * NIC: programmed I/O, traditional scatter/gather DMA and Packet
51  * Propulsion Technology (tm) DMA. The latter mechanism is a form
52  * of double buffer DMA where the packet data is copied to a
53  * pre-allocated DMA buffer who's physical address has been loaded
54  * into a table at device initialization time. The rationale is that
55  * the virtual to physical address translation needed for normal
56  * scatter/gather DMA is more expensive than the data copy needed
57  * for double buffering. This may be true in Windows NT and the like,
58  * but it isn't true for us, at least on the x86 arch. This driver
59  * uses the scatter/gather I/O method for both TX and RX.
60  *
61  * The LXT1001 only supports TCP/IP checksum offload on receive.
62  * Also, the VLAN tagging is done using a 16-entry table which allows
63  * the chip to perform hardware filtering based on VLAN tags. Sadly,
64  * our vlan support doesn't currently play well with this kind of
65  * hardware support.
66  *
67  * Special thanks to:
68  * - Jeff James at Intel, for arranging to have the LXT1001 manual
69  *   released (at long last)
70  * - Beny Chen at D-Link, for actually sending it to me
71  * - Brad Short and Keith Alexis at SMC, for sending me sample
72  *   SMC9462SX and SMC9462TX adapters for testing
73  * - Paul Saab at Y!, for not killing me (though it remains to be seen
74  *   if in fact he did me much of a favor)
75  */
76 
77 #include "bpfilter.h"
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/sockio.h>
82 #include <sys/mbuf.h>
83 #include <sys/device.h>
84 
85 #include <net/if.h>
86 #include <net/if_media.h>
87 
88 #include <netinet/in.h>
89 #include <netinet/if_ether.h>
90 
91 #if NBPFILTER > 0
92 #include <net/bpf.h>
93 #endif
94 
95 #include <uvm/uvm_extern.h>              /* for vtophys */
96 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
97 
98 #include <dev/pci/pcireg.h>
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcidevs.h>
101 
102 #include <dev/mii/miivar.h>
103 
104 #define LGE_USEIOSPACE
105 
106 #include <dev/pci/if_lgereg.h>
107 
108 int lge_probe(struct device *, void *, void *);
109 void lge_attach(struct device *, struct device *, void *);
110 
111 const struct cfattach lge_ca = {
112 	sizeof(struct lge_softc), lge_probe, lge_attach
113 };
114 
115 struct cfdriver lge_cd = {
116 	NULL, "lge", DV_IFNET
117 };
118 
119 int lge_newbuf(struct lge_softc *, struct lge_rx_desc *,
120 			     struct mbuf *);
121 int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
122 void lge_rxeof(struct lge_softc *, int);
123 void lge_txeof(struct lge_softc *);
124 int lge_intr(void *);
125 void lge_tick(void *);
126 void lge_start(struct ifnet *);
127 int lge_ioctl(struct ifnet *, u_long, caddr_t);
128 void lge_init(void *);
129 void lge_stop(struct lge_softc *);
130 void lge_watchdog(struct ifnet *);
131 int lge_ifmedia_upd(struct ifnet *);
132 void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
133 
134 void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
135 void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);
136 
137 int lge_miibus_readreg(struct device *, int, int);
138 void lge_miibus_writereg(struct device *, int, int, int);
139 void lge_miibus_statchg(struct device *);
140 
141 void lge_setmulti(struct lge_softc *);
142 void lge_reset(struct lge_softc *);
143 int lge_list_rx_init(struct lge_softc *);
144 int lge_list_tx_init(struct lge_softc *);
145 
146 #ifdef LGE_DEBUG
147 #define DPRINTF(x)	if (lgedebug) printf x
148 #define DPRINTFN(n,x)	if (lgedebug >= (n)) printf x
149 int	lgedebug = 0;
150 #else
151 #define DPRINTF(x)
152 #define DPRINTFN(n,x)
153 #endif
154 
155 const struct pci_matchid lge_devices[] = {
156 	{ PCI_VENDOR_LEVEL1, PCI_PRODUCT_LEVEL1_LXT1001 }
157 };
158 
159 #define LGE_SETBIT(sc, reg, x)				\
160 	CSR_WRITE_4(sc, reg,				\
161 		CSR_READ_4(sc, reg) | (x))
162 
163 #define LGE_CLRBIT(sc, reg, x)				\
164 	CSR_WRITE_4(sc, reg,				\
165 		CSR_READ_4(sc, reg) & ~(x))
166 
167 #define SIO_SET(x)					\
168 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)
169 
170 #define SIO_CLR(x)					\
171 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)
172 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * On timeout the destination word is left untouched.
 */
void
lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest)
{
	int			i;
	u_int32_t		val;

	/*
	 * Issue a single-access read.  The chip reads a 32-bit
	 * doubleword at a time, so the doubleword address (addr / 2)
	 * is what goes into the command register.
	 */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	/* Busy-wait until the chip clears the READ command bit. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd word addresses select the upper half of the doubleword. */
	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;
}
201 
202 /*
203  * Read a sequence of words from the EEPROM.
204  */
205 void
lge_read_eeprom(struct lge_softc * sc,caddr_t dest,int off,int cnt,int swap)206 lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off,
207     int cnt, int swap)
208 {
209 	int			i;
210 	u_int16_t		word = 0, *ptr;
211 
212 	for (i = 0; i < cnt; i++) {
213 		lge_eeprom_getword(sc, off + i, &word);
214 		ptr = (u_int16_t *)(dest + (i * 2));
215 		if (swap)
216 			*ptr = ntohs(word);
217 		else
218 			*ptr = word;
219 	}
220 }
221 
/*
 * Read a PHY register via the GMII management interface.
 * Returns 0 on timeout or when the probed PHY is masked out.
 */
int
lge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	int			i;

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return (0);

	/* Start the read; PHY address and register select the target. */
	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Busy-wait for the management command to complete. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname);
		return (0);
	}

	/* The result is returned in the upper 16 bits of GMIICTL. */
	return (CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}
249 
/*
 * Write a PHY register via the GMII management interface.
 * A timeout is reported but otherwise ignored.
 */
void
lge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	int			i;

	/* Data goes in the upper 16 bits; PHY/register select the target. */
	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	/* Busy-wait for the management command to complete. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname);
	}
}
267 
/*
 * MII status change callback: mirror the negotiated speed and duplex
 * from the MII layer into the chip's GMII mode register.
 */
void
lge_miibus_statchg(struct device *dev)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	struct mii_data		*mii = &sc->lge_mii;

	/* Clear the speed field before setting the new speed bits. */
	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	/* Track full- vs. half-duplex from the active media word. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}
}
302 
/*
 * Program the chip's 64-bit multicast hash filter from the interface's
 * current multicast list.  ALLMULTI/PROMISC opens the filter entirely;
 * address ranges cannot be expressed in the hash, so their presence
 * forces ALLMULTI.
 */
void
lge_setmulti(struct lge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi      *enm;
	struct ether_multistep  step;
	u_int32_t		h = 0, hashes[2] = { 0, 0 };

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	/* Ranges can't be hashed individually; fall back to ALLMULTI. */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* Hash is the top 6 bits of the big-endian CRC32. */
		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
		    0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
}
343 
/*
 * Issue a soft reset and poll until the chip deasserts the reset bit.
 * On timeout a diagnostic is printed but the routine still delays and
 * returns, matching the original driver's best-effort behavior.
 */
void
lge_reset(struct lge_softc *sc)
{
	int			i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	/* Wait for the self-clearing reset bit to drop. */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}
362 
/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
lge_probe(struct device *parent, void *match, void *aux)
{
	/* Match against the single-entry lge_devices table. */
	return (pci_matchbyid((struct pci_attach_args *)aux, lge_devices,
	    nitems(lge_devices)));
}
373 
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * Resources are acquired in order (register mapping, interrupt,
 * descriptor DMA memory) and released in reverse order through the
 * fail_* labels if any step fails.
 */
void
lge_attach(struct device *parent, struct device *self, void *aux)
{
	struct lge_softc	*sc = (struct lge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_size_t		size;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			rseg;
	u_char			eaddr[ETHER_ADDR_LEN];
#ifndef LGE_USEIOSPACE
	pcireg_t		memtype;
#endif
	struct ifnet		*ifp;
	caddr_t			kva;

	/* Make sure the device is powered up before we touch it. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));

	DPRINTFN(5, ("pci_mapreg_map\n"));
#ifdef LGE_USEIOSPACE
	if (pci_mapreg_map(pa, LGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->lge_btag, &sc->lge_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	memtype = pci_mapreg_type(pc, pa->pa_tag, LGE_PCI_LOMEM);
	if (pci_mapreg_map(pa, LGE_PCI_LOMEM, memtype, 0, &sc->lge_btag,
	    &sc->lge_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Map and establish the interrupt at network IPL. */
	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);
	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc,
					      sc->sc_dv.dv_xname);
	if (sc->lge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	DPRINTFN(5, ("lge_reset\n"));
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM, one 16-bit word at a time.
	 */
	DPRINTFN(5, ("lge_read_eeprom\n"));
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	/*
	 * A Level 1 chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(eaddr));

	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Allocate, map and load one zeroed, page-aligned chunk of
	 * DMA-safe memory holding the RX/TX descriptor lists.
	 */
	sc->sc_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
			   sizeof(struct lge_list_data), &kva,
			   BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%zd bytes)\n",
		       sc->sc_dv.dv_xname, sizeof(struct lge_list_data));
		goto fail_3;
	}
	DPRINTFN(5, ("bus_dmamap_create\n"));
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1,
			      sizeof(struct lge_list_data), 0,
			      BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		goto fail_4;
	}
	DPRINTFN(5, ("bus_dmamap_load\n"));
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
			    sizeof(struct lge_list_data), NULL,
			    BUS_DMA_NOWAIT)) {
		goto fail_5;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->lge_ldata = (struct lge_list_data *)kva;

	/* Initialize the common ifnet hooks and queue limits. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_hardmtu = LGE_JUMBO_MTU;
	ifq_init_maxlen(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
	DPRINTFN(5, ("bcopy\n"));
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Detect whether the internal PCS (1000baseX) block is enabled. */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->lge_mii.mii_ifp = ifp;
	sc->lge_mii.mii_readreg = lge_miibus_readreg;
	sc->lge_mii.mii_writereg = lge_miibus_writereg;
	sc->lge_mii.mii_statchg = lge_miibus_statchg;
	ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd,
		     lge_ifmedia_sts);
	mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY,
		   MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) {
		/* No PHY: fall back to a manual placeholder media entry. */
		printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
		ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		DPRINTFN(5, ("ifmedia_set\n"));
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp);
	DPRINTFN(5, ("timeout_set\n"));
	/* Kick off the once-a-second stats/link tick. */
	timeout_set(&sc->lge_timeout, lge_tick, sc);
	timeout_add_sec(&sc->lge_timeout, 1);
	return;

fail_5:
	bus_dmamap_destroy(sc->sc_dmatag, dmamap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    sizeof(struct lge_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->lge_intrhand);

fail_1:
	bus_space_unmap(sc->lge_btag, sc->lge_bhandle, size);
}
560 
561 /*
562  * Initialize the transmit descriptors.
563  */
564 int
lge_list_tx_init(struct lge_softc * sc)565 lge_list_tx_init(struct lge_softc *sc)
566 {
567 	struct lge_list_data	*ld;
568 	struct lge_ring_data	*cd;
569 	int			i;
570 
571 	cd = &sc->lge_cdata;
572 	ld = sc->lge_ldata;
573 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
574 		ld->lge_tx_list[i].lge_mbuf = NULL;
575 		ld->lge_tx_list[i].lge_ctl = 0;
576 	}
577 
578 	cd->lge_tx_prod = cd->lge_tx_cons = 0;
579 
580 	return (0);
581 }
582 

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
lge_list_rx_init(struct lge_softc *sc)
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	/* All descriptor addresses are 32-bit; the high dword stays 0. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		/* Stop early once the RX command FIFO has no free slots. */
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return (0);
}
615 
/*
 * Initialize a RX descriptor and attach a MBUF cluster.
 *
 * If 'm' is NULL a fresh jumbo-sized cluster is allocated; otherwise
 * the caller's mbuf is recycled in place.  The descriptor is then
 * handed to the chip's RX command FIFO.  Returns ENOBUFS only when a
 * fresh cluster cannot be allocated.
 */
int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		m_new = MCLGETL(NULL, M_DONTWAIT, LGE_JLEN);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	*/
	m_adj(m_new, ETHER_ALIGN);

	/* Fill in the single-fragment descriptor for this cluster. */
	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = VTOPHYS(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, VTOPHYS(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return (0);
}
669 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * 'cnt' is the number of completed RX DMA descriptors reported by the
 * interrupt status word; exactly that many ring slots are consumed.
 */
void
lge_rxeof(struct lge_softc *sc, int cnt)
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		/* Snapshot the descriptor, then detach its mbuf. */
		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			/* Recycle the cluster back onto the ring tail. */
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			/*
			 * No fresh cluster available: copy the frame out
			 * with m_devget() and give the original cluster
			 * back to the ring.
			 */
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP) {
			if (!(rxsts & LGE_RXSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISTCP) {
			if (!(rxsts & LGE_RXSTS_TCPCSUMERR))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISUDP) {
			if (!(rxsts & LGE_RXSTS_UDPCSUMERR))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		ml_enqueue(&ml, m);
	}

	/* Hand the whole batch to the network stack in one call. */
	if_input(ifp, &ml);

	sc->lge_cdata.lge_rx_cons = i;
}
748 
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Walks the TX ring from the consumer index, freeing the mbuf of each
 * descriptor the chip reports done, and clears the OACTIVE flag if
 * anything was reclaimed.
 */

void
lge_txeof(struct lge_softc *sc)
{
	struct lge_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx, txdone;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	/* The chip reports the number of completed TX DMAs here. */
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* Reclaimed at least one slot, so transmission may resume. */
	if (cur_tx != NULL)
		ifq_clr_oactive(&ifp->if_snd);
}
792 
/*
 * Once-a-second timer: harvest collision statistics and, while the
 * link is down, poll the MII so transmission can start as soon as
 * a link comes up.  Reschedules itself every second.
 */
void
lge_tick(void *xsc)
{
	struct lge_softc	*sc = xsc;
	struct mii_data		*mii = &sc->lge_mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/* Statistics are read indirectly: select an index, read the value. */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			/* Link is up: restart any queued transmissions. */
			sc->lge_link++;
			if (!ifq_empty(&ifp->if_snd))
				lge_start(ifp);
		}
	}

	timeout_add_sec(&sc->lge_timeout, 1);

	splx(s);
}
822 
/*
 * Interrupt handler.  Loops reading the ISR (which clears pending
 * interrupts) and dispatches TX/RX completion, RX FIFO refill and PHY
 * events, then re-enables interrupts.  Returns nonzero if any
 * interrupt was ours.
 */
int
lge_intr(void *arg)
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return (0);
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		/* RX command FIFO ran dry: reinitialize the whole chip. */
		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_init(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* Link change: force lge_tick() to re-poll the MII. */
			sc->lge_link = 0;
			timeout_del(&sc->lge_timeout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (!ifq_empty(&ifp->if_snd))
		lge_start(ifp);

	return (claimed);
}
877 
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * On success the descriptor at *txidx is filled, *txidx is advanced,
 * and the descriptor's physical address is written to the TX command
 * FIFO.  The chain is NOT freed here; ownership stays with the ring.
 */
int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
 	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
 	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	/*
	 * NOTE(review): 'frag' is never bounded against the size of the
	 * lge_frags array, and the "if (m != NULL)" check below can
	 * never fire because this loop only terminates when m is NULL.
	 * A chain with more non-empty mbufs than the descriptor holds
	 * would overrun lge_frags -- verify against the fragment-array
	 * size in if_lgereg.h.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = VTOPHYS(mtod(m, vaddr_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	if (m != NULL)
		return (ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, VTOPHYS(cur_tx));

	return (0);
}
922 
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 *
 * Packets are dequeued with the deq_begin/commit/rollback protocol so
 * a packet that cannot be encapsulated stays at the head of the queue.
 */

void
lge_start(struct ifnet *ifp)
{
	struct lge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			pkts = 0;

	sc = ifp->if_softc;

	/* Nothing to do until lge_tick()/lge_intr() report link up. */
	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	/* Fill ring slots until one is busy or the command FIFO is full. */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			/* Ring full: put the packet back and stall the queue. */
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
985 
/*
 * Bring the interface up: stop and reset the chip, rebuild the RX/TX
 * descriptor rings, program the MODE1/MODE2 operating registers and
 * the multicast filter, then enable the receiver, transmitter and
 * interrupts.  Called at splnet via lge_ioctl() and from lge_intr()
 * when the RX command FIFO empties.
 */
void
lge_init(void *xsc)
{
	struct lge_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		       "memory for rx buffers\n", sc->sc_dv.dv_xname);
		lge_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/*
	 * Set initial value for MODE1 register.  The SETRST_CTL bits
	 * select whether the accompanying mode bits are set or cleared.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	/* Renegotiate media and mark the interface running. */
	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	/* Restart the once-a-second tick. */
	timeout_add_sec(&sc->lge_timeout, 1);
}
1111 
1112 /*
1113  * Set media options.
1114  */
1115 int
lge_ifmedia_upd(struct ifnet * ifp)1116 lge_ifmedia_upd(struct ifnet *ifp)
1117 {
1118 	struct lge_softc	*sc = ifp->if_softc;
1119 	struct mii_data		*mii = &sc->lge_mii;
1120 
1121 	sc->lge_link = 0;
1122 	if (mii->mii_instance) {
1123 		struct mii_softc *miisc;
1124 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1125 			mii_phy_reset(miisc);
1126 	}
1127 	mii_mediachg(mii);
1128 
1129 	return (0);
1130 }
1131 
1132 /*
1133  * Report current media status.
1134  */
1135 void
lge_ifmedia_sts(struct ifnet * ifp,struct ifmediareq * ifmr)1136 lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1137 {
1138 	struct lge_softc	*sc = ifp->if_softc;
1139 	struct mii_data		*mii = &sc->lge_mii;
1140 
1141 	mii_pollstat(mii);
1142 	ifmr->ifm_active = mii->mii_media_active;
1143 	ifmr->ifm_status = mii->mii_media_status;
1144 }
1145 
/*
 * Handle socket ioctls for the interface.  Runs at splnet() to keep
 * the network interrupt path out while interface state is changed.
 */
int
lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct lge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			lge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * The MODE1 register uses set/reset control bits:
			 * writing a mode bit together with SETRST_CTL1 sets
			 * it, writing it alone clears it (same convention as
			 * in lge_init()).  Compare against the cached
			 * lge_if_flags so the register is only touched when
			 * IFF_PROMISC actually toggled.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode turned on. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Promiscuous mode turned off. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->lge_if_flags) & IFF_ALLMULTI) {
				/* Only the ALLMULTI flag changed. */
				lge_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		/* Remember the flags for the next delta comparison. */
		sc->lge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media selection/query is delegated to the MII layer. */
		mii = &sc->lge_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/*
	 * ENETRESET from ether_ioctl() means the multicast list changed;
	 * reprogram the filter if the interface is running.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			lge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1211 
1212 void
lge_watchdog(struct ifnet * ifp)1213 lge_watchdog(struct ifnet *ifp)
1214 {
1215 	struct lge_softc	*sc;
1216 
1217 	sc = ifp->if_softc;
1218 
1219 	ifp->if_oerrors++;
1220 	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);
1221 
1222 	lge_stop(sc);
1223 	lge_reset(sc);
1224 	lge_init(sc);
1225 
1226 	if (!ifq_empty(&ifp->if_snd))
1227 		lge_start(ifp);
1228 }
1229 
1230 /*
1231  * Stop the adapter and free any mbufs allocated to the
1232  * RX and TX lists.
1233  */
1234 void
lge_stop(struct lge_softc * sc)1235 lge_stop(struct lge_softc *sc)
1236 {
1237 	int			i;
1238 	struct ifnet		*ifp;
1239 
1240 	ifp = &sc->arpcom.ac_if;
1241 	ifp->if_timer = 0;
1242 	timeout_del(&sc->lge_timeout);
1243 
1244 	ifp->if_flags &= ~IFF_RUNNING;
1245 	ifq_clr_oactive(&ifp->if_snd);
1246 
1247 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);
1248 
1249 	/* Disable receiver and transmitter. */
1250 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
1251 	sc->lge_link = 0;
1252 
1253 	/*
1254 	 * Free data in the RX lists.
1255 	 */
1256 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
1257 		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
1258 			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
1259 			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
1260 		}
1261 	}
1262 	bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list));
1263 
1264 	/*
1265 	 * Free the TX list buffers.
1266 	 */
1267 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
1268 		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
1269 			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
1270 			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
1271 		}
1272 	}
1273 
1274 	bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list));
1275 }
1276