xref: /dragonfly/sys/dev/netif/lge/if_lge.c (revision fcf6efef)
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2000, 2001
4  *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.5.2.2 2001/12/14 19:49:23 jlemon Exp $
34  */
35 
36 /*
37  * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
38  * documentation not available, but ask me nicely.
39  *
40  * Written by Bill Paul <william.paul@windriver.com>
41  * Wind River Systems
42  */
43 
44 /*
45  * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
46  * It's a 64-bit PCI part that supports TCP/IP checksum offload,
47  * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
48  * are three supported methods for data transfer between host and
49  * NIC: programmed I/O, traditional scatter/gather DMA and Packet
50  * Propulsion Technology (tm) DMA. The latter mechanism is a form
51  * of double buffer DMA where the packet data is copied to a
 * pre-allocated DMA buffer whose physical address has been loaded
53  * into a table at device initialization time. The rationale is that
54  * the virtual to physical address translation needed for normal
55  * scatter/gather DMA is more expensive than the data copy needed
56  * for double buffering. This may be true in Windows NT and the like,
57  * but it isn't true for us, at least on the x86 arch. This driver
58  * uses the scatter/gather I/O method for both TX and RX.
59  *
60  * The LXT1001 only supports TCP/IP checksum offload on receive.
61  * Also, the VLAN tagging is done using a 16-entry table which allows
62  * the chip to perform hardware filtering based on VLAN tags. Sadly,
63  * our vlan support doesn't currently play well with this kind of
64  * hardware support.
65  *
66  * Special thanks to:
67  * - Jeff James at Intel, for arranging to have the LXT1001 manual
68  *   released (at long last)
69  * - Beny Chen at D-Link, for actually sending it to me
70  * - Brad Short and Keith Alexis at SMC, for sending me sample
71  *   SMC9462SX and SMC9462TX adapters for testing
72  * - Paul Saab at Y!, for not killing me (though it remains to be seen
73  *   if in fact he did me much of a favor)
74  */
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/sockio.h>
79 #include <sys/mbuf.h>
80 #include <sys/malloc.h>
81 #include <sys/kernel.h>
82 #include <sys/interrupt.h>
83 #include <sys/socket.h>
84 #include <sys/serialize.h>
85 
86 #include <net/if.h>
87 #include <net/ifq_var.h>
88 #include <net/if_arp.h>
89 #include <net/ethernet.h>
90 #include <net/if_dl.h>
91 #include <net/if_media.h>
92 
93 #include <net/bpf.h>
94 
95 #include <vm/vm.h>              /* for vtophys */
96 #include <vm/pmap.h>            /* for vtophys */
97 #include <sys/bus.h>
98 #include <sys/rman.h>
99 
100 #include <dev/netif/mii_layer/mii.h>
101 #include <dev/netif/mii_layer/miivar.h>
102 
103 #include "pcidevs.h"
104 #include <bus/pci/pcireg.h>
105 #include <bus/pci/pcivar.h>
106 
107 #define LGE_USEIOSPACE
108 
109 #include "if_lgereg.h"
110 
111 /* "controller miibus0" required.  See GENERIC if you get errors here. */
112 #include "miibus_if.h"
113 
114 /*
115  * Various supported device vendors/types and their names.
116  */
117 static struct lge_type lge_devs[] = {
118 	{ PCI_VENDOR_LEVELONE, PCI_PRODUCT_LEVELONE_LXT1001,
119 	    "Level 1 Gigabit Ethernet" },
120 	{ 0, 0, NULL }
121 };
122 
123 static int	lge_probe(device_t);
124 static int	lge_attach(device_t);
125 static int	lge_detach(device_t);
126 
127 static int	lge_alloc_jumbo_mem(struct lge_softc *);
128 static void	lge_free_jumbo_mem(struct lge_softc *);
129 static struct lge_jslot
130 		*lge_jalloc(struct lge_softc *);
131 static void	lge_jfree(void *);
132 static void	lge_jref(void *);
133 
134 static int	lge_newbuf(struct lge_softc *, struct lge_rx_desc *,
135 			   struct mbuf *);
136 static int	lge_encap(struct lge_softc *, struct mbuf *, uint32_t *);
137 static void	lge_rxeof(struct lge_softc *, int);
138 static void	lge_rxeoc(struct lge_softc *);
139 static void	lge_txeof(struct lge_softc *);
140 static void	lge_intr(void *);
141 static void	lge_tick(void *);
142 static void	lge_tick_serialized(void *);
143 static void	lge_start(struct ifnet *, struct ifaltq_subque *);
144 static int	lge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
145 static void	lge_init(void *);
146 static void	lge_stop(struct lge_softc *);
147 static void	lge_watchdog(struct ifnet *);
148 static void	lge_shutdown(device_t);
149 static int	lge_ifmedia_upd(struct ifnet *);
150 static void	lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
151 
152 static void	lge_eeprom_getword(struct lge_softc *, int, uint16_t *);
153 static void	lge_read_eeprom(struct lge_softc *, caddr_t, int, int);
154 
155 static int	lge_miibus_readreg(device_t, int, int);
156 static int	lge_miibus_writereg(device_t, int, int, int);
157 static void	lge_miibus_statchg(device_t);
158 
159 static void	lge_setmulti(struct lge_softc *);
160 static void	lge_reset(struct lge_softc *);
161 static int	lge_list_rx_init(struct lge_softc *);
162 static int	lge_list_tx_init(struct lge_softc *);
163 
164 #ifdef LGE_USEIOSPACE
165 #define LGE_RES			SYS_RES_IOPORT
166 #define LGE_RID			LGE_PCI_LOIO
167 #else
168 #define LGE_RES			SYS_RES_MEMORY
169 #define LGE_RID			LGE_PCI_LOMEM
170 #endif
171 
172 static device_method_t lge_methods[] = {
173 	/* Device interface */
174 	DEVMETHOD(device_probe,		lge_probe),
175 	DEVMETHOD(device_attach,	lge_attach),
176 	DEVMETHOD(device_detach,	lge_detach),
177 	DEVMETHOD(device_shutdown,	lge_shutdown),
178 
179 	/* bus interface */
180 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
181 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
182 
183 	/* MII interface */
184 	DEVMETHOD(miibus_readreg,	lge_miibus_readreg),
185 	DEVMETHOD(miibus_writereg,	lge_miibus_writereg),
186 	DEVMETHOD(miibus_statchg,	lge_miibus_statchg),
187 
188 	DEVMETHOD_END
189 };
190 
191 static DEFINE_CLASS_0(lge, lge_driver, lge_methods, sizeof(struct lge_softc));
192 static devclass_t lge_devclass;
193 
194 DECLARE_DUMMY_MODULE(if_lge);
195 DRIVER_MODULE(if_lge, pci, lge_driver, lge_devclass, NULL, NULL);
196 DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, NULL, NULL);
197 
198 #define LGE_SETBIT(sc, reg, x)				\
199 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
200 
201 #define LGE_CLRBIT(sc, reg, x)				\
202 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
203 
204 #define SIO_SET(x)					\
205 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | (x))
206 
207 #define SIO_CLR(x)					\
208 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~(x))
209 
210 /*
211  * Read a word of data stored in the EEPROM at address 'addr.'
212  */
static void
lge_eeprom_getword(struct lge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint32_t val;

	/*
	 * Issue a single-access read command.  Each EEPROM access fetches
	 * a 32-bit quantity covering two 16-bit words, so the device is
	 * addressed by word-pair (addr >> 1).
	 */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS | ((addr >> 1) << 8));

	/* Poll until the chip clears the READ command bit (done). */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ) == 0)
			break;
	}

	/* On timeout *dest is left untouched. */
	if (i == LGE_TIMEOUT) {
		kprintf("lge%d: EEPROM read timed out\n", sc->lge_unit);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd addresses live in the upper half of the 32-bit data word. */
	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;
}
239 
240 /*
241  * Read a sequence of words from the EEPROM.
242  */
243 static void
lge_read_eeprom(struct lge_softc * sc,caddr_t dest,int off,int cnt)244 lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off, int cnt)
245 {
246 	int i;
247 	uint16_t word = 0, *ptr;
248 
249 	for (i = 0; i < cnt; i++) {
250 		lge_eeprom_getword(sc, off + i, &word);
251 		ptr = (uint16_t *)(dest + (i * 2));
252 		*ptr = ntohs(word);
253 	}
254 }
255 
static int
lge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct lge_softc *sc = device_get_softc(dev);
	int i;

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	/* Kick off the read: PHY address in bits 8-12, register in 0-4. */
	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Poll until the chip deasserts the command-busy bit. */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY) == 0)
			break;
	}

	/* Timeouts are reported but return 0 like an absent PHY. */
	if (i == LGE_TIMEOUT) {
		kprintf("lge%d: PHY read timed out\n", sc->lge_unit);
		return(0);
	}

	/* Register data comes back in the upper 16 bits. */
	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}
284 
285 static int
lge_miibus_writereg(device_t dev,int phy,int reg,int data)286 lge_miibus_writereg(device_t dev, int phy, int reg, int data)
287 {
288 	struct lge_softc *sc = device_get_softc(dev);
289 	int i;
290 
291 	CSR_WRITE_4(sc, LGE_GMIICTL,
292 	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);
293 
294 	for (i = 0; i < LGE_TIMEOUT; i++) {
295 		if ((CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY) == 0)
296 			break;
297 	}
298 
299 	if (i == LGE_TIMEOUT) {
300 		kprintf("lge%d: PHY write timed out\n", sc->lge_unit);
301 		return(0);
302 	}
303 
304 	return(0);
305 }
306 
307 static void
lge_miibus_statchg(device_t dev)308 lge_miibus_statchg(device_t dev)
309 {
310 	struct lge_softc *sc = device_get_softc(dev);
311 	struct mii_data *mii = device_get_softc(sc->lge_miibus);
312 
313 	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
314 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
315 	case IFM_1000_T:
316 	case IFM_1000_SX:
317 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
318 		break;
319 	case IFM_100_TX:
320 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
321 		break;
322 	case IFM_10_T:
323 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
324 		break;
325 	default:
326 		/*
327 		 * Choose something, even if it's wrong. Clearing
328 		 * all the bits will hose autoneg on the internal
329 		 * PHY.
330 		 */
331 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
332 		break;
333 	}
334 
335 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
336 		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
337 	else
338 		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
339 }
340 
341 static void
lge_setmulti(struct lge_softc * sc)342 lge_setmulti(struct lge_softc *sc)
343 {
344 	struct ifnet *ifp = &sc->arpcom.ac_if;
345 	struct ifmultiaddr *ifma;
346 	uint32_t h = 0, hashes[2] = { 0, 0 };
347 
348 	/* Make sure multicast hash table is enabled. */
349 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_MCAST);
350 
351 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
352 		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
353 		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
354 		return;
355 	}
356 
357 	/* first, zot all the existing hash bits */
358 	CSR_WRITE_4(sc, LGE_MAR0, 0);
359 	CSR_WRITE_4(sc, LGE_MAR1, 0);
360 
361 	/* now program new ones */
362 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
363 		if (ifma->ifma_addr->sa_family != AF_LINK)
364 			continue;
365 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
366 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
367 		if (h < 32)
368 			hashes[0] |= (1 << h);
369 		else
370 			hashes[1] |= (1 << (h - 32));
371 	}
372 
373 	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
374 	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
375 
376 	return;
377 }
378 
379 static void
lge_reset(struct lge_softc * sc)380 lge_reset(struct lge_softc *sc)
381 {
382 	int i;
383 
384 	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0 | LGE_MODE1_SOFTRST);
385 
386 	for (i = 0; i < LGE_TIMEOUT; i++) {
387 		if ((CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST) == 0)
388 			break;
389 	}
390 
391 	if (i == LGE_TIMEOUT)
392 		kprintf("lge%d: reset never completed\n", sc->lge_unit);
393 
394 	/* Wait a little while for the chip to get its brains in order. */
395 	DELAY(1000);
396 }
397 
398 /*
399  * Probe for a Level 1 chip. Check the PCI vendor and device
400  * IDs against our list and return a device name if we find a match.
401  */
402 static int
lge_probe(device_t dev)403 lge_probe(device_t dev)
404 {
405 	struct lge_type *t;
406 	uint16_t vendor, product;
407 
408 	vendor = pci_get_vendor(dev);
409 	product = pci_get_device(dev);
410 
411 	for (t = lge_devs; t->lge_name != NULL; t++) {
412 		if (vendor == t->lge_vid && product == t->lge_did) {
413 			device_set_desc(dev, t->lge_name);
414 			return(0);
415 		}
416 	}
417 
418 	return(ENXIO);
419 }
420 
421 /*
422  * Attach the interface. Allocate softc structures, do ifmedia
423  * setup and ethernet/BPF attach.
424  */
static int
lge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct lge_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	callout_init(&sc->lge_stat_timer);
	lwkt_serialize_init(&sc->lge_jslot_serializer);

	/*
	 * Handle power management nonsense: if the chip is not in D0,
	 * bring it there, preserving the BAR and interrupt-line config
	 * registers across the power-state change.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, LGE_PCI_LOIO, 4);
		membase = pci_read_config(dev, LGE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, LGE_PCI_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		"-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, LGE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, LGE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, LGE_PCI_INTLINE, irq, 4);
	}

	pci_enable_busmaster(dev);

	/* Map the register window (I/O or memory per LGE_USEIOSPACE). */
	rid = LGE_RID;
	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);

	if (sc->lge_res == NULL) {
		kprintf("lge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->lge_btag = rman_get_bustag(sc->lge_res);
	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->lge_irq == NULL) {
		kprintf("lge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM (three 16-bit words).
	 */
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1);

	sc->lge_unit = unit;

	/*
	 * Descriptor lists must be physically contiguous and page
	 * aligned since the chip DMAs to/from them directly.
	 */
	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_ldata == NULL) {
		kprintf("lge%d: no memory for list buffers!\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (lge_alloc_jumbo_mem(sc)) {
		kprintf("lge%d: jumbo buffer allocation failed\n",
                    sc->lge_unit);
		error = ENXIO;
		goto fail;
	}

	/* Populate the ifnet and hook up our entry points. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "lge", unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_init = lge_init;
	ifp->if_baudrate = 1000000000;
	ifq_set_maxlen(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/* The LXT1001 only does checksum offload on receive. */
	ifp->if_capabilities = IFCAP_RXCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Remember whether the chip reports a PCS (TBI) interface. */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->lge_miibus,
	    lge_ifmedia_upd, lge_ifmedia_sts)) {
		kprintf("lge%d: MII without any PHY!\n", sc->lge_unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->lge_irq));

	/* Interrupt handler runs under the interface serializer. */
	error = bus_setup_intr(dev, sc->lge_irq, INTR_MPSAFE,
			       lge_intr, sc, &sc->lge_intrhand,
			       ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		kprintf("lge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	return(0);

fail:
	/* lge_detach() copes with a partially-initialized softc. */
	lge_detach(dev);
	return(error);
}
567 
static int
lge_detach(device_t dev)
{
	struct lge_softc *sc= device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Only quiesce hardware / tear down the interrupt if the attach
	 * actually completed; this is also the failure path for
	 * lge_attach(), where some resources may not exist yet.
	 */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		lge_reset(sc);
		lge_stop(sc);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	/* Remove the miibus child before releasing bus resources. */
	if (sc->lge_miibus)
		device_delete_child(dev, sc->lge_miibus);
	bus_generic_detach(dev);

	if (sc->lge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	if (sc->lge_res)
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);

	/* Free DMA memory last; the chip is stopped by now. */
	if (sc->lge_ldata)
		contigfree(sc->lge_ldata, sizeof(struct lge_list_data),
			   M_DEVBUF);
	lge_free_jumbo_mem(sc);

	return(0);
}
600 
601 /*
602  * Initialize the transmit descriptors.
603  */
604 static int
lge_list_tx_init(struct lge_softc * sc)605 lge_list_tx_init(struct lge_softc *sc)
606 {
607 	struct lge_list_data *ld;
608 	struct lge_ring_data *cd;
609 	int i;
610 
611 	cd = &sc->lge_cdata;
612 	ld = sc->lge_ldata;
613 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
614 		ld->lge_tx_list[i].lge_mbuf = NULL;
615 		ld->lge_tx_list[i].lge_ctl = 0;
616 	}
617 
618 	cd->lge_tx_prod = cd->lge_tx_cons = 0;
619 
620 	return(0);
621 }
622 
623 
624 /*
625  * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
627  * points back to the first.
628  */
629 static int
lge_list_rx_init(struct lge_softc * sc)630 lge_list_rx_init(struct lge_softc *sc)
631 {
632 	struct lge_list_data *ld;
633 	struct lge_ring_data *cd;
634 	int i;
635 
636 	ld = sc->lge_ldata;
637 	cd = &sc->lge_cdata;
638 
639 	cd->lge_rx_prod = cd->lge_rx_cons = 0;
640 
641 	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
642 
643 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
644 		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
645 			break;
646 		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
647 			return(ENOBUFS);
648 	}
649 
650 	/* Clear possible 'rx command queue empty' interrupt. */
651 	CSR_READ_4(sc, LGE_ISR);
652 
653 	return(0);
654 }
655 
656 /*
657  * Initialize an RX descriptor and attach an MBUF cluster.
658  */
static int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct lge_jslot *buf;

	/*
	 * If no mbuf is supplied, allocate a fresh header and attach a
	 * jumbo buffer to it; otherwise recycle the caller's mbuf
	 * (its external jumbo storage is reused in place).
	 */
	if (m == NULL) {
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			kprintf("lge%d: no memory for rx list "
			    "-- packet dropped!\n", sc->lge_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
#ifdef LGE_VERBOSE
			kprintf("lge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->lge_unit);
#endif
			m_freem(m_new);
			return(ENOBUFS);
		}
		/* Attach the buffer to the mbuf */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->lge_buf;
		m_new->m_ext.ext_free = lge_jfree;
		m_new->m_ext.ext_ref = lge_jref;
		m_new->m_ext.ext_size = LGE_JUMBO_FRAMELEN;

		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
	} else {
		/* Recycle: reset lengths and data pointer to the buffer start. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	*/
	m_adj(m_new, ETHER_ALIGN);

	/* Fill in the descriptor: single fragment, interrupt on completion. */
	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return(0);
}
729 
730 static int
lge_alloc_jumbo_mem(struct lge_softc * sc)731 lge_alloc_jumbo_mem(struct lge_softc *sc)
732 {
733 	struct lge_jslot *entry;
734 	caddr_t ptr;
735 	int i;
736 
737 	/* Grab a big chunk o' storage. */
738 	sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF,
739 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
740 
741 	if (sc->lge_cdata.lge_jumbo_buf == NULL) {
742 		kprintf("lge%d: no memory for jumbo buffers!\n", sc->lge_unit);
743 		return(ENOBUFS);
744 	}
745 
746 	SLIST_INIT(&sc->lge_jfree_listhead);
747 
748 	/*
749 	 * Now divide it up into 9K pieces and save the addresses
750 	 * in an array.
751 	 */
752 	ptr = sc->lge_cdata.lge_jumbo_buf;
753 	for (i = 0; i < LGE_JSLOTS; i++) {
754 		entry = &sc->lge_cdata.lge_jslots[i];
755 		entry->lge_sc = sc;
756 		entry->lge_buf = ptr;
757 		entry->lge_inuse = 0;
758 		entry->lge_slot = i;
759 		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jslot_link);
760 		ptr += LGE_JLEN;
761 	}
762 
763 	return(0);
764 }
765 
766 static void
lge_free_jumbo_mem(struct lge_softc * sc)767 lge_free_jumbo_mem(struct lge_softc *sc)
768 {
769 	if (sc->lge_cdata.lge_jumbo_buf)
770 		contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);
771 }
772 
773 /*
774  * Allocate a jumbo buffer.
775  */
776 static struct lge_jslot *
lge_jalloc(struct lge_softc * sc)777 lge_jalloc(struct lge_softc *sc)
778 {
779 	struct lge_jslot *entry;
780 
781 	lwkt_serialize_enter(&sc->lge_jslot_serializer);
782 	entry = SLIST_FIRST(&sc->lge_jfree_listhead);
783 	if (entry) {
784 		SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jslot_link);
785 		entry->lge_inuse = 1;
786 	} else {
787 #ifdef LGE_VERBOSE
788 		kprintf("lge%d: no free jumbo buffers\n", sc->lge_unit);
789 #endif
790 	}
791 	lwkt_serialize_exit(&sc->lge_jslot_serializer);
792 	return(entry);
793 }
794 
795 /*
796  * Adjust usage count on a jumbo buffer. In general this doesn't
797  * get used much because our jumbo buffers don't get passed around
798  * a lot, but it's implemented for correctness.
799  */
800 static void
lge_jref(void * arg)801 lge_jref(void *arg)
802 {
803 	struct lge_jslot *entry = (struct lge_jslot *)arg;
804 	struct lge_softc *sc = entry->lge_sc;
805 
806 	if (&sc->lge_cdata.lge_jslots[entry->lge_slot] != entry)
807 		panic("lge_jref: asked to reference buffer "
808 		    "that we don't manage!");
809 	else if (entry->lge_inuse == 0)
810 		panic("lge_jref: buffer already free!");
811 	else
812 		atomic_add_int(&entry->lge_inuse, 1);
813 }
814 
815 /*
816  * Release a jumbo buffer.
817  */
818 static void
lge_jfree(void * arg)819 lge_jfree(void *arg)
820 {
821 	struct lge_jslot *entry = (struct lge_jslot *)arg;
822 	struct lge_softc *sc = entry->lge_sc;
823 
824 	if (sc == NULL)
825 		panic("lge_jfree: can't find softc pointer!");
826 
827 	if (&sc->lge_cdata.lge_jslots[entry->lge_slot] != entry) {
828 		panic("lge_jfree: asked to free buffer that we don't manage!");
829 	} else if (entry->lge_inuse == 0) {
830 		panic("lge_jfree: buffer already free!");
831 	} else {
832 		lwkt_serialize_enter(&sc->lge_jslot_serializer);
833 		atomic_subtract_int(&entry->lge_inuse, 1);
834 		if (entry->lge_inuse == 0) {
835 			SLIST_INSERT_HEAD(&sc->lge_jfree_listhead,
836 					  entry, jslot_link);
837 		}
838 		lwkt_serialize_exit(&sc->lge_jslot_serializer);
839 	}
840 }
841 
842 /*
843  * A frame has been uploaded: pass the resulting mbuf chain up to
844  * the higher level protocols.
845  */
static void
lge_rxeof(struct lge_softc *sc, int cnt)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mbuf *m;
	struct lge_rx_desc *cur_rx;
	int c, i, total_len = 0;
	uint32_t rxsts, rxctl;


	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		/*
		 * If a replacement buffer can't be had, copy the frame
		 * out of the current mbuf and recycle the original
		 * buffer back into the ring; otherwise hand the
		 * received mbuf up and the freshly-allocated one stays
		 * in the ring.
		 */
		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
				      total_len + ETHER_ALIGN, 0, ifp);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				kprintf("lge%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc->lge_unit);
				IFNET_STAT_INC(ifp, ierrors, 1);
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		IFNET_STAT_INC(ifp, ipackets, 1);

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		/* TCP/UDP checksum valid only if the right proto bit is set. */
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
			    CSUM_FRAG_NOT_CHECKED;
			m->m_pkthdr.csum_data = 0xffff;
		}

		ifp->if_input(ifp, m, NULL, -1);
	}

	sc->lge_cdata.lge_rx_cons = i;
}
925 
/*
 * RX command queue underrun: the chip ran out of posted RX buffers.
 * Recover by reinitializing the interface, which refills the ring.
 * Clearing IFF_RUNNING first forces lge_init() to do a full restart.
 */
static void
lge_rxeoc(struct lge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);
}
934 
935 /*
936  * A frame was downloaded to the chip. It's safe for us to clean up
937  * the list buffers.
938  */
939 static void
lge_txeof(struct lge_softc * sc)940 lge_txeof(struct lge_softc *sc)
941 {
942 	struct ifnet *ifp = &sc->arpcom.ac_if;
943 	struct lge_tx_desc *cur_tx = NULL;
944 	uint32_t idx, txdone;
945 
946 	/* Clear the timeout timer. */
947 	ifp->if_timer = 0;
948 
949 	/*
950 	 * Go through our tx list and free mbufs for those
951 	 * frames that have been transmitted.
952 	 */
953 	idx = sc->lge_cdata.lge_tx_cons;
954 	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);
955 
956 	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
957 		cur_tx = &sc->lge_ldata->lge_tx_list[idx];
958 
959 		IFNET_STAT_INC(ifp, opackets, 1);
960 		if (cur_tx->lge_mbuf != NULL) {
961 			m_freem(cur_tx->lge_mbuf);
962 			cur_tx->lge_mbuf = NULL;
963 		}
964 		cur_tx->lge_ctl = 0;
965 
966 		txdone--;
967 		LGE_INC(idx, LGE_TX_LIST_CNT);
968 		ifp->if_timer = 0;
969 	}
970 
971 	sc->lge_cdata.lge_tx_cons = idx;
972 
973 	if (cur_tx != NULL)
974 		ifq_clr_oactive(&ifp->if_snd);
975 }
976 
977 static void
lge_tick(void * xsc)978 lge_tick(void *xsc)
979 {
980 	struct lge_softc *sc = xsc;
981 	struct ifnet *ifp = &sc->arpcom.ac_if;
982 
983 	lwkt_serialize_enter(ifp->if_serializer);
984 	lge_tick_serialized(xsc);
985 	lwkt_serialize_exit(ifp->if_serializer);
986 }
987 
static void
lge_tick_serialized(void *xsc)
{
	struct lge_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Harvest collision statistics: select each counter via the
	 * stats index register, then read its value.
	 */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	IFNET_STAT_INC(ifp, collisions, CSR_READ_4(sc, LGE_STATSVAL));
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	IFNET_STAT_INC(ifp, collisions, CSR_READ_4(sc, LGE_STATSVAL));

	/*
	 * While the link is down, poll the PHY once a second; when it
	 * comes up, note the fact and kick the transmit queue in case
	 * packets queued up while there was no link.
	 */
	if (!sc->lge_link) {
		mii = device_get_softc(sc->lge_miibus);
		mii_tick(mii);
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
				kprintf("lge%d: gigabit link up\n",
				    sc->lge_unit);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	/* Reschedule ourselves for one second from now. */
	callout_reset(&sc->lge_stat_timer, hz, lge_tick, sc);
}
1018 
static void
lge_intr(void *arg)
{
	struct lge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	/* Suppress unwanted interrupts while the interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		lge_stop(sc);
		return;
	}

	/* Service events until the ISR shows nothing of interest. */
	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		/* The completed-frame count is encoded in the status word. */
		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		/* RX buffer underrun: reinitialize to refill the ring. */
		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		/*
		 * PHY event: mark the link down and re-run the tick
		 * handler immediately (we already hold the serializer)
		 * to re-evaluate the link state.
		 */
		if (status & LGE_ISR_PHY_INTR) {
			sc->lge_link = 0;
			callout_stop(&sc->lge_stat_timer);
			lge_tick_serialized(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	/* Restart transmission if packets are waiting. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1065 
1066 /*
1067  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1068  * pointers to the fragment pointers.
1069  */
1070 static int
lge_encap(struct lge_softc * sc,struct mbuf * m_head,uint32_t * txidx)1071 lge_encap(struct lge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
1072 {
1073 	struct lge_frag *f = NULL;
1074 	struct lge_tx_desc *cur_tx;
1075 	struct mbuf *m;
1076 	int frag = 0, tot_len = 0;
1077 
1078 	/*
1079  	 * Start packing the mbufs in this chain into
1080 	 * the fragment pointers. Stop when we run out
1081  	 * of fragments or hit the end of the mbuf chain.
1082 	 */
1083 	m = m_head;
1084 	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
1085 	frag = 0;
1086 
1087 	for (m = m_head; m != NULL; m = m->m_next) {
1088 		if (m->m_len != 0) {
1089 			if (frag == LGE_FRAG_CNT)
1090 				break;
1091 
1092 			tot_len += m->m_len;
1093 			f = &cur_tx->lge_frags[frag];
1094 			f->lge_fraglen = m->m_len;
1095 			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
1096 			f->lge_fragptr_hi = 0;
1097 			frag++;
1098 		}
1099 	}
1100 	/* Caller should make sure that 'm_head' is not excessive fragmented */
1101 	KASSERT(m == NULL, ("too many fragments"));
1102 
1103 	cur_tx->lge_mbuf = m_head;
1104 	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
1105 	LGE_INC((*txidx), LGE_TX_LIST_CNT);
1106 
1107 	/* Queue for transmit */
1108 	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));
1109 
1110 	return(0);
1111 }
1112 
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
lge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct lge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL, *m_defragged;
	uint32_t idx;
	int need_timer;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	/* No link: nothing can be sent, so drain the send queue. */
	if (!sc->lge_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	need_timer = 0;
	/* Loop while the descriptor at 'idx' is free (no mbuf attached). */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		struct mbuf *m;
		int frags;

		/* Chip-side TX command FIFO full: stall output. */
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_defragged = NULL;
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

again:
		/*
		 * The descriptor holds at most LGE_FRAG_CNT fragments;
		 * count the mbufs in the chain and defragment once if
		 * there are too many.  A chain that is still too long
		 * after defragmentation is dropped.
		 */
		frags = 0;
		for (m = m_head; m != NULL; m = m->m_next)
			++frags;
		if (frags > LGE_FRAG_CNT) {
			if (m_defragged != NULL) {
				/*
				 * Even after defragmentation, there
				 * are still too many fragments, so
				 * drop this packet.
				 */
				m_freem(m_head);
				continue;
			}

			m_defragged = m_defrag(m_head, M_NOWAIT);
			if (m_defragged == NULL) {
				m_freem(m_head);
				continue;
			}
			m_head = m_defragged;

			/* Recount # of fragments */
			goto again;
		}

		lge_encap(sc, m_head, &idx);
		need_timer = 1;

		/* Hand a copy to any attached BPF listeners. */
		BPF_MTAP(ifp, m_head);
	}

	/* Nothing was queued to the chip; leave tx_prod untouched. */
	if (!need_timer)
		return;

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1197 
/*
 * Bring the interface up: reset the chip, initialize the RX/TX rings,
 * program the MAC address, receive filters and mode registers, enable
 * the receiver/transmitter and interrupts, and start the stat timer.
 * No-op if the interface is already running.
 */
static void
lge_init(void *xsc)
{
	struct lge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(uint32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(uint32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		kprintf("lge%d: initialization failed: no "
		    "memory for rx buffers\n", sc->lge_unit);
		lge_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/* Set initial value for MODE1 register. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST |
	    LGE_MODE1_TX_CRC | LGE_MODE1_TXPAD |
	    LGE_MODE1_RX_FLOWCTL | LGE_MODE1_SETRST_CTL0 |
	    LGE_MODE1_SETRST_CTL1 | LGE_MODE1_SETRST_CTL2);

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX | LGE_MODE1_VLAN_TX |
	    LGE_MODE1_VLAN_STRIP | LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM |
	    LGE_MODE2_RX_TCPCSUM | LGE_MODE2_RX_UDPCSUM |
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0 | LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1 | LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0 |
	    LGE_IMR_SETRST_CTL1 | LGE_IMR_INTR_ENB|LGE_INTRS);

	/* Kick off media (re)negotiation. */
	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	callout_reset(&sc->lge_stat_timer, hz, lge_tick, sc);
}
1320 
1321 /*
1322  * Set media options.
1323  */
1324 static int
lge_ifmedia_upd(struct ifnet * ifp)1325 lge_ifmedia_upd(struct ifnet *ifp)
1326 {
1327 	struct lge_softc *sc = ifp->if_softc;
1328 	struct mii_data *mii = device_get_softc(sc->lge_miibus);
1329 
1330 	sc->lge_link = 0;
1331 	if (mii->mii_instance) {
1332 		struct mii_softc *miisc;
1333 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1334 			mii_phy_reset(miisc);
1335 	}
1336 	mii_mediachg(mii);
1337 
1338 	return(0);
1339 }
1340 
1341 /*
1342  * Report current media status.
1343  */
1344 static void
lge_ifmedia_sts(struct ifnet * ifp,struct ifmediareq * ifmr)1345 lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1346 {
1347 	struct lge_softc *sc = ifp->if_softc;
1348 	struct mii_data *mii;
1349 
1350 	mii = device_get_softc(sc->lge_miibus);
1351 	mii_pollstat(mii);
1352 	ifmr->ifm_active = mii->mii_media_active;
1353 	ifmr->ifm_status = mii->mii_media_status;
1354 }
1355 
/*
 * Handle interface ioctls: MTU changes, up/down and promiscuous-mode
 * flag transitions, multicast filter updates and media requests.
 * Anything else is punted to ether_ioctl().
 */
static int
lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct lge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data	 *mii;
	int error = 0;

	switch(command) {
	case SIOCSIFMTU:
		/* NOTE(review): only an upper bound is enforced here;
		 * confirm whether a minimum MTU check is needed. */
		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC flag changed while running,
			 * toggle the hardware filter bit directly instead
			 * of doing a full re-init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
			} else {
				/* Force lge_init() to run its full path. */
				ifp->if_flags &= ~IFF_RUNNING;
				lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		/* Remember the flags for the next delta comparison. */
		sc->lge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lge_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->lge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
1412 
1413 static void
lge_watchdog(struct ifnet * ifp)1414 lge_watchdog(struct ifnet *ifp)
1415 {
1416 	struct lge_softc *sc = ifp->if_softc;
1417 
1418 	IFNET_STAT_INC(ifp, oerrors, 1);
1419 	kprintf("lge%d: watchdog timeout\n", sc->lge_unit);
1420 
1421 	lge_stop(sc);
1422 	lge_reset(sc);
1423 	ifp->if_flags &= ~IFF_RUNNING;
1424 	lge_init(sc);
1425 
1426 	if (!ifq_is_empty(&ifp->if_snd))
1427 		if_devstart(ifp);
1428 }
1429 
1430 /*
1431  * Stop the adapter and free any mbufs allocated to the
1432  * RX and TX lists.
1433  */
1434 static void
lge_stop(struct lge_softc * sc)1435 lge_stop(struct lge_softc *sc)
1436 {
1437 	struct ifnet *ifp = &sc->arpcom.ac_if;
1438 	int i;
1439 
1440 	ifp->if_timer = 0;
1441 	callout_stop(&sc->lge_stat_timer);
1442 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);
1443 
1444 	/* Disable receiver and transmitter. */
1445 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
1446 	sc->lge_link = 0;
1447 
1448 	/*
1449 	 * Free data in the RX lists.
1450 	 */
1451 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
1452 		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
1453 			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
1454 			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
1455 		}
1456 	}
1457 	bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list));
1458 
1459 	/*
1460 	 * Free the TX list buffers.
1461 	 */
1462 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
1463 		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
1464 			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
1465 			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
1466 		}
1467 	}
1468 
1469 	bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list));
1470 
1471 	ifp->if_flags &= ~IFF_RUNNING;
1472 	ifq_clr_oactive(&ifp->if_snd);
1473 }
1474 
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
lge_shutdown(device_t dev)
{
	struct lge_softc *sc = device_get_softc(dev);

	/* Reset first, then stop and release all ring buffers. */
	lge_reset(sc);
	lge_stop(sc);
}
1487