1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 /*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
3 /*	$DragonFly: src/sys/dev/netif/stge/if_stge.c,v 1.1 2006/11/16 13:43:55 sephe Exp $	*/
4 
5 /*-
6  * Copyright (c) 2001 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Jason R. Thorpe.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /*
42  * Device driver for the Sundance Tech. TC9021 10/100/1000
43  * Ethernet controller.
44  */
45 
46 #include "opt_polling.h"
47 
48 #include <sys/param.h>
49 #include <sys/bus.h>
50 #include <sys/endian.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/module.h>
55 #include <sys/rman.h>
56 #include <sys/serialize.h>
57 #include <sys/socket.h>
58 #include <sys/sockio.h>
59 #include <sys/sysctl.h>
60 
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if.h>
64 #include <net/if_arp.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/ifq_var.h>
69 #include <net/vlan/if_vlan_var.h>
70 
71 #include <dev/netif/mii_layer/mii.h>
72 #include <dev/netif/mii_layer/miivar.h>
73 
74 #include <bus/pci/pcireg.h>
75 #include <bus/pci/pcivar.h>
76 
77 #include "if_stgereg.h"
78 #include "if_stgevar.h"
79 
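/*
 * Checksum offloads the controller can perform; these are mirrored
 * into ifp->if_hwassist whenever IFCAP_HWCSUM is enabled (see
 * stge_attach() and stge_ioctl()).
 */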
80 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
81 
82 /* "device miibus" required.  See GENERIC if you get errors here. */
83 #include "miibus_if.h"
84 
85 /*
86  * Devices supported by this driver.
87  */
88 static struct stge_product {
89 	uint16_t	stge_vendorid;
90 	uint16_t	stge_deviceid;
91 	const char	*stge_name;
92 } stge_products[] = {
93 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
94 	  "Sundance ST-1023 Gigabit Ethernet" },
95 
96 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
97 	  "Sundance ST-2021 Gigabit Ethernet" },
98 
99 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
100 	  "Tamarack TC9021 Gigabit Ethernet" },
101 
102 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
103 	  "Tamarack TC9021 Gigabit Ethernet" },
104 
105 	/*
106 	 * The Sundance sample boards use the Sundance vendor ID,
107 	 * but the Tamarack product ID.
108 	 */
109 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
110 	  "Sundance TC9021 Gigabit Ethernet" },
111 
112 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
113 	  "Sundance TC9021 Gigabit Ethernet" },
114 
115 	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
116 	  "D-Link DL-2000 Gigabit Ethernet" },
117 
118 	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
119 	  "Antares Gigabit Ethernet" },
120 
121 	{ 0, 0, NULL }
122 };
123 
124 static int	stge_probe(device_t);
125 static int	stge_attach(device_t);
126 static int	stge_detach(device_t);
127 static void	stge_shutdown(device_t);
128 static int	stge_suspend(device_t);
129 static int	stge_resume(device_t);
130 
131 static int	stge_encap(struct stge_softc *, struct mbuf **);
132 static void	stge_start(struct ifnet *);
133 static void	stge_watchdog(struct ifnet *);
134 static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
135 static void	stge_init(void *);
136 static void	stge_vlan_setup(struct stge_softc *);
137 static void	stge_stop(struct stge_softc *);
138 static void	stge_start_tx(struct stge_softc *);
139 static void	stge_start_rx(struct stge_softc *);
140 static void	stge_stop_tx(struct stge_softc *);
141 static void	stge_stop_rx(struct stge_softc *);
142 
143 static void	stge_reset(struct stge_softc *, uint32_t);
144 static int	stge_eeprom_wait(struct stge_softc *);
145 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
146 static void	stge_tick(void *);
147 static void	stge_stats_update(struct stge_softc *);
148 static void	stge_set_filter(struct stge_softc *);
149 static void	stge_set_multi(struct stge_softc *);
150 
151 static void	stge_link(struct stge_softc *);
152 static void	stge_intr(void *);
153 static __inline int stge_tx_error(struct stge_softc *);
154 static void	stge_txeof(struct stge_softc *);
155 static void	stge_rxeof(struct stge_softc *, int);
156 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
157 static int	stge_newbuf(struct stge_softc *, int, int);
158 #ifndef __i386__
159 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
160 #endif
161 
162 static void	stge_mii_sync(struct stge_softc *);
163 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
164 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
165 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
166 static int	stge_miibus_readreg(device_t, int, int);
167 static int	stge_miibus_writereg(device_t, int, int, int);
168 static void	stge_miibus_statchg(device_t);
169 static int	stge_mediachange(struct ifnet *);
170 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
171 
172 static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
173 static void	stge_mbuf_dmamap_cb(void *, bus_dma_segment_t *, int,
174 				    bus_size_t, int);
175 static int	stge_dma_alloc(struct stge_softc *);
176 static void	stge_dma_free(struct stge_softc *);
177 static void	stge_dma_wait(struct stge_softc *);
178 static void	stge_init_tx_ring(struct stge_softc *);
179 static int	stge_init_rx_ring(struct stge_softc *);
180 #ifdef DEVICE_POLLING
181 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
182 #endif
183 
184 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
185 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
186 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
187 
188 static device_method_t stge_methods[] = {
189 	/* Device interface */
190 	DEVMETHOD(device_probe,		stge_probe),
191 	DEVMETHOD(device_attach,	stge_attach),
192 	DEVMETHOD(device_detach,	stge_detach),
193 	DEVMETHOD(device_shutdown,	stge_shutdown),
194 	DEVMETHOD(device_suspend,	stge_suspend),
195 	DEVMETHOD(device_resume,	stge_resume),
196 
197 	/* MII interface */
198 	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
199 	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
200 	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),
201 
202 	{ 0, 0 }
204 };
205 
206 static driver_t stge_driver = {
207 	"stge",
208 	stge_methods,
209 	sizeof(struct stge_softc)
210 };
211 
212 static devclass_t stge_devclass;
213 
214 DECLARE_DUMMY_MODULE(if_stge);
215 MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
216 DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, 0, 0);
217 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
218 
219 #define	MII_SET(x)	\
220 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
221 #define	MII_CLR(x)	\
222 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
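
/*
 * MII management (MDIO) is bit-banged through the PhyCtrl register:
 * PC_MgmtClk drives the MDC clock, PC_MgmtData drives or samples the
 * MDIO line, and PC_MgmtDir selects whether we are driving it.
 */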
223 
224 /*
225  * Sync the PHYs by setting data bit and strobing the clock 32 times.
226  */
227 static void
228 stge_mii_sync(struct stge_softc *sc)
229 {
230 	int i;
231 
232 	MII_SET(PC_MgmtDir | PC_MgmtData);
233 
234 	for (i = 0; i < 32; i++) {
235 		MII_SET(PC_MgmtClk);
236 		DELAY(1);
237 		MII_CLR(PC_MgmtClk);
238 		DELAY(1);
239 	}
240 }
241 
242 /*
243  * Clock a series of bits through the MII.
244  */
245 static void
246 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
247 {
248 	int i;
249 
250 	MII_CLR(PC_MgmtClk);
251 
252 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
253 		if (bits & i)
254 			MII_SET(PC_MgmtData);
255 		else
256 			MII_CLR(PC_MgmtData);
257 		DELAY(1);
258 		MII_CLR(PC_MgmtClk);
259 		DELAY(1);
260 		MII_SET(PC_MgmtClk);
261 	}
262 }
263 
264 /*
265  * Read a PHY register through the MII.
266  */
267 static int
268 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
269 {
270 	int i, ack;
271 
272 	/*
273 	 * Set up frame for RX.
274 	 */
275 	frame->mii_stdelim = STGE_MII_STARTDELIM;
276 	frame->mii_opcode = STGE_MII_READOP;
277 	frame->mii_turnaround = 0;
278 	frame->mii_data = 0;
279 
280 	CSR_WRITE_1(sc, STGE_PhyCtrl, sc->sc_PhyCtrl);
281 	/*
282 	 * Turn on data xmit.
283 	 */
284 	MII_SET(PC_MgmtDir);
285 
286 	stge_mii_sync(sc);
287 
288 	/*
289 	 * Send command/address info.
290 	 */
291 	stge_mii_send(sc, frame->mii_stdelim, 2);
292 	stge_mii_send(sc, frame->mii_opcode, 2);
293 	stge_mii_send(sc, frame->mii_phyaddr, 5);
294 	stge_mii_send(sc, frame->mii_regaddr, 5);
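	/*
	 * Together these form an IEEE 802.3 clause-22 management frame:
	 * a 2-bit start delimiter, 2-bit opcode, 5-bit PHY address and
	 * 5-bit register address.
	 */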
295 
296 	/* Turn off xmit. */
297 	MII_CLR(PC_MgmtDir);
298 
299 	/* Idle bit */
300 	MII_CLR((PC_MgmtClk | PC_MgmtData));
301 	DELAY(1);
302 	MII_SET(PC_MgmtClk);
303 	DELAY(1);
304 
305 	/* Check for ack */
306 	MII_CLR(PC_MgmtClk);
307 	DELAY(1);
308 	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
309 	MII_SET(PC_MgmtClk);
310 	DELAY(1);
311 
312 	/*
313 	 * Now try reading data bits. If the ack failed, we still
314 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
315 	 */
316 	if (ack) {
317 		for(i = 0; i < 16; i++) {
318 			MII_CLR(PC_MgmtClk);
319 			DELAY(1);
320 			MII_SET(PC_MgmtClk);
321 			DELAY(1);
322 		}
323 		goto fail;
324 	}
325 
326 	for (i = 0x8000; i; i >>= 1) {
327 		MII_CLR(PC_MgmtClk);
328 		DELAY(1);
329 		if (!ack) {
330 			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
331 				frame->mii_data |= i;
332 			DELAY(1);
333 		}
334 		MII_SET(PC_MgmtClk);
335 		DELAY(1);
336 	}
337 
338 fail:
339 	MII_CLR(PC_MgmtClk);
340 	DELAY(1);
341 	MII_SET(PC_MgmtClk);
342 	DELAY(1);
343 
344 	if (ack)
345 		return(1);
346 	return(0);
347 }
348 
349 /*
350  * Write to a PHY register through the MII.
351  */
352 static int
353 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
354 {
355 
356 	/*
357 	 * Set up frame for TX.
358 	 */
359 	frame->mii_stdelim = STGE_MII_STARTDELIM;
360 	frame->mii_opcode = STGE_MII_WRITEOP;
361 	frame->mii_turnaround = STGE_MII_TURNAROUND;
362 
363 	/*
364 	 * Turn on data output.
365 	 */
366 	MII_SET(PC_MgmtDir);
367 
368 	stge_mii_sync(sc);
369 
370 	stge_mii_send(sc, frame->mii_stdelim, 2);
371 	stge_mii_send(sc, frame->mii_opcode, 2);
372 	stge_mii_send(sc, frame->mii_phyaddr, 5);
373 	stge_mii_send(sc, frame->mii_regaddr, 5);
374 	stge_mii_send(sc, frame->mii_turnaround, 2);
375 	stge_mii_send(sc, frame->mii_data, 16);
376 
377 	/* Idle bit. */
378 	MII_SET(PC_MgmtClk);
379 	DELAY(1);
380 	MII_CLR(PC_MgmtClk);
381 	DELAY(1);
382 
383 	/*
384 	 * Turn off xmit.
385 	 */
386 	MII_CLR(PC_MgmtDir);
387 
388 	return(0);
389 }
390 
391 /*
392  * sc_miibus_readreg:	[mii interface function]
393  *
394  *	Read a PHY register on the MII of the TC9021.
395  */
396 static int
397 stge_miibus_readreg(device_t dev, int phy, int reg)
398 {
399 	struct stge_softc *sc;
400 	struct stge_mii_frame frame;
401 	int error;
402 
403 	sc = device_get_softc(dev);
404 
405 	if (reg == STGE_PhyCtrl) {
406 		/* XXX allow ip1000phy read STGE_PhyCtrl register. */
407 		error = CSR_READ_1(sc, STGE_PhyCtrl);
408 		return (error);
409 	}
410 	bzero(&frame, sizeof(frame));
411 	frame.mii_phyaddr = phy;
412 	frame.mii_regaddr = reg;
413 
414 	error = stge_mii_readreg(sc, &frame);
415 
416 	if (error != 0) {
417 		/* Don't show errors for a PHY probe request */
418 		if (reg != 1)
419 			device_printf(sc->sc_dev, "phy read fail\n");
420 		return (0);
421 	}
422 	return (frame.mii_data);
423 }
424 
425 /*
426  * stge_miibus_writereg:	[mii interface function]
427  *
428  *	Write a PHY register on the MII of the TC9021.
429  */
430 static int
431 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
432 {
433 	struct stge_softc *sc;
434 	struct stge_mii_frame frame;
435 	int error;
436 
437 	sc = device_get_softc(dev);
438 
439 	bzero(&frame, sizeof(frame));
440 	frame.mii_phyaddr = phy;
441 	frame.mii_regaddr = reg;
442 	frame.mii_data = val;
443 
444 	error = stge_mii_writereg(sc, &frame);
445 
446 	if (error != 0)
447 		device_printf(sc->sc_dev, "phy write fail\n");
448 	return (0);
449 }
450 
451 /*
452  * stge_miibus_statchg:	[mii interface function]
453  *
454  *	Callback from MII layer when media changes.
455  */
456 static void
457 stge_miibus_statchg(device_t dev)
458 {
459 	struct stge_softc *sc;
460 	struct mii_data *mii;
461 
462 	sc = device_get_softc(dev);
463 	mii = device_get_softc(sc->sc_miibus);
464 
465 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
466 		return;
467 
468 	sc->sc_MACCtrl = 0;
469 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
470 		sc->sc_MACCtrl |= MC_DuplexSelect;
471 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
472 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
473 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
474 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
475 
476 	stge_link(sc);
477 }
478 
479 /*
480  * stge_mediastatus:	[ifmedia interface function]
481  *
482  *	Get the current interface media status.
483  */
484 static void
485 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
486 {
487 	struct stge_softc *sc;
488 	struct mii_data *mii;
489 
490 	sc = ifp->if_softc;
491 	mii = device_get_softc(sc->sc_miibus);
492 
493 	mii_pollstat(mii);
494 	ifmr->ifm_status = mii->mii_media_status;
495 	ifmr->ifm_active = mii->mii_media_active;
496 }
497 
498 /*
499  * stge_mediachange:	[ifmedia interface function]
500  *
501  *	Set hardware to newly-selected media.
502  */
503 static int
504 stge_mediachange(struct ifnet *ifp)
505 {
506 	struct stge_softc *sc;
507 	struct mii_data *mii;
508 
509 	sc = ifp->if_softc;
510 	mii = device_get_softc(sc->sc_miibus);
511 	mii_mediachg(mii);
512 
513 	return (0);
514 }
515 
516 static int
517 stge_eeprom_wait(struct stge_softc *sc)
518 {
519 	int i;
520 
521 	for (i = 0; i < STGE_TIMEOUT; i++) {
522 		DELAY(1000);
523 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
524 			return (0);
525 	}
526 	return (1);
527 }
528 
529 /*
530  * stge_read_eeprom:
531  *
532  *	Read data from the serial EEPROM.
533  */
534 static void
535 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
536 {
537 
538 	if (stge_eeprom_wait(sc))
539 		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
540 
541 	CSR_WRITE_2(sc, STGE_EepromCtrl,
542 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
543 	if (stge_eeprom_wait(sc))
544 		device_printf(sc->sc_dev, "EEPROM read timed out\n");
545 	*data = CSR_READ_2(sc, STGE_EepromData);
546 }
547 
549 static int
550 stge_probe(device_t dev)
551 {
552 	struct stge_product *sp;
553 	uint16_t vendor, devid;
554 
555 	vendor = pci_get_vendor(dev);
556 	devid = pci_get_device(dev);
557 
558 	for (sp = stge_products; sp->stge_name != NULL; sp++) {
559 		if (vendor == sp->stge_vendorid &&
560 		    devid == sp->stge_deviceid) {
561 			device_set_desc(dev, sp->stge_name);
562 			return (0);
563 		}
564 	}
565 
566 	return (ENXIO);
567 }
568 
569 static int
570 stge_attach(device_t dev)
571 {
572 	struct stge_softc *sc;
573 	struct ifnet *ifp;
574 	uint8_t enaddr[ETHER_ADDR_LEN];
575 	int error, i;
576 	uint16_t cmd;
577 	uint32_t val;
578 
579 	error = 0;
580 	sc = device_get_softc(dev);
581 	sc->sc_dev = dev;
582 	ifp = &sc->arpcom.ac_if;
583 
584 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
585 
586 	callout_init(&sc->sc_tick_ch);
587 
588 #ifndef BURN_BRIDGES
589 	/*
590 	 * Handle power management nonsense.
591 	 */
592 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
593 		uint32_t iobase, membase, irq;
594 
595 		/* Save important PCI config data. */
596 		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
597 		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
598 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
599 
600 		/* Reset the power state. */
601 		device_printf(dev, "chip is in D%d power mode "
602 			      "-- setting to D0\n", pci_get_powerstate(dev));
603 
604 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
605 
606 		/* Restore PCI config data. */
607 		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
608 		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
609 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
610 	}
611 #endif
612 
613 	/*
614 	 * Map the device.
615 	 */
616 	pci_enable_busmaster(dev);
617 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
618 	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
619 
620 	if ((val & 0x01) != 0) {
621 		sc->sc_res_rid = STGE_PCIR_LOMEM;
622 		sc->sc_res_type = SYS_RES_MEMORY;
623 	} else {
624 		sc->sc_res_rid = STGE_PCIR_LOIO;
625 		sc->sc_res_type = SYS_RES_IOPORT;
626 
627 		val = pci_read_config(dev, sc->sc_res_rid, 4);
628 		if ((val & 0x01) == 0) {
629 			device_printf(dev, "couldn't locate IO BAR\n");
630 			return ENXIO;
631 		}
632 	}
633 
634 	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
635 					    &sc->sc_res_rid, RF_ACTIVE);
636 	if (sc->sc_res == NULL) {
637 		device_printf(dev, "couldn't allocate resource\n");
638 		return ENXIO;
639 	}
640 	sc->sc_btag = rman_get_bustag(sc->sc_res);
641 	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);
642 
643 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
644 					    &sc->sc_irq_rid,
645 					    RF_ACTIVE | RF_SHAREABLE);
646 	if (sc->sc_irq == NULL) {
647 		device_printf(dev, "couldn't allocate IRQ\n");
648 		error = ENXIO;
649 		goto fail;
650 	}
651 
652 	sc->sc_rev = pci_get_revid(dev);
653 
654 	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
655 	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
656 
657 	sysctl_ctx_init(&sc->sc_sysctl_ctx);
658 	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
659 					     SYSCTL_STATIC_CHILDREN(_hw),
660 					     OID_AUTO,
661 					     device_get_nameunit(dev),
662 					     CTLFLAG_RD, 0, "");
663 	if (sc->sc_sysctl_tree == NULL) {
664 		device_printf(dev, "can't add sysctl node\n");
665 		error = ENXIO;
666 		goto fail;
667 	}
668 
669 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
670 	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
671 	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
672 	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
673 
674 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
675 	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
676 	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
677 	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
678 
679 	if ((error = stge_dma_alloc(sc)) != 0)
680 		goto fail;
681 
682 	/*
683 	 * Determine if we're copper or fiber.  It affects how we
684 	 * reset the card.
685 	 */
686 	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
687 		sc->sc_usefiber = 1;
688 	else
689 		sc->sc_usefiber = 0;
690 
691 	/* Load LED configuration from EEPROM. */
692 	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
693 
694 	/*
695 	 * Reset the chip to a known state.
696 	 */
697 	stge_reset(sc, STGE_RESET_FULL);
698 
699 	/*
700 	 * Reading the station address from the EEPROM doesn't seem
701 	 * to work, at least on my sample boards.  Instead, since
702 	 * the reset sequence does AutoInit, read it from the station
703 	 * address registers.  For the Sundance ST-1023 the address can
704 	 * only be read from the EEPROM.
705 	 */
706 	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
707 		uint16_t v;
708 
709 		v = CSR_READ_2(sc, STGE_StationAddress0);
710 		enaddr[0] = v & 0xff;
711 		enaddr[1] = v >> 8;
712 		v = CSR_READ_2(sc, STGE_StationAddress1);
713 		enaddr[2] = v & 0xff;
714 		enaddr[3] = v >> 8;
715 		v = CSR_READ_2(sc, STGE_StationAddress2);
716 		enaddr[4] = v & 0xff;
717 		enaddr[5] = v >> 8;
718 		sc->sc_stge1023 = 0;
719 	} else {
720 		uint16_t myaddr[ETHER_ADDR_LEN / 2];
721 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
722 			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
723 			    &myaddr[i]);
724 			myaddr[i] = le16toh(myaddr[i]);
725 		}
726 		bcopy(myaddr, enaddr, sizeof(enaddr));
727 		sc->sc_stge1023 = 1;
728 	}
729 
730 	ifp->if_softc = sc;
731 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
732 	ifp->if_ioctl = stge_ioctl;
733 	ifp->if_start = stge_start;
734 	ifp->if_watchdog = stge_watchdog;
735 	ifp->if_init = stge_init;
736 #ifdef DEVICE_POLLING
737 	ifp->if_poll = stge_poll;
738 #endif
739 	ifp->if_mtu = ETHERMTU;
740 	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
741 	ifq_set_ready(&ifp->if_snd);
742 	/* Revision B3 and earlier chips have a checksum bug. */
743 	if (sc->sc_rev >= 0x0c) {
744 		ifp->if_hwassist = STGE_CSUM_FEATURES;
745 		ifp->if_capabilities = IFCAP_HWCSUM;
746 	} else {
747 		ifp->if_hwassist = 0;
748 		ifp->if_capabilities = 0;
749 	}
750 	ifp->if_capenable = ifp->if_capabilities;
751 
752 	/*
753 	 * Read some important bits from the PhyCtrl register.
754 	 */
755 	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
756 	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
757 
758 	/* Set up MII bus. */
759 	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
760 	    stge_mediastatus)) != 0) {
761 		device_printf(sc->sc_dev, "no PHY found!\n");
762 		goto fail;
763 	}
764 
765 	ether_ifattach(ifp, enaddr, NULL);
766 
767 	/* VLAN capability setup */
768 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
769 #ifdef notyet
770 	if (sc->sc_rev >= 0x0c)
771 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
772 #endif
773 	ifp->if_capenable = ifp->if_capabilities;
774 
775 	/*
776 	 * Tell the upper layer(s) we support long frames.
777 	 * Must appear after the call to ether_ifattach() because
778 	 * ether_ifattach() sets ifi_hdrlen to the default value.
779 	 */
780 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
781 
782 	/*
783 	 * The manual recommends disabling early transmit, so we
784 	 * do.  It's disabled anyway, if using IP checksumming,
785 	 * since the entire packet must be in the FIFO in order
786 	 * for the chip to perform the checksum.
787 	 */
788 	sc->sc_txthresh = 0x0fff;
789 
790 	/*
791 	 * Disable MWI if the PCI layer tells us to.
792 	 */
793 	sc->sc_DMACtrl = 0;
794 	if ((cmd & PCIM_CMD_MWRICEN) == 0)
795 		sc->sc_DMACtrl |= DMAC_MWIDisable;
796 
797 	/*
798 	 * Hookup IRQ
799 	 */
800 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
801 			       &sc->sc_ih, ifp->if_serializer);
802 	if (error != 0) {
803 		ether_ifdetach(ifp);
804 		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
805 		goto fail;
806 	}
807 
808 fail:
809 	if (error != 0)
810 		stge_detach(dev);
811 
812 	return (error);
813 }
814 
815 static int
816 stge_detach(device_t dev)
817 {
818 	struct stge_softc *sc = device_get_softc(dev);
819 	struct ifnet *ifp = &sc->arpcom.ac_if;
820 
821 	if (device_is_attached(dev)) {
822 		lwkt_serialize_enter(ifp->if_serializer);
823 		/* XXX */
824 		sc->sc_detach = 1;
825 		stge_stop(sc);
826 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
827 		lwkt_serialize_exit(ifp->if_serializer);
828 
829 		ether_ifdetach(ifp);
830 	}
831 
832 	if (sc->sc_sysctl_tree != NULL)
833 		sysctl_ctx_free(&sc->sc_sysctl_ctx);
834 
835 	if (sc->sc_miibus != NULL)
836 		device_delete_child(dev, sc->sc_miibus);
837 	bus_generic_detach(dev);
838 
839 	stge_dma_free(sc);
840 
841 	if (sc->sc_irq != NULL) {
842 		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
843 				     sc->sc_irq);
844 	}
845 	if (sc->sc_res != NULL) {
846 		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
847 				     sc->sc_res);
848 	}
849 
850 	return (0);
851 }
852 
853 struct stge_dmamap_arg {
854 	bus_addr_t	stge_busaddr;
855 };
856 
857 static void
858 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
859 {
860 	struct stge_dmamap_arg *ctx;
861 
862 	if (error != 0)
863 		return;
864 
865 	KASSERT(nseg == 1, ("too many segments %d\n", nseg));
866 
867 	ctx = (struct stge_dmamap_arg *)arg;
868 	ctx->stge_busaddr = segs[0].ds_addr;
869 }
870 
871 struct stge_mbuf_dmamap_arg {
872 	int			nsegs;
873 	bus_dma_segment_t	*segs;
874 };
875 
876 static void
877 stge_mbuf_dmamap_cb(void *xarg, bus_dma_segment_t *segs, int nsegs,
878 		    bus_size_t mapsz __unused, int error)
879 {
880 	struct stge_mbuf_dmamap_arg *arg = xarg;
881 	int i;
882 
883 	if (error) {
884 		arg->nsegs = 0;
885 		return;
886 	}
887 
888 	KASSERT(nsegs <= arg->nsegs,
889 		("too many segments(%d), should be <= %d\n",
890 		 nsegs, arg->nsegs));
891 
892 	arg->nsegs = nsegs;
893 	for (i = 0; i < nsegs; ++i)
894 		arg->segs[i] = segs[i];
895 }
896 
897 static int
898 stge_dma_alloc(struct stge_softc *sc)
899 {
900 	struct stge_dmamap_arg ctx;
901 	struct stge_txdesc *txd;
902 	struct stge_rxdesc *rxd;
903 	int error, i;
904 
905 	/* create parent tag. */
906 	error = bus_dma_tag_create(NULL,	/* parent */
907 		    1, 0,			/* algnmnt, boundary */
908 		    STGE_DMA_MAXADDR,		/* lowaddr */
909 		    BUS_SPACE_MAXADDR,		/* highaddr */
910 		    NULL, NULL,			/* filter, filterarg */
911 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
912 		    0,				/* nsegments */
913 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
914 		    0,				/* flags */
915 		    &sc->sc_cdata.stge_parent_tag);
916 	if (error != 0) {
917 		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
918 		goto fail;
919 	}
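	/*
	 * All of the ring and buffer tags below derive from this parent,
	 * so every allocation inherits the STGE_DMA_MAXADDR address limit.
	 */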
920 	/* create tag for Tx ring. */
921 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
922 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
923 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
924 		    BUS_SPACE_MAXADDR,		/* highaddr */
925 		    NULL, NULL,			/* filter, filterarg */
926 		    STGE_TX_RING_SZ,		/* maxsize */
927 		    1,				/* nsegments */
928 		    STGE_TX_RING_SZ,		/* maxsegsize */
929 		    0,				/* flags */
930 		    &sc->sc_cdata.stge_tx_ring_tag);
931 	if (error != 0) {
932 		device_printf(sc->sc_dev,
933 		    "failed to allocate Tx ring DMA tag\n");
934 		goto fail;
935 	}
936 
937 	/* create tag for Rx ring. */
938 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
939 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
940 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
941 		    BUS_SPACE_MAXADDR,		/* highaddr */
942 		    NULL, NULL,			/* filter, filterarg */
943 		    STGE_RX_RING_SZ,		/* maxsize */
944 		    1,				/* nsegments */
945 		    STGE_RX_RING_SZ,		/* maxsegsize */
946 		    0,				/* flags */
947 		    &sc->sc_cdata.stge_rx_ring_tag);
948 	if (error != 0) {
949 		device_printf(sc->sc_dev,
950 		    "failed to allocate Rx ring DMA tag\n");
951 		goto fail;
952 	}
953 
954 	/* create tag for Tx buffers. */
955 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
956 		    1, 0,			/* algnmnt, boundary */
957 		    BUS_SPACE_MAXADDR,		/* lowaddr */
958 		    BUS_SPACE_MAXADDR,		/* highaddr */
959 		    NULL, NULL,			/* filter, filterarg */
960 		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
961 		    STGE_MAXTXSEGS,		/* nsegments */
962 		    MCLBYTES,			/* maxsegsize */
963 		    0,				/* flags */
964 		    &sc->sc_cdata.stge_tx_tag);
965 	if (error != 0) {
966 		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
967 		goto fail;
968 	}
969 
970 	/* create tag for Rx buffers. */
971 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
972 		    1, 0,			/* algnmnt, boundary */
973 		    BUS_SPACE_MAXADDR,		/* lowaddr */
974 		    BUS_SPACE_MAXADDR,		/* highaddr */
975 		    NULL, NULL,			/* filter, filterarg */
976 		    MCLBYTES,			/* maxsize */
977 		    1,				/* nsegments */
978 		    MCLBYTES,			/* maxsegsize */
979 		    0,				/* flags */
980 		    &sc->sc_cdata.stge_rx_tag);
981 	if (error != 0) {
982 		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
983 		goto fail;
984 	}
985 
986 	/* allocate DMA'able memory and load the DMA map for Tx ring. */
987 	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
988 	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
989 	    &sc->sc_cdata.stge_tx_ring_map);
990 	if (error != 0) {
991 		device_printf(sc->sc_dev,
992 		    "failed to allocate DMA'able memory for Tx ring\n");
993 		goto fail;
994 	}
995 
996 	ctx.stge_busaddr = 0;
997 	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
998 	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
999 	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1000 	if (error != 0 || ctx.stge_busaddr == 0) {
1001 		device_printf(sc->sc_dev,
1002 		    "failed to load DMA'able memory for Tx ring\n");
1003 		goto fail;
1004 	}
1005 	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
1006 
1007 	/* allocate DMA'able memory and load the DMA map for Rx ring. */
1008 	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
1009 	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1010 	    &sc->sc_cdata.stge_rx_ring_map);
1011 	if (error != 0) {
1012 		device_printf(sc->sc_dev,
1013 		    "failed to allocate DMA'able memory for Rx ring\n");
1014 		goto fail;
1015 	}
1016 
1017 	ctx.stge_busaddr = 0;
1018 	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
1019 	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
1020 	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1021 	if (error != 0 || ctx.stge_busaddr == 0) {
1022 		device_printf(sc->sc_dev,
1023 		    "failed to load DMA'able memory for Rx ring\n");
1024 		goto fail;
1025 	}
1026 	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
1027 
1028 	/* create DMA maps for Tx buffers. */
1029 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
1030 		txd = &sc->sc_cdata.stge_txdesc[i];
1031 		txd->tx_m = NULL;
1032 		txd->tx_dmamap = 0;
1033 		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
1034 		    &txd->tx_dmamap);
1035 		if (error != 0) {
1036 			device_printf(sc->sc_dev,
1037 			    "failed to create Tx dmamap\n");
1038 			goto fail;
1039 		}
1040 	}
1041 	/* create DMA maps for Rx buffers. */
1042 	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1043 	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
1044 		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
1045 		goto fail;
1046 	}
1047 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
1048 		rxd = &sc->sc_cdata.stge_rxdesc[i];
1049 		rxd->rx_m = NULL;
1050 		rxd->rx_dmamap = 0;
1051 		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1052 		    &rxd->rx_dmamap);
1053 		if (error != 0) {
1054 			device_printf(sc->sc_dev,
1055 			    "failed to create Rx dmamap\n");
1056 			goto fail;
1057 		}
1058 	}
1059 
1060 fail:
1061 	return (error);
1062 }
1063 
1064 static void
1065 stge_dma_free(struct stge_softc *sc)
1066 {
1067 	struct stge_txdesc *txd;
1068 	struct stge_rxdesc *rxd;
1069 	int i;
1070 
1071 	/* Tx ring */
1072 	if (sc->sc_cdata.stge_tx_ring_tag) {
1073 		if (sc->sc_cdata.stge_tx_ring_map)
1074 			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
1075 			    sc->sc_cdata.stge_tx_ring_map);
1076 		if (sc->sc_cdata.stge_tx_ring_map &&
1077 		    sc->sc_rdata.stge_tx_ring)
1078 			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
1079 			    sc->sc_rdata.stge_tx_ring,
1080 			    sc->sc_cdata.stge_tx_ring_map);
1081 		sc->sc_rdata.stge_tx_ring = NULL;
1082 		sc->sc_cdata.stge_tx_ring_map = 0;
1083 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
1084 		sc->sc_cdata.stge_tx_ring_tag = NULL;
1085 	}
1086 	/* Rx ring */
1087 	if (sc->sc_cdata.stge_rx_ring_tag) {
1088 		if (sc->sc_cdata.stge_rx_ring_map)
1089 			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
1090 			    sc->sc_cdata.stge_rx_ring_map);
1091 		if (sc->sc_cdata.stge_rx_ring_map &&
1092 		    sc->sc_rdata.stge_rx_ring)
1093 			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
1094 			    sc->sc_rdata.stge_rx_ring,
1095 			    sc->sc_cdata.stge_rx_ring_map);
1096 		sc->sc_rdata.stge_rx_ring = NULL;
1097 		sc->sc_cdata.stge_rx_ring_map = 0;
1098 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
1099 		sc->sc_cdata.stge_rx_ring_tag = NULL;
1100 	}
1101 	/* Tx buffers */
1102 	if (sc->sc_cdata.stge_tx_tag) {
1103 		for (i = 0; i < STGE_TX_RING_CNT; i++) {
1104 			txd = &sc->sc_cdata.stge_txdesc[i];
1105 			if (txd->tx_dmamap) {
1106 				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
1107 				    txd->tx_dmamap);
1108 				txd->tx_dmamap = 0;
1109 			}
1110 		}
1111 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
1112 		sc->sc_cdata.stge_tx_tag = NULL;
1113 	}
1114 	/* Rx buffers */
1115 	if (sc->sc_cdata.stge_rx_tag) {
1116 		for (i = 0; i < STGE_RX_RING_CNT; i++) {
1117 			rxd = &sc->sc_cdata.stge_rxdesc[i];
1118 			if (rxd->rx_dmamap) {
1119 				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1120 				    rxd->rx_dmamap);
1121 				rxd->rx_dmamap = 0;
1122 			}
1123 		}
1124 		if (sc->sc_cdata.stge_rx_sparemap) {
1125 			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1126 			    sc->sc_cdata.stge_rx_sparemap);
1127 			sc->sc_cdata.stge_rx_sparemap = 0;
1128 		}
1129 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
1130 		sc->sc_cdata.stge_rx_tag = NULL;
1131 	}
1132 
1133 	if (sc->sc_cdata.stge_parent_tag) {
1134 		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
1135 		sc->sc_cdata.stge_parent_tag = NULL;
1136 	}
1137 }
1138 
1139 /*
1140  * stge_shutdown:
1141  *
1142  *	Make sure the interface is stopped at reboot time.
1143  */
1144 static void
1145 stge_shutdown(device_t dev)
1146 {
1147 	struct stge_softc *sc = device_get_softc(dev);
1148 	struct ifnet *ifp = &sc->arpcom.ac_if;
1149 
1150 	lwkt_serialize_enter(ifp->if_serializer);
1151 	stge_stop(sc);
1152 	lwkt_serialize_exit(ifp->if_serializer);
1153 }
1154 
1155 static int
1156 stge_suspend(device_t dev)
1157 {
1158 	struct stge_softc *sc = device_get_softc(dev);
1159 	struct ifnet *ifp = &sc->arpcom.ac_if;
1160 
1161 	lwkt_serialize_enter(ifp->if_serializer);
1162 	stge_stop(sc);
1163 	sc->sc_suspended = 1;
1164 	lwkt_serialize_exit(ifp->if_serializer);
1165 
1166 	return (0);
1167 }
1168 
1169 static int
1170 stge_resume(device_t dev)
1171 {
1172 	struct stge_softc *sc = device_get_softc(dev);
1173 	struct ifnet *ifp = &sc->arpcom.ac_if;
1174 
1175 	lwkt_serialize_enter(ifp->if_serializer);
1176 	if (ifp->if_flags & IFF_UP)
1177 		stge_init(sc);
1178 	sc->sc_suspended = 0;
1179 	lwkt_serialize_exit(ifp->if_serializer);
1180 
1181 	return (0);
1182 }
1183 
1184 static void
1185 stge_dma_wait(struct stge_softc *sc)
1186 {
1187 	int i;
1188 
1189 	for (i = 0; i < STGE_TIMEOUT; i++) {
1190 		DELAY(2);
1191 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1192 			break;
1193 	}
1194 
1195 	if (i == STGE_TIMEOUT)
1196 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1197 }
1198 
1199 static int
1200 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1201 {
1202 	struct stge_txdesc *txd;
1203 	struct stge_tfd *tfd;
1204 	struct mbuf *m;
1205 	struct stge_mbuf_dmamap_arg arg;
1206 	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1207 	int error, i, si;
1208 	uint64_t csum_flags, tfc;
1209 
1210 	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1211 		return (ENOBUFS);
1212 
1213 	arg.nsegs = STGE_MAXTXSEGS;
1214 	arg.segs = txsegs;
1215 	error = bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
1216 				      txd->tx_dmamap, *m_head,
1217 				      stge_mbuf_dmamap_cb, &arg,
1218 				      BUS_DMA_NOWAIT);
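	/*
	 * EFBIG means the mbuf chain maps to more than STGE_MAXTXSEGS
	 * segments; m_defrag() compacts the chain so the load can be
	 * retried once.
	 */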
1219 	if (error == EFBIG) {
1220 		m = m_defrag(*m_head, MB_DONTWAIT);
1221 		if (m == NULL) {
1222 			m_freem(*m_head);
1223 			*m_head = NULL;
1224 			return (ENOMEM);
1225 		}
1226 		*m_head = m;
1227 		error = bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
1228 					      txd->tx_dmamap, *m_head,
1229 					      stge_mbuf_dmamap_cb, &arg,
1230 					      BUS_DMA_NOWAIT);
1231 		if (error != 0) {
1232 			m_freem(*m_head);
1233 			*m_head = NULL;
1234 			return (error);
1235 		}
1236 	} else if (error != 0)
1237 		return (error);
1238 	if (arg.nsegs == 0) {
1239 		m_freem(*m_head);
1240 		*m_head = NULL;
1241 		return (EIO);
1242 	}
1243 
1244 	m = *m_head;
1245 	csum_flags = 0;
1246 	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1247 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1248 			csum_flags |= TFD_IPChecksumEnable;
1249 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1250 			csum_flags |= TFD_TCPChecksumEnable;
1251 		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1252 			csum_flags |= TFD_UDPChecksumEnable;
1253 	}
1254 
1255 	si = sc->sc_cdata.stge_tx_prod;
1256 	tfd = &sc->sc_rdata.stge_tx_ring[si];
1257 	for (i = 0; i < arg.nsegs; i++) {
1258 		tfd->tfd_frags[i].frag_word0 =
1259 		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1260 		    FRAG_LEN(txsegs[i].ds_len));
1261 	}
1262 	sc->sc_cdata.stge_tx_cnt++;
1263 
1264 	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1265 	    TFD_FragCount(arg.nsegs) | csum_flags;
1266 	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1267 		tfc |= TFD_TxDMAIndicate;
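	/*
	 * The Tx DMA indication above is only requested once the ring has
	 * filled past STGE_TX_HIWAT; completed descriptors are otherwise
	 * reaped from stge_tick(), since we don't get a Tx completion
	 * interrupt for every frame.
	 */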
1268 
1269 	/* Update producer index. */
1270 	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1271 
1272 	/* Check if we have a VLAN tag to insert. */
1273 	if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1274 	    m->m_pkthdr.rcvif != NULL &&
1275 	    m->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
1276 		struct ifvlan *ifv;
1277 
1278 		ifv = m->m_pkthdr.rcvif->if_softc;
1279 		if (ifv != NULL)
1280 			tfc |= TFD_VLANTagInsert | TFD_VID(ifv->ifv_tag);
1281 	}
1282 	tfd->tfd_control = htole64(tfc);
1283 
1284 	/* Update Tx Queue. */
1285 	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1286 	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1287 	txd->tx_m = m;
1288 
1289 	/* Sync descriptors. */
1290 	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1291 	    BUS_DMASYNC_PREWRITE);
1292 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1293 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1294 
1295 	return (0);
1296 }
1297 
1298 /*
1299  * stge_start:		[ifnet interface function]
1300  *
1301  *	Start packet transmission on the interface.
1302  */
1303 static void
1304 stge_start(struct ifnet *ifp)
1305 {
1306 	struct stge_softc *sc;
1307 	struct mbuf *m_head;
1308 	int enq;
1309 
1310 	sc = ifp->if_softc;
1311 
1312 	ASSERT_SERIALIZED(ifp->if_serializer);
1313 
1314 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
1315 	    IFF_RUNNING)
1316 		return;
1317 
1318 	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
1319 		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1320 			ifp->if_flags |= IFF_OACTIVE;
1321 			break;
1322 		}
1323 
1324 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1325 		if (m_head == NULL)
1326 			break;
1327 		/*
1328 		 * Pack the data into the transmit ring. If we
1329 		 * don't have room, set the OACTIVE flag and wait
1330 		 * for the NIC to drain the ring.
1331 		 */
1332 		if (stge_encap(sc, &m_head)) {
1333 			if (m_head == NULL)
1334 				break;
1335 			ifp->if_flags |= IFF_OACTIVE;
1336 			break;
1337 		}
1338 
1339 		enq++;
1340 		/*
1341 		 * If there's a BPF listener, bounce a copy of this frame
1342 		 * to him.
1343 		 */
1344 		BPF_MTAP(ifp, m_head);
1345 	}
1346 
1347 	if (enq > 0) {
1348 		/* Transmit */
1349 		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1350 
1351 		/* Set a timeout in case the chip goes out to lunch. */
1352 		ifp->if_timer = 5;
1353 	}
1354 }
1355 
1356 /*
1357  * stge_watchdog:	[ifnet interface function]
1358  *
1359  *	Watchdog timer handler.
1360  */
1361 static void
1362 stge_watchdog(struct ifnet *ifp)
1363 {
1364 	ASSERT_SERIALIZED(ifp->if_serializer);
1365 
1366 	if_printf(ifp, "device timeout\n");
1367 	ifp->if_oerrors++;
1368 	stge_init(ifp->if_softc);
1369 }
1370 
1371 /*
1372  * stge_ioctl:		[ifnet interface function]
1373  *
1374  *	Handle control requests from the operator.
1375  */
1376 static int
1377 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1378 {
1379 	struct stge_softc *sc;
1380 	struct ifreq *ifr;
1381 	struct mii_data *mii;
1382 	int error, mask;
1383 
1384 	ASSERT_SERIALIZED(ifp->if_serializer);
1385 
1386 	sc = ifp->if_softc;
1387 	ifr = (struct ifreq *)data;
1388 	error = 0;
1389 	switch (cmd) {
1390 	case SIOCSIFMTU:
1391 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1392 			error = EINVAL;
1393 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1394 			ifp->if_mtu = ifr->ifr_mtu;
1395 			stge_init(sc);
1396 		}
1397 		break;
1398 	case SIOCSIFFLAGS:
1399 		if ((ifp->if_flags & IFF_UP) != 0) {
1400 			if ((ifp->if_flags & IFF_RUNNING) != 0) {
1401 				if (((ifp->if_flags ^ sc->sc_if_flags)
1402 				    & IFF_PROMISC) != 0)
1403 					stge_set_filter(sc);
1404 			} else {
1405 				if (sc->sc_detach == 0)
1406 					stge_init(sc);
1407 			}
1408 		} else {
1409 			if ((ifp->if_flags & IFF_RUNNING) != 0)
1410 				stge_stop(sc);
1411 		}
1412 		sc->sc_if_flags = ifp->if_flags;
1413 		break;
1414 	case SIOCADDMULTI:
1415 	case SIOCDELMULTI:
1416 		if ((ifp->if_flags & IFF_RUNNING) != 0)
1417 			stge_set_multi(sc);
1418 		break;
1419 	case SIOCSIFMEDIA:
1420 	case SIOCGIFMEDIA:
1421 		mii = device_get_softc(sc->sc_miibus);
1422 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1423 		break;
1424 	case SIOCSIFCAP:
1425 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1426 		if ((mask & IFCAP_HWCSUM) != 0) {
1427 			ifp->if_capenable ^= IFCAP_HWCSUM;
1428 			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1429 			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1430 				ifp->if_hwassist = STGE_CSUM_FEATURES;
1431 			else
1432 				ifp->if_hwassist = 0;
1433 		}
1434 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1435 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1436 			if (ifp->if_flags & IFF_RUNNING)
1437 				stge_vlan_setup(sc);
1438 		}
1439 #if 0
1440 		VLAN_CAPABILITIES(ifp);
1441 #endif
1442 		break;
1443 	default:
1444 		error = ether_ioctl(ifp, cmd, data);
1445 		break;
1446 	}
1447 
1448 	return (error);
1449 }
1450 
1451 static void
1452 stge_link(struct stge_softc *sc)
1453 {
1454 	uint32_t v, ac;
1455 	int i;
1456 
1457 	/*
1458 	 * Update STGE_MACCtrl register depending on link status.
1459 	 * (duplex, flow control etc)
1460 	 */
1461 	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1462 	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1463 	v |= sc->sc_MACCtrl;
1464 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
1465 	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1466 		/* Duplex setting changed, reset Tx/Rx functions. */
1467 		ac = CSR_READ_4(sc, STGE_AsicCtrl);
1468 		ac |= AC_TxReset | AC_RxReset;
1469 		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1470 		for (i = 0; i < STGE_TIMEOUT; i++) {
1471 			DELAY(100);
1472 			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1473 				break;
1474 		}
1475 		if (i == STGE_TIMEOUT)
1476 			device_printf(sc->sc_dev, "reset failed to complete\n");
1477 	}
1478 }
1479 
1480 static __inline int
1481 stge_tx_error(struct stge_softc *sc)
1482 {
1483 	uint32_t txstat;
1484 	int error;
1485 
1486 	for (error = 0;;) {
1487 		txstat = CSR_READ_4(sc, STGE_TxStatus);
1488 		if ((txstat & TS_TxComplete) == 0)
1489 			break;
1490 		/* Tx underrun */
1491 		if ((txstat & TS_TxUnderrun) != 0) {
1492 			/*
1493 			 * XXX
1494 			 * There should be a better way to recover from a
1495 			 * Tx underrun than a full reset.
1496 			 */
1497 			if (sc->sc_nerr++ < STGE_MAXERR)
1498 				device_printf(sc->sc_dev, "Tx underrun, "
1499 				    "resetting...\n");
1500 			if (sc->sc_nerr == STGE_MAXERR)
1501 				device_printf(sc->sc_dev, "too many errors; "
1502 				    "not reporting any more\n");
1503 			error = -1;
1504 			break;
1505 		}
1506 		/* Maximum/Late collisions, Re-enable Tx MAC. */
1507 		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1508 			CSR_WRITE_4(sc, STGE_MACCtrl,
1509 			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1510 			    MC_TxEnable);
1511 	}
1512 
1513 	return (error);
1514 }
1515 
1516 /*
1517  * stge_intr:
1518  *
1519  *	Interrupt service routine.
1520  */
1521 static void
1522 stge_intr(void *arg)
1523 {
1524 	struct stge_softc *sc = arg;
1525 	struct ifnet *ifp = &sc->arpcom.ac_if;
1526 	int reinit;
1527 	uint16_t status;
1528 
1529 	ASSERT_SERIALIZED(ifp->if_serializer);
1530 
1531 	status = CSR_READ_2(sc, STGE_IntStatus);
1532 	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1533 		return;
1534 
1535 	/* Ack interrupts; the read also masks them until re-enabled below. */
1536 	for (reinit = 0;;) {
1537 		status = CSR_READ_2(sc, STGE_IntStatusAck);
1538 		status &= sc->sc_IntEnable;
1539 		if (status == 0)
1540 			break;
1541 		/* Host interface errors. */
1542 		if ((status & IS_HostError) != 0) {
1543 			device_printf(sc->sc_dev,
1544 			    "Host interface error, resetting...\n");
1545 			reinit = 1;
1546 			goto force_init;
1547 		}
1548 
1549 		/* Receive interrupts. */
1550 		if ((status & IS_RxDMAComplete) != 0) {
1551 			stge_rxeof(sc, -1);
1552 			if ((status & IS_RFDListEnd) != 0)
1553 				CSR_WRITE_4(sc, STGE_DMACtrl,
1554 				    DMAC_RxDMAPollNow);
1555 		}
1556 
1557 		/* Transmit interrupts. */
1558 		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1559 			stge_txeof(sc);
1560 
1561 		/* Transmission errors. */
1562 		if ((status & IS_TxComplete) != 0) {
1563 			if ((reinit = stge_tx_error(sc)) != 0)
1564 				break;
1565 		}
1566 	}
1567 
1568 force_init:
1569 	if (reinit != 0)
1570 		stge_init(sc);
1571 
1572 	/* Re-enable interrupts. */
1573 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1574 
1575 	/* Try to get more packets going. */
1576 	if (!ifq_is_empty(&ifp->if_snd))
1577 		ifp->if_start(ifp);
1578 }
1579 
1580 /*
1581  * stge_txeof:
1582  *
1583  *	Helper; handle transmit interrupts.
1584  */
1585 static void
1586 stge_txeof(struct stge_softc *sc)
1587 {
1588 	struct ifnet *ifp = &sc->arpcom.ac_if;
1589 	struct stge_txdesc *txd;
1590 	uint64_t control;
1591 	int cons;
1592 
1593 	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1594 	if (txd == NULL)
1595 		return;
1596 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1597 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1598 
1599 	/*
1600 	 * Go through our Tx list and free mbufs for those
1601 	 * frames which have been transmitted.
1602 	 */
1603 	for (cons = sc->sc_cdata.stge_tx_cons;;
1604 	    cons = (cons + 1) % STGE_TX_RING_CNT) {
1605 		if (sc->sc_cdata.stge_tx_cnt <= 0)
1606 			break;
1607 		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1608 		if ((control & TFD_TFDDone) == 0)
1609 			break;
1610 		sc->sc_cdata.stge_tx_cnt--;
1611 		ifp->if_flags &= ~IFF_OACTIVE;
1612 
1613 		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1614 		    BUS_DMASYNC_POSTWRITE);
1615 		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1616 
1617 		/* The output packet count is updated from the statistics registers. */
1618 		m_freem(txd->tx_m);
1619 		txd->tx_m = NULL;
1620 		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1621 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1622 		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1623 	}
1624 	sc->sc_cdata.stge_tx_cons = cons;
1625 	if (sc->sc_cdata.stge_tx_cnt == 0)
1626 		ifp->if_timer = 0;
1627 
1628 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1629 	    sc->sc_cdata.stge_tx_ring_map,
1630 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1631 }
1632 
1633 static __inline void
1634 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1635 {
1636 	struct stge_rfd *rfd;
1637 
1638 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
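	/*
	 * Clearing the status word clears RFD_RFDDone, handing the
	 * descriptor (and its current mbuf) back to the hardware.
	 */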
1639 	rfd->rfd_status = 0;
1640 }
1641 
1642 #ifndef __i386__
1643 /*
1644  * It seems that TC9021's DMA engine has alignment restrictions in
1645  * DMA scatter operations. The first DMA segment has no address
1646  * alignment restrictions, but the rest should be aligned on a 4(?)-byte
1647  * boundary or it may corrupt random memory. Since we don't know in
1648  * advance which buffer will end up as the first segment, we simply
1649  * don't align at all.
1650  * To avoid copying an entire frame just to align it, we allocate a new
1651  * mbuf, copy the Ethernet header into it, and prepend the new mbuf to
1652  * the existing mbuf chain.
1653  */
1654 static __inline struct mbuf *
1655 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1656 {
1657 	struct mbuf *n;
1658 
1659 	n = NULL;
1660 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1661 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1662 		m->m_data += ETHER_HDR_LEN;
1663 		n = m;
1664 	} else {
1665 		MGETHDR(n, MB_DONTWAIT, MT_DATA);
1666 		if (n != NULL) {
1667 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1668 			m->m_data += ETHER_HDR_LEN;
1669 			m->m_len -= ETHER_HDR_LEN;
1670 			n->m_len = ETHER_HDR_LEN;
1671 			M_MOVE_PKTHDR(n, m);
1672 			n->m_next = m;
1673 		} else
1674 			m_freem(m);
1675 	}
1676 
1677 	return (n);
1678 }
1679 #endif
1680 
1681 /*
1682  * stge_rxeof:
1683  *
1684  *	Helper; handle receive interrupts.
1685  */
1686 static void
1687 stge_rxeof(struct stge_softc *sc, int count)
1688 {
1689 	struct ifnet *ifp = &sc->arpcom.ac_if;
1690 	struct stge_rxdesc *rxd;
1691 	struct mbuf *mp, *m;
1692 	uint64_t status64;
1693 	uint32_t status;
1694 	int cons, prog;
1695 
1696 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1697 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1698 
1699 	prog = 0;
1700 	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1701 	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1702 #ifdef DEVICE_POLLING
1703 		if (count >= 0 && count-- == 0)
1704 			break;
1705 #endif
1706 
1707 		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1708 		status = RFD_RxStatus(status64);
1709 		if ((status & RFD_RFDDone) == 0)
1710 			break;
1711 
1712 		prog++;
1713 		rxd = &sc->sc_cdata.stge_rxdesc[cons];
1714 		mp = rxd->rx_m;
1715 
1716 		/*
1717 		 * If the packet had an error, drop it.  Note we count
1718 		 * the error later in the periodic stats update.
1719 		 */
1720 		if ((status & RFD_FrameEnd) != 0 && (status &
1721 		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1722 		    RFD_RxAlignmentError | RFD_RxFCSError |
1723 		    RFD_RxLengthError)) != 0) {
1724 			stge_discard_rxbuf(sc, cons);
1725 			if (sc->sc_cdata.stge_rxhead != NULL) {
1726 				m_freem(sc->sc_cdata.stge_rxhead);
1727 				STGE_RXCHAIN_RESET(sc);
1728 			}
1729 			continue;
1730 		}
1731 		/*
1732 		 * Add a new receive buffer to the ring.
1733 		 */
1734 		if (stge_newbuf(sc, cons, 0) != 0) {
1735 			ifp->if_iqdrops++;
1736 			stge_discard_rxbuf(sc, cons);
1737 			if (sc->sc_cdata.stge_rxhead != NULL) {
1738 				m_freem(sc->sc_cdata.stge_rxhead);
1739 				STGE_RXCHAIN_RESET(sc);
1740 			}
1741 			continue;
1742 		}
1743 
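		/*
		 * A frame may span several RFDs. stge_rxlen accumulates the
		 * bytes collected so far; on RFD_FrameEnd the chip reports
		 * the total DMA'd frame length, from which the length of
		 * this final fragment is derived.
		 */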
1744 		if ((status & RFD_FrameEnd) != 0)
1745 			mp->m_len = RFD_RxDMAFrameLen(status) -
1746 			    sc->sc_cdata.stge_rxlen;
1747 		sc->sc_cdata.stge_rxlen += mp->m_len;
1748 
1749 		/* Chain mbufs. */
1750 		if (sc->sc_cdata.stge_rxhead == NULL) {
1751 			sc->sc_cdata.stge_rxhead = mp;
1752 			sc->sc_cdata.stge_rxtail = mp;
1753 		} else {
1754 			mp->m_flags &= ~M_PKTHDR;
1755 			sc->sc_cdata.stge_rxtail->m_next = mp;
1756 			sc->sc_cdata.stge_rxtail = mp;
1757 		}
1758 
1759 		if ((status & RFD_FrameEnd) != 0) {
1760 			m = sc->sc_cdata.stge_rxhead;
1761 			m->m_pkthdr.rcvif = ifp;
1762 			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1763 
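			/* Drop frames that exceed the configured maximum frame size. */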
1764 			if (m->m_pkthdr.len > sc->sc_if_framesize) {
1765 				m_freem(m);
1766 				STGE_RXCHAIN_RESET(sc);
1767 				continue;
1768 			}
1769 			/*
1770 			 * Set the incoming checksum information for
1771 			 * the packet.
1772 			 */
1773 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1774 				if ((status & RFD_IPDetected) != 0) {
1775 					m->m_pkthdr.csum_flags |=
1776 						CSUM_IP_CHECKED;
1777 					if ((status & RFD_IPError) == 0)
1778 						m->m_pkthdr.csum_flags |=
1779 						    CSUM_IP_VALID;
1780 				}
1781 				if (((status & RFD_TCPDetected) != 0 &&
1782 				    (status & RFD_TCPError) == 0) ||
1783 				    ((status & RFD_UDPDetected) != 0 &&
1784 				    (status & RFD_UDPError) == 0)) {
1785 					m->m_pkthdr.csum_flags |=
1786 					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1787 					m->m_pkthdr.csum_data = 0xffff;
1788 				}
1789 			}
1790 
1791 #ifndef __i386__
1792 			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1793 				if ((m = stge_fixup_rx(sc, m)) == NULL) {
1794 					STGE_RXCHAIN_RESET(sc);
1795 					continue;
1796 				}
1797 			}
1798 #endif
1799 
1800 			/* Check for VLAN tagged packets. */
1801 			if ((status & RFD_VLANDetected) != 0 &&
1802 			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1803 				VLAN_INPUT_TAG(m, RFD_TCI(status64));
1804 			} else {
1805 				/* Pass it on. */
1806 				ifp->if_input(ifp, m);
1807 			}
1808 
1809 			STGE_RXCHAIN_RESET(sc);
1810 		}
1811 	}
1812 
1813 	if (prog > 0) {
1814 		/* Update the consumer index. */
1815 		sc->sc_cdata.stge_rx_cons = cons;
1816 		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1817 		    sc->sc_cdata.stge_rx_ring_map,
1818 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1819 	}
1820 }
1821 
1822 #ifdef DEVICE_POLLING
1823 static void
1824 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1825 {
1826 	struct stge_softc *sc;
1827 	uint16_t status;
1828 
1829 	sc = ifp->if_softc;
1830 
1831 	switch (cmd) {
1832 	case POLL_REGISTER:
1833 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
1834 		break;
1835 	case POLL_DEREGISTER:
1836 		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1837 		break;
1838 	case POLL_ONLY:
1839 	case POLL_AND_CHECK_STATUS:
1840 		sc->sc_cdata.stge_rxcycles = count;
1841 		stge_rxeof(sc, count);
1842 		stge_txeof(sc);
1843 
1844 		if (cmd == POLL_AND_CHECK_STATUS) {
1845 			status = CSR_READ_2(sc, STGE_IntStatus);
1846 			status &= sc->sc_IntEnable;
1847 			if (status != 0) {
1848 				if (status & IS_HostError) {
1849 					device_printf(sc->sc_dev,
1850 					    "Host interface error, "
1851 					    "resetting...\n");
1852 					stge_init(sc);
1853 				}
1854 				if ((status & IS_TxComplete) != 0 &&
1855 				    stge_tx_error(sc) != 0)
1856 					stge_init(sc);
1857 			}
1858 
1859 		}
1860 
1861 		if (!ifq_is_empty(&ifp->if_snd))
1862 			ifp->if_start(ifp);
1863 	}
1864 }
1865 #endif	/* DEVICE_POLLING */
1866 
1867 /*
1868  * stge_tick:
1869  *
1870  *	One second timer, used to tick the MII.
1871  */
1872 static void
1873 stge_tick(void *arg)
1874 {
1875 	struct stge_softc *sc = arg;
1876 	struct ifnet *ifp = &sc->arpcom.ac_if;
1877 	struct mii_data *mii;
1878 
1879 	lwkt_serialize_enter(ifp->if_serializer);
1880 
1881 	mii = device_get_softc(sc->sc_miibus);
1882 	mii_tick(mii);
1883 
1884 	/* Update statistics counters. */
1885 	stge_stats_update(sc);
1886 
1887 	/*
1888 	 * Reclaim any pending Tx descriptors to release mbufs in a
1889 	 * timely manner as we don't generate Tx completion interrupts
1890 	 * for every frame. This limits the delay to a maximum of one
1891 	 * second.
1892 	 */
1893 	if (sc->sc_cdata.stge_tx_cnt != 0)
1894 		stge_txeof(sc);
1895 
1896 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1897 
1898 	lwkt_serialize_exit(ifp->if_serializer);
1899 }
1900 
1901 /*
1902  * stge_stats_update:
1903  *
1904  *	Read the TC9021 statistics counters.
1905  */
1906 static void
1907 stge_stats_update(struct stge_softc *sc)
1908 {
1909 	struct ifnet *ifp = &sc->arpcom.ac_if;
1910 
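	/*
	 * The octet counters are read only for their side effect; the
	 * statistics registers appear to be clear-on-read and the values
	 * themselves are unused here.
	 */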
1911 	CSR_READ_4(sc, STGE_OctetRcvOk);
1912 
1913 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1914 
1915 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1916 
1917 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1918 
1919 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1920 
1921 	ifp->if_collisions +=
1922 	    CSR_READ_4(sc, STGE_LateCollisions) +
1923 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1924 	    CSR_READ_4(sc, STGE_SingleColFrames);
1925 
1926 	ifp->if_oerrors +=
1927 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1928 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1929 }
1930 
1931 /*
1932  * stge_reset:
1933  *
1934  *	Perform a soft reset on the TC9021.
1935  */
1936 static void
1937 stge_reset(struct stge_softc *sc, uint32_t how)
1938 {
1939 	uint32_t ac;
1940 	uint8_t v;
1941 	int i, dv;
1942 
1943 	dv = 5000;
1944 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1945 	switch (how) {
1946 	case STGE_RESET_TX:
1947 		ac |= AC_TxReset | AC_FIFO;
1948 		dv = 100;
1949 		break;
1950 	case STGE_RESET_RX:
1951 		ac |= AC_RxReset | AC_FIFO;
1952 		dv = 100;
1953 		break;
1954 	case STGE_RESET_FULL:
1955 	default:
1956 		/*
1957 		 * Only assert RstOut if we're fiber.  We need GMII clocks
1958 		 * to be present in order for the reset to complete on fiber
1959 		 * cards.
1960 		 */
1961 		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1962 		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1963 		    (sc->sc_usefiber ? AC_RstOut : 0);
1964 		break;
1965 	}
1966 
1967 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1968 
1969 	/* Account for reset problem at 10Mbps. */
1970 	DELAY(dv);
1971 
1972 	for (i = 0; i < STGE_TIMEOUT; i++) {
1973 		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1974 			break;
1975 		DELAY(dv);
1976 	}
1977 
1978 	if (i == STGE_TIMEOUT)
1979 		device_printf(sc->sc_dev, "reset failed to complete\n");
1980 
1981 	/* Set LED, from Linux IPG driver. */
1982 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1983 	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1984 	if ((sc->sc_led & 0x01) != 0)
1985 		ac |= AC_LEDMode;
1986 	if ((sc->sc_led & 0x03) != 0)
1987 		ac |= AC_LEDModeBit1;
1988 	if ((sc->sc_led & 0x08) != 0)
1989 		ac |= AC_LEDSpeed;
1990 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1991 
1992 	/* Set PHY, from Linux IPG driver */
1993 	v = CSR_READ_1(sc, STGE_PhySet);
1994 	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1995 	v |= ((sc->sc_led & 0x70) >> 4);
1996 	CSR_WRITE_1(sc, STGE_PhySet, v);
1997 }
1998 
1999 /*
2000  * stge_init:		[ ifnet interface function ]
2001  *
2002  *	Initialize the interface.
2003  */
2004 static void
2005 stge_init(void *xsc)
2006 {
2007 	struct stge_softc *sc = xsc;
2008 	struct ifnet *ifp = &sc->arpcom.ac_if;
2009 	struct mii_data *mii;
2010 	uint16_t eaddr[3];
2011 	uint32_t v;
2012 	int error;
2013 
2014 	ASSERT_SERIALIZED(ifp->if_serializer);
2015 
2016 	mii = device_get_softc(sc->sc_miibus);
2017 
2018 	/*
2019 	 * Cancel any pending I/O.
2020 	 */
2021 	stge_stop(sc);
2022 
2023 	/* Init descriptors. */
2024 	error = stge_init_rx_ring(sc);
2025 	if (error != 0) {
2026 		device_printf(sc->sc_dev,
2027 		    "initialization failed: no memory for rx buffers\n");
2028 		stge_stop(sc);
2029 		goto out;
2030 	}
2031 	stge_init_tx_ring(sc);
2032 
2033 	/* Set the station address. */
2034 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2035 	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2036 	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2037 	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2038 
2039 	/*
2040 	 * Set the statistics masks.  Disable all the RMON stats,
2041 	 * and disable selected stats in the non-RMON stats registers.
2042 	 */
2043 	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2044 	CSR_WRITE_4(sc, STGE_StatisticsMask,
2045 	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2046 	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2047 	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2048 	    (1U << 21));
2049 
2050 	/* Set up the receive filter. */
2051 	stge_set_filter(sc);
2052 	/* Program multicast filter. */
2053 	stge_set_multi(sc);
2054 
2055 	/*
2056 	 * Give the transmit and receive ring to the chip.
2057 	 */
2058 	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2059 	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2060 	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2061 	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2062 
2063 	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2064 	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2065 	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2066 	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2067 
2068 	/*
2069 	 * Initialize the Tx auto-poll period.  It's OK to make this number
2070 	 * large (255 is the max, but we use 127) -- we explicitly kick the
2071 	 * transmit engine when there's actually a packet.
2072 	 */
2073 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2074 
2075 	/* ..and the Rx auto-poll period. */
2076 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2077 
2078 	/* Initialize the Tx start threshold. */
2079 	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2080 
2081 	/* Rx DMA thresholds, from Linux */
2082 	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2083 	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2084 
2085 	/* Rx early threshold, from Linux */
2086 	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2087 
2088 	/* Tx DMA thresholds, from Linux */
2089 	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2090 	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2091 
2092 	/*
2093 	 * Initialize the Rx DMA interrupt control register.  We
2094 	 * request an interrupt after every incoming packet, but
2095 	 * defer it for sc_rxint_dmawait microseconds.  When the number
2096 	 * of frames pending reaches sc_rxint_nframe, we stop deferring
2097 	 * and signal the interrupt immediately.
2098 	 */
2099 	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2100 	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2101 	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
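	/*
	 * Worked example (assuming the usual defaults of
	 * sc_rxint_nframe = 8 and sc_rxint_dmawait = 30us): a steady
	 * burst raises roughly one interrupt per 8 frames, while an
	 * isolated frame is still signalled within about 30us.
	 */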
2102 
2103 	/*
2104 	 * Initialize the interrupt mask.
2105 	 */
2106 	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2107 	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
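	/*
	 * The mask is kept in sc_IntEnable so stge_poll() can restore
	 * it when polling is deregistered.
	 */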
2108 #ifdef DEVICE_POLLING
2109 	/* Disable interrupts if we are polling. */
2110 	if (ifp->if_flags & IFF_POLLING)
2111 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2112 	else
2113 #endif
2114 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2115 
2116 	/*
2117 	 * Configure the DMA engine.
2118 	 * XXX Should auto-tune TxBurstLimit.
2119 	 */
2120 	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2121 
2122 	/*
2123 	 * Send a PAUSE frame when the Rx FIFO reaches 29,696 bytes,
2124 	 * and an un-PAUSE frame when it drains back down to 3056
2125 	 * bytes.  The threshold registers count 16-byte units.
2126 	 */
2127 	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2128 	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
2129 
2130 	/*
2131 	 * Set the maximum frame size.
2132 	 */
2133 	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2134 	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2135 
2136 	/*
2137 	 * Initialize MacCtrl -- do it before setting the media,
2138 	 * as setting the media will actually program the register.
2139 	 *
2140 	 * Note: We have to poke the IFS value before poking
2141 	 * anything else.
2142 	 */
2143 	/* Tx/Rx MAC should be disabled before programming IFS. */
2144 	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2145 
2146 	stge_vlan_setup(sc);
2147 
2148 	if (sc->sc_rev >= 6) {		/* >= B.2 */
2149 		/* Multi-frag frame bug work-around. */
2150 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2151 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2152 
2153 		/* Tx Poll Now bug work-around. */
2154 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2155 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2156 		/* Tx Poll Now bug work-around. */
2157 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2158 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2159 	}
2160 
2161 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2162 	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2163 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2164 	/*
2165 	 * It seems that transmitting frames without checking the state of
2166 	 * Rx/Tx MAC wedge the hardware.
2167 	 */
2168 	stge_start_tx(sc);
2169 	stge_start_rx(sc);
2170 
2171 	/*
2172 	 * Set the current media.
2173 	 */
2174 	mii_mediachg(mii);
2175 
2176 	/*
2177 	 * Start the one second MII clock.
2178 	 */
2179 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2180 
2181 	/*
2182 	 * ...all done!
2183 	 */
2184 	ifp->if_flags |= IFF_RUNNING;
2185 	ifp->if_flags &= ~IFF_OACTIVE;
2186 
2187  out:
2188 	if (error != 0)
2189 		device_printf(sc->sc_dev, "interface not running\n");
2190 }
2191 
2192 static void
2193 stge_vlan_setup(struct stge_softc *sc)
2194 {
2195 	struct ifnet *ifp = &sc->arpcom.ac_if;
2196 	uint32_t v;
2197 
2198 	/*
2199 	 * The NIC always copies a VLAN tag regardless of the
2200 	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
2201 	 * The MC_AutoVLANtagging bit selects which VLAN source is
2202 	 * used, STGE_VLANTag or the TFC; however, the TFC's
2203 	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging,
2204 	 * so we always use the TFC instead of the STGE_VLANTag register.
2205 	 */
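	/*
	 * (The Tx encapsulation path is therefore expected to request
	 * insertion per frame, e.g. by setting TFD_VLANTagInsert along
	 * with the tag in the frame's TFC word.)
	 */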
2206 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2207 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2208 		v |= MC_AutoVLANuntagging;
2209 	else
2210 		v &= ~MC_AutoVLANuntagging;
2211 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2212 }
2213 
2214 /*
2215  *	Stop transmission on the interface.
2216  */
2217 static void
2218 stge_stop(struct stge_softc *sc)
2219 {
2220 	struct ifnet *ifp = &sc->arpcom.ac_if;
2221 	struct stge_txdesc *txd;
2222 	struct stge_rxdesc *rxd;
2223 	uint32_t v;
2224 	int i;
2225 
2226 	ASSERT_SERIALIZED(ifp->if_serializer);
2227 
2228 	/*
2229 	 * Stop the one second clock.
2230 	 */
2231 	callout_stop(&sc->sc_tick_ch);
2232 
2233 	/*
2234 	 * Reset the chip to a known state.
2235 	 */
2236 	stge_reset(sc, STGE_RESET_FULL);
2237 
2238 	/*
2239 	 * Disable interrupts.
2240 	 */
2241 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2242 
2243 	/*
2244 	 * Stop receiver, transmitter, and stats update.
2245 	 */
2246 	stge_stop_rx(sc);
2247 	stge_stop_tx(sc);
2248 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2249 	v |= MC_StatisticsDisable;
2250 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2251 
2252 	/*
2253 	 * Stop the transmit and receive DMA.
2254 	 */
2255 	stge_dma_wait(sc);
2256 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2257 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2258 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2259 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2260 
2261 	/*
2262 	 * Free RX and TX mbufs still in the queues.
2263 	 */
2264 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2265 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2266 		if (rxd->rx_m != NULL) {
2267 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2268 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2269 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2270 			    rxd->rx_dmamap);
2271 			m_freem(rxd->rx_m);
2272 			rxd->rx_m = NULL;
2273 		}
2274 	}
2275 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2276 		txd = &sc->sc_cdata.stge_txdesc[i];
2277 		if (txd->tx_m != NULL) {
2278 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2279 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2280 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2281 			    txd->tx_dmamap);
2282 			m_freem(txd->tx_m);
2283 			txd->tx_m = NULL;
2284 		}
2285 	}
2286 
2287 	/*
2288 	 * Mark the interface down and cancel the watchdog timer.
2289 	 */
2290 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2291 	ifp->if_timer = 0;
2292 }
2293 
2294 static void
2295 stge_start_tx(struct stge_softc *sc)
2296 {
2297 	uint32_t v;
2298 	int i;
2299 
2300 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2301 	if ((v & MC_TxEnabled) != 0)
2302 		return;
2303 	v |= MC_TxEnable;
2304 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2305 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2306 	for (i = STGE_TIMEOUT; i > 0; i--) {
2307 		DELAY(10);
2308 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2309 		if ((v & MC_TxEnabled) != 0)
2310 			break;
2311 	}
2312 	if (i == 0)
2313 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2314 }
2315 
2316 static void
2317 stge_start_rx(struct stge_softc *sc)
2318 {
2319 	uint32_t v;
2320 	int i;
2321 
2322 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2323 	if ((v & MC_RxEnabled) != 0)
2324 		return;
2325 	v |= MC_RxEnable;
2326 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2327 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2328 	for (i = STGE_TIMEOUT; i > 0; i--) {
2329 		DELAY(10);
2330 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2331 		if ((v & MC_RxEnabled) != 0)
2332 			break;
2333 	}
2334 	if (i == 0)
2335 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2336 }
2337 
2338 static void
2339 stge_stop_tx(struct stge_softc *sc)
2340 {
2341 	uint32_t v;
2342 	int i;
2343 
2344 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2345 	if ((v & MC_TxEnabled) == 0)
2346 		return;
2347 	v |= MC_TxDisable;
2348 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2349 	for (i = STGE_TIMEOUT; i > 0; i--) {
2350 		DELAY(10);
2351 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2352 		if ((v & MC_TxEnabled) == 0)
2353 			break;
2354 	}
2355 	if (i == 0)
2356 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2357 }
2358 
2359 static void
2360 stge_stop_rx(struct stge_softc *sc)
2361 {
2362 	uint32_t v;
2363 	int i;
2364 
2365 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2366 	if ((v & MC_RxEnabled) == 0)
2367 		return;
2368 	v |= MC_RxDisable;
2369 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2370 	for (i = STGE_TIMEOUT; i > 0; i--) {
2371 		DELAY(10);
2372 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2373 		if ((v & MC_RxEnabled) == 0)
2374 			break;
2375 	}
2376 	if (i == 0)
2377 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2378 }
2379 
2380 static void
2381 stge_init_tx_ring(struct stge_softc *sc)
2382 {
2383 	struct stge_ring_data *rd;
2384 	struct stge_txdesc *txd;
2385 	bus_addr_t addr;
2386 	int i;
2387 
2388 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2389 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2390 
2391 	sc->sc_cdata.stge_tx_prod = 0;
2392 	sc->sc_cdata.stge_tx_cons = 0;
2393 	sc->sc_cdata.stge_tx_cnt = 0;
2394 
2395 	rd = &sc->sc_rdata;
2396 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
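	/*
	 * Chain the TFDs into a ring (the last one points back to the
	 * first) and mark each descriptor done so the chip will not
	 * transmit stale entries before they are handed over.
	 */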
2397 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2398 		if (i == (STGE_TX_RING_CNT - 1))
2399 			addr = STGE_TX_RING_ADDR(sc, 0);
2400 		else
2401 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2402 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2403 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2404 		txd = &sc->sc_cdata.stge_txdesc[i];
2405 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2406 	}
2407 
2408 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2409 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
2410 }
2411 
2412 static int
2413 stge_init_rx_ring(struct stge_softc *sc)
2414 {
2415 	struct stge_ring_data *rd;
2416 	bus_addr_t addr;
2417 	int i;
2418 
2419 	sc->sc_cdata.stge_rx_cons = 0;
2420 	STGE_RXCHAIN_RESET(sc);
2421 
2422 	rd = &sc->sc_rdata;
2423 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2424 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2425 		if (stge_newbuf(sc, i, 1) != 0)
2426 			return (ENOBUFS);
2427 		if (i == (STGE_RX_RING_CNT - 1))
2428 			addr = STGE_RX_RING_ADDR(sc, 0);
2429 		else
2430 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2431 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2432 		rd->stge_rx_ring[i].rfd_status = 0;
2433 	}
2434 
2435 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2436 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREWRITE);
2437 
2438 	return (0);
2439 }
2440 
2441 /*
2442  * stge_newbuf:
2443  *
2444  *	Add a receive buffer to the indicated descriptor.
2445  */
2446 static int
2447 stge_newbuf(struct stge_softc *sc, int idx, int waitok)
2448 {
2449 	struct stge_rxdesc *rxd;
2450 	struct stge_rfd *rfd;
2451 	struct mbuf *m;
2452 	struct stge_mbuf_dmamap_arg arg;
2453 	bus_dma_segment_t segs[1];
2454 	bus_dmamap_t map;
2455 
2456 	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2457 	if (m == NULL)
2458 		return (ENOBUFS);
2459 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2460 	/*
2461 	 * The hardware requires a 4-byte aligned DMA address when jumbo
2462 	 * frames are used; apply ETHER_ALIGN only to standard frames.
2463 	 */
2464 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2465 		m_adj(m, ETHER_ALIGN);
2466 
2467 	arg.segs = segs;
2468 	arg.nsegs = 1;
2469 	if (bus_dmamap_load_mbuf(sc->sc_cdata.stge_rx_tag,
2470 	    sc->sc_cdata.stge_rx_sparemap, m, stge_mbuf_dmamap_cb, &arg,
2471 	    waitok ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
2472 		m_freem(m);
2473 		return (ENOBUFS);
2474 	}
2475 
2476 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2477 	if (rxd->rx_m != NULL) {
2478 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2479 		    BUS_DMASYNC_POSTREAD);
2480 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2481 	}
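	/*
	 * Swap the just-loaded spare map into this slot; the old map
	 * becomes the new spare.  Loading into the spare first means a
	 * failed load above leaves the ring entry intact.
	 */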
2482 	map = rxd->rx_dmamap;
2483 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2484 	sc->sc_cdata.stge_rx_sparemap = map;
2485 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2486 	    BUS_DMASYNC_PREREAD);
2487 	rxd->rx_m = m;
2488 
2489 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2490 	rfd->rfd_frag.frag_word0 =
2491 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2492 	rfd->rfd_status = 0;
2493 
2494 	return (0);
2495 }
2496 
2497 /*
2498  * stge_set_filter:
2499  *
2500  *	Set up the receive filter.
2501  */
2502 static void
2503 stge_set_filter(struct stge_softc *sc)
2504 {
2505 	struct ifnet *ifp = &sc->arpcom.ac_if;
2506 	uint16_t mode;
2507 
2508 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2509 	mode |= RM_ReceiveUnicast;
2510 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2511 		mode |= RM_ReceiveBroadcast;
2512 	else
2513 		mode &= ~RM_ReceiveBroadcast;
2514 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2515 		mode |= RM_ReceiveAllFrames;
2516 	else
2517 		mode &= ~RM_ReceiveAllFrames;
2518 
2519 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2520 }
2521 
2522 static void
2523 stge_set_multi(struct stge_softc *sc)
2524 {
2525 	struct ifnet *ifp = &sc->arpcom.ac_if;
2526 	struct ifmultiaddr *ifma;
2527 	uint32_t crc;
2528 	uint32_t mchash[2];
2529 	uint16_t mode;
2530 	int count;
2531 
2532 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2533 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2534 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2535 			mode |= RM_ReceiveAllFrames;
2536 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2537 			mode |= RM_ReceiveMulticast;
2538 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2539 		return;
2540 	}
2541 
2542 	/* clear existing filters. */
2543 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2544 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2545 
2546 	/*
2547 	 * Set up the multicast address filter by passing all multicast
2548 	 * addresses through a CRC generator, and then using the low-order
2549 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2550 	 * high-order bit selects the register, while the low-order bits
2551 	 * select the bit within the register.
2552 	 */
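	/*
	 * Example: a CRC whose low 6 bits are 0x2b (binary 101011)
	 * sets bit 0x2b & 0x1f = 11 of register mchash[0x2b >> 5],
	 * i.e. mchash[1].
	 */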
2553 
2554 	bzero(mchash, sizeof(mchash));
2555 
2556 	count = 0;
2557 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2558 		if (ifma->ifma_addr->sa_family != AF_LINK)
2559 			continue;
2560 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2561 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2562 
2563 		/* Just want the 6 least significant bits. */
2564 		crc &= 0x3f;
2565 
2566 		/* Set the corresponding bit in the hash table. */
2567 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2568 		count++;
2569 	}
2570 
2571 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2572 	if (count > 0)
2573 		mode |= RM_ReceiveMulticastHash;
2574 	else
2575 		mode &= ~RM_ReceiveMulticastHash;
2576 
2577 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2578 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2579 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2580 }
2581 
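/*
 * sysctl_int_range:
 *
 *	Common handler for the Rx interrupt moderation sysctls below;
 *	a new value is accepted only if it lies within [low, high].
 */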
2582 static int
2583 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2584 {
2585 	int error, value;
2586 
2587 	if (!arg1)
2588 		return (EINVAL);
2589 	value = *(int *)arg1;
2590 	error = sysctl_handle_int(oidp, &value, 0, req);
2591 	if (error || !req->newptr)
2592 		return (error);
2593 	if (value < low || value > high)
2594 		return (EINVAL);
2595 	*(int *)arg1 = value;
2596 
2597 	return (0);
2598 }
2599 
2600 static int
2601 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2602 {
2603 	return (sysctl_int_range(oidp, arg1, arg2, req,
2604 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2605 }
2606 
2607 static int
2608 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2609 {
2610 	return (sysctl_int_range(oidp, arg1, arg2, req,
2611 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2612 }
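/*
 * A minimal sketch of how the two handlers above are typically wired
 * up at attach time (the sysctl context/tree field names here are
 * illustrative, not necessarily those used by this driver):
 *
 *	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
 *	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
 *	    "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe,
 *	    "I", "stge rx interrupt nframe");
 */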
2613