1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 /*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
3 /*	$DragonFly: src/sys/dev/netif/stge/if_stge.c,v 1.7 2008/08/03 11:00:32 sephe Exp $	*/
4 
5 /*-
6  * Copyright (c) 2001 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Jason R. Thorpe.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /*
42  * Device driver for the Sundance Tech. TC9021 10/100/1000
43  * Ethernet controller.
44  */
45 
46 #include "opt_polling.h"
47 
48 #include <sys/param.h>
49 #include <sys/bus.h>
50 #include <sys/endian.h>
51 #include <sys/kernel.h>
52 #include <sys/interrupt.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/module.h>
56 #include <sys/rman.h>
57 #include <sys/serialize.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
61 
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/ifq_var.h>
70 #include <net/vlan/if_vlan_var.h>
71 #include <net/vlan/if_vlan_ether.h>
72 
73 #include <dev/netif/mii_layer/mii.h>
74 #include <dev/netif/mii_layer/miivar.h>
75 
76 #include <bus/pci/pcireg.h>
77 #include <bus/pci/pcivar.h>
78 
79 #include "if_stgereg.h"
80 #include "if_stgevar.h"
81 
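/* Checksum-offload features the TC9021 can compute in hardware. */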
82 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
83 
84 /* "device miibus" required.  See GENERIC if you get errors here. */
85 #include "miibus_if.h"
86 
87 /*
88  * Devices supported by this driver.
89  */
90 static struct stge_product {
91 	uint16_t	stge_vendorid;
92 	uint16_t	stge_deviceid;
93 	const char	*stge_name;
94 } stge_products[] = {
95 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
96 	  "Sundance ST-1023 Gigabit Ethernet" },
97 
98 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
99 	  "Sundance ST-2021 Gigabit Ethernet" },
100 
101 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
102 	  "Tamarack TC9021 Gigabit Ethernet" },
103 
104 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
105 	  "Tamarack TC9021 Gigabit Ethernet" },
106 
107 	/*
108 	 * The Sundance sample boards use the Sundance vendor ID,
109 	 * but the Tamarack product ID.
110 	 */
111 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
112 	  "Sundance TC9021 Gigabit Ethernet" },
113 
114 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
115 	  "Sundance TC9021 Gigabit Ethernet" },
116 
117 	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
118 	  "D-Link DL-2000 Gigabit Ethernet" },
119 
120 	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
121 	  "Antares Gigabit Ethernet" },
122 
123 	{ 0, 0, NULL }
124 };
125 
126 static int	stge_probe(device_t);
127 static int	stge_attach(device_t);
128 static int	stge_detach(device_t);
129 static void	stge_shutdown(device_t);
130 static int	stge_suspend(device_t);
131 static int	stge_resume(device_t);
132 
133 static int	stge_encap(struct stge_softc *, struct mbuf **);
134 static void	stge_start(struct ifnet *);
135 static void	stge_watchdog(struct ifnet *);
136 static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
137 static void	stge_init(void *);
138 static void	stge_vlan_setup(struct stge_softc *);
139 static void	stge_stop(struct stge_softc *);
140 static void	stge_start_tx(struct stge_softc *);
141 static void	stge_start_rx(struct stge_softc *);
142 static void	stge_stop_tx(struct stge_softc *);
143 static void	stge_stop_rx(struct stge_softc *);
144 
145 static void	stge_reset(struct stge_softc *, uint32_t);
146 static int	stge_eeprom_wait(struct stge_softc *);
147 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
148 static void	stge_tick(void *);
149 static void	stge_stats_update(struct stge_softc *);
150 static void	stge_set_filter(struct stge_softc *);
151 static void	stge_set_multi(struct stge_softc *);
152 
153 static void	stge_link(struct stge_softc *);
154 static void	stge_intr(void *);
155 static __inline int stge_tx_error(struct stge_softc *);
156 static void	stge_txeof(struct stge_softc *);
157 static void	stge_rxeof(struct stge_softc *, int);
158 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
159 static int	stge_newbuf(struct stge_softc *, int, int);
160 #ifndef __i386__
161 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
162 #endif
163 
164 static void	stge_mii_sync(struct stge_softc *);
165 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
166 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
167 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
168 static int	stge_miibus_readreg(device_t, int, int);
169 static int	stge_miibus_writereg(device_t, int, int, int);
170 static void	stge_miibus_statchg(device_t);
171 static int	stge_mediachange(struct ifnet *);
172 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
173 
174 static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
175 static void	stge_mbuf_dmamap_cb(void *, bus_dma_segment_t *, int,
176 				    bus_size_t, int);
177 static int	stge_dma_alloc(struct stge_softc *);
178 static void	stge_dma_free(struct stge_softc *);
179 static void	stge_dma_wait(struct stge_softc *);
180 static void	stge_init_tx_ring(struct stge_softc *);
181 static int	stge_init_rx_ring(struct stge_softc *);
182 #ifdef DEVICE_POLLING
183 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
184 #endif
185 
186 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
187 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
188 
189 static device_method_t stge_methods[] = {
190 	/* Device interface */
191 	DEVMETHOD(device_probe,		stge_probe),
192 	DEVMETHOD(device_attach,	stge_attach),
193 	DEVMETHOD(device_detach,	stge_detach),
194 	DEVMETHOD(device_shutdown,	stge_shutdown),
195 	DEVMETHOD(device_suspend,	stge_suspend),
196 	DEVMETHOD(device_resume,	stge_resume),
197 
198 	/* MII interface */
199 	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
200 	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
201 	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),
202 
203 	{ 0, 0 }
204 
205 };
206 
207 static driver_t stge_driver = {
208 	"stge",
209 	stge_methods,
210 	sizeof(struct stge_softc)
211 };
212 
213 static devclass_t stge_devclass;
214 
215 DECLARE_DUMMY_MODULE(if_stge);
216 MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
217 DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, 0, 0);
218 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
219 
220 #define	MII_SET(x)	\
221 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
222 #define	MII_CLR(x)	\
223 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
224 
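/*
 * For reference, the management frames clocked out by the helpers
 * below follow the IEEE 802.3 clause 22 format:
 *
 *	<preamble:32x1> <start:01> <op:10=read,01=write>
 *	<phyaddr:5> <regaddr:5> <turnaround:2> <data:16>
 *
 * stge_mii_sync() supplies the preamble; stge_mii_send() shifts the
 * remaining fields out most-significant bit first.
 */
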
225 /*
226  * Sync the PHYs by setting data bit and strobing the clock 32 times.
227  */
228 static void
229 stge_mii_sync(struct stge_softc	*sc)
230 {
231 	int i;
232 
233 	MII_SET(PC_MgmtDir | PC_MgmtData);
234 
235 	for (i = 0; i < 32; i++) {
236 		MII_SET(PC_MgmtClk);
237 		DELAY(1);
238 		MII_CLR(PC_MgmtClk);
239 		DELAY(1);
240 	}
241 }
242 
243 /*
244  * Clock a series of bits through the MII.
245  */
246 static void
247 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
248 {
249 	int i;
250 
251 	MII_CLR(PC_MgmtClk);
252 
253 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
254 		if (bits & i)
255 			MII_SET(PC_MgmtData);
256 		else
257 			MII_CLR(PC_MgmtData);
258 		DELAY(1);
259 		MII_CLR(PC_MgmtClk);
260 		DELAY(1);
261 		MII_SET(PC_MgmtClk);
262 	}
263 }
264 
265 /*
266  * Read a PHY register through the MII.
267  */
268 static int
269 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
270 {
271 	int i, ack;
272 
273 	/*
274 	 * Set up frame for RX.
275 	 */
276 	frame->mii_stdelim = STGE_MII_STARTDELIM;
277 	frame->mii_opcode = STGE_MII_READOP;
278 	frame->mii_turnaround = 0;
279 	frame->mii_data = 0;
280 
281 	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
282 	/*
283 	 * Turn on data xmit.
284 	 */
285 	MII_SET(PC_MgmtDir);
286 
287 	stge_mii_sync(sc);
288 
289 	/*
290 	 * Send command/address info.
291 	 */
292 	stge_mii_send(sc, frame->mii_stdelim, 2);
293 	stge_mii_send(sc, frame->mii_opcode, 2);
294 	stge_mii_send(sc, frame->mii_phyaddr, 5);
295 	stge_mii_send(sc, frame->mii_regaddr, 5);
296 
297 	/* Turn off xmit. */
298 	MII_CLR(PC_MgmtDir);
299 
300 	/* Idle bit */
301 	MII_CLR((PC_MgmtClk | PC_MgmtData));
302 	DELAY(1);
303 	MII_SET(PC_MgmtClk);
304 	DELAY(1);
305 
306 	/* Check for ack: the PHY pulls MgmtData low to acknowledge. */
307 	MII_CLR(PC_MgmtClk);
308 	DELAY(1);
309 	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
310 	MII_SET(PC_MgmtClk);
311 	DELAY(1);
312 
313 	/*
314 	 * Now try reading data bits. If the ack failed, we still
315 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
316 	 */
317 	if (ack) {
318 		for (i = 0; i < 16; i++) {
319 			MII_CLR(PC_MgmtClk);
320 			DELAY(1);
321 			MII_SET(PC_MgmtClk);
322 			DELAY(1);
323 		}
324 		goto fail;
325 	}
326 
327 	for (i = 0x8000; i; i >>= 1) {
328 		MII_CLR(PC_MgmtClk);
329 		DELAY(1);
330 		if (!ack) {
331 			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
332 				frame->mii_data |= i;
333 			DELAY(1);
334 		}
335 		MII_SET(PC_MgmtClk);
336 		DELAY(1);
337 	}
338 
339 fail:
340 	MII_CLR(PC_MgmtClk);
341 	DELAY(1);
342 	MII_SET(PC_MgmtClk);
343 	DELAY(1);
344 
345 	if (ack)
346 		return(1);
347 	return(0);
348 }
349 
350 /*
351  * Write to a PHY register through the MII.
352  */
353 static int
354 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
355 {
356 
357 	/*
358 	 * Set up frame for TX.
359 	 */
360 	frame->mii_stdelim = STGE_MII_STARTDELIM;
361 	frame->mii_opcode = STGE_MII_WRITEOP;
362 	frame->mii_turnaround = STGE_MII_TURNAROUND;
363 
364 	/*
365 	 * Turn on data output.
366 	 */
367 	MII_SET(PC_MgmtDir);
368 
369 	stge_mii_sync(sc);
370 
371 	stge_mii_send(sc, frame->mii_stdelim, 2);
372 	stge_mii_send(sc, frame->mii_opcode, 2);
373 	stge_mii_send(sc, frame->mii_phyaddr, 5);
374 	stge_mii_send(sc, frame->mii_regaddr, 5);
375 	stge_mii_send(sc, frame->mii_turnaround, 2);
376 	stge_mii_send(sc, frame->mii_data, 16);
377 
378 	/* Idle bit. */
379 	MII_SET(PC_MgmtClk);
380 	DELAY(1);
381 	MII_CLR(PC_MgmtClk);
382 	DELAY(1);
383 
384 	/*
385 	 * Turn off xmit.
386 	 */
387 	MII_CLR(PC_MgmtDir);
388 
389 	return(0);
390 }
391 
392 /*
393  * sc_miibus_readreg:	[mii interface function]
394  *
395  *	Read a PHY register on the MII of the TC9021.
396  */
397 static int
398 stge_miibus_readreg(device_t dev, int phy, int reg)
399 {
400 	struct stge_softc *sc;
401 	struct stge_mii_frame frame;
402 	int error;
403 
404 	sc = device_get_softc(dev);
405 
406 	if (reg == STGE_PhyCtrl) {
407 		/* XXX let the ip1000phy driver read the STGE_PhyCtrl register. */
408 		error = CSR_READ_1(sc, STGE_PhyCtrl);
409 		return (error);
410 	}
411 	bzero(&frame, sizeof(frame));
412 	frame.mii_phyaddr = phy;
413 	frame.mii_regaddr = reg;
414 
415 	error = stge_mii_readreg(sc, &frame);
416 
417 	if (error != 0) {
418 		/* Don't show errors for PHY probe request */
419 		if (reg != 1)
420 			device_printf(sc->sc_dev, "phy read fail\n");
421 		return (0);
422 	}
423 	return (frame.mii_data);
424 }
425 
426 /*
427  * stge_miibus_writereg:	[mii interface function]
428  *
429  *	Write a PHY register on the MII of the TC9021.
430  */
431 static int
432 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
433 {
434 	struct stge_softc *sc;
435 	struct stge_mii_frame frame;
436 	int error;
437 
438 	sc = device_get_softc(dev);
439 
440 	bzero(&frame, sizeof(frame));
441 	frame.mii_phyaddr = phy;
442 	frame.mii_regaddr = reg;
443 	frame.mii_data = val;
444 
445 	error = stge_mii_writereg(sc, &frame);
446 
447 	if (error != 0)
448 		device_printf(sc->sc_dev, "phy write fail\n");
449 	return (0);
450 }
451 
452 /*
453  * stge_miibus_statchg:	[mii interface function]
454  *
455  *	Callback from MII layer when media changes.
456  */
457 static void
458 stge_miibus_statchg(device_t dev)
459 {
460 	struct stge_softc *sc;
461 	struct mii_data *mii;
462 
463 	sc = device_get_softc(dev);
464 	mii = device_get_softc(sc->sc_miibus);
465 
466 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
467 		return;
468 
469 	sc->sc_MACCtrl = 0;
470 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
471 		sc->sc_MACCtrl |= MC_DuplexSelect;
472 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
473 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
474 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
475 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
476 
477 	stge_link(sc);
478 }
479 
480 /*
481  * stge_mediastatus:	[ifmedia interface function]
482  *
483  *	Get the current interface media status.
484  */
485 static void
486 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
487 {
488 	struct stge_softc *sc;
489 	struct mii_data *mii;
490 
491 	sc = ifp->if_softc;
492 	mii = device_get_softc(sc->sc_miibus);
493 
494 	mii_pollstat(mii);
495 	ifmr->ifm_status = mii->mii_media_status;
496 	ifmr->ifm_active = mii->mii_media_active;
497 }
498 
499 /*
500  * stge_mediachange:	[ifmedia interface function]
501  *
502  *	Set hardware to newly-selected media.
503  */
504 static int
505 stge_mediachange(struct ifnet *ifp)
506 {
507 	struct stge_softc *sc;
508 	struct mii_data *mii;
509 
510 	sc = ifp->if_softc;
511 	mii = device_get_softc(sc->sc_miibus);
512 	mii_mediachg(mii);
513 
514 	return (0);
515 }
516 
517 static int
518 stge_eeprom_wait(struct stge_softc *sc)
519 {
520 	int i;
521 
522 	for (i = 0; i < STGE_TIMEOUT; i++) {
523 		DELAY(1000);
524 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
525 			return (0);
526 	}
527 	return (1);
528 }
529 
530 /*
531  * stge_read_eeprom:
532  *
533  *	Read data from the serial EEPROM.
534  */
535 static void
536 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
537 {
538 
539 	if (stge_eeprom_wait(sc))
540 		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
541 
542 	CSR_WRITE_2(sc, STGE_EepromCtrl,
543 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
544 	if (stge_eeprom_wait(sc))
545 		device_printf(sc->sc_dev, "EEPROM read timed out\n");
546 	*data = CSR_READ_2(sc, STGE_EepromData);
547 }
548 
549 
550 static int
551 stge_probe(device_t dev)
552 {
553 	struct stge_product *sp;
554 	uint16_t vendor, devid;
555 
556 	vendor = pci_get_vendor(dev);
557 	devid = pci_get_device(dev);
558 
559 	for (sp = stge_products; sp->stge_name != NULL; sp++) {
560 		if (vendor == sp->stge_vendorid &&
561 		    devid == sp->stge_deviceid) {
562 			device_set_desc(dev, sp->stge_name);
563 			return (0);
564 		}
565 	}
566 
567 	return (ENXIO);
568 }
569 
570 static int
571 stge_attach(device_t dev)
572 {
573 	struct stge_softc *sc;
574 	struct ifnet *ifp;
575 	uint8_t enaddr[ETHER_ADDR_LEN];
576 	int error, i;
577 	uint16_t cmd;
578 	uint32_t val;
579 
580 	error = 0;
581 	sc = device_get_softc(dev);
582 	sc->sc_dev = dev;
583 	ifp = &sc->arpcom.ac_if;
584 
585 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
586 
587 	callout_init(&sc->sc_tick_ch);
588 
589 #ifndef BURN_BRIDGES
590 	/*
591 	 * Handle power management nonsense.
592 	 */
593 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
594 		uint32_t iobase, membase, irq;
595 
596 		/* Save important PCI config data. */
597 		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
598 		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
599 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
600 
601 		/* Reset the power state. */
602 		device_printf(dev, "chip is in D%d power mode "
603 			      "-- setting to D0\n", pci_get_powerstate(dev));
604 
605 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
606 
607 		/* Restore PCI config data. */
608 		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
609 		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
610 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
611 	}
612 #endif
613 
614 	/*
615 	 * Map the device.
616 	 */
617 	pci_enable_busmaster(dev);
618 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
619 	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
620 
621 	if ((val & 0x01) != 0) {
622 		sc->sc_res_rid = STGE_PCIR_LOMEM;
623 		sc->sc_res_type = SYS_RES_MEMORY;
624 	} else {
625 		sc->sc_res_rid = STGE_PCIR_LOIO;
626 		sc->sc_res_type = SYS_RES_IOPORT;
627 
628 		val = pci_read_config(dev, sc->sc_res_rid, 4);
629 		if ((val & 0x01) == 0) {
630 			device_printf(dev, "couldn't locate IO BAR\n");
631 			return ENXIO;
632 		}
633 	}
634 
635 	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
636 					    &sc->sc_res_rid, RF_ACTIVE);
637 	if (sc->sc_res == NULL) {
638 		device_printf(dev, "couldn't allocate resource\n");
639 		return ENXIO;
640 	}
641 	sc->sc_btag = rman_get_bustag(sc->sc_res);
642 	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);
643 
644 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
645 					    &sc->sc_irq_rid,
646 					    RF_ACTIVE | RF_SHAREABLE);
647 	if (sc->sc_irq == NULL) {
648 		device_printf(dev, "couldn't allocate IRQ\n");
649 		error = ENXIO;
650 		goto fail;
651 	}
652 
653 	sc->sc_rev = pci_get_revid(dev);
654 
655 	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
656 	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
657 
658 	sysctl_ctx_init(&sc->sc_sysctl_ctx);
659 	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
660 					     SYSCTL_STATIC_CHILDREN(_hw),
661 					     OID_AUTO,
662 					     device_get_nameunit(dev),
663 					     CTLFLAG_RD, 0, "");
664 	if (sc->sc_sysctl_tree == NULL) {
665 		device_printf(dev, "can't add sysctl node\n");
666 		error = ENXIO;
667 		goto fail;
668 	}
669 
670 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
671 	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
672 	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
673 	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
674 
675 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
676 	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
677 	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
678 	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
679 
680 	if ((error = stge_dma_alloc(sc)) != 0)
681 		goto fail;
682 
683 	/*
684 	 * Determine if we're copper or fiber.  It affects how we
685 	 * reset the card.
686 	 */
687 	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
688 		sc->sc_usefiber = 1;
689 	else
690 		sc->sc_usefiber = 0;
691 
692 	/* Load LED configuration from EEPROM. */
693 	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
694 
695 	/*
696 	 * Reset the chip to a known state.
697 	 */
698 	stge_reset(sc, STGE_RESET_FULL);
699 
700 	/*
701 	 * Reading the station address from the EEPROM doesn't seem
702 	 * to work, at least on my sample boards.  Instead, since
703 	 * the reset sequence does AutoInit, read it from the station
704 	 * address registers.  On the Sundance ST-1023, however, it can
705 	 * only be read from the EEPROM.
706 	 */
707 	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
708 		uint16_t v;
709 
710 		v = CSR_READ_2(sc, STGE_StationAddress0);
711 		enaddr[0] = v & 0xff;
712 		enaddr[1] = v >> 8;
713 		v = CSR_READ_2(sc, STGE_StationAddress1);
714 		enaddr[2] = v & 0xff;
715 		enaddr[3] = v >> 8;
716 		v = CSR_READ_2(sc, STGE_StationAddress2);
717 		enaddr[4] = v & 0xff;
718 		enaddr[5] = v >> 8;
719 		sc->sc_stge1023 = 0;
720 	} else {
721 		uint16_t myaddr[ETHER_ADDR_LEN / 2];
722 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
723 			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
724 			    &myaddr[i]);
725 			myaddr[i] = le16toh(myaddr[i]);
726 		}
727 		bcopy(myaddr, enaddr, sizeof(enaddr));
728 		sc->sc_stge1023 = 1;
729 	}
730 
731 	ifp->if_softc = sc;
732 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
733 	ifp->if_ioctl = stge_ioctl;
734 	ifp->if_start = stge_start;
735 	ifp->if_watchdog = stge_watchdog;
736 	ifp->if_init = stge_init;
737 #ifdef DEVICE_POLLING
738 	ifp->if_poll = stge_poll;
739 #endif
740 	ifp->if_mtu = ETHERMTU;
741 	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
742 	ifq_set_ready(&ifp->if_snd);
743 	/* Revision B3 and earlier chips have a checksum bug. */
744 	if (sc->sc_rev >= 0x0c) {
745 		ifp->if_hwassist = STGE_CSUM_FEATURES;
746 		ifp->if_capabilities = IFCAP_HWCSUM;
747 	} else {
748 		ifp->if_hwassist = 0;
749 		ifp->if_capabilities = 0;
750 	}
751 	ifp->if_capenable = ifp->if_capabilities;
752 
753 	/*
754 	 * Read some important bits from the PhyCtrl register.
755 	 */
756 	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
757 	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
758 
759 	/* Set up MII bus. */
760 	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
761 	    stge_mediastatus)) != 0) {
762 		device_printf(sc->sc_dev, "no PHY found!\n");
763 		goto fail;
764 	}
765 
766 	ether_ifattach(ifp, enaddr, NULL);
767 
768 	/* VLAN capability setup */
769 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
770 #ifdef notyet
771 	if (sc->sc_rev >= 0x0c)
772 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
773 #endif
774 	ifp->if_capenable = ifp->if_capabilities;
775 
776 	/*
777 	 * Tell the upper layer(s) we support long frames.
778 	 * Must appear after the call to ether_ifattach() because
779 	 * ether_ifattach() sets ifi_hdrlen to the default value.
780 	 */
781 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
782 
783 	/*
784 	 * The manual recommends disabling early transmit, so we
785 	 * do.  It's disabled anyway, if using IP checksumming,
786 	 * since the entire packet must be in the FIFO in order
787 	 * for the chip to perform the checksum.
788 	 */
789 	sc->sc_txthresh = 0x0fff;
790 
791 	/*
792 	 * Disable MWI if the PCI layer tells us to.
793 	 */
794 	sc->sc_DMACtrl = 0;
795 	if ((cmd & PCIM_CMD_MWRICEN) == 0)
796 		sc->sc_DMACtrl |= DMAC_MWIDisable;
797 
798 	/*
799 	 * Hookup IRQ
800 	 */
801 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
802 			       &sc->sc_ih, ifp->if_serializer);
803 	if (error != 0) {
804 		ether_ifdetach(ifp);
805 		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
806 		goto fail;
807 	}
808 
809 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq));
810 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
811 
812 fail:
813 	if (error != 0)
814 		stge_detach(dev);
815 
816 	return (error);
817 }
818 
819 static int
820 stge_detach(device_t dev)
821 {
822 	struct stge_softc *sc = device_get_softc(dev);
823 	struct ifnet *ifp = &sc->arpcom.ac_if;
824 
825 	if (device_is_attached(dev)) {
826 		lwkt_serialize_enter(ifp->if_serializer);
827 		/* XXX */
828 		sc->sc_detach = 1;
829 		stge_stop(sc);
830 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
831 		lwkt_serialize_exit(ifp->if_serializer);
832 
833 		ether_ifdetach(ifp);
834 	}
835 
836 	if (sc->sc_sysctl_tree != NULL)
837 		sysctl_ctx_free(&sc->sc_sysctl_ctx);
838 
839 	if (sc->sc_miibus != NULL)
840 		device_delete_child(dev, sc->sc_miibus);
841 	bus_generic_detach(dev);
842 
843 	stge_dma_free(sc);
844 
845 	if (sc->sc_irq != NULL) {
846 		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
847 				     sc->sc_irq);
848 	}
849 	if (sc->sc_res != NULL) {
850 		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
851 				     sc->sc_res);
852 	}
853 
854 	return (0);
855 }
856 
857 struct stge_dmamap_arg {
858 	bus_addr_t	stge_busaddr;
859 };
860 
861 static void
862 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
863 {
864 	struct stge_dmamap_arg *ctx;
865 
866 	if (error != 0)
867 		return;
868 
869 	KASSERT(nseg == 1, ("too many segments %d\n", nseg));
870 
871 	ctx = (struct stge_dmamap_arg *)arg;
872 	ctx->stge_busaddr = segs[0].ds_addr;
873 }
874 
875 struct stge_mbuf_dmamap_arg {
876 	int			nsegs;
877 	bus_dma_segment_t	*segs;
878 };
879 
880 static void
881 stge_mbuf_dmamap_cb(void *xarg, bus_dma_segment_t *segs, int nsegs,
882 		    bus_size_t mapsz __unused, int error)
883 {
884 	struct stge_mbuf_dmamap_arg *arg = xarg;
885 	int i;
886 
887 	if (error) {
888 		arg->nsegs = 0;
889 		return;
890 	}
891 
892 	KASSERT(nsegs <= arg->nsegs,
893 		("too many segments(%d), should be <= %d\n",
894 		 nsegs, arg->nsegs));
895 
896 	arg->nsegs = nsegs;
897 	for (i = 0; i < nsegs; ++i)
898 		arg->segs[i] = segs[i];
899 }
900 
901 static int
902 stge_dma_alloc(struct stge_softc *sc)
903 {
904 	struct stge_dmamap_arg ctx;
905 	struct stge_txdesc *txd;
906 	struct stge_rxdesc *rxd;
907 	int error, i;
908 
909 	/* create parent tag. */
910 	error = bus_dma_tag_create(NULL,	/* parent */
911 		    1, 0,			/* algnmnt, boundary */
912 		    STGE_DMA_MAXADDR,		/* lowaddr */
913 		    BUS_SPACE_MAXADDR,		/* highaddr */
914 		    NULL, NULL,			/* filter, filterarg */
915 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
916 		    0,				/* nsegments */
917 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
918 		    0,				/* flags */
919 		    &sc->sc_cdata.stge_parent_tag);
920 	if (error != 0) {
921 		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
922 		goto fail;
923 	}
924 	/* create tag for Tx ring. */
925 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
926 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
927 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
928 		    BUS_SPACE_MAXADDR,		/* highaddr */
929 		    NULL, NULL,			/* filter, filterarg */
930 		    STGE_TX_RING_SZ,		/* maxsize */
931 		    1,				/* nsegments */
932 		    STGE_TX_RING_SZ,		/* maxsegsize */
933 		    0,				/* flags */
934 		    &sc->sc_cdata.stge_tx_ring_tag);
935 	if (error != 0) {
936 		device_printf(sc->sc_dev,
937 		    "failed to allocate Tx ring DMA tag\n");
938 		goto fail;
939 	}
940 
941 	/* create tag for Rx ring. */
942 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
943 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
944 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
945 		    BUS_SPACE_MAXADDR,		/* highaddr */
946 		    NULL, NULL,			/* filter, filterarg */
947 		    STGE_RX_RING_SZ,		/* maxsize */
948 		    1,				/* nsegments */
949 		    STGE_RX_RING_SZ,		/* maxsegsize */
950 		    0,				/* flags */
951 		    &sc->sc_cdata.stge_rx_ring_tag);
952 	if (error != 0) {
953 		device_printf(sc->sc_dev,
954 		    "failed to allocate Rx ring DMA tag\n");
955 		goto fail;
956 	}
957 
958 	/* create tag for Tx buffers. */
959 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
960 		    1, 0,			/* algnmnt, boundary */
961 		    BUS_SPACE_MAXADDR,		/* lowaddr */
962 		    BUS_SPACE_MAXADDR,		/* highaddr */
963 		    NULL, NULL,			/* filter, filterarg */
964 		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
965 		    STGE_MAXTXSEGS,		/* nsegments */
966 		    MCLBYTES,			/* maxsegsize */
967 		    0,				/* flags */
968 		    &sc->sc_cdata.stge_tx_tag);
969 	if (error != 0) {
970 		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
971 		goto fail;
972 	}
973 
974 	/* create tag for Rx buffers. */
975 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
976 		    1, 0,			/* algnmnt, boundary */
977 		    BUS_SPACE_MAXADDR,		/* lowaddr */
978 		    BUS_SPACE_MAXADDR,		/* highaddr */
979 		    NULL, NULL,			/* filter, filterarg */
980 		    MCLBYTES,			/* maxsize */
981 		    1,				/* nsegments */
982 		    MCLBYTES,			/* maxsegsize */
983 		    0,				/* flags */
984 		    &sc->sc_cdata.stge_rx_tag);
985 	if (error != 0) {
986 		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
987 		goto fail;
988 	}
989 
990 	/* allocate DMA'able memory and load the DMA map for Tx ring. */
991 	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
992 	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
993 	    &sc->sc_cdata.stge_tx_ring_map);
994 	if (error != 0) {
995 		device_printf(sc->sc_dev,
996 		    "failed to allocate DMA'able memory for Tx ring\n");
997 		goto fail;
998 	}
999 
1000 	ctx.stge_busaddr = 0;
1001 	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
1002 	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
1003 	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1004 	if (error != 0 || ctx.stge_busaddr == 0) {
1005 		device_printf(sc->sc_dev,
1006 		    "failed to load DMA'able memory for Tx ring\n");
1007 		goto fail;
1008 	}
1009 	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
1010 
1011 	/* allocate DMA'able memory and load the DMA map for Rx ring. */
1012 	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
1013 	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1014 	    &sc->sc_cdata.stge_rx_ring_map);
1015 	if (error != 0) {
1016 		device_printf(sc->sc_dev,
1017 		    "failed to allocate DMA'able memory for Rx ring\n");
1018 		goto fail;
1019 	}
1020 
1021 	ctx.stge_busaddr = 0;
1022 	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
1023 	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
1024 	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1025 	if (error != 0 || ctx.stge_busaddr == 0) {
1026 		device_printf(sc->sc_dev,
1027 		    "failed to load DMA'able memory for Rx ring\n");
1028 		goto fail;
1029 	}
1030 	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
1031 
1032 	/* create DMA maps for Tx buffers. */
1033 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
1034 		txd = &sc->sc_cdata.stge_txdesc[i];
1035 		txd->tx_m = NULL;
1036 		txd->tx_dmamap = 0;
1037 		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
1038 		    &txd->tx_dmamap);
1039 		if (error != 0) {
1040 			device_printf(sc->sc_dev,
1041 			    "failed to create Tx dmamap\n");
1042 			goto fail;
1043 		}
1044 	}
1045 	/* create DMA maps for Rx buffers. */
1046 	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1047 	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
1048 		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
1049 		goto fail;
1050 	}
1051 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
1052 		rxd = &sc->sc_cdata.stge_rxdesc[i];
1053 		rxd->rx_m = NULL;
1054 		rxd->rx_dmamap = 0;
1055 		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1056 		    &rxd->rx_dmamap);
1057 		if (error != 0) {
1058 			device_printf(sc->sc_dev,
1059 			    "failed to create Rx dmamap\n");
1060 			goto fail;
1061 		}
1062 	}
1063 
1064 fail:
1065 	return (error);
1066 }
1067 
1068 static void
1069 stge_dma_free(struct stge_softc *sc)
1070 {
1071 	struct stge_txdesc *txd;
1072 	struct stge_rxdesc *rxd;
1073 	int i;
1074 
1075 	/* Tx ring */
1076 	if (sc->sc_cdata.stge_tx_ring_tag) {
1077 		if (sc->sc_cdata.stge_tx_ring_map)
1078 			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
1079 			    sc->sc_cdata.stge_tx_ring_map);
1080 		if (sc->sc_cdata.stge_tx_ring_map &&
1081 		    sc->sc_rdata.stge_tx_ring)
1082 			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
1083 			    sc->sc_rdata.stge_tx_ring,
1084 			    sc->sc_cdata.stge_tx_ring_map);
1085 		sc->sc_rdata.stge_tx_ring = NULL;
1086 		sc->sc_cdata.stge_tx_ring_map = 0;
1087 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
1088 		sc->sc_cdata.stge_tx_ring_tag = NULL;
1089 	}
1090 	/* Rx ring */
1091 	if (sc->sc_cdata.stge_rx_ring_tag) {
1092 		if (sc->sc_cdata.stge_rx_ring_map)
1093 			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
1094 			    sc->sc_cdata.stge_rx_ring_map);
1095 		if (sc->sc_cdata.stge_rx_ring_map &&
1096 		    sc->sc_rdata.stge_rx_ring)
1097 			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
1098 			    sc->sc_rdata.stge_rx_ring,
1099 			    sc->sc_cdata.stge_rx_ring_map);
1100 		sc->sc_rdata.stge_rx_ring = NULL;
1101 		sc->sc_cdata.stge_rx_ring_map = 0;
1102 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
1103 		sc->sc_cdata.stge_rx_ring_tag = NULL;
1104 	}
1105 	/* Tx buffers */
1106 	if (sc->sc_cdata.stge_tx_tag) {
1107 		for (i = 0; i < STGE_TX_RING_CNT; i++) {
1108 			txd = &sc->sc_cdata.stge_txdesc[i];
1109 			if (txd->tx_dmamap) {
1110 				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
1111 				    txd->tx_dmamap);
1112 				txd->tx_dmamap = 0;
1113 			}
1114 		}
1115 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
1116 		sc->sc_cdata.stge_tx_tag = NULL;
1117 	}
1118 	/* Rx buffers */
1119 	if (sc->sc_cdata.stge_rx_tag) {
1120 		for (i = 0; i < STGE_RX_RING_CNT; i++) {
1121 			rxd = &sc->sc_cdata.stge_rxdesc[i];
1122 			if (rxd->rx_dmamap) {
1123 				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1124 				    rxd->rx_dmamap);
1125 				rxd->rx_dmamap = 0;
1126 			}
1127 		}
1128 		if (sc->sc_cdata.stge_rx_sparemap) {
1129 			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1130 			    sc->sc_cdata.stge_rx_sparemap);
1131 			sc->sc_cdata.stge_rx_sparemap = 0;
1132 		}
1133 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
1134 		sc->sc_cdata.stge_rx_tag = NULL;
1135 	}
1136 
1137 	if (sc->sc_cdata.stge_parent_tag) {
1138 		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
1139 		sc->sc_cdata.stge_parent_tag = NULL;
1140 	}
1141 }
1142 
1143 /*
1144  * stge_shutdown:
1145  *
1146  *	Make sure the interface is stopped at reboot time.
1147  */
1148 static void
1149 stge_shutdown(device_t dev)
1150 {
1151 	struct stge_softc *sc = device_get_softc(dev);
1152 	struct ifnet *ifp = &sc->arpcom.ac_if;
1153 
1154 	lwkt_serialize_enter(ifp->if_serializer);
1155 	stge_stop(sc);
1156 	lwkt_serialize_exit(ifp->if_serializer);
1157 }
1158 
1159 static int
1160 stge_suspend(device_t dev)
1161 {
1162 	struct stge_softc *sc = device_get_softc(dev);
1163 	struct ifnet *ifp = &sc->arpcom.ac_if;
1164 
1165 	lwkt_serialize_enter(ifp->if_serializer);
1166 	stge_stop(sc);
1167 	sc->sc_suspended = 1;
1168 	lwkt_serialize_exit(ifp->if_serializer);
1169 
1170 	return (0);
1171 }
1172 
1173 static int
1174 stge_resume(device_t dev)
1175 {
1176 	struct stge_softc *sc = device_get_softc(dev);
1177 	struct ifnet *ifp = &sc->arpcom.ac_if;
1178 
1179 	lwkt_serialize_enter(ifp->if_serializer);
1180 	if (ifp->if_flags & IFF_UP)
1181 		stge_init(sc);
1182 	sc->sc_suspended = 0;
1183 	lwkt_serialize_exit(ifp->if_serializer);
1184 
1185 	return (0);
1186 }
1187 
1188 static void
1189 stge_dma_wait(struct stge_softc *sc)
1190 {
1191 	int i;
1192 
1193 	for (i = 0; i < STGE_TIMEOUT; i++) {
1194 		DELAY(2);
1195 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1196 			break;
1197 	}
1198 
1199 	if (i == STGE_TIMEOUT)
1200 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1201 }
1202 
1203 static int
1204 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1205 {
1206 	struct stge_txdesc *txd;
1207 	struct stge_tfd *tfd;
1208 	struct mbuf *m;
1209 	struct stge_mbuf_dmamap_arg arg;
1210 	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1211 	int error, i, si;
1212 	uint64_t csum_flags, tfc;
1213 
1214 	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1215 		return (ENOBUFS);
1216 
1217 	arg.nsegs = STGE_MAXTXSEGS;
1218 	arg.segs = txsegs;
1219 	error =  bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
1220 				      txd->tx_dmamap, *m_head,
1221 				      stge_mbuf_dmamap_cb, &arg,
1222 				      BUS_DMA_NOWAIT);
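	/*
	 * The chain needs more scatter segments than a Tx descriptor
	 * holds; linearize it with m_defrag() and retry the load once.
	 */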
1223 	if (error == EFBIG) {
1224 		m = m_defrag(*m_head, MB_DONTWAIT);
1225 		if (m == NULL) {
1226 			m_freem(*m_head);
1227 			*m_head = NULL;
1228 			return (ENOMEM);
1229 		}
1230 		*m_head = m;
1231 		error =  bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
1232 					      txd->tx_dmamap, *m_head,
1233 					      stge_mbuf_dmamap_cb, &arg,
1234 					      BUS_DMA_NOWAIT);
1235 		if (error != 0) {
1236 			m_freem(*m_head);
1237 			*m_head = NULL;
1238 			return (error);
1239 		}
1240 	} else if (error != 0)
1241 		return (error);
1242 	if (arg.nsegs == 0) {
1243 		m_freem(*m_head);
1244 		*m_head = NULL;
1245 		return (EIO);
1246 	}
1247 
1248 	m = *m_head;
1249 	csum_flags = 0;
1250 	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1251 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1252 			csum_flags |= TFD_IPChecksumEnable;
1253 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1254 			csum_flags |= TFD_TCPChecksumEnable;
1255 		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1256 			csum_flags |= TFD_UDPChecksumEnable;
1257 	}
1258 
1259 	si = sc->sc_cdata.stge_tx_prod;
1260 	tfd = &sc->sc_rdata.stge_tx_ring[si];
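	/*
	 * Fill the fragment list: each 64-bit fragment word packs one
	 * segment's DMA address and length via FRAG_ADDR()/FRAG_LEN().
	 */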
1261 	for (i = 0; i < arg.nsegs; i++) {
1262 		tfd->tfd_frags[i].frag_word0 =
1263 		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1264 		    FRAG_LEN(txsegs[i].ds_len));
1265 	}
1266 	sc->sc_cdata.stge_tx_cnt++;
1267 
1268 	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1269 	    TFD_FragCount(arg.nsegs) | csum_flags;
1270 	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1271 		tfc |= TFD_TxDMAIndicate;
1272 
1273 	/* Update producer index. */
1274 	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1275 
1276 	/* Check if we have a VLAN tag to insert. */
1277 	if (m->m_flags & M_VLANTAG)
1278 		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vlantag);
1279 	tfd->tfd_control = htole64(tfc);
1280 
1281 	/* Update Tx Queue. */
1282 	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1283 	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1284 	txd->tx_m = m;
1285 
1286 	/* Sync descriptors. */
1287 	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1288 	    BUS_DMASYNC_PREWRITE);
1289 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1290 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1291 
1292 	return (0);
1293 }
1294 
1295 /*
1296  * stge_start:		[ifnet interface function]
1297  *
1298  *	Start packet transmission on the interface.
1299  */
1300 static void
1301 stge_start(struct ifnet *ifp)
1302 {
1303 	struct stge_softc *sc;
1304 	struct mbuf *m_head;
1305 	int enq;
1306 
1307 	sc = ifp->if_softc;
1308 
1309 	ASSERT_SERIALIZED(ifp->if_serializer);
1310 
1311 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
1312 	    IFF_RUNNING)
1313 		return;
1314 
1315 	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
1316 		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1317 			ifp->if_flags |= IFF_OACTIVE;
1318 			break;
1319 		}
1320 
1321 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1322 		if (m_head == NULL)
1323 			break;
1324 		/*
1325 		 * Pack the data into the transmit ring. If we
1326 		 * don't have room, set the OACTIVE flag and wait
1327 		 * for the NIC to drain the ring.
1328 		 */
1329 		if (stge_encap(sc, &m_head)) {
1330 			if (m_head != NULL) {
1331 				m_freem(m_head);
1332 				ifp->if_flags |= IFF_OACTIVE;
1333 			}
1334 			break;
1335 		}
1336 
1337 		enq++;
1338 		/*
1339 		 * If there's a BPF listener, bounce a copy of this frame
1340 		 * to him.
1341 		 */
1342 		ETHER_BPF_MTAP(ifp, m_head);
1343 	}
1344 
1345 	if (enq > 0) {
1346 		/* Transmit */
1347 		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1348 
1349 		/* Set a timeout in case the chip goes out to lunch. */
1350 		ifp->if_timer = 5;
1351 	}
1352 }
1353 
1354 /*
1355  * stge_watchdog:	[ifnet interface function]
1356  *
1357  *	Watchdog timer handler.
1358  */
1359 static void
1360 stge_watchdog(struct ifnet *ifp)
1361 {
1362 	ASSERT_SERIALIZED(ifp->if_serializer);
1363 
1364 	if_printf(ifp, "device timeout\n");
1365 	ifp->if_oerrors++;
1366 	stge_init(ifp->if_softc);
1367 }
1368 
1369 /*
1370  * stge_ioctl:		[ifnet interface function]
1371  *
1372  *	Handle control requests from the operator.
1373  */
1374 static int
1375 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1376 {
1377 	struct stge_softc *sc;
1378 	struct ifreq *ifr;
1379 	struct mii_data *mii;
1380 	int error, mask;
1381 
1382 	ASSERT_SERIALIZED(ifp->if_serializer);
1383 
1384 	sc = ifp->if_softc;
1385 	ifr = (struct ifreq *)data;
1386 	error = 0;
1387 	switch (cmd) {
1388 	case SIOCSIFMTU:
1389 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1390 			error = EINVAL;
1391 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1392 			ifp->if_mtu = ifr->ifr_mtu;
1393 			stge_init(sc);
1394 		}
1395 		break;
1396 	case SIOCSIFFLAGS:
1397 		if ((ifp->if_flags & IFF_UP) != 0) {
1398 			if ((ifp->if_flags & IFF_RUNNING) != 0) {
1399 				if (((ifp->if_flags ^ sc->sc_if_flags)
1400 				    & IFF_PROMISC) != 0)
1401 					stge_set_filter(sc);
1402 			} else {
1403 				if (sc->sc_detach == 0)
1404 					stge_init(sc);
1405 			}
1406 		} else {
1407 			if ((ifp->if_flags & IFF_RUNNING) != 0)
1408 				stge_stop(sc);
1409 		}
1410 		sc->sc_if_flags = ifp->if_flags;
1411 		break;
1412 	case SIOCADDMULTI:
1413 	case SIOCDELMULTI:
1414 		if ((ifp->if_flags & IFF_RUNNING) != 0)
1415 			stge_set_multi(sc);
1416 		break;
1417 	case SIOCSIFMEDIA:
1418 	case SIOCGIFMEDIA:
1419 		mii = device_get_softc(sc->sc_miibus);
1420 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1421 		break;
1422 	case SIOCSIFCAP:
1423 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
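		/* The XOR leaves only the capability bits being toggled. */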
1424 		if ((mask & IFCAP_HWCSUM) != 0) {
1425 			ifp->if_capenable ^= IFCAP_HWCSUM;
1426 			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1427 			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1428 				ifp->if_hwassist = STGE_CSUM_FEATURES;
1429 			else
1430 				ifp->if_hwassist = 0;
1431 		}
1432 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1433 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1434 			if (ifp->if_flags & IFF_RUNNING)
1435 				stge_vlan_setup(sc);
1436 		}
1437 #if 0
1438 		VLAN_CAPABILITIES(ifp);
1439 #endif
1440 		break;
1441 	default:
1442 		error = ether_ioctl(ifp, cmd, data);
1443 		break;
1444 	}
1445 
1446 	return (error);
1447 }
1448 
1449 static void
1450 stge_link(struct stge_softc *sc)
1451 {
1452 	uint32_t v, ac;
1453 	int i;
1454 
1455 	/*
1456 	 * Update STGE_MACCtrl register depending on link status.
1457 	 * (duplex, flow control etc)
1458 	 */
1459 	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1460 	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1461 	v |= sc->sc_MACCtrl;
1462 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
1463 	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1464 		/* Duplex setting changed, reset Tx/Rx functions. */
1465 		ac = CSR_READ_4(sc, STGE_AsicCtrl);
1466 		ac |= AC_TxReset | AC_RxReset;
1467 		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1468 		for (i = 0; i < STGE_TIMEOUT; i++) {
1469 			DELAY(100);
1470 			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1471 				break;
1472 		}
1473 		if (i == STGE_TIMEOUT)
1474 			device_printf(sc->sc_dev, "reset failed to complete\n");
1475 	}
1476 }
1477 
1478 static __inline int
1479 stge_tx_error(struct stge_softc *sc)
1480 {
1481 	uint32_t txstat;
1482 	int error;
1483 
1484 	for (error = 0;;) {
1485 		txstat = CSR_READ_4(sc, STGE_TxStatus);
1486 		if ((txstat & TS_TxComplete) == 0)
1487 			break;
1488 		/* Tx underrun */
1489 		if ((txstat & TS_TxUnderrun) != 0) {
1490 			/*
1491 			 * XXX
1492 			 * There should be a better way to recover from a
1493 			 * Tx underrun than a full reset.
1494 			 */
1495 			if (sc->sc_nerr++ < STGE_MAXERR)
1496 				device_printf(sc->sc_dev, "Tx underrun, "
1497 				    "resetting...\n");
1498 			if (sc->sc_nerr == STGE_MAXERR)
1499 				device_printf(sc->sc_dev, "too many errors; "
1500 				    "not reporting any more\n");
1501 			error = -1;
1502 			break;
1503 		}
1504 		/* Maximum/Late collisions, Re-enable Tx MAC. */
1505 		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1506 			CSR_WRITE_4(sc, STGE_MACCtrl,
1507 			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1508 			    MC_TxEnable);
1509 	}
1510 
1511 	return (error);
1512 }
1513 
1514 /*
1515  * stge_intr:
1516  *
1517  *	Interrupt service routine.
1518  */
1519 static void
1520 stge_intr(void *arg)
1521 {
1522 	struct stge_softc *sc = arg;
1523 	struct ifnet *ifp = &sc->arpcom.ac_if;
1524 	int reinit;
1525 	uint16_t status;
1526 
1527 	ASSERT_SERIALIZED(ifp->if_serializer);
1528 
1529 	status = CSR_READ_2(sc, STGE_IntStatus);
1530 	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1531 		return;
1532 
1533 	/* Disable interrupts. */
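	/*
	 * Reading IntStatusAck acknowledges and clears the pending
	 * interrupt sources; the enabled sources are re-armed via
	 * STGE_IntEnable after the loop below has serviced them.
	 */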
1534 	for (reinit = 0;;) {
1535 		status = CSR_READ_2(sc, STGE_IntStatusAck);
1536 		status &= sc->sc_IntEnable;
1537 		if (status == 0)
1538 			break;
1539 		/* Host interface errors. */
1540 		if ((status & IS_HostError) != 0) {
1541 			device_printf(sc->sc_dev,
1542 			    "Host interface error, resetting...\n");
1543 			reinit = 1;
1544 			goto force_init;
1545 		}
1546 
1547 		/* Receive interrupts. */
1548 		if ((status & IS_RxDMAComplete) != 0) {
1549 			stge_rxeof(sc, -1);
1550 			if ((status & IS_RFDListEnd) != 0)
1551 				CSR_WRITE_4(sc, STGE_DMACtrl,
1552 				    DMAC_RxDMAPollNow);
1553 		}
1554 
1555 		/* Transmit interrupts. */
1556 		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1557 			stge_txeof(sc);
1558 
1559 		/* Transmission errors. */
1560 		if ((status & IS_TxComplete) != 0) {
1561 			if ((reinit = stge_tx_error(sc)) != 0)
1562 				break;
1563 		}
1564 	}
1565 
1566 force_init:
1567 	if (reinit != 0)
1568 		stge_init(sc);
1569 
1570 	/* Re-enable interrupts. */
1571 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1572 
1573 	/* Try to get more packets going. */
1574 	if (!ifq_is_empty(&ifp->if_snd))
1575 		if_devstart(ifp);
1576 }
1577 
1578 /*
1579  * stge_txeof:
1580  *
1581  *	Helper; handle transmit interrupts.
1582  */
1583 static void
1584 stge_txeof(struct stge_softc *sc)
1585 {
1586 	struct ifnet *ifp = &sc->arpcom.ac_if;
1587 	struct stge_txdesc *txd;
1588 	uint64_t control;
1589 	int cons;
1590 
1591 	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1592 	if (txd == NULL)
1593 		return;
1594 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1595 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1596 
1597 	/*
1598 	 * Go through our Tx list and free mbufs for those
1599 	 * frames which have been transmitted.
1600 	 */
1601 	for (cons = sc->sc_cdata.stge_tx_cons;;
1602 	    cons = (cons + 1) % STGE_TX_RING_CNT) {
1603 		if (sc->sc_cdata.stge_tx_cnt <= 0)
1604 			break;
1605 		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1606 		if ((control & TFD_TFDDone) == 0)
1607 			break;
1608 		sc->sc_cdata.stge_tx_cnt--;
1609 		ifp->if_flags &= ~IFF_OACTIVE;
1610 
1611 		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1612 		    BUS_DMASYNC_POSTWRITE);
1613 		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1614 
1615 		/* The output counter is updated from the statistics registers. */
1616 		m_freem(txd->tx_m);
1617 		txd->tx_m = NULL;
1618 		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1619 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1620 		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1621 	}
1622 	sc->sc_cdata.stge_tx_cons = cons;
1623 	if (sc->sc_cdata.stge_tx_cnt == 0)
1624 		ifp->if_timer = 0;
1625 
1626 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1627 	    sc->sc_cdata.stge_tx_ring_map,
1628 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1629 }
1630 
1631 static __inline void
1632 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1633 {
1634 	struct stge_rfd *rfd;
1635 
1636 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
1637 	rfd->rfd_status = 0;
1638 }
1639 
1640 #ifndef __i386__
1641 /*
1642  * The TC9021's DMA engine seems to have alignment restrictions on
1643  * DMA scatter operations.  The first DMA segment has no address
1644  * alignment restrictions, but the rest should be aligned on a 4(?)
1645  * byte boundary; otherwise it would corrupt random memory.  Since we
1646  * don't know in advance which buffer supplies the first segment, we
1647  * simply don't align at all.  To avoid copying an entire frame just
1648  * to fix alignment, we allocate a new mbuf, copy the Ethernet header
1649  * into it, and prepend it to the existing mbuf chain.  (i386 allows
1650  * unaligned accesses, so this fixup is compiled out there.)
1651  */
1652 static __inline struct mbuf *
1653 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1654 {
1655 	struct mbuf *n;
1656 
1657 	n = NULL;
1658 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1659 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1660 		m->m_data += ETHER_HDR_LEN;
1661 		n = m;
1662 	} else {
1663 		MGETHDR(n, MB_DONTWAIT, MT_DATA);
1664 		if (n != NULL) {
1665 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1666 			m->m_data += ETHER_HDR_LEN;
1667 			m->m_len -= ETHER_HDR_LEN;
1668 			n->m_len = ETHER_HDR_LEN;
1669 			M_MOVE_PKTHDR(n, m);
1670 			n->m_next = m;
1671 		} else
1672 			m_freem(m);
1673 	}
1674 
1675 	return (n);
1676 }
1677 #endif
1678 
1679 /*
1680  * stge_rxeof:
1681  *
1682  *	Helper; handle receive interrupts.
1683  */
1684 static void
1685 stge_rxeof(struct stge_softc *sc, int count)
1686 {
1687 	struct ifnet *ifp = &sc->arpcom.ac_if;
1688 	struct stge_rxdesc *rxd;
1689 	struct mbuf *mp, *m;
1690 	uint64_t status64;
1691 	uint32_t status;
1692 	int cons, prog;
1693 
1694 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1695 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1696 
1697 	prog = 0;
1698 	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1699 	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1700 #ifdef DEVICE_POLLING
1701 		if (count >= 0 && count-- == 0)
1702 			break;
1703 #endif
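		/* A negative count (the interrupt path) imposes no limit. */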
1704 
1705 		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1706 		status = RFD_RxStatus(status64);
1707 		if ((status & RFD_RFDDone) == 0)
1708 			break;
1709 
1710 		prog++;
1711 		rxd = &sc->sc_cdata.stge_rxdesc[cons];
1712 		mp = rxd->rx_m;
1713 
1714 		/*
1715 		 * If the packet had an error, drop it.  Note we count
1716 		 * the error later in the periodic stats update.
1717 		 */
1718 		if ((status & RFD_FrameEnd) != 0 && (status &
1719 		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1720 		    RFD_RxAlignmentError | RFD_RxFCSError |
1721 		    RFD_RxLengthError)) != 0) {
1722 			stge_discard_rxbuf(sc, cons);
1723 			if (sc->sc_cdata.stge_rxhead != NULL) {
1724 				m_freem(sc->sc_cdata.stge_rxhead);
1725 				STGE_RXCHAIN_RESET(sc);
1726 			}
1727 			continue;
1728 		}
1729 		/*
1730 		 * Add a new receive buffer to the ring.
1731 		 */
1732 		if (stge_newbuf(sc, cons, 0) != 0) {
1733 			ifp->if_iqdrops++;
1734 			stge_discard_rxbuf(sc, cons);
1735 			if (sc->sc_cdata.stge_rxhead != NULL) {
1736 				m_freem(sc->sc_cdata.stge_rxhead);
1737 				STGE_RXCHAIN_RESET(sc);
1738 			}
1739 			continue;
1740 		}
1741 
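		/*
		 * A frame may span several RFDs; only the descriptor
		 * flagged FrameEnd carries the total DMA frame length,
		 * from which this final fragment's length is derived.
		 */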
1742 		if ((status & RFD_FrameEnd) != 0)
1743 			mp->m_len = RFD_RxDMAFrameLen(status) -
1744 			    sc->sc_cdata.stge_rxlen;
1745 		sc->sc_cdata.stge_rxlen += mp->m_len;
1746 
1747 		/* Chain mbufs. */
1748 		if (sc->sc_cdata.stge_rxhead == NULL) {
1749 			sc->sc_cdata.stge_rxhead = mp;
1750 			sc->sc_cdata.stge_rxtail = mp;
1751 		} else {
1752 			mp->m_flags &= ~M_PKTHDR;
1753 			sc->sc_cdata.stge_rxtail->m_next = mp;
1754 			sc->sc_cdata.stge_rxtail = mp;
1755 		}
1756 
1757 		if ((status & RFD_FrameEnd) != 0) {
1758 			m = sc->sc_cdata.stge_rxhead;
1759 			m->m_pkthdr.rcvif = ifp;
1760 			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1761 
1762 			if (m->m_pkthdr.len > sc->sc_if_framesize) {
1763 				m_freem(m);
1764 				STGE_RXCHAIN_RESET(sc);
1765 				continue;
1766 			}
1767 			/*
1768 			 * Set the incoming checksum information for
1769 			 * the packet.
1770 			 */
1771 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1772 				if ((status & RFD_IPDetected) != 0) {
1773 					m->m_pkthdr.csum_flags |=
1774 						CSUM_IP_CHECKED;
1775 					if ((status & RFD_IPError) == 0)
1776 						m->m_pkthdr.csum_flags |=
1777 						    CSUM_IP_VALID;
1778 				}
1779 				if (((status & RFD_TCPDetected) != 0 &&
1780 				    (status & RFD_TCPError) == 0) ||
1781 				    ((status & RFD_UDPDetected) != 0 &&
1782 				    (status & RFD_UDPError) == 0)) {
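					/*
					 * The hardware verified the full
					 * TCP/UDP checksum; CSUM_PSEUDO_HDR
					 * with csum_data 0xffff reports
					 * that to the stack.
					 */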
1783 					m->m_pkthdr.csum_flags |=
1784 					    (CSUM_DATA_VALID |
1785 					     CSUM_PSEUDO_HDR |
1786 					     CSUM_FRAG_NOT_CHECKED);
1787 					m->m_pkthdr.csum_data = 0xffff;
1788 				}
1789 			}
1790 
1791 #ifndef __i386__
1792 			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1793 				if ((m = stge_fixup_rx(sc, m)) == NULL) {
1794 					STGE_RXCHAIN_RESET(sc);
1795 					continue;
1796 				}
1797 			}
1798 #endif
1799 
1800 			/* Check for VLAN tagged packets. */
1801 			if ((status & RFD_VLANDetected) != 0 &&
1802 			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1803 				m->m_flags |= M_VLANTAG;
1804 				m->m_pkthdr.ether_vlantag = RFD_TCI(status64);
1805 			}
1806 			/* Pass it on. */
1807 			ifp->if_input(ifp, m);
1808 
1809 			STGE_RXCHAIN_RESET(sc);
1810 		}
1811 	}
1812 
1813 	if (prog > 0) {
1814 		/* Update the consumer index. */
1815 		sc->sc_cdata.stge_rx_cons = cons;
1816 		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1817 		    sc->sc_cdata.stge_rx_ring_map,
1818 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1819 	}
1820 }
1821 
1822 #ifdef DEVICE_POLLING
1823 static void
1824 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1825 {
1826 	struct stge_softc *sc;
1827 	uint16_t status;
1828 
1829 	sc = ifp->if_softc;
1830 
1831 	switch (cmd) {
1832 	case POLL_REGISTER:
1833 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
1834 		break;
1835 	case POLL_DEREGISTER:
1836 		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1837 		break;
1838 	case POLL_ONLY:
1839 	case POLL_AND_CHECK_STATUS:
1840 		sc->sc_cdata.stge_rxcycles = count;
1841 		stge_rxeof(sc, count);
1842 		stge_txeof(sc);
1843 
1844 		if (cmd == POLL_AND_CHECK_STATUS) {
1845 			status = CSR_READ_2(sc, STGE_IntStatus);
1846 			status &= sc->sc_IntEnable;
1847 			if (status != 0) {
1848 				if (status & IS_HostError) {
1849 					device_printf(sc->sc_dev,
1850 					"Host interface error, "
1851 					"resetting...\n");
1852 					stge_init(sc);
1853 				}
1854 				if ((status & IS_TxComplete) != 0 &&
1855 				    stge_tx_error(sc) != 0)
1856 					stge_init(sc);
1857 			}
1858 
1859 		}
1860 
1861 		if (!ifq_is_empty(&ifp->if_snd))
1862 			if_devstart(ifp);
1863 	}
1864 }
1865 #endif	/* DEVICE_POLLING */
1866 
1867 /*
1868  * stge_tick:
1869  *
1870  *	One second timer, used to tick the MII.
1871  */
1872 static void
1873 stge_tick(void *arg)
1874 {
1875 	struct stge_softc *sc = arg;
1876 	struct ifnet *ifp = &sc->arpcom.ac_if;
1877 	struct mii_data *mii;
1878 
1879 	lwkt_serialize_enter(ifp->if_serializer);
1880 
1881 	mii = device_get_softc(sc->sc_miibus);
1882 	mii_tick(mii);
1883 
1884 	/* Update statistics counters. */
1885 	stge_stats_update(sc);
1886 
1887 	/*
1888 	 * Reclaim any pending Tx descriptors to release mbufs in a
1889 	 * timely manner as we don't generate Tx completion interrupts
1890 	 * for every frame. This limits the delay to a maximum of one
1891 	 * second.
1892 	 */
1893 	if (sc->sc_cdata.stge_tx_cnt != 0)
1894 		stge_txeof(sc);
1895 
1896 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1897 
1898 	lwkt_serialize_exit(ifp->if_serializer);
1899 }
1900 
1901 /*
1902  * stge_stats_update:
1903  *
1904  *	Read the TC9021 statistics counters.
1905  */
1906 static void
1907 stge_stats_update(struct stge_softc *sc)
1908 {
1909 	struct ifnet *ifp = &sc->arpcom.ac_if;
1910 
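	/*
	 * The statistics counters appear to clear when read; the octet
	 * counters are read here only to reset them, not accumulated.
	 */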
1911 	CSR_READ_4(sc, STGE_OctetRcvOk);
1912 
1913 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1914 
1915 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1916 
1917 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1918 
1919 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1920 
1921 	ifp->if_collisions +=
1922 	    CSR_READ_4(sc, STGE_LateCollisions) +
1923 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1924 	    CSR_READ_4(sc, STGE_SingleColFrames);
1925 
1926 	ifp->if_oerrors +=
1927 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1928 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1929 }
1930 
1931 /*
1932  * stge_reset:
1933  *
1934  *	Perform a soft reset on the TC9021.
1935  */
1936 static void
1937 stge_reset(struct stge_softc *sc, uint32_t how)
1938 {
1939 	uint32_t ac;
1940 	uint8_t v;
1941 	int i, dv;
1942 
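	/* Delay, in microseconds, used when polling for reset completion. */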
1943 	dv = 5000;
1944 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1945 	switch (how) {
1946 	case STGE_RESET_TX:
1947 		ac |= AC_TxReset | AC_FIFO;
1948 		dv = 100;
1949 		break;
1950 	case STGE_RESET_RX:
1951 		ac |= AC_RxReset | AC_FIFO;
1952 		dv = 100;
1953 		break;
1954 	case STGE_RESET_FULL:
1955 	default:
1956 		/*
1957 		 * Only assert RstOut if we're fiber.  We need GMII clocks
1958 		 * to be present in order for the reset to complete on fiber
1959 		 * cards.
1960 		 */
1961 		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1962 		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1963 		    (sc->sc_usefiber ? AC_RstOut : 0);
1964 		break;
1965 	}
1966 
1967 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1968 
1969 	/* Account for reset problem at 10Mbps. */
1970 	DELAY(dv);
1971 
1972 	for (i = 0; i < STGE_TIMEOUT; i++) {
1973 		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1974 			break;
1975 		DELAY(dv);
1976 	}
1977 
1978 	if (i == STGE_TIMEOUT)
1979 		device_printf(sc->sc_dev, "reset failed to complete\n");
1980 
1981 	/* Set LED, from Linux IPG driver. */
1982 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1983 	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1984 	if ((sc->sc_led & 0x01) != 0)
1985 		ac |= AC_LEDMode;
1986 	if ((sc->sc_led & 0x03) != 0)
1987 		ac |= AC_LEDModeBit1;
1988 	if ((sc->sc_led & 0x08) != 0)
1989 		ac |= AC_LEDSpeed;
1990 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1991 
1992 	/* Set PHY, from Linux IPG driver */
1993 	v = CSR_READ_1(sc, STGE_PhySet);
1994 	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1995 	v |= ((sc->sc_led & 0x70) >> 4);
1996 	CSR_WRITE_1(sc, STGE_PhySet, v);
1997 }
1998 
1999 /*
2000  * stge_init:		[ ifnet interface function ]
2001  *
2002  *	Initialize the interface.
2003  */
2004 static void
2005 stge_init(void *xsc)
2006 {
2007 	struct stge_softc *sc = xsc;
2008 	struct ifnet *ifp = &sc->arpcom.ac_if;
2009 	struct mii_data *mii;
2010 	uint16_t eaddr[3];
2011 	uint32_t v;
2012 	int error;
2013 
2014 	ASSERT_SERIALIZED(ifp->if_serializer);
2015 
2016 	mii = device_get_softc(sc->sc_miibus);
2017 
2018 	/*
2019 	 * Cancel any pending I/O.
2020 	 */
2021 	stge_stop(sc);
2022 
2023 	/* Init descriptors. */
2024 	error = stge_init_rx_ring(sc);
2025 	if (error != 0) {
2026 		device_printf(sc->sc_dev,
2027 		    "initialization failed: no memory for rx buffers\n");
2028 		stge_stop(sc);
2029 		goto out;
2030 	}
2031 	stge_init_tx_ring(sc);
2032 
2033 	/* Set the station address. */
2034 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2035 	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2036 	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2037 	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2038 
2039 	/*
2040 	 * Set the statistics masks.  Disable all the RMON stats,
2041 	 * and disable selected stats in the non-RMON stats registers.
2042 	 */
2043 	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2044 	CSR_WRITE_4(sc, STGE_StatisticsMask,
2045 	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2046 	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2047 	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2048 	    (1U << 21));
2049 
2050 	/* Set up the receive filter. */
2051 	stge_set_filter(sc);
2052 	/* Program multicast filter. */
2053 	stge_set_multi(sc);
2054 
2055 	/*
2056 	 * Give the transmit and receive ring to the chip.
2057 	 */
2058 	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2059 	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2060 	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2061 	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2062 
2063 	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2064 	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2065 	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2066 	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2067 
2068 	/*
2069 	 * Initialize the Tx auto-poll period.  It's OK to make this number
2070 	 * large (255 is the max, but we use 127) -- we explicitly kick the
2071 	 * transmit engine when there's actually a packet.
2072 	 */
2073 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2074 
2075 	/* ..and the Rx auto-poll period. */
2076 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2077 
2078 	/* Initialize the Tx start threshold. */
2079 	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2080 
2081 	/* Rx DMA thresholds, from Linux */
2082 	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2083 	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2084 
2085 	/* Rx early threshold, from Linux */
2086 	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2087 
2088 	/* Tx DMA thresholds, from Linux */
2089 	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2090 	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2091 
2092 	/*
2093 	 * Initialize the Rx DMA interrupt control register.  We
2094 	 * request an interrupt after every incoming packet, but
2095 	 * defer it for sc_rxint_dmawait microseconds.  When the
2096 	 * number of pending frames reaches sc_rxint_nframe, we stop
2097 	 * deferring the interrupt and signal it immediately.
2098 	 */
2099 	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2100 	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2101 	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
2102 
2103 	/*
2104 	 * Initialize the interrupt mask.
2105 	 */
2106 	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2107 	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2108 #ifdef DEVICE_POLLING
2109 	/* Disable interrupts if we are polling. */
2110 	if (ifp->if_flags & IFF_POLLING)
2111 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2112 	else
2113 #endif
2114 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2115 
2116 	/*
2117 	 * Configure the DMA engine.
2118 	 * XXX Should auto-tune TxBurstLimit.
2119 	 */
2120 	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2121 
2122 	/*
2123 	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2124 	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2125 	 * in the Rx FIFO.
2126 	 */
2127 	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2128 	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
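	/* Both thresholds are programmed in units of 16 bytes. */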
2129 
2130 	/*
2131 	 * Set the maximum frame size.
2132 	 */
2133 	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2134 	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
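	/* e.g. with the default 1500-byte MTU: 1500 + 14 + 4 = 1518 bytes. */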
2135 
2136 	/*
2137 	 * Initialize MacCtrl -- do it before setting the media,
2138 	 * as setting the media will actually program the register.
2139 	 *
2140 	 * Note: We have to poke the IFS value before poking
2141 	 * anything else.
2142 	 */
2143 	/* Tx/Rx MAC should be disabled before programming IFS. */
2144 	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2145 
2146 	stge_vlan_setup(sc);
2147 
2148 	if (sc->sc_rev >= 6) {		/* >= B.2 */
2149 		/* Multi-frag frame bug work-around. */
2150 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2151 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2152 
2153 		/* Tx Poll Now bug work-around. */
2154 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2155 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);

2156 		/* Tx Poll Now bug work-around. */
2157 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2158 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2159 	}
2160 
2161 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2162 	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2163 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2164 	/*
2165 	 * It seems that transmitting frames without first checking the
2166 	 * state of the Rx/Tx MAC wedges the hardware.
2167 	 */
2168 	stge_start_tx(sc);
2169 	stge_start_rx(sc);
2170 
2171 	/*
2172 	 * Set the current media.
2173 	 */
2174 	mii_mediachg(mii);
2175 
2176 	/*
2177 	 * Start the one second MII clock.
2178 	 */
2179 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2180 
2181 	/*
2182 	 * ...all done!
2183 	 */
2184 	ifp->if_flags |= IFF_RUNNING;
2185 	ifp->if_flags &= ~IFF_OACTIVE;
2186 
2187  out:
2188 	if (error != 0)
2189 		device_printf(sc->sc_dev, "interface not running\n");
2190 }
2191 
2192 static void
2193 stge_vlan_setup(struct stge_softc *sc)
2194 {
2195 	struct ifnet *ifp = &sc->arpcom.ac_if;
2196 	uint32_t v;
2197 
2198 	/*
2199 	 * The NIC always copies a VLAN tag regardless of the
2200 	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
2201 	 * The MC_AutoVLANtagging bit selects which VLAN source to use
2202 	 * between STGE_VLANTag and the TFC; however, the TFC's
2203 	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging,
2204 	 * so we always use the TFC instead of the STGE_VLANTag register.
2205 	 */
2206 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2207 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2208 		v |= MC_AutoVLANuntagging;
2209 	else
2210 		v &= ~MC_AutoVLANuntagging;
2211 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2212 }
2213 
2214 /*
 * stge_stop:
 *
2215  *	Stop transmission on the interface.
2216  */
2217 static void
2218 stge_stop(struct stge_softc *sc)
2219 {
2220 	struct ifnet *ifp = &sc->arpcom.ac_if;
2221 	struct stge_txdesc *txd;
2222 	struct stge_rxdesc *rxd;
2223 	uint32_t v;
2224 	int i;
2225 
2226 	ASSERT_SERIALIZED(ifp->if_serializer);
2227 
2228 	/*
2229 	 * Stop the one second clock.
2230 	 */
2231 	callout_stop(&sc->sc_tick_ch);
2232 
2233 	/*
2234 	 * Reset the chip to a known state.
2235 	 */
2236 	stge_reset(sc, STGE_RESET_FULL);
2237 
2238 	/*
2239 	 * Disable interrupts.
2240 	 */
2241 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2242 
2243 	/*
2244 	 * Stop receiver, transmitter, and stats update.
2245 	 */
2246 	stge_stop_rx(sc);
2247 	stge_stop_tx(sc);
2248 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2249 	v |= MC_StatisticsDisable;
2250 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2251 
2252 	/*
2253 	 * Stop the transmit and receive DMA.
2254 	 */
2255 	stge_dma_wait(sc);
2256 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2257 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2258 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2259 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2260 
2261 	/*
2262 	 * Free RX and TX mbufs still in the queues.
2263 	 */
2264 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2265 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2266 		if (rxd->rx_m != NULL) {
2267 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2268 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2269 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2270 			    rxd->rx_dmamap);
2271 			m_freem(rxd->rx_m);
2272 			rxd->rx_m = NULL;
2273 		}
2274 	}
2275 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2276 		txd = &sc->sc_cdata.stge_txdesc[i];
2277 		if (txd->tx_m != NULL) {
2278 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2279 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2280 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2281 			    txd->tx_dmamap);
2282 			m_freem(txd->tx_m);
2283 			txd->tx_m = NULL;
2284 		}
2285 	}
2286 
2287 	/*
2288 	 * Mark the interface down and cancel the watchdog timer.
2289 	 */
2290 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2291 	ifp->if_timer = 0;
2292 }
2293 
2294 static void
2295 stge_start_tx(struct stge_softc *sc)
2296 {
2297 	uint32_t v;
2298 	int i;
2299 
2300 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2301 	if ((v & MC_TxEnabled) != 0)
2302 		return;
2303 	v |= MC_TxEnable;
2304 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2305 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
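	/*
	 * Poll up to STGE_TIMEOUT times, 10us apart, for the Tx MAC
	 * to report itself enabled.
	 */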
2306 	for (i = STGE_TIMEOUT; i > 0; i--) {
2307 		DELAY(10);
2308 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2309 		if ((v & MC_TxEnabled) != 0)
2310 			break;
2311 	}
2312 	if (i == 0)
2313 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2314 }
2315 
2316 static void
2317 stge_start_rx(struct stge_softc *sc)
2318 {
2319 	uint32_t v;
2320 	int i;
2321 
2322 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2323 	if ((v & MC_RxEnabled) != 0)
2324 		return;
2325 	v |= MC_RxEnable;
2326 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2327 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2328 	for (i = STGE_TIMEOUT; i > 0; i--) {
2329 		DELAY(10);
2330 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2331 		if ((v & MC_RxEnabled) != 0)
2332 			break;
2333 	}
2334 	if (i == 0)
2335 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2336 }
2337 
2338 static void
2339 stge_stop_tx(struct stge_softc *sc)
2340 {
2341 	uint32_t v;
2342 	int i;
2343 
2344 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2345 	if ((v & MC_TxEnabled) == 0)
2346 		return;
2347 	v |= MC_TxDisable;
2348 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2349 	for (i = STGE_TIMEOUT; i > 0; i--) {
2350 		DELAY(10);
2351 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2352 		if ((v & MC_TxEnabled) == 0)
2353 			break;
2354 	}
2355 	if (i == 0)
2356 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2357 }
2358 
2359 static void
2360 stge_stop_rx(struct stge_softc *sc)
2361 {
2362 	uint32_t v;
2363 	int i;
2364 
2365 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2366 	if ((v & MC_RxEnabled) == 0)
2367 		return;
2368 	v |= MC_RxDisable;
2369 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2370 	for (i = STGE_TIMEOUT; i > 0; i--) {
2371 		DELAY(10);
2372 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2373 		if ((v & MC_RxEnabled) == 0)
2374 			break;
2375 	}
2376 	if (i == 0)
2377 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2378 }
2379 
2380 static void
2381 stge_init_tx_ring(struct stge_softc *sc)
2382 {
2383 	struct stge_ring_data *rd;
2384 	struct stge_txdesc *txd;
2385 	bus_addr_t addr;
2386 	int i;
2387 
2388 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2389 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2390 
2391 	sc->sc_cdata.stge_tx_prod = 0;
2392 	sc->sc_cdata.stge_tx_cons = 0;
2393 	sc->sc_cdata.stge_tx_cnt = 0;
2394 
2395 	rd = &sc->sc_rdata;
2396 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
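	/*
	 * Link the TFDs into a ring (the last descriptor wraps back to
	 * the first) and mark each one TFDDone so the chip considers
	 * it already processed.
	 */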
2397 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2398 		if (i == (STGE_TX_RING_CNT - 1))
2399 			addr = STGE_TX_RING_ADDR(sc, 0);
2400 		else
2401 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2402 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2403 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2404 		txd = &sc->sc_cdata.stge_txdesc[i];
2405 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2406 	}
2407 
2408 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2409 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
2410 }
2411 
2412 static int
2413 stge_init_rx_ring(struct stge_softc *sc)
2414 {
2415 	struct stge_ring_data *rd;
2416 	bus_addr_t addr;
2417 	int i;
2418 
2419 	sc->sc_cdata.stge_rx_cons = 0;
2420 	STGE_RXCHAIN_RESET(sc);
2421 
2422 	rd = &sc->sc_rdata;
2423 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
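	/*
	 * Give each RFD a fresh mbuf cluster and link the descriptors
	 * into a ring, the last one wrapping back to the first.
	 */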
2424 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2425 		if (stge_newbuf(sc, i, 1) != 0)
2426 			return (ENOBUFS);
2427 		if (i == (STGE_RX_RING_CNT - 1))
2428 			addr = STGE_RX_RING_ADDR(sc, 0);
2429 		else
2430 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2431 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2432 		rd->stge_rx_ring[i].rfd_status = 0;
2433 	}
2434 
2435 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2436 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREWRITE);
2437 
2438 	return (0);
2439 }
2440 
2441 /*
2442  * stge_newbuf:
2443  *
2444  *	Add a receive buffer to the indicated descriptor.
2445  */
2446 static int
2447 stge_newbuf(struct stge_softc *sc, int idx, int waitok)
2448 {
2449 	struct stge_rxdesc *rxd;
2450 	struct stge_rfd *rfd;
2451 	struct mbuf *m;
2452 	struct stge_mbuf_dmamap_arg arg;
2453 	bus_dma_segment_t segs[1];
2454 	bus_dmamap_t map;
2455 
2456 	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2457 	if (m == NULL)
2458 		return (ENOBUFS);
2459 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2460 	/*
2461 	 * The hardware requires a 4-byte aligned DMA address when a
2462 	 * jumbo frame is used, so only re-align the payload for
2463 	 * standard-sized frames.
2463 	 */
2464 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2465 		m_adj(m, ETHER_ALIGN);
2466 
2467 	arg.segs = segs;
2468 	arg.nsegs = 1;
2469 	if (bus_dmamap_load_mbuf(sc->sc_cdata.stge_rx_tag,
2470 	    sc->sc_cdata.stge_rx_sparemap, m, stge_mbuf_dmamap_cb, &arg,
2471 	    waitok ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
2472 		m_freem(m);
2473 		return (ENOBUFS);
2474 	}
2475 
2476 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2477 	if (rxd->rx_m != NULL) {
2478 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2479 		    BUS_DMASYNC_POSTREAD);
2480 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2481 	}
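	/*
	 * The buffer was loaded into the spare map above, so a load
	 * failure leaves the previous buffer in place; now swap the
	 * spare map into the slot and recycle the old map as the spare.
	 */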
2482 	map = rxd->rx_dmamap;
2483 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2484 	sc->sc_cdata.stge_rx_sparemap = map;
2485 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2486 	    BUS_DMASYNC_PREREAD);
2487 	rxd->rx_m = m;
2488 
2489 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2490 	rfd->rfd_frag.frag_word0 =
2491 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2492 	rfd->rfd_status = 0;
2493 
2494 	return (0);
2495 }
2496 
2497 /*
2498  * stge_set_filter:
2499  *
2500  *	Set up the receive filter.
2501  */
2502 static void
2503 stge_set_filter(struct stge_softc *sc)
2504 {
2505 	struct ifnet *ifp = &sc->arpcom.ac_if;
2506 	uint16_t mode;
2507 
2508 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2509 	mode |= RM_ReceiveUnicast;
2510 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2511 		mode |= RM_ReceiveBroadcast;
2512 	else
2513 		mode &= ~RM_ReceiveBroadcast;
2514 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2515 		mode |= RM_ReceiveAllFrames;
2516 	else
2517 		mode &= ~RM_ReceiveAllFrames;
2518 
2519 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2520 }
2521 
2522 static void
2523 stge_set_multi(struct stge_softc *sc)
2524 {
2525 	struct ifnet *ifp = &sc->arpcom.ac_if;
2526 	struct ifmultiaddr *ifma;
2527 	uint32_t crc;
2528 	uint32_t mchash[2];
2529 	uint16_t mode;
2530 	int count;
2531 
2532 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2533 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2534 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2535 			mode |= RM_ReceiveAllFrames;
2536 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2537 			mode |= RM_ReceiveMulticast;
2538 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2539 		return;
2540 	}
2541 
2542 	/* Clear existing filters. */
2543 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2544 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2545 
2546 	/*
2547 	 * Set up the multicast address filter by passing all multicast
2548 	 * addresses through a CRC generator, and then using the low-order
2549 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2550 	 * high-order bit selects the register, while the low-order bits
2551 	 * select the bit within the register.
2552 	 */
2553 
2554 	bzero(mchash, sizeof(mchash));
2555 
2556 	count = 0;
2557 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2558 		if (ifma->ifma_addr->sa_family != AF_LINK)
2559 			continue;
2560 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2561 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2562 
2563 		/* Just want the 6 least significant bits. */
2564 		crc &= 0x3f;
2565 
2566 		/* Set the corresponding bit in the hash table. */
2567 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2568 		count++;
2569 	}
2570 
2571 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2572 	if (count > 0)
2573 		mode |= RM_ReceiveMulticastHash;
2574 	else
2575 		mode &= ~RM_ReceiveMulticastHash;
2576 
2577 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2578 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2579 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2580 }
2581 
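/*
 * The sysctl handlers below clamp the Rx interrupt moderation tunables
 * to their compile-time bounds.
 */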
2582 static int
2583 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2584 {
2585 	return (sysctl_int_range(oidp, arg1, arg2, req,
2586 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2587 }
2588 
2589 static int
2590 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2591 {
2592 	return (sysctl_int_range(oidp, arg1, arg2, req,
2593 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2594 }
2595