/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
/*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
/*	$DragonFly: src/sys/dev/netif/stge/if_stge.c,v 1.2 2007/08/14 13:30:35 sephe Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_stgereg.h"
#include "if_stgevar.h"

#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
	  "D-Link DL-2000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static void	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(struct ifnet *);
static void	stge_watchdog(struct ifnet *);
static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	stge_init(void *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link(struct stge_softc *);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static void	stge_rxeof(struct stge_softc *, int);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int, int);
#ifndef __i386__
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static void	stge_mii_sync(struct stge_softc *);
static void	stge_mii_send(struct stge_softc *, uint32_t, int);
static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(struct ifnet *);
static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	stge_mbuf_dmamap_cb(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static void	stge_poll(struct ifnet *, enum poll_cmd, int);
#endif

static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	{ 0, 0 }
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DECLARE_DUMMY_MODULE(if_stge);
MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);

#define	MII_SET(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
#define	MII_CLR(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
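
/*
 * The TC9021's MII management interface is bit-banged in software:
 * MII_SET() and MII_CLR() read-modify-write the PhyCtrl register to
 * drive (or release) the management clock, data, and direction lines.
 */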

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
stge_mii_sync(struct stge_softc *sc)
{
	int i;

	MII_SET(PC_MgmtDir | PC_MgmtData);

	for (i = 0; i < 32; i++) {
		MII_SET(PC_MgmtClk);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(PC_MgmtClk);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			MII_SET(PC_MgmtData);
		else
			MII_CLR(PC_MgmtData);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		MII_SET(PC_MgmtClk);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

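	/*
	 * What follows is a standard IEEE 802.3 clause 22 management
	 * frame: 2 start-delimiter bits, 2 opcode bits, 5 PHY address
	 * bits, and 5 register address bits; on a read, the PHY then
	 * drives the turnaround and the 16 data bits back to us.
	 */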
	/*
	 * Send command/address info.
	 */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(PC_MgmtDir);

	/* Idle bit */
	MII_CLR((PC_MgmtClk | PC_MgmtData));
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/* Check for ack */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(PC_MgmtClk);
			DELAY(1);
			MII_SET(PC_MgmtClk);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(PC_MgmtClk);
		DELAY(1);
	}

fail:
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_WRITEOP;
	frame->mii_turnaround = STGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);
	stge_mii_send(sc, frame->mii_turnaround, 2);
	stge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(PC_MgmtClk);
	DELAY(1);
	MII_CLR(PC_MgmtClk);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(PC_MgmtDir);

	return(0);
}

/*
 * stge_miibus_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	struct stge_mii_frame frame;
	int error;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow the ip1000phy driver to read STGE_PhyCtrl. */
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		return (error);
	}
	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;

	error = stge_mii_readreg(sc, &frame);

	if (error != 0) {
		/* Don't show errors for PHY probe requests. */
		if (reg != 1)
			device_printf(sc->sc_dev, "phy read fail\n");
		return (0);
	}
	return (frame.mii_data);
}

/*
 * stge_miibus_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;
	struct stge_mii_frame frame;
	int error;

	sc = device_get_softc(dev);

	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = val;

	error = stge_mii_writereg(sc, &frame);

	if (error != 0)
		device_printf(sc->sc_dev, "phy write fail\n");
	return (0);
}

/*
 * stge_miibus_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sc_miibus);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
		return;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;

	stge_link(sc);
}

/*
 * stge_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}

static int
stge_probe(device_t dev)
{
	struct stge_product *sp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);

	for (sp = stge_products; sp->stge_name != NULL; sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (0);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->sc_tick_ch);

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);

	if ((val & 0x01) != 0) {
		sc->sc_res_rid = STGE_PCIR_LOMEM;
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_rid = STGE_PCIR_LOIO;
		sc->sc_res_type = SYS_RES_IOPORT;

		val = pci_read_config(dev, sc->sc_res_rid, 4);
		if ((val & 0x01) == 0) {
			device_printf(dev, "couldn't locate IO BAR\n");
			return ENXIO;
		}
	}

	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
					    &sc->sc_res_rid, RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		return ENXIO;
	}
	sc->sc_btag = rman_get_bustag(sc->sc_res);
	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					    &sc->sc_irq_rid,
					    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_rev = pci_get_revid(dev);

	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;

	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
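
	/*
	 * The two Rx interrupt moderation knobs above land under
	 * hw.<nameunit>; e.g. for the first device (assuming it probes
	 * as stge0, and with illustrative values) they can be tuned at
	 * runtime with:
	 *
	 *	sysctl hw.stge0.rxint_nframe=8
	 *	sysctl hw.stge0.rxint_dmawait=30
	 */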

	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For the Sundance ST-1023, it can only be
	 * read from the EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
	ifp->if_init = stge_init;
#ifdef DEVICE_POLLING
	ifp->if_poll = stge_poll;
#endif
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
	    stge_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr, NULL);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
#ifdef notyet
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It is disabled anyway when IP checksumming is in use,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hook up the IRQ.
	 */
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

static int
stge_detach(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	stge_dma_free(sc);

	if (sc->sc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq);
	}
	if (sc->sc_res != NULL) {
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
				     sc->sc_res);
	}

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many segments %d\n", nseg));

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

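/*
 * nsegs is in/out: callers set it to the capacity of the segs array,
 * and stge_mbuf_dmamap_cb() overwrites it with the number of segments
 * actually loaded (zero if the load failed).
 */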
struct stge_mbuf_dmamap_arg {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static void
stge_mbuf_dmamap_cb(void *xarg, bus_dma_segment_t *segs, int nsegs,
		    bus_size_t mapsz __unused, int error)
{
	struct stge_mbuf_dmamap_arg *arg = xarg;
	int i;

	if (error) {
		arg->nsegs = 0;
		return;
	}

	KASSERT(nsegs <= arg->nsegs,
		("too many segments (%d), should be <= %d\n",
		 nsegs, arg->nsegs));

	arg->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		arg->segs[i] = segs[i];
}

static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(NULL,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
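	/*
	 * The spare map lets the Rx refill path (stge_newbuf()) load a
	 * new cluster first and swap maps only on success, so a failed
	 * allocation leaves the old buffer and its mapping intact.
	 */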
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_cdata.stge_tx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_cdata.stge_tx_ring_map &&
		    sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_cdata.stge_tx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_cdata.stge_rx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_cdata.stge_rx_ring_map &&
		    sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_cdata.stge_rx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
stge_shutdown(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	stge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	stge_stop(sc);
	sc->sc_suspended = 1;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		stge_init(sc);
	sc->sc_suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	struct stge_mbuf_dmamap_arg arg;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, si;
	uint64_t csum_flags, tfc;

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	arg.nsegs = STGE_MAXTXSEGS;
	arg.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
				     txd->tx_dmamap, *m_head,
				     stge_mbuf_dmamap_cb, &arg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
					     txd->tx_dmamap, *m_head,
					     stge_mbuf_dmamap_cb, &arg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (arg.nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < arg.nsegs; i++) {
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	}
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(arg.nsegs) | csum_flags;
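	/*
	 * Completion interrupts are not requested for every frame
	 * (reclamation normally happens from stge_tick()); once the
	 * ring fills past STGE_TX_HIWAT, TFD_TxDMAIndicate is set,
	 * which presumably asks the chip to raise TxDMAComplete so
	 * descriptors are reclaimed promptly.
	 */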
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m->m_pkthdr.rcvif != NULL &&
	    m->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv;

		ifv = m->m_pkthdr.rcvif->if_softc;
		if (ifv != NULL)
			tfc |= TFD_VLANTagInsert | TFD_VID(ifv->ifv_tag);
	}
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;

	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

/*
 * stge_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "device timeout\n");
	ifp->if_oerrors++;
	stge_init(ifp->if_softc);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			stge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			stge_set_multi(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_flags & IFF_RUNNING)
				stge_vlan_setup(sc);
		}
#if 0
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
stge_link(struct stge_softc *sc)
{
	uint32_t v, ac;
	int i;

	/*
	 * Update the STGE_MACCtrl register depending on link status
	 * (duplex, flow control, etc.).
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
}

static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from a Tx underrun than a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int reinit;
	uint16_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		return;

	/*
	 * Reading IntStatusAck acknowledges the pending sources and
	 * disables interrupts; they are re-enabled at the end of this
	 * routine by rewriting IntEnable.
	 */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc, -1);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0)
		stge_init(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		ifp->if_flags &= ~IFF_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/*
		 * The output packet counter is updated from the
		 * statistics registers (stge_stats_update()), not here.
		 */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		ifp->if_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __i386__
/*
 * It seems that the TC9021's DMA engine has alignment restrictions on
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the rest should be aligned on a 4(?) byte
 * boundary; otherwise random memory would be corrupted.  Since we don't
 * know in advance which segment will be used first, we simply don't
 * align at all.
 * To avoid copying an entire frame just to align it, we allocate a new
 * mbuf and copy the Ethernet header into it.  The new mbuf is then
 * prepended to the existing mbuf chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, MB_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static void
stge_rxeof(struct stge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif

		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;

		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons, 0) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

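		/*
		 * A frame larger than one cluster spans several
		 * consecutive RFDs; only the descriptor with
		 * RFD_FrameEnd set reports the total DMA'd length, so
		 * the bytes already chained (stge_rxlen) are subtracted
		 * to get this buffer's share.
		 */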
		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					     CSUM_PSEUDO_HDR |
					     CSUM_FRAG_NOT_CHECKED);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __i386__
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif

			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				VLAN_INPUT_TAG(m, RFD_TCI(status64));
			} else {
				/* Pass it on. */
				ifp->if_input(ifp, m);
			}

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

#ifdef DEVICE_POLLING
static void
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;

	sc = ifp->if_softc;

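	/*
	 * While polling is active the chip's interrupt sources stay
	 * masked: POLL_REGISTER clears IntEnable and POLL_DEREGISTER
	 * restores the saved mask.
	 */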
	switch (cmd) {
	case POLL_REGISTER:
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
		break;
	case POLL_DEREGISTER:
		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		sc->sc_cdata.stge_rxcycles = count;
		stge_rxeof(sc, count);
		stge_txeof(sc);

		if (cmd == POLL_AND_CHECK_STATUS) {
			status = CSR_READ_2(sc, STGE_IntStatus);
			status &= sc->sc_IntEnable;
			if (status != 0) {
				if (status & IS_HostError) {
					device_printf(sc->sc_dev,
					    "Host interface error, "
					    "resetting...\n");
					stge_init(sc);
				}
				if ((status & IS_TxComplete) != 0 &&
				    stge_tx_error(sc) != 0)
					stge_init(sc);
			}
		}

		if (!ifq_is_empty(&ifp->if_snd))
			ifp->if_start(ifp);
	}
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner, as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

1913 	CSR_READ_4(sc, STGE_OctetRcvOk);
1914 
1915 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1916 
1917 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1918 
1919 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1920 
1921 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1922 
1923 	ifp->if_collisions +=
1924 	    CSR_READ_4(sc, STGE_LateCollisions) +
1925 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1926 	    CSR_READ_4(sc, STGE_SingleColFrames);
1927 
1928 	ifp->if_oerrors +=
1929 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1930 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1931 }
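
#ifdef notyet
/*
 * Sketch: the bare CSR_READ_4() calls in stge_stats_update() exist only
 * to clear counters that are clear-on-read (an assumed property,
 * inferred from the discarded return values).  A helper would make that
 * intent explicit.
 */
static __inline void
stge_stats_discard4(struct stge_softc *sc, int reg)
{
	(void)CSR_READ_4(sc, reg);	/* value discarded; read clears */
}
#endif	/* notyet */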
1932 
1933 /*
1934  * stge_reset:
1935  *
1936  *	Perform a soft reset on the TC9021.
1937  */
1938 static void
1939 stge_reset(struct stge_softc *sc, uint32_t how)
1940 {
1941 	uint32_t ac;
1942 	uint8_t v;
1943 	int i, dv;
1944 
1945 	dv = 5000;
1946 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1947 	switch (how) {
1948 	case STGE_RESET_TX:
1949 		ac |= AC_TxReset | AC_FIFO;
1950 		dv = 100;
1951 		break;
1952 	case STGE_RESET_RX:
1953 		ac |= AC_RxReset | AC_FIFO;
1954 		dv = 100;
1955 		break;
1956 	case STGE_RESET_FULL:
1957 	default:
1958 		/*
1959 		 * Only assert RstOut if we're fiber.  We need GMII clocks
1960 		 * to be present in order for the reset to complete on fiber
1961 		 * cards.
1962 		 */
1963 		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1964 		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1965 		    (sc->sc_usefiber ? AC_RstOut : 0);
1966 		break;
1967 	}
1968 
1969 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1970 
1971 	/* Account for reset problem at 10Mbps. */
1972 	DELAY(dv);
1973 
1974 	for (i = 0; i < STGE_TIMEOUT; i++) {
1975 		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1976 			break;
1977 		DELAY(dv);
1978 	}
1979 
1980 	if (i == STGE_TIMEOUT)
1981 		device_printf(sc->sc_dev, "reset failed to complete\n");
1982 
1983 	/* Set LED, from Linux IPG driver. */
1984 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1985 	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1986 	if ((sc->sc_led & 0x01) != 0)
1987 		ac |= AC_LEDMode;
1988 	if ((sc->sc_led & 0x03) != 0)
1989 		ac |= AC_LEDModeBit1;
1990 	if ((sc->sc_led & 0x08) != 0)
1991 		ac |= AC_LEDSpeed;
1992 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1993 
1994 	/* Set PHY, from Linux IPG driver */
1995 	v = CSR_READ_1(sc, STGE_PhySet);
1996 	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1997 	v |= ((sc->sc_led & 0x70) >> 4);
1998 	CSR_WRITE_1(sc, STGE_PhySet, v);
1999 }
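
#ifdef notyet
/*
 * Sketch: stge_reset(), stge_start_tx/rx() and stge_stop_tx/rx() all
 * poll a register with DELAY() until some bits reach a wanted state.
 * A shared helper would capture the pattern; shown for illustration
 * only.
 */
static int
stge_wait_bits(struct stge_softc *sc, int reg, uint32_t mask,
    uint32_t want, int dly)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & mask) == want)
			return (0);
		DELAY(dly);
	}
	return (ETIMEDOUT);
}
#endif	/* notyet */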
2000 
2001 /*
2002  * stge_init:		[ ifnet interface function ]
2003  *
2004  *	Initialize the interface.
2005  */
2006 static void
2007 stge_init(void *xsc)
2008 {
2009 	struct stge_softc *sc = xsc;
2010 	struct ifnet *ifp = &sc->arpcom.ac_if;
2011 	struct mii_data *mii;
2012 	uint16_t eaddr[3];
2013 	uint32_t v;
2014 	int error;
2015 
2016 	ASSERT_SERIALIZED(ifp->if_serializer);
2017 
2018 	mii = device_get_softc(sc->sc_miibus);
2019 
2020 	/*
2021 	 * Cancel any pending I/O.
2022 	 */
2023 	stge_stop(sc);
2024 
2025 	/* Init descriptors. */
2026 	error = stge_init_rx_ring(sc);
2027 	if (error != 0) {
2028 		device_printf(sc->sc_dev,
2029 		    "initialization failed: no memory for rx buffers\n");
2030 		stge_stop(sc);
2031 		goto out;
2032 	}
2033 	stge_init_tx_ring(sc);
2034 
2035 	/* Set the station address. */
2036 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2037 	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2038 	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2039 	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2040 
2041 	/*
2042 	 * Set the statistics masks.  Disable all the RMON stats,
2043 	 * and disable selected stats in the non-RMON stats registers.
2044 	 */
2045 	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2046 	CSR_WRITE_4(sc, STGE_StatisticsMask,
2047 	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2048 	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2049 	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2050 	    (1U << 21));
2051 
2052 	/* Set up the receive filter. */
2053 	stge_set_filter(sc);
2054 	/* Program multicast filter. */
2055 	stge_set_multi(sc);
2056 
2057 	/*
2058 	 * Give the transmit and receive ring to the chip.
2059 	 */
2060 	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2061 	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2062 	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2063 	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2064 
2065 	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2066 	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2067 	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2068 	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2069 
2070 	/*
2071 	 * Initialize the Tx auto-poll period.  It's OK to make this number
2072 	 * large (255 is the max, but we use 127) -- we explicitly kick the
2073 	 * transmit engine when there's actually a packet.
2074 	 */
2075 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2076 
2077 	/* ..and the Rx auto-poll period. */
2078 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2079 
2080 	/* Initialize the Tx start threshold. */
2081 	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2082 
2083 	/* Rx DMA thresholds, from Linux */
2084 	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2085 	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2086 
2087 	/* Rx early threshold, from Linux */
2088 	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2089 
2090 	/* Tx DMA thresholds, from Linux */
2091 	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2092 	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2093 
2094 	/*
2095 	 * Initialize the Rx DMA interrupt control register.  We
2096 	 * request an interrupt after every incoming packet, but
2097 	 * defer it for sc_rxint_dmawait us. When the number of
2098 	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
2099 	 * deferring the interrupt, and signal it immediately.
2100 	 */
2101 	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2102 	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2103 	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
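
	/*
	 * For example (values are assumptions; the defaults live in the
	 * driver headers): with sc_rxint_nframe = 8 and sc_rxint_dmawait
	 * = 30us, the chip raises at most one Rx interrupt per 8 frames
	 * or per 30us of deferral, whichever limit is reached first.
	 */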
2104 
2105 	/*
2106 	 * Initialize the interrupt mask.
2107 	 */
2108 	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2109 	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2110 #ifdef DEVICE_POLLING
2111 	/* Disable interrupts if we are polling. */
2112 	if (ifp->if_flags & IFF_POLLING)
2113 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2114 	else
2115 #endif
2116 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2117 
2118 	/*
2119 	 * Configure the DMA engine.
2120 	 * XXX Should auto-tune TxBurstLimit.
2121 	 */
2122 	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2123 
2124 	/*
2125 	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2126 	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2127 	 * in the Rx FIFO.
2128 	 */
2129 	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2130 	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
2131 
2132 	/*
2133 	 * Set the maximum frame size.
2134 	 */
2135 	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2136 	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2137 
2138 	/*
2139 	 * Initialize MacCtrl -- do it before setting the media,
2140 	 * as setting the media will actually program the register.
2141 	 *
2142 	 * Note: We have to poke the IFS value before poking
2143 	 * anything else.
2144 	 */
2145 	/* Tx/Rx MAC should be disabled before programming IFS. */
2146 	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2147 
2148 	stge_vlan_setup(sc);
2149 
2150 	if (sc->sc_rev >= 6) {		/* >= B.2 */
2151 		/* Multi-frag frame bug work-around. */
2152 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2153 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2154 
2155 		/* Tx Poll Now bug work-around. */
2156 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2157 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2158 		/* Tx Poll Now bug work-around. */
2159 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2160 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2161 	}
2162 
2163 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2164 	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2165 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2166 	/*
2167 	 * It seems that transmitting frames without checking the state of
2168 	 * the Rx/Tx MAC wedges the hardware.
2169 	 */
2170 	stge_start_tx(sc);
2171 	stge_start_rx(sc);
2172 
2173 	/*
2174 	 * Set the current media.
2175 	 */
2176 	mii_mediachg(mii);
2177 
2178 	/*
2179 	 * Start the one second MII clock.
2180 	 */
2181 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2182 
2183 	/*
2184 	 * ...all done!
2185 	 */
2186 	ifp->if_flags |= IFF_RUNNING;
2187 	ifp->if_flags &= ~IFF_OACTIVE;
2188 
2189  out:
2190 	if (error != 0)
2191 		device_printf(sc->sc_dev, "interface not running\n");
2192 }
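
#ifdef notyet
/*
 * Sketch: the PAUSE thresholds programmed in stge_init() are written in
 * 16-byte units, hence the "/ 16" above (the unit size is inferred from
 * those divisions).  A byte-based wrapper keeps the conversion in one
 * place.
 */
static __inline void
stge_set_flowthresh(struct stge_softc *sc, int on_bytes, int off_bytes)
{
	CSR_WRITE_2(sc, STGE_FlowOnTresh, on_bytes / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, off_bytes / 16);
}
#endif	/* notyet */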
2193 
2194 static void
2195 stge_vlan_setup(struct stge_softc *sc)
2196 {
2197 	struct ifnet *ifp = &sc->arpcom.ac_if;
2198 	uint32_t v;
2199 
2200 	/*
2201 	 * The NIC always copies a VLAN tag regardless of the
2202 	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
2203 	 * The MC_AutoVLANtagging bit selects the VLAN source to use
2204 	 * between STGE_VLANTag and TFC.  However, the TFC
2205 	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging,
2206 	 * so we always use TFC instead of the STGE_VLANTag register.
2207 	 */
2208 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2209 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2210 		v |= MC_AutoVLANuntagging;
2211 	else
2212 		v &= ~MC_AutoVLANuntagging;
2213 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2214 }
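
#ifdef notyet
/*
 * Sketch: as the comment in stge_vlan_setup() notes, Tx tag insertion
 * is requested per-frame through the TFC word.  Assuming the
 * TFD_VLANTagInsert and TFD_VID() definitions from if_stgereg.h,
 * building that request would look like:
 */
static __inline uint64_t
stge_tfc_vlan_insert(uint64_t tfc, uint16_t vtag)
{
	return (tfc | TFD_VLANTagInsert | TFD_VID(vtag));
}
#endif	/* notyet */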
2215 
2216 /*
2217  *	Stop transmission on the interface.
2218  */
2219 static void
2220 stge_stop(struct stge_softc *sc)
2221 {
2222 	struct ifnet *ifp = &sc->arpcom.ac_if;
2223 	struct stge_txdesc *txd;
2224 	struct stge_rxdesc *rxd;
2225 	uint32_t v;
2226 	int i;
2227 
2228 	ASSERT_SERIALIZED(ifp->if_serializer);
2229 
2230 	/*
2231 	 * Stop the one second clock.
2232 	 */
2233 	callout_stop(&sc->sc_tick_ch);
2234 
2235 	/*
2236 	 * Reset the chip to a known state.
2237 	 */
2238 	stge_reset(sc, STGE_RESET_FULL);
2239 
2240 	/*
2241 	 * Disable interrupts.
2242 	 */
2243 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2244 
2245 	/*
2246 	 * Stop receiver, transmitter, and stats update.
2247 	 */
2248 	stge_stop_rx(sc);
2249 	stge_stop_tx(sc);
2250 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2251 	v |= MC_StatisticsDisable;
2252 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2253 
2254 	/*
2255 	 * Stop the transmit and receive DMA.
2256 	 */
2257 	stge_dma_wait(sc);
2258 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2259 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2260 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2261 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2262 
2263 	/*
2264 	 * Free RX and TX mbufs still in the queues.
2265 	 */
2266 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2267 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2268 		if (rxd->rx_m != NULL) {
2269 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2270 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2271 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2272 			    rxd->rx_dmamap);
2273 			m_freem(rxd->rx_m);
2274 			rxd->rx_m = NULL;
2275 		}
2276 	}
2277 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2278 		txd = &sc->sc_cdata.stge_txdesc[i];
2279 		if (txd->tx_m != NULL) {
2280 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2281 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2282 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2283 			    txd->tx_dmamap);
2284 			m_freem(txd->tx_m);
2285 			txd->tx_m = NULL;
2286 		}
2287 	}
2288 
2289 	/*
2290 	 * Mark the interface down and cancel the watchdog timer.
2291 	 */
2292 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2293 	ifp->if_timer = 0;
2294 }
2295 
2296 static void
2297 stge_start_tx(struct stge_softc *sc)
2298 {
2299 	uint32_t v;
2300 	int i;
2301 
2302 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2303 	if ((v & MC_TxEnabled) != 0)
2304 		return;
2305 	v |= MC_TxEnable;
2306 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2307 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2308 	for (i = STGE_TIMEOUT; i > 0; i--) {
2309 		DELAY(10);
2310 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2311 		if ((v & MC_TxEnabled) != 0)
2312 			break;
2313 	}
2314 	if (i == 0)
2315 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2316 }
2317 
2318 static void
2319 stge_start_rx(struct stge_softc *sc)
2320 {
2321 	uint32_t v;
2322 	int i;
2323 
2324 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2325 	if ((v & MC_RxEnabled) != 0)
2326 		return;
2327 	v |= MC_RxEnable;
2328 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2329 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2330 	for (i = STGE_TIMEOUT; i > 0; i--) {
2331 		DELAY(10);
2332 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2333 		if ((v & MC_RxEnabled) != 0)
2334 			break;
2335 	}
2336 	if (i == 0)
2337 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2338 }
2339 
2340 static void
2341 stge_stop_tx(struct stge_softc *sc)
2342 {
2343 	uint32_t v;
2344 	int i;
2345 
2346 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2347 	if ((v & MC_TxEnabled) == 0)
2348 		return;
2349 	v |= MC_TxDisable;
2350 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2351 	for (i = STGE_TIMEOUT; i > 0; i--) {
2352 		DELAY(10);
2353 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2354 		if ((v & MC_TxEnabled) == 0)
2355 			break;
2356 	}
2357 	if (i == 0)
2358 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2359 }
2360 
2361 static void
2362 stge_stop_rx(struct stge_softc *sc)
2363 {
2364 	uint32_t v;
2365 	int i;
2366 
2367 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2368 	if ((v & MC_RxEnabled) == 0)
2369 		return;
2370 	v |= MC_RxDisable;
2371 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2372 	for (i = STGE_TIMEOUT; i > 0; i--) {
2373 		DELAY(10);
2374 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2375 		if ((v & MC_RxEnabled) == 0)
2376 			break;
2377 	}
2378 	if (i == 0)
2379 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2380 }
2381 
2382 static void
2383 stge_init_tx_ring(struct stge_softc *sc)
2384 {
2385 	struct stge_ring_data *rd;
2386 	struct stge_txdesc *txd;
2387 	bus_addr_t addr;
2388 	int i;
2389 
2390 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2391 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2392 
2393 	sc->sc_cdata.stge_tx_prod = 0;
2394 	sc->sc_cdata.stge_tx_cons = 0;
2395 	sc->sc_cdata.stge_tx_cnt = 0;
2396 
2397 	rd = &sc->sc_rdata;
2398 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2399 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2400 		if (i == (STGE_TX_RING_CNT - 1))
2401 			addr = STGE_TX_RING_ADDR(sc, 0);
2402 		else
2403 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2404 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2405 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2406 		txd = &sc->sc_cdata.stge_txdesc[i];
2407 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2408 	}
2409 
2410 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2411 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
2412 }
2413 
2414 static int
2415 stge_init_rx_ring(struct stge_softc *sc)
2416 {
2417 	struct stge_ring_data *rd;
2418 	bus_addr_t addr;
2419 	int i;
2420 
2421 	sc->sc_cdata.stge_rx_cons = 0;
2422 	STGE_RXCHAIN_RESET(sc);
2423 
2424 	rd = &sc->sc_rdata;
2425 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2426 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2427 		if (stge_newbuf(sc, i, 1) != 0)
2428 			return (ENOBUFS);
2429 		if (i == (STGE_RX_RING_CNT - 1))
2430 			addr = STGE_RX_RING_ADDR(sc, 0);
2431 		else
2432 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2433 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2434 		rd->stge_rx_ring[i].rfd_status = 0;
2435 	}
2436 
2437 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2438 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREWRITE);
2439 
2440 	return (0);
2441 }
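
#ifdef notyet
/*
 * Sketch: both ring initializers above chain descriptor i to i + 1 and
 * wrap the last descriptor back to 0, so the "next" address is simply
 * a modular successor:
 */
static __inline bus_addr_t
stge_rx_next_addr(struct stge_softc *sc, int i)
{
	return (STGE_RX_RING_ADDR(sc, (i + 1) % STGE_RX_RING_CNT));
}
#endif	/* notyet */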
2442 
2443 /*
2444  * stge_newbuf:
2445  *
2446  *	Add a receive buffer to the indicated descriptor.
2447  */
2448 static int
2449 stge_newbuf(struct stge_softc *sc, int idx, int waitok)
2450 {
2451 	struct stge_rxdesc *rxd;
2452 	struct stge_rfd *rfd;
2453 	struct mbuf *m;
2454 	struct stge_mbuf_dmamap_arg arg;
2455 	bus_dma_segment_t segs[1];
2456 	bus_dmamap_t map;
2457 
2458 	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2459 	if (m == NULL)
2460 		return (ENOBUFS);
2461 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2462 	/*
2463 	 * The hardware requires a 4-byte aligned DMA address when a
2464 	 * jumbo frame is used.
2465 	 */
2466 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2467 		m_adj(m, ETHER_ALIGN);
2468 
2469 	arg.segs = segs;
2470 	arg.nsegs = 1;
2471 	if (bus_dmamap_load_mbuf(sc->sc_cdata.stge_rx_tag,
2472 	    sc->sc_cdata.stge_rx_sparemap, m, stge_mbuf_dmamap_cb, &arg,
2473 	    waitok ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
2474 		m_freem(m);
2475 		return (ENOBUFS);
2476 	}
2477 
2478 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2479 	if (rxd->rx_m != NULL) {
2480 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2481 		    BUS_DMASYNC_POSTREAD);
2482 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2483 	}
2484 	map = rxd->rx_dmamap;
2485 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2486 	sc->sc_cdata.stge_rx_sparemap = map;
2487 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2488 	    BUS_DMASYNC_PREREAD);
2489 	rxd->rx_m = m;
2490 
2491 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2492 	rfd->rfd_frag.frag_word0 =
2493 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2494 	rfd->rfd_status = 0;
2495 
2496 	return (0);
2497 }
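
#ifdef notyet
/*
 * Sketch: stge_newbuf() loads the new mbuf through the spare DMA map
 * first, so a load failure leaves the old buffer mapped and usable;
 * only on success are the maps exchanged.  The exchange itself is a
 * plain pointer swap:
 */
static __inline void
stge_swap_dmamap(bus_dmamap_t *a, bus_dmamap_t *b)
{
	bus_dmamap_t tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
#endif	/* notyet */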
2498 
2499 /*
2500  * stge_set_filter:
2501  *
2502  *	Set up the receive filter.
2503  */
2504 static void
2505 stge_set_filter(struct stge_softc *sc)
2506 {
2507 	struct ifnet *ifp = &sc->arpcom.ac_if;
2508 	uint16_t mode;
2509 
2510 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2511 	mode |= RM_ReceiveUnicast;
2512 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2513 		mode |= RM_ReceiveBroadcast;
2514 	else
2515 		mode &= ~RM_ReceiveBroadcast;
2516 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2517 		mode |= RM_ReceiveAllFrames;
2518 	else
2519 		mode &= ~RM_ReceiveAllFrames;
2520 
2521 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2522 }
2523 
2524 static void
2525 stge_set_multi(struct stge_softc *sc)
2526 {
2527 	struct ifnet *ifp = &sc->arpcom.ac_if;
2528 	struct ifmultiaddr *ifma;
2529 	uint32_t crc;
2530 	uint32_t mchash[2];
2531 	uint16_t mode;
2532 	int count;
2533 
2534 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2535 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2536 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2537 			mode |= RM_ReceiveAllFrames;
2538 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2539 			mode |= RM_ReceiveMulticast;
2540 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2541 		return;
2542 	}
2543 
2544 	/* clear existing filters. */
2545 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2546 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2547 
2548 	/*
2549 	 * Set up the multicast address filter by passing all multicast
2550 	 * addresses through a CRC generator, and then using the low-order
2551 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2552 	 * high-order bit of those 6 selects the register, while the
2553 	 * low-order 5 bits select the bit within the register.
2554 	 */
2555 
2556 	bzero(mchash, sizeof(mchash));
2557 
2558 	count = 0;
2559 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2560 		if (ifma->ifma_addr->sa_family != AF_LINK)
2561 			continue;
2562 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2563 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2564 
2565 		/* Just want the 6 least significant bits. */
2566 		crc &= 0x3f;
2567 
2568 		/* Set the corresponding bit in the hash table. */
2569 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2570 		count++;
2571 	}
2572 
2573 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2574 	if (count > 0)
2575 		mode |= RM_ReceiveMulticastHash;
2576 	else
2577 		mode &= ~RM_ReceiveMulticastHash;
2578 
2579 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2580 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2581 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2582 }
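
#ifdef notyet
/*
 * Sketch: the hash placement in stge_set_multi(), factored out.  The
 * big-endian CRC of the 6-byte address is reduced to its 6 low-order
 * bits; bit 5 picks HashTable0/HashTable1 and bits 0-4 pick the bit
 * within that 32-bit register.
 */
static __inline void
stge_hash_maddr(uint32_t mchash[2], const uint8_t *enaddr)
{
	uint32_t crc;

	crc = ether_crc32_be(enaddr, ETHER_ADDR_LEN) & 0x3f;
	mchash[crc >> 5] |= 1 << (crc & 0x1f);
}
#endif	/* notyet */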
2583 
2584 static int
2585 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2586 {
2587 	int error, value;
2588 
2589 	if (!arg1)
2590 		return (EINVAL);
2591 	value = *(int *)arg1;
2592 	error = sysctl_handle_int(oidp, &value, 0, req);
2593 	if (error || !req->newptr)
2594 		return (error);
2595 	if (value < low || value > high)
2596 		return (EINVAL);
2597 	*(int *)arg1 = value;
2598 
2599 	return (0);
2600 }
2601 
2602 static int
2603 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2604 {
2605 	return (sysctl_int_range(oidp, arg1, arg2, req,
2606 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2607 }
2608 
2609 static int
2610 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2611 {
2612 	return (sysctl_int_range(oidp, arg1, arg2, req,
2613 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2614 }
2615