xref: /dragonfly/sys/dev/netif/stge/if_stge.c (revision 36a3d1d6)
1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 /*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
3 /*	$DragonFly: src/sys/dev/netif/stge/if_stge.c,v 1.7 2008/08/03 11:00:32 sephe Exp $	*/
4 
5 /*-
6  * Copyright (c) 2001 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Jason R. Thorpe.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /*
42  * Device driver for the Sundance Tech. TC9021 10/100/1000
43  * Ethernet controller.
44  */
45 
46 #include "opt_polling.h"
47 
48 #include <sys/param.h>
49 #include <sys/bus.h>
50 #include <sys/endian.h>
51 #include <sys/kernel.h>
52 #include <sys/interrupt.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/module.h>
56 #include <sys/rman.h>
57 #include <sys/serialize.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
61 
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/ifq_var.h>
70 #include <net/vlan/if_vlan_var.h>
71 #include <net/vlan/if_vlan_ether.h>
72 
73 #include <dev/netif/mii_layer/mii.h>
74 #include <dev/netif/mii_layer/miivar.h>
75 
76 #include <bus/pci/pcireg.h>
77 #include <bus/pci/pcivar.h>
78 
79 #include "if_stgereg.h"
80 #include "if_stgevar.h"
81 
82 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
83 
84 /* "device miibus" required.  See GENERIC if you get errors here. */
85 #include "miibus_if.h"
86 
87 /*
88  * Devices supported by this driver.
89  */
/*
 * PCI vendor/device ID table consulted by stge_probe(); terminated by
 * a NULL-name sentinel entry.
 */
static struct stge_product {
	uint16_t	stge_vendorid;	/* PCI vendor ID */
	uint16_t	stge_deviceid;	/* PCI device ID */
	const char	*stge_name;	/* probe description string */
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
	  "D-Link DL-2000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" },

	{ 0, 0, NULL }	/* sentinel */
};
125 
126 static int	stge_probe(device_t);
127 static int	stge_attach(device_t);
128 static int	stge_detach(device_t);
129 static void	stge_shutdown(device_t);
130 static int	stge_suspend(device_t);
131 static int	stge_resume(device_t);
132 
133 static int	stge_encap(struct stge_softc *, struct mbuf **);
134 static void	stge_start(struct ifnet *);
135 static void	stge_watchdog(struct ifnet *);
136 static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
137 static void	stge_init(void *);
138 static void	stge_vlan_setup(struct stge_softc *);
139 static void	stge_stop(struct stge_softc *);
140 static void	stge_start_tx(struct stge_softc *);
141 static void	stge_start_rx(struct stge_softc *);
142 static void	stge_stop_tx(struct stge_softc *);
143 static void	stge_stop_rx(struct stge_softc *);
144 
145 static void	stge_reset(struct stge_softc *, uint32_t);
146 static int	stge_eeprom_wait(struct stge_softc *);
147 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
148 static void	stge_tick(void *);
149 static void	stge_stats_update(struct stge_softc *);
150 static void	stge_set_filter(struct stge_softc *);
151 static void	stge_set_multi(struct stge_softc *);
152 
153 static void	stge_link(struct stge_softc *);
154 static void	stge_intr(void *);
155 static __inline int stge_tx_error(struct stge_softc *);
156 static void	stge_txeof(struct stge_softc *);
157 static void	stge_rxeof(struct stge_softc *, int);
158 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
159 static int	stge_newbuf(struct stge_softc *, int, int);
160 #ifndef __i386__
161 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
162 #endif
163 
164 static void	stge_mii_sync(struct stge_softc *);
165 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
166 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
167 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
168 static int	stge_miibus_readreg(device_t, int, int);
169 static int	stge_miibus_writereg(device_t, int, int, int);
170 static void	stge_miibus_statchg(device_t);
171 static int	stge_mediachange(struct ifnet *);
172 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
173 
174 static int	stge_dma_alloc(struct stge_softc *);
175 static void	stge_dma_free(struct stge_softc *);
176 static void	stge_dma_wait(struct stge_softc *);
177 static void	stge_init_tx_ring(struct stge_softc *);
178 static int	stge_init_rx_ring(struct stge_softc *);
179 #ifdef DEVICE_POLLING
180 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
181 #endif
182 
183 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
184 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
185 
/* newbus method dispatch table for the stge device and its MII bus. */
static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	{ 0, 0 }	/* terminator */

};
203 
/* Driver description: name, methods, softc size. */
static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

/* Module glue: stge attaches to pci, and miibus attaches below stge. */
DECLARE_DUMMY_MODULE(if_stge);
MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
216 
/*
 * Read-modify-write helpers for the PhyCtrl register, used by the MII
 * bit-bang routines below.  Both expect a local `sc' (struct stge_softc *)
 * to be in scope at the use site.
 */
#define	MII_SET(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
#define	MII_CLR(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
221 
222 /*
223  * Sync the PHYs by setting data bit and strobing the clock 32 times.
224  */
225 static void
226 stge_mii_sync(struct stge_softc	*sc)
227 {
228 	int i;
229 
230 	MII_SET(PC_MgmtDir | PC_MgmtData);
231 
232 	for (i = 0; i < 32; i++) {
233 		MII_SET(PC_MgmtClk);
234 		DELAY(1);
235 		MII_CLR(PC_MgmtClk);
236 		DELAY(1);
237 	}
238 }
239 
240 /*
241  * Clock a series of bits through the MII.
242  */
243 static void
244 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
245 {
246 	int i;
247 
248 	MII_CLR(PC_MgmtClk);
249 
250 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
251 		if (bits & i)
252 			MII_SET(PC_MgmtData);
253                 else
254 			MII_CLR(PC_MgmtData);
255 		DELAY(1);
256 		MII_CLR(PC_MgmtClk);
257 		DELAY(1);
258 		MII_SET(PC_MgmtClk);
259 	}
260 }
261 
/*
 * Read a PHY register through the MII by bit-banging the PhyCtrl pins.
 * Fills in frame->mii_data on success.  Returns 0 on success, 1 if the
 * PHY did not acknowledge the read.
 */
static int
stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Clear the bit-bang pins, preserving the saved polarity bits. */
	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(PC_MgmtDir);

	/* Idle bit */
	MII_CLR((PC_MgmtClk | PC_MgmtData));
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(PC_MgmtClk);
			DELAY(1);
			MII_SET(PC_MgmtClk);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(PC_MgmtClk);
		DELAY(1);
	}

fail:
	/* One final clock cycle to leave the bus idle. */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}
346 
/*
 * Write to a PHY register through the MII by bit-banging the PhyCtrl
 * pins.  frame->mii_phyaddr, mii_regaddr and mii_data must be filled
 * in by the caller.  Always returns 0.
 */
static int
stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_WRITEOP;
	frame->mii_turnaround = STGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/* Shift out delimiter, opcode, addresses, turnaround and data. */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);
	stge_mii_send(sc, frame->mii_turnaround, 2);
	stge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(PC_MgmtClk);
	DELAY(1);
	MII_CLR(PC_MgmtClk);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(PC_MgmtDir);

	return(0);
}
388 
389 /*
390  * sc_miibus_readreg:	[mii interface function]
391  *
392  *	Read a PHY register on the MII of the TC9021.
393  */
394 static int
395 stge_miibus_readreg(device_t dev, int phy, int reg)
396 {
397 	struct stge_softc *sc;
398 	struct stge_mii_frame frame;
399 	int error;
400 
401 	sc = device_get_softc(dev);
402 
403 	if (reg == STGE_PhyCtrl) {
404 		/* XXX allow ip1000phy read STGE_PhyCtrl register. */
405 		error = CSR_READ_1(sc, STGE_PhyCtrl);
406 		return (error);
407 	}
408 	bzero(&frame, sizeof(frame));
409 	frame.mii_phyaddr = phy;
410 	frame.mii_regaddr = reg;
411 
412 	error = stge_mii_readreg(sc, &frame);
413 
414 	if (error != 0) {
415 		/* Don't show errors for PHY probe request */
416 		if (reg != 1)
417 			device_printf(sc->sc_dev, "phy read fail\n");
418 		return (0);
419 	}
420 	return (frame.mii_data);
421 }
422 
423 /*
424  * stge_miibus_writereg:	[mii interface function]
425  *
426  *	Write a PHY register on the MII of the TC9021.
427  */
428 static int
429 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
430 {
431 	struct stge_softc *sc;
432 	struct stge_mii_frame frame;
433 	int error;
434 
435 	sc = device_get_softc(dev);
436 
437 	bzero(&frame, sizeof(frame));
438 	frame.mii_phyaddr = phy;
439 	frame.mii_regaddr = reg;
440 	frame.mii_data = val;
441 
442 	error = stge_mii_writereg(sc, &frame);
443 
444 	if (error != 0)
445 		device_printf(sc->sc_dev, "phy write fail\n");
446 	return (0);
447 }
448 
449 /*
450  * stge_miibus_statchg:	[mii interface function]
451  *
452  *	Callback from MII layer when media changes.
453  */
454 static void
455 stge_miibus_statchg(device_t dev)
456 {
457 	struct stge_softc *sc;
458 	struct mii_data *mii;
459 
460 	sc = device_get_softc(dev);
461 	mii = device_get_softc(sc->sc_miibus);
462 
463 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
464 		return;
465 
466 	sc->sc_MACCtrl = 0;
467 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
468 		sc->sc_MACCtrl |= MC_DuplexSelect;
469 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
470 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
471 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
472 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
473 
474 	stge_link(sc);
475 }
476 
477 /*
478  * stge_mediastatus:	[ifmedia interface function]
479  *
480  *	Get the current interface media status.
481  */
482 static void
483 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
484 {
485 	struct stge_softc *sc;
486 	struct mii_data *mii;
487 
488 	sc = ifp->if_softc;
489 	mii = device_get_softc(sc->sc_miibus);
490 
491 	mii_pollstat(mii);
492 	ifmr->ifm_status = mii->mii_media_status;
493 	ifmr->ifm_active = mii->mii_media_active;
494 }
495 
496 /*
497  * stge_mediachange:	[ifmedia interface function]
498  *
499  *	Set hardware to newly-selected media.
500  */
501 static int
502 stge_mediachange(struct ifnet *ifp)
503 {
504 	struct stge_softc *sc;
505 	struct mii_data *mii;
506 
507 	sc = ifp->if_softc;
508 	mii = device_get_softc(sc->sc_miibus);
509 	mii_mediachg(mii);
510 
511 	return (0);
512 }
513 
514 static int
515 stge_eeprom_wait(struct stge_softc *sc)
516 {
517 	int i;
518 
519 	for (i = 0; i < STGE_TIMEOUT; i++) {
520 		DELAY(1000);
521 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
522 			return (0);
523 	}
524 	return (1);
525 }
526 
/*
 * stge_read_eeprom:
 *
 *	Read one 16-bit word from the serial EEPROM at word `offset'
 *	into *data.  Timeouts are reported on the console but the read
 *	of STGE_EepromData is performed regardless.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	/* Issue a Read-Register opcode for the requested word. */
	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}
545 
546 
547 static int
548 stge_probe(device_t dev)
549 {
550 	struct stge_product *sp;
551 	uint16_t vendor, devid;
552 
553 	vendor = pci_get_vendor(dev);
554 	devid = pci_get_device(dev);
555 
556 	for (sp = stge_products; sp->stge_name != NULL; sp++) {
557 		if (vendor == sp->stge_vendorid &&
558 		    devid == sp->stge_deviceid) {
559 			device_set_desc(dev, sp->stge_name);
560 			return (0);
561 		}
562 	}
563 
564 	return (ENXIO);
565 }
566 
/*
 * stge_attach:
 *
 *	Attach routine: restore power state, map a BAR, allocate the
 *	IRQ, create sysctl nodes, set up DMA, read the station address,
 *	initialize the ifnet, probe the PHY and hook up the interrupt.
 *	On failure it falls through to `fail' and unwinds via
 *	stge_detach(), which tolerates partially-initialized state.
 */
static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->sc_tick_ch);

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;

		/*
		 * Save important PCI config data.
		 * NOTE(review): the 4-byte access at PCIR_INTLINE also
		 * covers INTPIN/MINGNT/MAXLAT -- presumably intentional
		 * (round-trip save/restore), but confirm.
		 */
		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);

	/*
	 * Select the memory BAR when its low bit is set, else fall back
	 * to the I/O BAR.  NOTE(review): this bit-0 validity test is the
	 * driver's historical convention -- confirm against if_stgereg.h.
	 */
	if ((val & 0x01) != 0) {
		sc->sc_res_rid = STGE_PCIR_LOMEM;
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_rid = STGE_PCIR_LOIO;
		sc->sc_res_type = SYS_RES_IOPORT;

		val = pci_read_config(dev, sc->sc_res_rid, 4);
		if ((val & 0x01) == 0) {
			device_printf(dev, "couldn't locate IO BAR\n");
			return ENXIO;
		}
	}

	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
					    &sc->sc_res_rid, RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		return ENXIO;
	}
	sc->sc_btag = rman_get_bustag(sc->sc_res);
	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					    &sc->sc_irq_rid,
					    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_rev = pci_get_revid(dev);

	/* Default Rx interrupt moderation values; tunable via sysctl. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;

	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");

	error = stge_dma_alloc(sc);
	if (error != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers. For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		/* Station address registers hold the MAC in LE 16-bit words. */
		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		/* ST-1023: MAC lives in the EEPROM as three LE words. */
		for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
	ifp->if_init = stge_init;
#ifdef DEVICE_POLLING
	ifp->if_poll = stge_poll;
#endif
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/* Revision B3 and earlier chips have checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register: the
	 * polarity bits are preserved by the MII bit-bang routines.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
	    stge_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr, NULL);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
#ifdef notyet
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hookup IRQ
	 */
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

fail:
	/* Error unwind: stge_detach() handles partially-initialized state. */
	if (error != 0)
		stge_detach(dev);

	return (error);
}
816 
/*
 * stge_detach:
 *
 *	Detach routine; also used as the error-unwind path from
 *	stge_attach(), so every release below is guarded against a
 *	resource that was never allocated.  Always returns 0.
 */
static int
stge_detach(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	/* Free the sysctl tree created in stge_attach(). */
	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	/* Tear down the child miibus before generic bus detach. */
	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	stge_dma_free(sc);

	if (sc->sc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq);
	}
	if (sc->sc_res != NULL) {
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
				     sc->sc_res);
	}

	return (0);
}
854 
/*
 * stge_dma_alloc:
 *
 *	Create the parent DMA tag, allocate the Tx/Rx descriptor rings,
 *	and create per-buffer tags and maps (plus one spare Rx map used
 *	for buffer swapping).  Returns 0 on success or a bus_dma error;
 *	on partial failure the already-created resources of the failing
 *	stage are rolled back here, and the rest by stge_dma_free().
 */
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/*
	 * create parent tag.
	 * NOTE(review): nsegments of 0 looks odd; confirm the DragonFly
	 * bus_dma_tag_create() treats 0 as "unrestricted" for a parent tag.
	 */
	error = bus_dma_tag_create(NULL,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		return error;
	}

	/* allocate Tx ring (coherent, zeroed, ring-aligned). */
	sc->sc_rdata.stge_tx_ring =
		bus_dmamem_coherent_any(sc->sc_cdata.stge_parent_tag,
			STGE_RING_ALIGN, STGE_TX_RING_SZ,
			BUS_DMA_WAITOK | BUS_DMA_ZERO,
			&sc->sc_cdata.stge_tx_ring_tag,
			&sc->sc_cdata.stge_tx_ring_map,
			&sc->sc_rdata.stge_tx_ring_paddr);
	if (sc->sc_rdata.stge_tx_ring == NULL) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring\n");
		return ENOMEM;
	}

	/* allocate Rx ring. */
	sc->sc_rdata.stge_rx_ring =
		bus_dmamem_coherent_any(sc->sc_cdata.stge_parent_tag,
			STGE_RING_ALIGN, STGE_RX_RING_SZ,
			BUS_DMA_WAITOK | BUS_DMA_ZERO,
			&sc->sc_cdata.stge_rx_ring_tag,
			&sc->sc_cdata.stge_rx_ring_map,
			&sc->sc_rdata.stge_rx_ring_paddr);
	if (sc->sc_rdata.stge_rx_ring == NULL) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring\n");
		return ENOMEM;
	}

	/* create tag for Tx buffers (multi-segment, jumbo-capable). */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_JUMBO_FRAMELEN,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    STGE_MAXSGSIZE,		/* maxsegsize */
		    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,/* flags */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		return error;
	}

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag,
				BUS_DMA_WAITOK, &txd->tx_dmamap);
		if (error != 0) {
			int j;

			/* Roll back the maps created so far, then the tag. */
			for (j = 0; j < i; ++j) {
				txd = &sc->sc_cdata.stge_txdesc[j];
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
					txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
			sc->sc_cdata.stge_tx_tag = NULL;

			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			return error;
		}
	}

	/* create tag for Rx buffers (single mbuf cluster per buffer). */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,/* flags */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		return error;
	}

	/* create DMA maps for Rx buffers; the spare map is swapped in
	 * when a new buffer is loaded (see stge_newbuf()). */
	error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, BUS_DMA_WAITOK,
			&sc->sc_cdata.stge_rx_sparemap);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag,
				BUS_DMA_WAITOK, &rxd->rx_dmamap);
		if (error != 0) {
			int j;

			/* Roll back maps, the spare map, then the tag. */
			for (j = 0; j < i; ++j) {
				rxd = &sc->sc_cdata.stge_rxdesc[j];
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
					rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				sc->sc_cdata.stge_rx_sparemap);
			bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
			sc->sc_cdata.stge_rx_tag = NULL;

			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			return error;
		}
	}
	return 0;
}
993 
/*
 * stge_dma_free:
 *
 *	Release everything created by stge_dma_alloc().  Each stage is
 *	guarded by its tag pointer, so this is safe to call on a
 *	partially-allocated softc (the attach error path relies on it).
 */
static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
		    sc->sc_cdata.stge_tx_ring_map);
		bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
		    sc->sc_rdata.stge_tx_ring,
		    sc->sc_cdata.stge_tx_ring_map);
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
	}

	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map);
		bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_rdata.stge_rx_ring,
		    sc->sc_cdata.stge_rx_ring_map);
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
	}

	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
	}

	/* Rx buffers (including the spare map) */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
		    sc->sc_cdata.stge_rx_sparemap);
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
	}

	/* Top level tag, destroyed last so the children above go first. */
	if (sc->sc_cdata.stge_parent_tag)
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
}
1047 
1048 /*
1049  * stge_shutdown:
1050  *
1051  *	Make sure the interface is stopped at reboot time.
1052  */
1053 static void
1054 stge_shutdown(device_t dev)
1055 {
1056 	struct stge_softc *sc = device_get_softc(dev);
1057 	struct ifnet *ifp = &sc->arpcom.ac_if;
1058 
1059 	lwkt_serialize_enter(ifp->if_serializer);
1060 	stge_stop(sc);
1061 	lwkt_serialize_exit(ifp->if_serializer);
1062 }
1063 
1064 static int
1065 stge_suspend(device_t dev)
1066 {
1067 	struct stge_softc *sc = device_get_softc(dev);
1068 	struct ifnet *ifp = &sc->arpcom.ac_if;
1069 
1070 	lwkt_serialize_enter(ifp->if_serializer);
1071 	stge_stop(sc);
1072 	sc->sc_suspended = 1;
1073 	lwkt_serialize_exit(ifp->if_serializer);
1074 
1075 	return (0);
1076 }
1077 
1078 static int
1079 stge_resume(device_t dev)
1080 {
1081 	struct stge_softc *sc = device_get_softc(dev);
1082 	struct ifnet *ifp = &sc->arpcom.ac_if;
1083 
1084 	lwkt_serialize_enter(ifp->if_serializer);
1085 	if (ifp->if_flags & IFF_UP)
1086 		stge_init(sc);
1087 	sc->sc_suspended = 0;
1088 	lwkt_serialize_exit(ifp->if_serializer);
1089 
1090 	return (0);
1091 }
1092 
1093 static void
1094 stge_dma_wait(struct stge_softc *sc)
1095 {
1096 	int i;
1097 
1098 	for (i = 0; i < STGE_TIMEOUT; i++) {
1099 		DELAY(2);
1100 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1101 			break;
1102 	}
1103 
1104 	if (i == STGE_TIMEOUT)
1105 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1106 }
1107 
/*
 * stge_encap:
 *
 *	Map the mbuf chain *m_head for DMA and fill in one Transmit
 *	Frame Descriptor (TFD) at the current producer index.  On DMA
 *	mapping failure the chain is freed, *m_head is cleared and the
 *	errno from the load is returned; returns 0 on success.  The
 *	caller guarantees ring space (STGE_TX_HIWAT check in
 *	stge_start()) and holds the interface serializer.
 */
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, si, nsegs;
	uint64_t csum_flags, tfc;

	/* Caller checked for ring space, so a free descriptor must exist. */
	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq);
	KKASSERT(txd != NULL);

	/* Load the chain; may defragment it to fit STGE_MAXTXSEGS. */
	error =  bus_dmamap_load_mbuf_defrag(sc->sc_cdata.stge_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, STGE_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* The load may have replaced the chain (defrag); re-read it. */
	m = *m_head;

	/* Translate stack checksum-offload requests into TFD bits. */
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	/* Write the fragment list into the descriptor at the producer. */
	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++) {
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	}
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	/*
	 * Request a Tx-complete indication once the ring is getting
	 * full, so stge_txeof() can reclaim descriptors promptly.
	 */
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vlantag);
	/* Writing tfd_control last publishes the descriptor to the chip. */
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	return (0);
}
1173 
1174 /*
1175  * stge_start:		[ifnet interface function]
1176  *
1177  *	Start packet transmission on the interface.
1178  */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Nothing to do unless running and not flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Stop early if the Tx ring is nearly full. */
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			/*
			 * If nothing is in flight the failure was not a
			 * full ring (the mbuf was freed by stge_encap);
			 * try the next packet.  Otherwise back off and
			 * wait for completions.
			 */
			if (sc->sc_cdata.stge_tx_cnt == 0) {
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1235 
1236 /*
1237  * stge_watchdog:	[ifnet interface function]
1238  *
1239  *	Watchdog timer handler.
1240  */
1241 static void
1242 stge_watchdog(struct ifnet *ifp)
1243 {
1244 	ASSERT_SERIALIZED(ifp->if_serializer);
1245 
1246 	if_printf(ifp, "device timeout\n");
1247 	ifp->if_oerrors++;
1248 	stge_init(ifp->if_softc);
1249 }
1250 
1251 /*
1252  * stge_ioctl:		[ifnet interface function]
1253  *
1254  *	Handle control requests from the operator.
1255  */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Accept any MTU up to the jumbo limit; reinit to apply. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			stge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				/*
				 * Only reprogram the Rx filter when the
				 * PROMISC bit actually changed.
				 */
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				/* Don't start up while detaching. */
				if (sc->sc_detach == 0)
					stge_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				stge_stop(sc);
		}
		/* Remember flags so the next call can diff against them. */
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			stge_set_multi(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media handling is delegated to the MII layer. */
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds the capability bits the caller is toggling. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_flags & IFF_RUNNING)
				stge_vlan_setup(sc);
		}
#if 0
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
1330 
/*
 * stge_link:
 *
 *	Program MAC duplex/flow-control bits from the cached
 *	sc_MACCtrl after a link change, and reset the Tx/Rx units
 *	if the duplex setting actually changed.
 */
static void
stge_link(struct stge_softc *sc)
{
	uint32_t v, ac;
	int i;

	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/* ac still holds the pre-update value for the duplex diff below. */
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
}
1359 
/*
 * stge_tx_error:
 *
 *	Drain and handle TxStatus error events.  Returns -1 when a Tx
 *	underrun was seen (caller must do a full reinit), 0 otherwise.
 */
static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	/* Each TxStatus read pops one completion event; loop until empty. */
	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a more better way to recover
			 * from Tx underrun instead of a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}
1395 
1396 /*
1397  * stge_intr:
1398  *
1399  *	Interrupt service routine.
1400  */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int reinit;
	uint16_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Cheap read first: bail if the interrupt isn't ours. */
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		return;

	/* Disable interrupts. */
	for (reinit = 0;;) {
		/* Reading IntStatusAck also acknowledges/clears the bits. */
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc, -1);
			/* Ring ran dry; kick the Rx DMA engine again. */
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors.*/
		if ((status & IS_TxComplete) != 0) {
			/* stge_tx_error() returns -1 on Tx underrun. */
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0)
		stge_init(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1459 
1460 /*
1461  * stge_txeof:
1462  *
1463  *	Helper; handle transmit interrupts.
1464  */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		/* Stop at the first descriptor the chip hasn't completed. */
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;

		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		/* Recycle the descriptor onto the free queue. */
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;

	/* Ring drained below the high-water mark; allow new transmits. */
	if (sc->sc_cdata.stge_tx_cnt < STGE_TX_HIWAT)
		ifp->if_flags &= ~IFF_OACTIVE;
	/* Nothing in flight; cancel the watchdog. */
	if (sc->sc_cdata.stge_tx_cnt == 0)
		ifp->if_timer = 0;
}
1506 
1507 static __inline void
1508 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1509 {
1510 	struct stge_rfd *rfd;
1511 
1512 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
1513 	rfd->rfd_status = 0;
1514 }
1515 
1516 #ifndef __i386__
1517 /*
1518  * It seems that TC9021's DMA engine has alignment restrictions in
1519  * DMA scatter operations. The first DMA segment has no address
1520  * alignment restrictins but the rest should be aligned on 4(?) bytes
1521  * boundary. Otherwise it would corrupt random memory. Since we don't
1522  * know which one is used for the first segment in advance we simply
1523  * don't align at all.
1524  * To avoid copying over an entire frame to align, we allocate a new
1525  * mbuf and copy ethernet header to the new mbuf. The new mbuf is
1526  * prepended into the existing mbuf chain.
1527  */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room in the cluster: shift the frame forward by the
		 * header length so the payload becomes 4-byte aligned.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/*
		 * No room: copy just the Ethernet header into a fresh
		 * mbuf and prepend it, leaving the payload aligned in
		 * the original mbuf.
		 */
		MGETHDR(n, MB_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			/* Allocation failed: drop the frame. */
			m_freem(m);
	}

	return (n);
}
1553 #endif
1554 
1555 /*
1556  * stge_rxeof:
1557  *
1558  *	Helper; handle receive interrupts.
1559  */
static void
stge_rxeof(struct stge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog;

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
#ifdef DEVICE_POLLING
		/* count < 0 means interrupt path: no polling budget. */
		if (count >= 0 && count-- == 0)
			break;
#endif

		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;

		/*
		 * NOTE(review): prog is also incremented by the
		 * for-statement, so each accepted descriptor advances
		 * it twice and at most STGE_RX_RING_CNT/2 descriptors
		 * are processed per call — presumably benign since the
		 * next call continues; verify against the FreeBSD
		 * original.
		 */
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			/* Also drop any partially-assembled chain. */
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons, 0) != 0) {
			/* No replacement mbuf: recycle and drop. */
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		/*
		 * The last fragment's length is the total frame length
		 * minus what was accumulated in earlier fragments.
		 */
		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			/* Frame complete: finalize the chain and hand it up. */
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			/* Sanity: drop frames longer than the MAC allows. */
			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					     CSUM_PSEUDO_HDR |
					     CSUM_FRAG_NOT_CHECKED);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __i386__
			/* Realign the payload; see stge_fixup_rx(). */
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif

			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag = RFD_TCI(status64);
			}
			/* Pass it on. */
			ifp->if_input(ifp, m);

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
	}
}
1691 
1692 #ifdef DEVICE_POLLING
/*
 * stge_poll:
 *
 *	Polling entry point.  REGISTER/DEREGISTER toggle the chip's
 *	interrupt mask; ONLY/AND_CHECK_STATUS run the Rx/Tx reclaim
 *	paths with a `count' budget, the latter also checking for
 *	host-interface and Tx errors.
 */
static void
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;

	sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Mask chip interrupts while polling is active. */
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
		break;
	case POLL_DEREGISTER:
		/* Restore the saved interrupt mask. */
		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		sc->sc_cdata.stge_rxcycles = count;
		stge_rxeof(sc, count);
		stge_txeof(sc);

		if (cmd == POLL_AND_CHECK_STATUS) {
			status = CSR_READ_2(sc, STGE_IntStatus);
			status &= sc->sc_IntEnable;
			if (status != 0) {
				if (status & IS_HostError) {
					device_printf(sc->sc_dev,
					"Host interface error, "
					"resetting...\n");
					stge_init(sc);
				}
				if ((status & IS_TxComplete) != 0 &&
				    stge_tx_error(sc) != 0)
					stge_init(sc);
			}

		}

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}
}
1735 #endif	/* DEVICE_POLLING */
1736 
1737 /*
1738  * stge_tick:
1739  *
1740  *	One second timer, used to tick the MII.
1741  */
1742 static void
1743 stge_tick(void *arg)
1744 {
1745 	struct stge_softc *sc = arg;
1746 	struct ifnet *ifp = &sc->arpcom.ac_if;
1747 	struct mii_data *mii;
1748 
1749 	lwkt_serialize_enter(ifp->if_serializer);
1750 
1751 	mii = device_get_softc(sc->sc_miibus);
1752 	mii_tick(mii);
1753 
1754 	/* Update statistics counters. */
1755 	stge_stats_update(sc);
1756 
1757 	/*
1758 	 * Relcaim any pending Tx descriptors to release mbufs in a
1759 	 * timely manner as we don't generate Tx completion interrupts
1760 	 * for every frame. This limits the delay to a maximum of one
1761 	 * second.
1762 	 */
1763 	if (sc->sc_cdata.stge_tx_cnt != 0)
1764 		stge_txeof(sc);
1765 
1766 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1767 
1768 	lwkt_serialize_exit(ifp->if_serializer);
1769 }
1770 
1771 /*
1772  * stge_stats_update:
1773  *
1774  *	Read the TC9021 statistics counters.
1775  */
1776 static void
1777 stge_stats_update(struct stge_softc *sc)
1778 {
1779 	struct ifnet *ifp = &sc->arpcom.ac_if;
1780 
1781 	CSR_READ_4(sc,STGE_OctetRcvOk);
1782 
1783 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1784 
1785 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1786 
1787 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1788 
1789 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1790 
1791 	ifp->if_collisions +=
1792 	    CSR_READ_4(sc, STGE_LateCollisions) +
1793 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1794 	    CSR_READ_4(sc, STGE_SingleColFrames);
1795 
1796 	ifp->if_oerrors +=
1797 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1798 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1799 }
1800 
1801 /*
1802  * stge_reset:
1803  *
1804  *	Perform a soft reset on the TC9021.
1805  */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	/* dv: per-poll delay in microseconds; longer for a full reset. */
	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber.  We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	/* Wait for the chip to deassert its reset-busy bit. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	/* sc_led low nibble selects LED mode/speed bits; see attach code. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}
1868 
1869 /*
1870  * stge_init:		[ ifnet interface function ]
1871  *
1872  *	Initialize the interface.
1873  */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	/* The MAC address is programmed as three little-endian words. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us. When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS.*/
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	/* Enable statistics and the Tx/Rx MACs. */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * Rx/Tx MAC wedge the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}
2061 
2062 static void
2063 stge_vlan_setup(struct stge_softc *sc)
2064 {
2065 	struct ifnet *ifp = &sc->arpcom.ac_if;
2066 	uint32_t v;
2067 
2068 	/*
2069 	 * The NIC always copy a VLAN tag regardless of STGE_MACCtrl
2070 	 * MC_AutoVLANuntagging bit.
2071 	 * MC_AutoVLANtagging bit selects which VLAN source to use
2072 	 * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert
2073 	 * bit has priority over MC_AutoVLANtagging bit. So we always
2074 	 * use TFC instead of STGE_VLANTag register.
2075 	 */
2076 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2077 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2078 		v |= MC_AutoVLANuntagging;
2079 	else
2080 		v &= ~MC_AutoVLANuntagging;
2081 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2082 }
2083 
2084 /*
2085  *	Stop transmission on the interface.
2086  */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	/* Clearing the list pointers parks both DMA engines. */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
        }
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
        }

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
2159 
2160 static void
2161 stge_start_tx(struct stge_softc *sc)
2162 {
2163 	uint32_t v;
2164 	int i;
2165 
2166 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2167 	if ((v & MC_TxEnabled) != 0)
2168 		return;
2169 	v |= MC_TxEnable;
2170 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2171 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2172 	for (i = STGE_TIMEOUT; i > 0; i--) {
2173 		DELAY(10);
2174 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2175 		if ((v & MC_TxEnabled) != 0)
2176 			break;
2177 	}
2178 	if (i == 0)
2179 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2180 }
2181 
2182 static void
2183 stge_start_rx(struct stge_softc *sc)
2184 {
2185 	uint32_t v;
2186 	int i;
2187 
2188 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2189 	if ((v & MC_RxEnabled) != 0)
2190 		return;
2191 	v |= MC_RxEnable;
2192 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2193 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2194 	for (i = STGE_TIMEOUT; i > 0; i--) {
2195 		DELAY(10);
2196 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2197 		if ((v & MC_RxEnabled) != 0)
2198 			break;
2199 	}
2200 	if (i == 0)
2201 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2202 }
2203 
2204 static void
2205 stge_stop_tx(struct stge_softc *sc)
2206 {
2207 	uint32_t v;
2208 	int i;
2209 
2210 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2211 	if ((v & MC_TxEnabled) == 0)
2212 		return;
2213 	v |= MC_TxDisable;
2214 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2215 	for (i = STGE_TIMEOUT; i > 0; i--) {
2216 		DELAY(10);
2217 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2218 		if ((v & MC_TxEnabled) == 0)
2219 			break;
2220 	}
2221 	if (i == 0)
2222 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2223 }
2224 
2225 static void
2226 stge_stop_rx(struct stge_softc *sc)
2227 {
2228 	uint32_t v;
2229 	int i;
2230 
2231 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2232 	if ((v & MC_RxEnabled) == 0)
2233 		return;
2234 	v |= MC_RxDisable;
2235 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2236 	for (i = STGE_TIMEOUT; i > 0; i--) {
2237 		DELAY(10);
2238 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2239 		if ((v & MC_RxEnabled) == 0)
2240 			break;
2241 	}
2242 	if (i == 0)
2243 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2244 }
2245 
2246 static void
2247 stge_init_tx_ring(struct stge_softc *sc)
2248 {
2249 	struct stge_ring_data *rd;
2250 	struct stge_txdesc *txd;
2251 	bus_addr_t addr;
2252 	int i;
2253 
2254 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2255 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2256 
2257 	sc->sc_cdata.stge_tx_prod = 0;
2258 	sc->sc_cdata.stge_tx_cons = 0;
2259 	sc->sc_cdata.stge_tx_cnt = 0;
2260 
2261 	rd = &sc->sc_rdata;
2262 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2263 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2264 		if (i == (STGE_TX_RING_CNT - 1))
2265 			addr = STGE_TX_RING_ADDR(sc, 0);
2266 		else
2267 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2268 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2269 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2270 		txd = &sc->sc_cdata.stge_txdesc[i];
2271 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2272 	}
2273 }
2274 
2275 static int
2276 stge_init_rx_ring(struct stge_softc *sc)
2277 {
2278 	struct stge_ring_data *rd;
2279 	bus_addr_t addr;
2280 	int i;
2281 
2282 	sc->sc_cdata.stge_rx_cons = 0;
2283 	STGE_RXCHAIN_RESET(sc);
2284 
2285 	rd = &sc->sc_rdata;
2286 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2287 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2288 		if (stge_newbuf(sc, i, 1) != 0)
2289 			return (ENOBUFS);
2290 		if (i == (STGE_RX_RING_CNT - 1))
2291 			addr = STGE_RX_RING_ADDR(sc, 0);
2292 		else
2293 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2294 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2295 		rd->stge_rx_ring[i].rfd_status = 0;
2296 	}
2297 	return (0);
2298 }
2299 
2300 /*
2301  * stge_newbuf:
2302  *
2303  *	Add a receive buffer to the indicated descriptor.
2304  */
2305 static int
2306 stge_newbuf(struct stge_softc *sc, int idx, int waitok)
2307 {
2308 	struct stge_rxdesc *rxd;
2309 	struct stge_rfd *rfd;
2310 	struct mbuf *m;
2311 	bus_dma_segment_t seg;
2312 	bus_dmamap_t map;
2313 	int error, nseg;
2314 
2315 	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2316 	if (m == NULL)
2317 		return ENOBUFS;
2318 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2319 
2320 	/*
2321 	 * The hardware requires 4bytes aligned DMA address when JUMBO
2322 	 * frame is used.
2323 	 */
2324 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2325 		m_adj(m, ETHER_ALIGN);
2326 
2327 	error = bus_dmamap_load_mbuf_segment(sc->sc_cdata.stge_rx_tag,
2328 			sc->sc_cdata.stge_rx_sparemap, m,
2329 			&seg, 1, &nseg, BUS_DMA_NOWAIT);
2330 	if (error) {
2331 		m_freem(m);
2332 		return error;
2333 	}
2334 
2335 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2336 	if (rxd->rx_m != NULL) {
2337 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2338 		    BUS_DMASYNC_POSTREAD);
2339 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2340 	}
2341 
2342 	map = rxd->rx_dmamap;
2343 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2344 	sc->sc_cdata.stge_rx_sparemap = map;
2345 
2346 	rxd->rx_m = m;
2347 
2348 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2349 	rfd->rfd_frag.frag_word0 =
2350 	    htole64(FRAG_ADDR(seg.ds_addr) | FRAG_LEN(seg.ds_len));
2351 	rfd->rfd_status = 0;
2352 
2353 	return 0;
2354 }
2355 
2356 /*
2357  * stge_set_filter:
2358  *
2359  *	Set up the receive filter.
2360  */
2361 static void
2362 stge_set_filter(struct stge_softc *sc)
2363 {
2364 	struct ifnet *ifp = &sc->arpcom.ac_if;
2365 	uint16_t mode;
2366 
2367 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2368 	mode |= RM_ReceiveUnicast;
2369 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2370 		mode |= RM_ReceiveBroadcast;
2371 	else
2372 		mode &= ~RM_ReceiveBroadcast;
2373 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2374 		mode |= RM_ReceiveAllFrames;
2375 	else
2376 		mode &= ~RM_ReceiveAllFrames;
2377 
2378 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2379 }
2380 
2381 static void
2382 stge_set_multi(struct stge_softc *sc)
2383 {
2384 	struct ifnet *ifp = &sc->arpcom.ac_if;
2385 	struct ifmultiaddr *ifma;
2386 	uint32_t crc;
2387 	uint32_t mchash[2];
2388 	uint16_t mode;
2389 	int count;
2390 
2391 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2392 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2393 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2394 			mode |= RM_ReceiveAllFrames;
2395 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2396 			mode |= RM_ReceiveMulticast;
2397 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2398 		return;
2399 	}
2400 
2401 	/* clear existing filters. */
2402 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2403 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2404 
2405 	/*
2406 	 * Set up the multicast address filter by passing all multicast
2407 	 * addresses through a CRC generator, and then using the low-order
2408 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2409 	 * high order bits select the register, while the rest of the bits
2410 	 * select the bit within the register.
2411 	 */
2412 
2413 	bzero(mchash, sizeof(mchash));
2414 
2415 	count = 0;
2416 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2417 		if (ifma->ifma_addr->sa_family != AF_LINK)
2418 			continue;
2419 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2420 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2421 
2422 		/* Just want the 6 least significant bits. */
2423 		crc &= 0x3f;
2424 
2425 		/* Set the corresponding bit in the hash table. */
2426 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2427 		count++;
2428 	}
2429 
2430 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2431 	if (count > 0)
2432 		mode |= RM_ReceiveMulticastHash;
2433 	else
2434 		mode &= ~RM_ReceiveMulticastHash;
2435 
2436 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2437 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2438 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2439 }
2440 
2441 static int
2442 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2443 {
2444 	return (sysctl_int_range(oidp, arg1, arg2, req,
2445 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2446 }
2447 
2448 static int
2449 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2450 {
2451 	return (sysctl_int_range(oidp, arg1, arg2, req,
2452 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2453 }
2454