xref: /dragonfly/sys/dev/netif/stge/if_stge.c (revision 6e278935)
1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 /*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
3 
4 /*-
5  * Copyright (c) 2001 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Device driver for the Sundance Tech. TC9021 10/100/1000
42  * Ethernet controller.
43  */
44 
45 #include "opt_polling.h"
46 
47 #include <sys/param.h>
48 #include <sys/bus.h>
49 #include <sys/endian.h>
50 #include <sys/kernel.h>
51 #include <sys/interrupt.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/module.h>
55 #include <sys/rman.h>
56 #include <sys/serialize.h>
57 #include <sys/socket.h>
58 #include <sys/sockio.h>
59 #include <sys/sysctl.h>
60 
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if.h>
64 #include <net/if_arp.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/ifq_var.h>
69 #include <net/vlan/if_vlan_var.h>
70 #include <net/vlan/if_vlan_ether.h>
71 
72 #include <dev/netif/mii_layer/mii.h>
73 #include <dev/netif/mii_layer/miivar.h>
74 
75 #include <bus/pci/pcireg.h>
76 #include <bus/pci/pcivar.h>
77 
78 #include "if_stgereg.h"
79 #include "if_stgevar.h"
80 
81 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
82 
83 /* "device miibus" required.  See GENERIC if you get errors here. */
84 #include "miibus_if.h"
85 
86 /*
87  * Devices supported by this driver.
88  */
/*
 * Table of PCI vendor/device IDs supported by this driver, walked by
 * stge_probe().  Terminated by an all-zero sentinel entry.
 */
static struct stge_product {
	uint16_t	stge_vendorid;	/* PCI vendor ID */
	uint16_t	stge_deviceid;	/* PCI device ID */
	const char	*stge_name;	/* probe description string */
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
	  "D-Link DL-2000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" },

	{ 0, 0, NULL }
};
124 
125 static int	stge_probe(device_t);
126 static int	stge_attach(device_t);
127 static int	stge_detach(device_t);
128 static void	stge_shutdown(device_t);
129 static int	stge_suspend(device_t);
130 static int	stge_resume(device_t);
131 
132 static int	stge_encap(struct stge_softc *, struct mbuf **);
133 static void	stge_start(struct ifnet *);
134 static void	stge_watchdog(struct ifnet *);
135 static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
136 static void	stge_init(void *);
137 static void	stge_vlan_setup(struct stge_softc *);
138 static void	stge_stop(struct stge_softc *);
139 static void	stge_start_tx(struct stge_softc *);
140 static void	stge_start_rx(struct stge_softc *);
141 static void	stge_stop_tx(struct stge_softc *);
142 static void	stge_stop_rx(struct stge_softc *);
143 
144 static void	stge_reset(struct stge_softc *, uint32_t);
145 static int	stge_eeprom_wait(struct stge_softc *);
146 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
147 static void	stge_tick(void *);
148 static void	stge_stats_update(struct stge_softc *);
149 static void	stge_set_filter(struct stge_softc *);
150 static void	stge_set_multi(struct stge_softc *);
151 
152 static void	stge_link(struct stge_softc *);
153 static void	stge_intr(void *);
154 static __inline int stge_tx_error(struct stge_softc *);
155 static void	stge_txeof(struct stge_softc *);
156 static void	stge_rxeof(struct stge_softc *, int);
157 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
158 static int	stge_newbuf(struct stge_softc *, int, int);
159 #ifndef __i386__
160 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
161 #endif
162 
163 static void	stge_mii_sync(struct stge_softc *);
164 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
165 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
166 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
167 static int	stge_miibus_readreg(device_t, int, int);
168 static int	stge_miibus_writereg(device_t, int, int, int);
169 static void	stge_miibus_statchg(device_t);
170 static int	stge_mediachange(struct ifnet *);
171 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
172 
173 static int	stge_dma_alloc(struct stge_softc *);
174 static void	stge_dma_free(struct stge_softc *);
175 static void	stge_dma_wait(struct stge_softc *);
176 static void	stge_init_tx_ring(struct stge_softc *);
177 static int	stge_init_rx_ring(struct stge_softc *);
178 #ifdef DEVICE_POLLING
179 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
180 #endif
181 
182 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
183 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
184 
/* Newbus device and miibus method dispatch table for the stge driver. */
static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	{ 0, 0 }

};
202 
/* Driver glue: unit name, method table and softc size for newbus. */
static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DECLARE_DUMMY_MODULE(if_stge);
/* Attach below pci; miibus attaches below us for the PHY. */
MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, NULL, NULL);
215 
/*
 * Set/clear bits in the chip's PhyCtrl register; used for bit-banging
 * MII frames.  Both macros rely on a local `sc' in the calling scope.
 */
#define	MII_SET(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
#define	MII_CLR(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
220 
221 /*
222  * Sync the PHYs by setting data bit and strobing the clock 32 times.
223  */
224 static void
225 stge_mii_sync(struct stge_softc	*sc)
226 {
227 	int i;
228 
229 	MII_SET(PC_MgmtDir | PC_MgmtData);
230 
231 	for (i = 0; i < 32; i++) {
232 		MII_SET(PC_MgmtClk);
233 		DELAY(1);
234 		MII_CLR(PC_MgmtClk);
235 		DELAY(1);
236 	}
237 }
238 
239 /*
240  * Clock a series of bits through the MII.
241  */
242 static void
243 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
244 {
245 	int i;
246 
247 	MII_CLR(PC_MgmtClk);
248 
249 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
250 		if (bits & i)
251 			MII_SET(PC_MgmtData);
252                 else
253 			MII_CLR(PC_MgmtData);
254 		DELAY(1);
255 		MII_CLR(PC_MgmtClk);
256 		DELAY(1);
257 		MII_SET(PC_MgmtClk);
258 	}
259 }
260 
261 /*
262  * Read an PHY register through the MII.
263  */
264 static int
265 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
266 {
267 	int i, ack;
268 
269 	/*
270 	 * Set up frame for RX.
271 	 */
272 	frame->mii_stdelim = STGE_MII_STARTDELIM;
273 	frame->mii_opcode = STGE_MII_READOP;
274 	frame->mii_turnaround = 0;
275 	frame->mii_data = 0;
276 
277 	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
278 	/*
279  	 * Turn on data xmit.
280 	 */
281 	MII_SET(PC_MgmtDir);
282 
283 	stge_mii_sync(sc);
284 
285 	/*
286 	 * Send command/address info.
287 	 */
288 	stge_mii_send(sc, frame->mii_stdelim, 2);
289 	stge_mii_send(sc, frame->mii_opcode, 2);
290 	stge_mii_send(sc, frame->mii_phyaddr, 5);
291 	stge_mii_send(sc, frame->mii_regaddr, 5);
292 
293 	/* Turn off xmit. */
294 	MII_CLR(PC_MgmtDir);
295 
296 	/* Idle bit */
297 	MII_CLR((PC_MgmtClk | PC_MgmtData));
298 	DELAY(1);
299 	MII_SET(PC_MgmtClk);
300 	DELAY(1);
301 
302 	/* Check for ack */
303 	MII_CLR(PC_MgmtClk);
304 	DELAY(1);
305 	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
306 	MII_SET(PC_MgmtClk);
307 	DELAY(1);
308 
309 	/*
310 	 * Now try reading data bits. If the ack failed, we still
311 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
312 	 */
313 	if (ack) {
314 		for(i = 0; i < 16; i++) {
315 			MII_CLR(PC_MgmtClk);
316 			DELAY(1);
317 			MII_SET(PC_MgmtClk);
318 			DELAY(1);
319 		}
320 		goto fail;
321 	}
322 
323 	for (i = 0x8000; i; i >>= 1) {
324 		MII_CLR(PC_MgmtClk);
325 		DELAY(1);
326 		if (!ack) {
327 			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
328 				frame->mii_data |= i;
329 			DELAY(1);
330 		}
331 		MII_SET(PC_MgmtClk);
332 		DELAY(1);
333 	}
334 
335 fail:
336 	MII_CLR(PC_MgmtClk);
337 	DELAY(1);
338 	MII_SET(PC_MgmtClk);
339 	DELAY(1);
340 
341 	if (ack)
342 		return(1);
343 	return(0);
344 }
345 
346 /*
347  * Write to a PHY register through the MII.
348  */
349 static int
350 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
351 {
352 
353 	/*
354 	 * Set up frame for TX.
355 	 */
356 	frame->mii_stdelim = STGE_MII_STARTDELIM;
357 	frame->mii_opcode = STGE_MII_WRITEOP;
358 	frame->mii_turnaround = STGE_MII_TURNAROUND;
359 
360 	/*
361  	 * Turn on data output.
362 	 */
363 	MII_SET(PC_MgmtDir);
364 
365 	stge_mii_sync(sc);
366 
367 	stge_mii_send(sc, frame->mii_stdelim, 2);
368 	stge_mii_send(sc, frame->mii_opcode, 2);
369 	stge_mii_send(sc, frame->mii_phyaddr, 5);
370 	stge_mii_send(sc, frame->mii_regaddr, 5);
371 	stge_mii_send(sc, frame->mii_turnaround, 2);
372 	stge_mii_send(sc, frame->mii_data, 16);
373 
374 	/* Idle bit. */
375 	MII_SET(PC_MgmtClk);
376 	DELAY(1);
377 	MII_CLR(PC_MgmtClk);
378 	DELAY(1);
379 
380 	/*
381 	 * Turn off xmit.
382 	 */
383 	MII_CLR(PC_MgmtDir);
384 
385 	return(0);
386 }
387 
388 /*
389  * sc_miibus_readreg:	[mii interface function]
390  *
391  *	Read a PHY register on the MII of the TC9021.
392  */
393 static int
394 stge_miibus_readreg(device_t dev, int phy, int reg)
395 {
396 	struct stge_softc *sc;
397 	struct stge_mii_frame frame;
398 	int error;
399 
400 	sc = device_get_softc(dev);
401 
402 	if (reg == STGE_PhyCtrl) {
403 		/* XXX allow ip1000phy read STGE_PhyCtrl register. */
404 		error = CSR_READ_1(sc, STGE_PhyCtrl);
405 		return (error);
406 	}
407 	bzero(&frame, sizeof(frame));
408 	frame.mii_phyaddr = phy;
409 	frame.mii_regaddr = reg;
410 
411 	error = stge_mii_readreg(sc, &frame);
412 
413 	if (error != 0) {
414 		/* Don't show errors for PHY probe request */
415 		if (reg != 1)
416 			device_printf(sc->sc_dev, "phy read fail\n");
417 		return (0);
418 	}
419 	return (frame.mii_data);
420 }
421 
422 /*
423  * stge_miibus_writereg:	[mii interface function]
424  *
425  *	Write a PHY register on the MII of the TC9021.
426  */
427 static int
428 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
429 {
430 	struct stge_softc *sc;
431 	struct stge_mii_frame frame;
432 	int error;
433 
434 	sc = device_get_softc(dev);
435 
436 	bzero(&frame, sizeof(frame));
437 	frame.mii_phyaddr = phy;
438 	frame.mii_regaddr = reg;
439 	frame.mii_data = val;
440 
441 	error = stge_mii_writereg(sc, &frame);
442 
443 	if (error != 0)
444 		device_printf(sc->sc_dev, "phy write fail\n");
445 	return (0);
446 }
447 
448 /*
449  * stge_miibus_statchg:	[mii interface function]
450  *
451  *	Callback from MII layer when media changes.
452  */
453 static void
454 stge_miibus_statchg(device_t dev)
455 {
456 	struct stge_softc *sc;
457 	struct mii_data *mii;
458 
459 	sc = device_get_softc(dev);
460 	mii = device_get_softc(sc->sc_miibus);
461 
462 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
463 		return;
464 
465 	sc->sc_MACCtrl = 0;
466 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
467 		sc->sc_MACCtrl |= MC_DuplexSelect;
468 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
469 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
470 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
471 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
472 
473 	stge_link(sc);
474 }
475 
476 /*
477  * stge_mediastatus:	[ifmedia interface function]
478  *
479  *	Get the current interface media status.
480  */
481 static void
482 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
483 {
484 	struct stge_softc *sc;
485 	struct mii_data *mii;
486 
487 	sc = ifp->if_softc;
488 	mii = device_get_softc(sc->sc_miibus);
489 
490 	mii_pollstat(mii);
491 	ifmr->ifm_status = mii->mii_media_status;
492 	ifmr->ifm_active = mii->mii_media_active;
493 }
494 
495 /*
496  * stge_mediachange:	[ifmedia interface function]
497  *
498  *	Set hardware to newly-selected media.
499  */
500 static int
501 stge_mediachange(struct ifnet *ifp)
502 {
503 	struct stge_softc *sc;
504 	struct mii_data *mii;
505 
506 	sc = ifp->if_softc;
507 	mii = device_get_softc(sc->sc_miibus);
508 	mii_mediachg(mii);
509 
510 	return (0);
511 }
512 
513 static int
514 stge_eeprom_wait(struct stge_softc *sc)
515 {
516 	int i;
517 
518 	for (i = 0; i < STGE_TIMEOUT; i++) {
519 		DELAY(1000);
520 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
521 			return (0);
522 	}
523 	return (1);
524 }
525 
526 /*
527  * stge_read_eeprom:
528  *
529  *	Read data from the serial EEPROM.
530  */
531 static void
532 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
533 {
534 
535 	if (stge_eeprom_wait(sc))
536 		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
537 
538 	CSR_WRITE_2(sc, STGE_EepromCtrl,
539 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
540 	if (stge_eeprom_wait(sc))
541 		device_printf(sc->sc_dev, "EEPROM read timed out\n");
542 	*data = CSR_READ_2(sc, STGE_EepromData);
543 }
544 
545 
546 static int
547 stge_probe(device_t dev)
548 {
549 	struct stge_product *sp;
550 	uint16_t vendor, devid;
551 
552 	vendor = pci_get_vendor(dev);
553 	devid = pci_get_device(dev);
554 
555 	for (sp = stge_products; sp->stge_name != NULL; sp++) {
556 		if (vendor == sp->stge_vendorid &&
557 		    devid == sp->stge_deviceid) {
558 			device_set_desc(dev, sp->stge_name);
559 			return (0);
560 		}
561 	}
562 
563 	return (ENXIO);
564 }
565 
/*
 * stge_attach:
 *
 *	Attach a TC9021 interface: map the registers, allocate the DMA
 *	rings, read the station address, initialize the ifnet and MII
 *	bus, and hook up the interrupt.  On any failure after resource
 *	allocation starts, the partially initialized device is torn
 *	down via stge_detach().
 */
static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->sc_tick_ch);

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif

	/*
	 * Map the device.  Prefer memory space, falling back to I/O
	 * space if the memory BAR does not look usable.
	 * NOTE(review): bit 0 of the LOMEM BAR value is used as the
	 * "usable" test here -- confirm against the TC9021 config
	 * space layout.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);

	if ((val & 0x01) != 0) {
		sc->sc_res_rid = STGE_PCIR_LOMEM;
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_rid = STGE_PCIR_LOIO;
		sc->sc_res_type = SYS_RES_IOPORT;

		val = pci_read_config(dev, sc->sc_res_rid, 4);
		if ((val & 0x01) == 0) {
			device_printf(dev, "couldn't locate IO BAR\n");
			return ENXIO;
		}
	}

	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
					    &sc->sc_res_rid, RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		return ENXIO;
	}
	sc->sc_btag = rman_get_bustag(sc->sc_res);
	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					    &sc->sc_irq_rid,
					    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_rev = pci_get_revid(dev);

	/* Default Rx interrupt moderation settings, tunable via sysctl. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;

	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");

	error = stge_dma_alloc(sc);
	if (error != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers. For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		/* Each station address register holds two bytes, LSB first. */
		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
	ifp->if_init = stge_init;
#ifdef DEVICE_POLLING
	ifp->if_poll = stge_poll;
#endif
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/* Revision B3 and earlier chips have checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 * These polarity bits are merged into every PhyCtrl write
	 * done by the MII bit-bang routines.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
	    stge_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr, NULL);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
#ifdef notyet
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hookup IRQ
	 */
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

fail:
	/* Common exit: on error, undo everything via the detach path. */
	if (error != 0)
		stge_detach(dev);

	return (error);
}
815 
/*
 * stge_detach:
 *
 *	Detach the interface: stop the chip, tear down the interrupt,
 *	unhook from the network stack, then release sysctl, miibus,
 *	DMA and bus resources.  Also used by stge_attach() for error
 *	unwinding, so every release below is guarded against resources
 *	that were never allocated.
 */
static int
stge_detach(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	stge_dma_free(sc);

	if (sc->sc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq);
	}
	if (sc->sc_res != NULL) {
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
				     sc->sc_res);
	}

	return (0);
}
853 
/*
 * stge_dma_alloc:
 *
 *	Create all DMA resources: a parent tag, coherent Tx/Rx
 *	descriptor rings, a Tx buffer tag with one map per Tx slot,
 *	and an Rx buffer tag with one map per Rx slot plus a spare
 *	map used when replacing Rx mbufs.  Returns 0 or an errno.
 *	On failure, per-slot maps created so far are destroyed here;
 *	the caller is expected to finish cleanup via stge_dma_free().
 */
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(NULL,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		return error;
	}

	/* allocate Tx ring. */
	sc->sc_rdata.stge_tx_ring =
		bus_dmamem_coherent_any(sc->sc_cdata.stge_parent_tag,
			STGE_RING_ALIGN, STGE_TX_RING_SZ,
			BUS_DMA_WAITOK | BUS_DMA_ZERO,
			&sc->sc_cdata.stge_tx_ring_tag,
			&sc->sc_cdata.stge_tx_ring_map,
			&sc->sc_rdata.stge_tx_ring_paddr);
	if (sc->sc_rdata.stge_tx_ring == NULL) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring\n");
		return ENOMEM;
	}

	/* allocate Rx ring. */
	sc->sc_rdata.stge_rx_ring =
		bus_dmamem_coherent_any(sc->sc_cdata.stge_parent_tag,
			STGE_RING_ALIGN, STGE_RX_RING_SZ,
			BUS_DMA_WAITOK | BUS_DMA_ZERO,
			&sc->sc_cdata.stge_rx_ring_tag,
			&sc->sc_cdata.stge_rx_ring_map,
			&sc->sc_rdata.stge_rx_ring_paddr);
	if (sc->sc_rdata.stge_rx_ring == NULL) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring\n");
		return ENOMEM;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_JUMBO_FRAMELEN,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    STGE_MAXSGSIZE,		/* maxsegsize */
		    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,/* flags */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		return error;
	}

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag,
				BUS_DMA_WAITOK, &txd->tx_dmamap);
		if (error != 0) {
			int j;

			/* Unwind the maps created before the failure. */
			for (j = 0; j < i; ++j) {
				txd = &sc->sc_cdata.stge_txdesc[j];
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
					txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
			sc->sc_cdata.stge_tx_tag = NULL;

			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			return error;
		}
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,/* flags */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		return error;
	}

	/* create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, BUS_DMA_WAITOK,
			&sc->sc_cdata.stge_rx_sparemap);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag,
				BUS_DMA_WAITOK, &rxd->rx_dmamap);
		if (error != 0) {
			int j;

			/* Unwind the maps created before the failure. */
			for (j = 0; j < i; ++j) {
				rxd = &sc->sc_cdata.stge_rxdesc[j];
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
					rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				sc->sc_cdata.stge_rx_sparemap);
			bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
			sc->sc_cdata.stge_rx_tag = NULL;

			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			return error;
		}
	}
	return 0;
}
992 
/*
 * stge_dma_free:
 *
 *	Release everything created by stge_dma_alloc().  Each group is
 *	guarded by its tag pointer, so this is safe to call on a
 *	partially initialized softc (e.g. from the attach error path).
 */
static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
		    sc->sc_cdata.stge_tx_ring_map);
		bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
		    sc->sc_rdata.stge_tx_ring,
		    sc->sc_cdata.stge_tx_ring_map);
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
	}

	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map);
		bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_rdata.stge_rx_ring,
		    sc->sc_cdata.stge_rx_ring_map);
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
	}

	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
	}

	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
		    sc->sc_cdata.stge_rx_sparemap);
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
	}

	/* Top level tag */
	if (sc->sc_cdata.stge_parent_tag)
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
}
1046 
1047 /*
1048  * stge_shutdown:
1049  *
1050  *	Make sure the interface is stopped at reboot time.
1051  */
1052 static void
1053 stge_shutdown(device_t dev)
1054 {
1055 	struct stge_softc *sc = device_get_softc(dev);
1056 	struct ifnet *ifp = &sc->arpcom.ac_if;
1057 
1058 	lwkt_serialize_enter(ifp->if_serializer);
1059 	stge_stop(sc);
1060 	lwkt_serialize_exit(ifp->if_serializer);
1061 }
1062 
1063 static int
1064 stge_suspend(device_t dev)
1065 {
1066 	struct stge_softc *sc = device_get_softc(dev);
1067 	struct ifnet *ifp = &sc->arpcom.ac_if;
1068 
1069 	lwkt_serialize_enter(ifp->if_serializer);
1070 	stge_stop(sc);
1071 	sc->sc_suspended = 1;
1072 	lwkt_serialize_exit(ifp->if_serializer);
1073 
1074 	return (0);
1075 }
1076 
1077 static int
1078 stge_resume(device_t dev)
1079 {
1080 	struct stge_softc *sc = device_get_softc(dev);
1081 	struct ifnet *ifp = &sc->arpcom.ac_if;
1082 
1083 	lwkt_serialize_enter(ifp->if_serializer);
1084 	if (ifp->if_flags & IFF_UP)
1085 		stge_init(sc);
1086 	sc->sc_suspended = 0;
1087 	lwkt_serialize_exit(ifp->if_serializer);
1088 
1089 	return (0);
1090 }
1091 
1092 static void
1093 stge_dma_wait(struct stge_softc *sc)
1094 {
1095 	int i;
1096 
1097 	for (i = 0; i < STGE_TIMEOUT; i++) {
1098 		DELAY(2);
1099 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1100 			break;
1101 	}
1102 
1103 	if (i == STGE_TIMEOUT)
1104 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1105 }
1106 
/*
 * stge_encap:
 *
 *	Map the mbuf chain *m_head for DMA and fill one transmit frame
 *	descriptor (TFD) at the current producer index.  On failure the
 *	chain is freed and *m_head is set to NULL.  Caller must hold the
 *	serializer and must have left room in the ring (see stge_start).
 */
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, si, nsegs;
	uint64_t csum_flags, tfc;

	/* Caller guarantees a free Tx descriptor is available. */
	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq);
	KKASSERT(txd != NULL);

	/*
	 * Load the chain into at most STGE_MAXTXSEGS DMA segments (the
	 * helper may defragment the chain, hence the **m_head interface).
	 * On error the chain is released and the caller must not touch
	 * *m_head again.
	 */
	error =  bus_dmamap_load_mbuf_defrag(sc->sc_cdata.stge_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, STGE_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;

	/* Translate mbuf checksum-offload requests into TFD control bits. */
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	/* Fill the fragment list of the TFD at the producer index. */
	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++) {
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	}
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	/*
	 * Only request a Tx DMA completion interrupt when the ring is
	 * filling up; otherwise reclamation is deferred to stge_tick().
	 */
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vlantag);
	tfd->tfd_control = htole64(tfc);

	/* Move the descriptor from the free queue to the busy queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	return (0);
}
1172 
1173 /*
1174  * stge_start:		[ifnet interface function]
1175  *
1176  *	Start packet transmission on the interface.
1177  */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Nothing to do unless running and not flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Throttle before dequeueing so no packet is dropped here. */
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.  stge_encap() frees
		 * the mbuf on failure; with an empty ring there is
		 * nothing to wait for, so just move on.
		 */
		if (stge_encap(sc, &m_head)) {
			if (sc->sc_cdata.stge_tx_cnt == 0) {
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Kick the Tx DMA engine. */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1234 
1235 /*
1236  * stge_watchdog:	[ifnet interface function]
1237  *
1238  *	Watchdog timer handler.
1239  */
1240 static void
1241 stge_watchdog(struct ifnet *ifp)
1242 {
1243 	ASSERT_SERIALIZED(ifp->if_serializer);
1244 
1245 	if_printf(ifp, "device timeout\n");
1246 	ifp->if_oerrors++;
1247 	stge_init(ifp->if_softc);
1248 }
1249 
1250 /*
1251  * stge_ioctl:		[ifnet interface function]
1252  *
1253  *	Handle control requests from the operator.
1254  */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Any MTU up to the jumbo limit; reinit applies it. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			stge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				/*
				 * While running, only a PROMISC change
				 * needs action: reprogram the Rx filter.
				 */
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				/* Don't restart the chip mid-detach. */
				if (sc->sc_detach == 0)
					stge_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				stge_stop(sc);
		}
		/* Remember the flags so the next call sees the delta. */
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			stge_set_multi(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media selection is delegated to the MII layer. */
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_flags & IFF_RUNNING)
				stge_vlan_setup(sc);
		}
#if 0
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		/* Everything else (addresses etc.) goes to ether_ioctl(). */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
1329 
/*
 * stge_link:
 *
 *	Apply the link parameters held in sc_MACCtrl (duplex, flow
 *	control) to the MACCtrl register; if the duplex setting changed,
 *	reset the Tx/Rx functions as well.
 */
static void
stge_link(struct stge_softc *sc)
{
	uint32_t v, ac;
	int i;

	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		/* Wait (100us per spin) for the reset to self-clear. */
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
}
1358 
/*
 * stge_tx_error:
 *
 *	Drain TxStatus and recover from transmit error conditions.
 *	Returns -1 when a Tx underrun was seen (the caller must fully
 *	reinitialize the chip), 0 otherwise.
 */
static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from Tx underrun instead of a full reset.
			 */
			/* Rate-limit reports to the first STGE_MAXERR. */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}
1394 
1395 /*
1396  * stge_intr:
1397  *
1398  *	Interrupt service routine.
1399  */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int reinit;
	uint16_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Bail out if suspended or the interrupt is not ours. */
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		return;

	/*
	 * Loop, acknowledging sources until none remain pending.
	 * NOTE(review): reading IntStatusAck appears to ack/clear the
	 * sources and disable further interrupts until the IntEnable
	 * write below -- confirm against the TC9021 documentation.
	 */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc, -1);
			/* Restart Rx DMA if the chip ran off the RFD list. */
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors; -1 from stge_tx_error() forces reinit. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0)
		stge_init(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1458 
1459 /*
1460  * stge_txeof:
1461  *
1462  *	Helper; handle transmit interrupts.
1463  */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	/* Nothing in flight? */
	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.  txd walks the busy
	 * queue in lockstep with the ring index `cons'.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		/* Stop at the first descriptor the chip hasn't finished. */
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;

		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;

	/* Reopen the ring to stge_start() once below the high-water mark. */
	if (sc->sc_cdata.stge_tx_cnt < STGE_TX_HIWAT)
		ifp->if_flags &= ~IFF_OACTIVE;
	/* Cancel the watchdog once the ring is fully drained. */
	if (sc->sc_cdata.stge_tx_cnt == 0)
		ifp->if_timer = 0;
}
1505 
1506 static __inline void
1507 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1508 {
1509 	struct stge_rfd *rfd;
1510 
1511 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
1512 	rfd->rfd_status = 0;
1513 }
1514 
1515 #ifndef __i386__
1516 /*
1517  * It seems that TC9021's DMA engine has alignment restrictions in
1518  * DMA scatter operations. The first DMA segment has no address
1519  * alignment restrictins but the rest should be aligned on 4(?) bytes
1520  * boundary. Otherwise it would corrupt random memory. Since we don't
1521  * know which one is used for the first segment in advance we simply
1522  * don't align at all.
1523  * To avoid copying over an entire frame to align, we allocate a new
1524  * mbuf and copy ethernet header to the new mbuf. The new mbuf is
1525  * prepended into the existing mbuf chain.
1526  */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Enough room in the cluster: slide the whole frame up
		 * by ETHER_HDR_LEN bytes.  The regions overlap, so this
		 * relies on bcopy handling overlapping copies.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/*
		 * No room: allocate a separate header mbuf, copy the
		 * Ethernet header into it, and chain the original mbuf
		 * (now pointing at the payload) behind it.
		 */
		MGETHDR(n, MB_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Transfer the packet header to the new head mbuf. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);	/* allocation failed: drop the frame */
	}

	return (n);
}
1552 #endif
1553 
1554 /*
1555  * stge_rxeof:
1556  *
1557  *	Helper; handle receive interrupts.
1558  */
static void
stge_rxeof(struct stge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog;

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
#ifdef DEVICE_POLLING
		/* count < 0 means "no budget" (interrupt path). */
		if (count >= 0 && count-- == 0)
			break;
#endif

		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;

		/*
		 * NOTE(review): prog is also bumped by the loop header,
		 * so each completed descriptor advances it by two and a
		 * single pass handles at most half the ring.
		 */
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			/* Toss any partially assembled chain as well. */
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.  On failure the
		 * incoming frame is dropped and the old buffer reused.
		 */
		if (stge_newbuf(sc, cons, 0) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		/*
		 * A frame may span several descriptors; stge_rxlen
		 * accumulates the bytes collected so far, and the last
		 * fragment's length is derived from the total DMA frame
		 * length reported in the final descriptor.
		 */
		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			/* Drop frames larger than the configured maximum. */
			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					     CSUM_PSEUDO_HDR |
					     CSUM_FRAG_NOT_CHECKED);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __i386__
			/* See stge_fixup_rx() for the alignment story. */
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif

			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag = RFD_TCI(status64);
			}
			/* Pass it on. */
			ifp->if_input(ifp, m);

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
	}
}
1690 
1691 #ifdef DEVICE_POLLING
/*
 * stge_poll:	[ifnet interface function]
 *
 *	DEVICE_POLLING handler; runs in place of the interrupt routine
 *	while polling is registered on this interface.
 */
static void
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;

	sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Mask chip interrupts while polling is active. */
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
		break;
	case POLL_DEREGISTER:
		/* Polling stopped: restore the interrupt mask. */
		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		/* Reap at most `count' Rx frames plus any finished Tx. */
		sc->sc_cdata.stge_rxcycles = count;
		stge_rxeof(sc, count);
		stge_txeof(sc);

		if (cmd == POLL_AND_CHECK_STATUS) {
			status = CSR_READ_2(sc, STGE_IntStatus);
			status &= sc->sc_IntEnable;
			if (status != 0) {
				if (status & IS_HostError) {
					device_printf(sc->sc_dev,
					"Host interface error, "
					"resetting...\n");
					stge_init(sc);
				}
				/* Reinit on unrecoverable Tx errors too. */
				if ((status & IS_TxComplete) != 0 &&
				    stge_tx_error(sc) != 0)
					stge_init(sc);
			}

		}

		/* Try to get more packets going. */
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}
}
1734 #endif	/* DEVICE_POLLING */
1735 
1736 /*
1737  * stge_tick:
1738  *
1739  *	One second timer, used to tick the MII.
1740  */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	/* Drive MII link-state maintenance. */
	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame. This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	/* Rearm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
1769 
1770 /*
1771  * stge_stats_update:
1772  *
1773  *	Read the TC9021 statistics counters.
1774  */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * NOTE(review): the value-discarding reads below appear to
	 * exist only to advance/clear the hardware counters -- confirm
	 * against the TC9021 documentation before removing them.
	 */
	CSR_READ_4(sc,STGE_OctetRcvOk);

	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);

	/* All collision flavors are folded into one ifnet counter. */
	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
}
1799 
1800 /*
1801  * stge_reset:
1802  *
1803  *	Perform a soft reset on the TC9021.
1804  */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	/*
	 * `how' selects STGE_RESET_TX, STGE_RESET_RX or STGE_RESET_FULL
	 * (the default for unknown values).  dv is the per-poll delay
	 * in microseconds.
	 */
	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber.  We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	/* Poll until the chip clears AC_ResetBusy. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}
1867 
1868 /*
1869  * stge_init:		[ ifnet interface function ]
1870  *
1871  *	Initialize the interface.
1872  */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address, 16 bits at a time. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threhold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us. When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS.*/
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * Rx/Tx MAC wedge the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Reached on both success and the rx-ring failure path above. */
 out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}
2060 
2061 static void
2062 stge_vlan_setup(struct stge_softc *sc)
2063 {
2064 	struct ifnet *ifp = &sc->arpcom.ac_if;
2065 	uint32_t v;
2066 
2067 	/*
2068 	 * The NIC always copy a VLAN tag regardless of STGE_MACCtrl
2069 	 * MC_AutoVLANuntagging bit.
2070 	 * MC_AutoVLANtagging bit selects which VLAN source to use
2071 	 * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert
2072 	 * bit has priority over MC_AutoVLANtagging bit. So we always
2073 	 * use TFC instead of STGE_VLANTag register.
2074 	 */
2075 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2076 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2077 		v |= MC_AutoVLANuntagging;
2078 	else
2079 		v &= ~MC_AutoVLANuntagging;
2080 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2081 }
2082 
2083 /*
2084  *	Stop transmission on the interface.
2085  */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA, then clear the list
	 * pointers so the chip forgets about our rings.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues (the DMA maps must
	 * be unloaded before the mbufs can be released).
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
        }
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
        }

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
2158 
2159 static void
2160 stge_start_tx(struct stge_softc *sc)
2161 {
2162 	uint32_t v;
2163 	int i;
2164 
2165 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2166 	if ((v & MC_TxEnabled) != 0)
2167 		return;
2168 	v |= MC_TxEnable;
2169 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2170 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2171 	for (i = STGE_TIMEOUT; i > 0; i--) {
2172 		DELAY(10);
2173 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2174 		if ((v & MC_TxEnabled) != 0)
2175 			break;
2176 	}
2177 	if (i == 0)
2178 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2179 }
2180 
2181 static void
2182 stge_start_rx(struct stge_softc *sc)
2183 {
2184 	uint32_t v;
2185 	int i;
2186 
2187 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2188 	if ((v & MC_RxEnabled) != 0)
2189 		return;
2190 	v |= MC_RxEnable;
2191 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2192 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2193 	for (i = STGE_TIMEOUT; i > 0; i--) {
2194 		DELAY(10);
2195 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2196 		if ((v & MC_RxEnabled) != 0)
2197 			break;
2198 	}
2199 	if (i == 0)
2200 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2201 }
2202 
2203 static void
2204 stge_stop_tx(struct stge_softc *sc)
2205 {
2206 	uint32_t v;
2207 	int i;
2208 
2209 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2210 	if ((v & MC_TxEnabled) == 0)
2211 		return;
2212 	v |= MC_TxDisable;
2213 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2214 	for (i = STGE_TIMEOUT; i > 0; i--) {
2215 		DELAY(10);
2216 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2217 		if ((v & MC_TxEnabled) == 0)
2218 			break;
2219 	}
2220 	if (i == 0)
2221 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2222 }
2223 
2224 static void
2225 stge_stop_rx(struct stge_softc *sc)
2226 {
2227 	uint32_t v;
2228 	int i;
2229 
2230 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2231 	if ((v & MC_RxEnabled) == 0)
2232 		return;
2233 	v |= MC_RxDisable;
2234 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2235 	for (i = STGE_TIMEOUT; i > 0; i--) {
2236 		DELAY(10);
2237 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2238 		if ((v & MC_RxEnabled) == 0)
2239 			break;
2240 	}
2241 	if (i == 0)
2242 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2243 }
2244 
2245 static void
2246 stge_init_tx_ring(struct stge_softc *sc)
2247 {
2248 	struct stge_ring_data *rd;
2249 	struct stge_txdesc *txd;
2250 	bus_addr_t addr;
2251 	int i;
2252 
2253 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2254 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2255 
2256 	sc->sc_cdata.stge_tx_prod = 0;
2257 	sc->sc_cdata.stge_tx_cons = 0;
2258 	sc->sc_cdata.stge_tx_cnt = 0;
2259 
2260 	rd = &sc->sc_rdata;
2261 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2262 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2263 		if (i == (STGE_TX_RING_CNT - 1))
2264 			addr = STGE_TX_RING_ADDR(sc, 0);
2265 		else
2266 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2267 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2268 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2269 		txd = &sc->sc_cdata.stge_txdesc[i];
2270 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2271 	}
2272 }
2273 
2274 static int
2275 stge_init_rx_ring(struct stge_softc *sc)
2276 {
2277 	struct stge_ring_data *rd;
2278 	bus_addr_t addr;
2279 	int i;
2280 
2281 	sc->sc_cdata.stge_rx_cons = 0;
2282 	STGE_RXCHAIN_RESET(sc);
2283 
2284 	rd = &sc->sc_rdata;
2285 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2286 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2287 		if (stge_newbuf(sc, i, 1) != 0)
2288 			return (ENOBUFS);
2289 		if (i == (STGE_RX_RING_CNT - 1))
2290 			addr = STGE_RX_RING_ADDR(sc, 0);
2291 		else
2292 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2293 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2294 		rd->stge_rx_ring[i].rfd_status = 0;
2295 	}
2296 	return (0);
2297 }
2298 
2299 /*
2300  * stge_newbuf:
2301  *
2302  *	Add a receive buffer to the indicated descriptor.
2303  */
2304 static int
2305 stge_newbuf(struct stge_softc *sc, int idx, int waitok)
2306 {
2307 	struct stge_rxdesc *rxd;
2308 	struct stge_rfd *rfd;
2309 	struct mbuf *m;
2310 	bus_dma_segment_t seg;
2311 	bus_dmamap_t map;
2312 	int error, nseg;
2313 
2314 	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2315 	if (m == NULL)
2316 		return ENOBUFS;
2317 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2318 
2319 	/*
2320 	 * The hardware requires 4bytes aligned DMA address when JUMBO
2321 	 * frame is used.
2322 	 */
2323 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2324 		m_adj(m, ETHER_ALIGN);
2325 
2326 	error = bus_dmamap_load_mbuf_segment(sc->sc_cdata.stge_rx_tag,
2327 			sc->sc_cdata.stge_rx_sparemap, m,
2328 			&seg, 1, &nseg, BUS_DMA_NOWAIT);
2329 	if (error) {
2330 		m_freem(m);
2331 		return error;
2332 	}
2333 
2334 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2335 	if (rxd->rx_m != NULL) {
2336 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2337 		    BUS_DMASYNC_POSTREAD);
2338 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2339 	}
2340 
2341 	map = rxd->rx_dmamap;
2342 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2343 	sc->sc_cdata.stge_rx_sparemap = map;
2344 
2345 	rxd->rx_m = m;
2346 
2347 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2348 	rfd->rfd_frag.frag_word0 =
2349 	    htole64(FRAG_ADDR(seg.ds_addr) | FRAG_LEN(seg.ds_len));
2350 	rfd->rfd_status = 0;
2351 
2352 	return 0;
2353 }
2354 
2355 /*
2356  * stge_set_filter:
2357  *
2358  *	Set up the receive filter.
2359  */
2360 static void
2361 stge_set_filter(struct stge_softc *sc)
2362 {
2363 	struct ifnet *ifp = &sc->arpcom.ac_if;
2364 	uint16_t mode;
2365 
2366 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2367 	mode |= RM_ReceiveUnicast;
2368 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2369 		mode |= RM_ReceiveBroadcast;
2370 	else
2371 		mode &= ~RM_ReceiveBroadcast;
2372 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2373 		mode |= RM_ReceiveAllFrames;
2374 	else
2375 		mode &= ~RM_ReceiveAllFrames;
2376 
2377 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2378 }
2379 
2380 static void
2381 stge_set_multi(struct stge_softc *sc)
2382 {
2383 	struct ifnet *ifp = &sc->arpcom.ac_if;
2384 	struct ifmultiaddr *ifma;
2385 	uint32_t crc;
2386 	uint32_t mchash[2];
2387 	uint16_t mode;
2388 	int count;
2389 
2390 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2391 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2392 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2393 			mode |= RM_ReceiveAllFrames;
2394 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2395 			mode |= RM_ReceiveMulticast;
2396 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2397 		return;
2398 	}
2399 
2400 	/* clear existing filters. */
2401 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2402 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2403 
2404 	/*
2405 	 * Set up the multicast address filter by passing all multicast
2406 	 * addresses through a CRC generator, and then using the low-order
2407 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2408 	 * high order bits select the register, while the rest of the bits
2409 	 * select the bit within the register.
2410 	 */
2411 
2412 	bzero(mchash, sizeof(mchash));
2413 
2414 	count = 0;
2415 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2416 		if (ifma->ifma_addr->sa_family != AF_LINK)
2417 			continue;
2418 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2419 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2420 
2421 		/* Just want the 6 least significant bits. */
2422 		crc &= 0x3f;
2423 
2424 		/* Set the corresponding bit in the hash table. */
2425 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2426 		count++;
2427 	}
2428 
2429 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2430 	if (count > 0)
2431 		mode |= RM_ReceiveMulticastHash;
2432 	else
2433 		mode &= ~RM_ReceiveMulticastHash;
2434 
2435 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2436 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2437 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2438 }
2439 
/*
 * Sysctl handler for the Rx interrupt frame-count moderation knob;
 * accepts only values within [STGE_RXINT_NFRAME_MIN,
 * STGE_RXINT_NFRAME_MAX].
 */
static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}
2446 
/*
 * Sysctl handler for the Rx interrupt DMA-wait moderation knob;
 * accepts only values within [STGE_RXINT_DMAWAIT_MIN,
 * STGE_RXINT_DMAWAIT_MAX].
 */
static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
2453