xref: /dragonfly/sys/dev/netif/stge/if_stge.c (revision 0cfebe3d)
1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 /*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
3 /*	$DragonFly: src/sys/dev/netif/stge/if_stge.c,v 1.4 2008/03/10 12:59:52 sephe Exp $	*/
4 
5 /*-
6  * Copyright (c) 2001 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Jason R. Thorpe.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /*
42  * Device driver for the Sundance Tech. TC9021 10/100/1000
43  * Ethernet controller.
44  */
45 
46 #include "opt_polling.h"
47 
48 #include <sys/param.h>
49 #include <sys/bus.h>
50 #include <sys/endian.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/module.h>
55 #include <sys/rman.h>
56 #include <sys/serialize.h>
57 #include <sys/socket.h>
58 #include <sys/sockio.h>
59 #include <sys/sysctl.h>
60 
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if.h>
64 #include <net/if_arp.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/ifq_var.h>
69 #include <net/vlan/if_vlan_var.h>
70 #include <net/vlan/if_vlan_ether.h>
71 
72 #include <dev/netif/mii_layer/mii.h>
73 #include <dev/netif/mii_layer/miivar.h>
74 
75 #include <bus/pci/pcireg.h>
76 #include <bus/pci/pcivar.h>
77 
78 #include "if_stgereg.h"
79 #include "if_stgevar.h"
80 
81 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
82 
83 /* "device miibus" required.  See GENERIC if you get errors here. */
84 #include "miibus_if.h"
85 
86 /*
87  * Devices supported by this driver.
88  */
/*
 * PCI vendor/device ID table consumed by stge_probe().  The list is
 * terminated by the all-zero sentinel entry; stge_name == NULL stops
 * the probe loop.
 */
static struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
	  "D-Link DL-2000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" },

	/* Sentinel: terminates the table. */
	{ 0, 0, NULL }
};
124 
125 static int	stge_probe(device_t);
126 static int	stge_attach(device_t);
127 static int	stge_detach(device_t);
128 static void	stge_shutdown(device_t);
129 static int	stge_suspend(device_t);
130 static int	stge_resume(device_t);
131 
132 static int	stge_encap(struct stge_softc *, struct mbuf **);
133 static void	stge_start(struct ifnet *);
134 static void	stge_watchdog(struct ifnet *);
135 static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
136 static void	stge_init(void *);
137 static void	stge_vlan_setup(struct stge_softc *);
138 static void	stge_stop(struct stge_softc *);
139 static void	stge_start_tx(struct stge_softc *);
140 static void	stge_start_rx(struct stge_softc *);
141 static void	stge_stop_tx(struct stge_softc *);
142 static void	stge_stop_rx(struct stge_softc *);
143 
144 static void	stge_reset(struct stge_softc *, uint32_t);
145 static int	stge_eeprom_wait(struct stge_softc *);
146 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
147 static void	stge_tick(void *);
148 static void	stge_stats_update(struct stge_softc *);
149 static void	stge_set_filter(struct stge_softc *);
150 static void	stge_set_multi(struct stge_softc *);
151 
152 static void	stge_link(struct stge_softc *);
153 static void	stge_intr(void *);
154 static __inline int stge_tx_error(struct stge_softc *);
155 static void	stge_txeof(struct stge_softc *);
156 static void	stge_rxeof(struct stge_softc *, int);
157 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
158 static int	stge_newbuf(struct stge_softc *, int, int);
159 #ifndef __i386__
160 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
161 #endif
162 
163 static void	stge_mii_sync(struct stge_softc *);
164 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
165 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
166 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
167 static int	stge_miibus_readreg(device_t, int, int);
168 static int	stge_miibus_writereg(device_t, int, int, int);
169 static void	stge_miibus_statchg(device_t);
170 static int	stge_mediachange(struct ifnet *);
171 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
172 
173 static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
174 static void	stge_mbuf_dmamap_cb(void *, bus_dma_segment_t *, int,
175 				    bus_size_t, int);
176 static int	stge_dma_alloc(struct stge_softc *);
177 static void	stge_dma_free(struct stge_softc *);
178 static void	stge_dma_wait(struct stge_softc *);
179 static void	stge_init_tx_ring(struct stge_softc *);
180 static int	stge_init_rx_ring(struct stge_softc *);
181 #ifdef DEVICE_POLLING
182 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
183 #endif
184 
185 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
186 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
187 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
188 
/*
 * newbus method dispatch table for the stge driver; covers the
 * generic device interface plus the MII bus accessors.  Terminated
 * by the { 0, 0 } sentinel entry.
 */
static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	{ 0, 0 }

};
206 
/* Driver description: name, method table, and softc size for newbus. */
static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};
212 
213 static devclass_t stge_devclass;
214 
215 DECLARE_DUMMY_MODULE(if_stge);
216 MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
217 DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, 0, 0);
218 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
219 
/*
 * Read-modify-write helpers for the PhyCtrl register, used by the
 * MII bit-bang routines below.  Both expect a `sc' softc pointer to
 * be in scope at the expansion site.
 */
#define	MII_SET(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
#define	MII_CLR(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
224 
225 /*
226  * Sync the PHYs by setting data bit and strobing the clock 32 times.
227  */
static void
stge_mii_sync(struct stge_softc	*sc)
{
	int i;

	/* Drive the management data line high while clocking. */
	MII_SET(PC_MgmtDir | PC_MgmtData);

	/* 32 clock pulses with data held high resynchronize the PHY. */
	for (i = 0; i < 32; i++) {
		MII_SET(PC_MgmtClk);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
	}
}
242 
243 /*
244  * Clock a series of bits through the MII.
245  */
246 static void
247 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
248 {
249 	int i;
250 
251 	MII_CLR(PC_MgmtClk);
252 
253 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
254 		if (bits & i)
255 			MII_SET(PC_MgmtData);
256                 else
257 			MII_CLR(PC_MgmtData);
258 		DELAY(1);
259 		MII_CLR(PC_MgmtClk);
260 		DELAY(1);
261 		MII_SET(PC_MgmtClk);
262 	}
263 }
264 
265 /*
266  * Read an PHY register through the MII.
267  */
/*
 * Bit-bang a read frame over the MII management interface.
 * Returns 0 on success with the result in frame->mii_data,
 * 1 if the PHY did not acknowledge the transaction.
 */
static int
stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Reset PhyCtrl to just the saved polarity bits. */
	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit so the PHY can drive the data line. */
	MII_CLR(PC_MgmtDir);

	/* Idle bit */
	MII_CLR((PC_MgmtClk | PC_MgmtData));
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(PC_MgmtClk);
			DELAY(1);
			MII_SET(PC_MgmtClk);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(PC_MgmtClk);
		DELAY(1);
	}

fail:
	/* One final clock cycle to terminate the frame. */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}
349 
350 /*
351  * Write to a PHY register through the MII.
352  */
/*
 * Bit-bang a write frame over the MII management interface.
 * Always returns 0; writes are not acknowledged by the PHY.
 */
static int
stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
{

	/*
	 * Set up frame for TX.  Caller has filled in phyaddr, regaddr
	 * and data; we supply the protocol framing fields.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_WRITEOP;
	frame->mii_turnaround = STGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/* Start delimiter, opcode, addresses, turnaround, then data. */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);
	stge_mii_send(sc, frame->mii_turnaround, 2);
	stge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(PC_MgmtClk);
	DELAY(1);
	MII_CLR(PC_MgmtClk);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(PC_MgmtDir);

	return(0);
}
391 
392 /*
393  * sc_miibus_readreg:	[mii interface function]
394  *
395  *	Read a PHY register on the MII of the TC9021.
396  */
397 static int
398 stge_miibus_readreg(device_t dev, int phy, int reg)
399 {
400 	struct stge_softc *sc;
401 	struct stge_mii_frame frame;
402 	int error;
403 
404 	sc = device_get_softc(dev);
405 
406 	if (reg == STGE_PhyCtrl) {
407 		/* XXX allow ip1000phy read STGE_PhyCtrl register. */
408 		error = CSR_READ_1(sc, STGE_PhyCtrl);
409 		return (error);
410 	}
411 	bzero(&frame, sizeof(frame));
412 	frame.mii_phyaddr = phy;
413 	frame.mii_regaddr = reg;
414 
415 	error = stge_mii_readreg(sc, &frame);
416 
417 	if (error != 0) {
418 		/* Don't show errors for PHY probe request */
419 		if (reg != 1)
420 			device_printf(sc->sc_dev, "phy read fail\n");
421 		return (0);
422 	}
423 	return (frame.mii_data);
424 }
425 
426 /*
427  * stge_miibus_writereg:	[mii interface function]
428  *
429  *	Write a PHY register on the MII of the TC9021.
430  */
431 static int
432 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
433 {
434 	struct stge_softc *sc;
435 	struct stge_mii_frame frame;
436 	int error;
437 
438 	sc = device_get_softc(dev);
439 
440 	bzero(&frame, sizeof(frame));
441 	frame.mii_phyaddr = phy;
442 	frame.mii_regaddr = reg;
443 	frame.mii_data = val;
444 
445 	error = stge_mii_writereg(sc, &frame);
446 
447 	if (error != 0)
448 		device_printf(sc->sc_dev, "phy write fail\n");
449 	return (0);
450 }
451 
452 /*
453  * stge_miibus_statchg:	[mii interface function]
454  *
455  *	Callback from MII layer when media changes.
456  */
457 static void
458 stge_miibus_statchg(device_t dev)
459 {
460 	struct stge_softc *sc;
461 	struct mii_data *mii;
462 
463 	sc = device_get_softc(dev);
464 	mii = device_get_softc(sc->sc_miibus);
465 
466 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
467 		return;
468 
469 	sc->sc_MACCtrl = 0;
470 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
471 		sc->sc_MACCtrl |= MC_DuplexSelect;
472 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
473 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
474 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
475 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
476 
477 	stge_link(sc);
478 }
479 
480 /*
481  * stge_mediastatus:	[ifmedia interface function]
482  *
483  *	Get the current interface media status.
484  */
485 static void
486 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
487 {
488 	struct stge_softc *sc;
489 	struct mii_data *mii;
490 
491 	sc = ifp->if_softc;
492 	mii = device_get_softc(sc->sc_miibus);
493 
494 	mii_pollstat(mii);
495 	ifmr->ifm_status = mii->mii_media_status;
496 	ifmr->ifm_active = mii->mii_media_active;
497 }
498 
499 /*
500  * stge_mediachange:	[ifmedia interface function]
501  *
502  *	Set hardware to newly-selected media.
503  */
504 static int
505 stge_mediachange(struct ifnet *ifp)
506 {
507 	struct stge_softc *sc;
508 	struct mii_data *mii;
509 
510 	sc = ifp->if_softc;
511 	mii = device_get_softc(sc->sc_miibus);
512 	mii_mediachg(mii);
513 
514 	return (0);
515 }
516 
517 static int
518 stge_eeprom_wait(struct stge_softc *sc)
519 {
520 	int i;
521 
522 	for (i = 0; i < STGE_TIMEOUT; i++) {
523 		DELAY(1000);
524 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
525 			return (0);
526 	}
527 	return (1);
528 }
529 
530 /*
531  * stge_read_eeprom:
532  *
533  *	Read data from the serial EEPROM.
534  */
/*
 * stge_read_eeprom:
 *
 *	Read one 16-bit word from the serial EEPROM at `offset' into
 *	*data.  Timeouts are only logged; *data is still assigned from
 *	the data register afterwards.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	/* Issue a read opcode for the requested word, then wait. */
	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}
548 
549 
550 static int
551 stge_probe(device_t dev)
552 {
553 	struct stge_product *sp;
554 	uint16_t vendor, devid;
555 
556 	vendor = pci_get_vendor(dev);
557 	devid = pci_get_device(dev);
558 
559 	for (sp = stge_products; sp->stge_name != NULL; sp++) {
560 		if (vendor == sp->stge_vendorid &&
561 		    devid == sp->stge_deviceid) {
562 			device_set_desc(dev, sp->stge_name);
563 			return (0);
564 		}
565 	}
566 
567 	return (ENXIO);
568 }
569 
570 static int
571 stge_attach(device_t dev)
572 {
573 	struct stge_softc *sc;
574 	struct ifnet *ifp;
575 	uint8_t enaddr[ETHER_ADDR_LEN];
576 	int error, i;
577 	uint16_t cmd;
578 	uint32_t val;
579 
580 	error = 0;
581 	sc = device_get_softc(dev);
582 	sc->sc_dev = dev;
583 	ifp = &sc->arpcom.ac_if;
584 
585 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
586 
587 	callout_init(&sc->sc_tick_ch);
588 
589 #ifndef BURN_BRIDGES
590 	/*
591 	 * Handle power management nonsense.
592 	 */
593 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
594 		uint32_t iobase, membase, irq;
595 
596 		/* Save important PCI config data. */
597 		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
598 		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
599 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
600 
601 		/* Reset the power state. */
602 		device_printf(dev, "chip is in D%d power mode "
603 			      "-- setting to D0\n", pci_get_powerstate(dev));
604 
605 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
606 
607 		/* Restore PCI config data. */
608 		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
609 		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
610 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
611 	}
612 #endif
613 
614 	/*
615 	 * Map the device.
616 	 */
617 	pci_enable_busmaster(dev);
618 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
619 	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
620 
621 	if ((val & 0x01) != 0) {
622 		sc->sc_res_rid = STGE_PCIR_LOMEM;
623 		sc->sc_res_type = SYS_RES_MEMORY;
624 	} else {
625 		sc->sc_res_rid = STGE_PCIR_LOIO;
626 		sc->sc_res_type = SYS_RES_IOPORT;
627 
628 		val = pci_read_config(dev, sc->sc_res_rid, 4);
629 		if ((val & 0x01) == 0) {
630 			device_printf(dev, "couldn't locate IO BAR\n");
631 			return ENXIO;
632 		}
633 	}
634 
635 	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
636 					    &sc->sc_res_rid, RF_ACTIVE);
637 	if (sc->sc_res == NULL) {
638 		device_printf(dev, "couldn't allocate resource\n");
639 		return ENXIO;
640 	}
641 	sc->sc_btag = rman_get_bustag(sc->sc_res);
642 	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);
643 
644 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
645 					    &sc->sc_irq_rid,
646 					    RF_ACTIVE | RF_SHAREABLE);
647 	if (sc->sc_irq == NULL) {
648 		device_printf(dev, "couldn't allocate IRQ\n");
649 		error = ENXIO;
650 		goto fail;
651 	}
652 
653 	sc->sc_rev = pci_get_revid(dev);
654 
655 	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
656 	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
657 
658 	sysctl_ctx_init(&sc->sc_sysctl_ctx);
659 	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
660 					     SYSCTL_STATIC_CHILDREN(_hw),
661 					     OID_AUTO,
662 					     device_get_nameunit(dev),
663 					     CTLFLAG_RD, 0, "");
664 	if (sc->sc_sysctl_tree == NULL) {
665 		device_printf(dev, "can't add sysctl node\n");
666 		error = ENXIO;
667 		goto fail;
668 	}
669 
670 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
671 	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
672 	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
673 	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
674 
675 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
676 	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
677 	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
678 	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
679 
680 	if ((error = stge_dma_alloc(sc) != 0))
681 		goto fail;
682 
683 	/*
684 	 * Determine if we're copper or fiber.  It affects how we
685 	 * reset the card.
686 	 */
687 	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
688 		sc->sc_usefiber = 1;
689 	else
690 		sc->sc_usefiber = 0;
691 
692 	/* Load LED configuration from EEPROM. */
693 	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
694 
695 	/*
696 	 * Reset the chip to a known state.
697 	 */
698 	stge_reset(sc, STGE_RESET_FULL);
699 
700 	/*
701 	 * Reading the station address from the EEPROM doesn't seem
702 	 * to work, at least on my sample boards.  Instead, since
703 	 * the reset sequence does AutoInit, read it from the station
704 	 * address registers. For Sundance 1023 you can only read it
705 	 * from EEPROM.
706 	 */
707 	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
708 		uint16_t v;
709 
710 		v = CSR_READ_2(sc, STGE_StationAddress0);
711 		enaddr[0] = v & 0xff;
712 		enaddr[1] = v >> 8;
713 		v = CSR_READ_2(sc, STGE_StationAddress1);
714 		enaddr[2] = v & 0xff;
715 		enaddr[3] = v >> 8;
716 		v = CSR_READ_2(sc, STGE_StationAddress2);
717 		enaddr[4] = v & 0xff;
718 		enaddr[5] = v >> 8;
719 		sc->sc_stge1023 = 0;
720 	} else {
721 		uint16_t myaddr[ETHER_ADDR_LEN / 2];
722 		for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
723 			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
724 			    &myaddr[i]);
725 			myaddr[i] = le16toh(myaddr[i]);
726 		}
727 		bcopy(myaddr, enaddr, sizeof(enaddr));
728 		sc->sc_stge1023 = 1;
729 	}
730 
731 	ifp->if_softc = sc;
732 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
733 	ifp->if_ioctl = stge_ioctl;
734 	ifp->if_start = stge_start;
735 	ifp->if_watchdog = stge_watchdog;
736 	ifp->if_init = stge_init;
737 #ifdef DEVICE_POLLING
738 	ifp->if_poll = stge_poll;
739 #endif
740 	ifp->if_mtu = ETHERMTU;
741 	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
742 	ifq_set_ready(&ifp->if_snd);
743 	/* Revision B3 and earlier chips have checksum bug. */
744 	if (sc->sc_rev >= 0x0c) {
745 		ifp->if_hwassist = STGE_CSUM_FEATURES;
746 		ifp->if_capabilities = IFCAP_HWCSUM;
747 	} else {
748 		ifp->if_hwassist = 0;
749 		ifp->if_capabilities = 0;
750 	}
751 	ifp->if_capenable = ifp->if_capabilities;
752 
753 	/*
754 	 * Read some important bits from the PhyCtrl register.
755 	 */
756 	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
757 	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
758 
759 	/* Set up MII bus. */
760 	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
761 	    stge_mediastatus)) != 0) {
762 		device_printf(sc->sc_dev, "no PHY found!\n");
763 		goto fail;
764 	}
765 
766 	ether_ifattach(ifp, enaddr, NULL);
767 
768 	/* VLAN capability setup */
769 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
770 #ifdef notyet
771 	if (sc->sc_rev >= 0x0c)
772 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
773 #endif
774 	ifp->if_capenable = ifp->if_capabilities;
775 
776 	/*
777 	 * Tell the upper layer(s) we support long frames.
778 	 * Must appear after the call to ether_ifattach() because
779 	 * ether_ifattach() sets ifi_hdrlen to the default value.
780 	 */
781 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
782 
783 	/*
784 	 * The manual recommends disabling early transmit, so we
785 	 * do.  It's disabled anyway, if using IP checksumming,
786 	 * since the entire packet must be in the FIFO in order
787 	 * for the chip to perform the checksum.
788 	 */
789 	sc->sc_txthresh = 0x0fff;
790 
791 	/*
792 	 * Disable MWI if the PCI layer tells us to.
793 	 */
794 	sc->sc_DMACtrl = 0;
795 	if ((cmd & PCIM_CMD_MWRICEN) == 0)
796 		sc->sc_DMACtrl |= DMAC_MWIDisable;
797 
798 	/*
799 	 * Hookup IRQ
800 	 */
801 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
802 			       &sc->sc_ih, ifp->if_serializer);
803 	if (error != 0) {
804 		ether_ifdetach(ifp);
805 		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
806 		goto fail;
807 	}
808 
809 fail:
810 	if (error != 0)
811 		stge_detach(dev);
812 
813 	return (error);
814 }
815 
/*
 * stge_detach:
 *
 *	Detach the device.  Also used as the error-unwind path from
 *	stge_attach(), so every resource is checked for NULL before
 *	being released.  Always returns 0.
 */
static int
stge_detach(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		/* XXX */
		/* NOTE(review): sc_detach presumably signals in-progress
		 * teardown to other paths — confirm where it is checked. */
		sc->sc_detach = 1;
		stge_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	/* Tear down the MII child before generic bus detach. */
	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	stge_dma_free(sc);

	if (sc->sc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq);
	}
	if (sc->sc_res != NULL) {
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
				     sc->sc_res);
	}

	return (0);
}
853 
/* Callback argument for stge_dmamap_cb(): receives the bus address
 * of a single-segment DMA load. */
struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};
857 
858 static void
859 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
860 {
861 	struct stge_dmamap_arg *ctx;
862 
863 	if (error != 0)
864 		return;
865 
866 	KASSERT(nseg == 1, ("too many segments %d\n", nseg));
867 
868 	ctx = (struct stge_dmamap_arg *)arg;
869 	ctx->stge_busaddr = segs[0].ds_addr;
870 }
871 
/* Callback argument for stge_mbuf_dmamap_cb(): on input, nsegs is the
 * capacity of the segs array; on output it is the number of segments
 * actually filled in (0 on error). */
struct stge_mbuf_dmamap_arg {
	int			nsegs;
	bus_dma_segment_t	*segs;
};
876 
877 static void
878 stge_mbuf_dmamap_cb(void *xarg, bus_dma_segment_t *segs, int nsegs,
879 		    bus_size_t mapsz __unused, int error)
880 {
881 	struct stge_mbuf_dmamap_arg *arg = xarg;
882 	int i;
883 
884 	if (error) {
885 		arg->nsegs = 0;
886 		return;
887 	}
888 
889 	KASSERT(nsegs <= arg->nsegs,
890 		("too many segments(%d), should be <= %d\n",
891 		 nsegs, arg->nsegs));
892 
893 	arg->nsegs = nsegs;
894 	for (i = 0; i < nsegs; ++i)
895 		arg->segs[i] = segs[i];
896 }
897 
/*
 * stge_dma_alloc:
 *
 *	Create all DMA tags, allocate and load the Tx/Rx descriptor
 *	rings, and create per-buffer DMA maps.  Returns 0 on success
 *	or a bus_dma errno; on failure the caller (stge_attach) is
 *	expected to unwind via stge_detach() -> stge_dma_free(), so
 *	partially-created resources are not released here.
 */
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(NULL,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	/* The load callback stores the ring's bus address in ctx. */
	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
1064 
/*
 * stge_dma_free:
 *
 *	Release every DMA resource created during attach: the Tx/Rx ring
 *	maps and memory, the per-buffer DMA maps, the spare Rx map and
 *	finally the DMA tags themselves.  Each handle is checked before
 *	release and cleared afterwards, so this is safe to call on a
 *	partially-initialized softc (e.g. from an attach error path).
 */
static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring: unload the map, free the ring memory, destroy the tag. */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_cdata.stge_tx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_cdata.stge_tx_ring_map &&
		    sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_cdata.stge_tx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring: same teardown order as the Tx ring above. */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_cdata.stge_rx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_cdata.stge_rx_ring_map &&
		    sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_cdata.stge_rx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers: destroy each descriptor's DMA map. */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	/* Finally the parent tag everything above was derived from. */
	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}
1139 
1140 /*
1141  * stge_shutdown:
1142  *
1143  *	Make sure the interface is stopped at reboot time.
1144  */
1145 static void
1146 stge_shutdown(device_t dev)
1147 {
1148 	struct stge_softc *sc = device_get_softc(dev);
1149 	struct ifnet *ifp = &sc->arpcom.ac_if;
1150 
1151 	lwkt_serialize_enter(ifp->if_serializer);
1152 	stge_stop(sc);
1153 	lwkt_serialize_exit(ifp->if_serializer);
1154 }
1155 
1156 static int
1157 stge_suspend(device_t dev)
1158 {
1159 	struct stge_softc *sc = device_get_softc(dev);
1160 	struct ifnet *ifp = &sc->arpcom.ac_if;
1161 
1162 	lwkt_serialize_enter(ifp->if_serializer);
1163 	stge_stop(sc);
1164 	sc->sc_suspended = 1;
1165 	lwkt_serialize_exit(ifp->if_serializer);
1166 
1167 	return (0);
1168 }
1169 
1170 static int
1171 stge_resume(device_t dev)
1172 {
1173 	struct stge_softc *sc = device_get_softc(dev);
1174 	struct ifnet *ifp = &sc->arpcom.ac_if;
1175 
1176 	lwkt_serialize_enter(ifp->if_serializer);
1177 	if (ifp->if_flags & IFF_UP)
1178 		stge_init(sc);
1179 	sc->sc_suspended = 0;
1180 	lwkt_serialize_exit(ifp->if_serializer);
1181 
1182 	return (0);
1183 }
1184 
1185 static void
1186 stge_dma_wait(struct stge_softc *sc)
1187 {
1188 	int i;
1189 
1190 	for (i = 0; i < STGE_TIMEOUT; i++) {
1191 		DELAY(2);
1192 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1193 			break;
1194 	}
1195 
1196 	if (i == STGE_TIMEOUT)
1197 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1198 }
1199 
/*
 * stge_encap:
 *
 *	Load *m_head into a free Tx descriptor and queue it for the chip.
 *	Returns 0 on success (the mbuf is then owned by the driver until
 *	stge_txeof() reclaims it).  On failure an errno is returned and
 *	*m_head may have been freed and set to NULL; the caller must
 *	check *m_head to decide whether the packet can be retried.
 */
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	struct stge_mbuf_dmamap_arg arg;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, si;
	uint64_t csum_flags, tfc;

	/* No free Tx descriptor: the ring is full. */
	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	arg.nsegs = STGE_MAXTXSEGS;
	arg.segs = txsegs;
	error =  bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
				      txd->tx_dmamap, *m_head,
				      stge_mbuf_dmamap_cb, &arg,
				      BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: defragment the chain and retry once. */
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error =  bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
					      txd->tx_dmamap, *m_head,
					      stge_mbuf_dmamap_cb, &arg,
					      BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* The callback reported no segments; nothing to transmit. */
	if (arg.nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Translate mbuf checksum-offload requests into TFD flags. */
	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	/* Fill the fragment list of the descriptor at the producer index. */
	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < arg.nsegs; i++) {
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	}
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(arg.nsegs) | csum_flags;
	/* Ask for a Tx-DMA interrupt once the ring is getting full. */
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vlantag);
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors so the chip sees the buffer and the TFD. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}
1291 
1292 /*
1293  * stge_start:		[ifnet interface function]
1294  *
1295  *	Start packet transmission on the interface.
1296  */
1297 static void
1298 stge_start(struct ifnet *ifp)
1299 {
1300 	struct stge_softc *sc;
1301 	struct mbuf *m_head;
1302 	int enq;
1303 
1304 	sc = ifp->if_softc;
1305 
1306 	ASSERT_SERIALIZED(ifp->if_serializer);
1307 
1308 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
1309 	    IFF_RUNNING)
1310 		return;
1311 
1312 	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
1313 		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1314 			ifp->if_flags |= IFF_OACTIVE;
1315 			break;
1316 		}
1317 
1318 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1319 		if (m_head == NULL)
1320 			break;
1321 		/*
1322 		 * Pack the data into the transmit ring. If we
1323 		 * don't have room, set the OACTIVE flag and wait
1324 		 * for the NIC to drain the ring.
1325 		 */
1326 		if (stge_encap(sc, &m_head)) {
1327 			if (m_head == NULL)
1328 				break;
1329 			ifp->if_flags |= IFF_OACTIVE;
1330 			break;
1331 		}
1332 
1333 		enq++;
1334 		/*
1335 		 * If there's a BPF listener, bounce a copy of this frame
1336 		 * to him.
1337 		 */
1338 		ETHER_BPF_MTAP(ifp, m_head);
1339 	}
1340 
1341 	if (enq > 0) {
1342 		/* Transmit */
1343 		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1344 
1345 		/* Set a timeout in case the chip goes out to lunch. */
1346 		ifp->if_timer = 5;
1347 	}
1348 }
1349 
1350 /*
1351  * stge_watchdog:	[ifnet interface function]
1352  *
1353  *	Watchdog timer handler.
1354  */
1355 static void
1356 stge_watchdog(struct ifnet *ifp)
1357 {
1358 	ASSERT_SERIALIZED(ifp->if_serializer);
1359 
1360 	if_printf(ifp, "device timeout\n");
1361 	ifp->if_oerrors++;
1362 	stge_init(ifp->if_softc);
1363 }
1364 
1365 /*
1366  * stge_ioctl:		[ifnet interface function]
1367  *
1368  *	Handle control requests from the operator.
1369  */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* MTU change requires a full re-init of the chip. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			stge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				/*
				 * Already running: only reprogram the Rx
				 * filter if the PROMISC bit toggled.
				 */
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				/* Don't restart a device mid-detach. */
				if (sc->sc_detach == 0)
					stge_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				stge_stop(sc);
		}
		/* Remember the flags so the next toggle can be detected. */
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			stge_set_multi(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media selection is delegated to the MII layer. */
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds the capability bits that changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_flags & IFF_RUNNING)
				stge_vlan_setup(sc);
		}
#if 0
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
1444 
/*
 * stge_link:
 *
 *	Propagate the negotiated link parameters (duplex, flow control)
 *	held in sc_MACCtrl into the STGE_MACCtrl register.  If the duplex
 *	setting changed, the Tx/Rx functions are reset as well.
 */
static void
stge_link(struct stge_softc *sc)
{
	uint32_t v, ac;
	int i;

	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		/* Poll for the reset to self-clear. */
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
}
1473 
/*
 * stge_tx_error:
 *
 *	Drain and handle pending Tx completion status.  Returns 0 if all
 *	errors were recovered in place, or -1 if a Tx underrun occurred
 *	and the caller must fully re-initialize the chip.
 */
static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	/* Keep reading TxStatus until no completed frame is reported. */
	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a more better way to recover
			 * from Tx underrun instead of a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}
1509 
1510 /*
1511  * stge_intr:
1512  *
1513  *	Interrupt service routine.
1514  */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int reinit;
	uint16_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Ignore spurious interrupts and anything while suspended. */
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		return;

	/*
	 * Ack and service interrupt sources until none remain.  Reading
	 * IntStatusAck also disables further interrupts until they are
	 * re-enabled below.
	 */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc, -1);
			/* Restart Rx DMA if it ran off the RFD list. */
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors.*/
		if ((status & IS_TxComplete) != 0) {
			/* Non-zero means a reset is required; stop looping. */
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0)
		stge_init(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
1573 
1574 /*
1575  * stge_txeof:
1576  *
1577  *	Helper; handle transmit interrupts.
1578  */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	/* Nothing queued to the hardware: nothing to reclaim. */
	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		/* Stop at the first descriptor the chip hasn't finished. */
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		/* Ring space was freed; allow stge_start() to queue again. */
		ifp->if_flags &= ~IFF_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	/* Disarm the watchdog once the ring is fully drained. */
	if (sc->sc_cdata.stge_tx_cnt == 0)
		ifp->if_timer = 0;

        bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1626 
1627 static __inline void
1628 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1629 {
1630 	struct stge_rfd *rfd;
1631 
1632 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
1633 	rfd->rfd_status = 0;
1634 }
1635 
1636 #ifndef __i386__
1637 /*
1638  * It seems that TC9021's DMA engine has alignment restrictions in
1639  * DMA scatter operations. The first DMA segment has no address
1640  * alignment restrictins but the rest should be aligned on 4(?) bytes
1641  * boundary. Otherwise it would corrupt random memory. Since we don't
1642  * know which one is used for the first segment in advance we simply
1643  * don't align at all.
1644  * To avoid copying over an entire frame to align, we allocate a new
1645  * mbuf and copy ethernet header to the new mbuf. The new mbuf is
1646  * prepended into the existing mbuf chain.
1647  */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	/* Returns the (possibly new) chain head, or NULL on failure. */
	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room in the cluster: shift the whole frame up by
		 * ETHER_HDR_LEN in place (bcopy handles the overlap) so
		 * the payload lands on the original data pointer.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/*
		 * No room: move just the Ethernet header into a fresh
		 * header mbuf and prepend it to the existing chain.
		 */
		MGETHDR(n, MB_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
1673 #endif
1674 
1675 /*
1676  * stge_rxeof:
1677  *
1678  *	Helper; handle receive interrupts.
1679  */
static void
stge_rxeof(struct stge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Walk the Rx ring from the consumer index, assembling possibly
	 * multi-descriptor frames on the sc_cdata.stge_rxhead/rxtail
	 * chain until a descriptor with RFD_FrameEnd is seen.
	 */
	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
#ifdef DEVICE_POLLING
		/* count < 0 means "no budget" (interrupt path). */
		if (count >= 0 && count-- == 0)
			break;
#endif

		/* Stop at the first descriptor the chip hasn't completed. */
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;

		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			/* Discard any partial frame accumulated so far. */
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.  On failure the
		 * old buffer is recycled and the frame dropped.
		 */
		if (stge_newbuf(sc, cons, 0) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		/*
		 * The last descriptor carries the total frame length;
		 * stge_rxlen tracks the bytes consumed by earlier
		 * descriptors of the same frame.
		 */
		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			/* Frame complete: finalize the packet header. */
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			/* Drop over-length frames. */
			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					     CSUM_PSEUDO_HDR |
					     CSUM_FRAG_NOT_CHECKED);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __i386__
			/* Re-align the payload; see stge_fixup_rx(). */
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif

			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				VLAN_INPUT_TAG(m, RFD_TCI(status64));
			} else {
				/* Pass it on. */
				ifp->if_input(ifp, m);
			}

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1817 
1818 #ifdef DEVICE_POLLING
/*
 * stge_poll:
 *
 *	DEVICE_POLLING handler.  Registration masks the chip's interrupts,
 *	deregistration restores them; POLL_ONLY/POLL_AND_CHECK_STATUS do
 *	the usual Rx/Tx processing under the caller-supplied budget.
 */
static void
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;

	sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Run silent: polling replaces interrupts. */
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
		break;
	case POLL_DEREGISTER:
		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		sc->sc_cdata.stge_rxcycles = count;
		stge_rxeof(sc, count);
		stge_txeof(sc);

		/* Periodically check for error conditions as well. */
		if (cmd == POLL_AND_CHECK_STATUS) {
			status = CSR_READ_2(sc, STGE_IntStatus);
			status &= sc->sc_IntEnable;
			if (status != 0) {
				if (status & IS_HostError) {
					device_printf(sc->sc_dev,
					"Host interface error, "
					"resetting...\n");
					stge_init(sc);
				}
				if ((status & IS_TxComplete) != 0 &&
				    stge_tx_error(sc) != 0)
					stge_init(sc);
			}

		}

		if (!ifq_is_empty(&ifp->if_snd))
			ifp->if_start(ifp);
	}
}
1861 #endif	/* DEVICE_POLLING */
1862 
1863 /*
1864  * stge_tick:
1865  *
1866  *	One second timer, used to tick the MII.
1867  */
1868 static void
1869 stge_tick(void *arg)
1870 {
1871 	struct stge_softc *sc = arg;
1872 	struct ifnet *ifp = &sc->arpcom.ac_if;
1873 	struct mii_data *mii;
1874 
1875 	lwkt_serialize_enter(ifp->if_serializer);
1876 
1877 	mii = device_get_softc(sc->sc_miibus);
1878 	mii_tick(mii);
1879 
1880 	/* Update statistics counters. */
1881 	stge_stats_update(sc);
1882 
1883 	/*
1884 	 * Relcaim any pending Tx descriptors to release mbufs in a
1885 	 * timely manner as we don't generate Tx completion interrupts
1886 	 * for every frame. This limits the delay to a maximum of one
1887 	 * second.
1888 	 */
1889 	if (sc->sc_cdata.stge_tx_cnt != 0)
1890 		stge_txeof(sc);
1891 
1892 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1893 
1894 	lwkt_serialize_exit(ifp->if_serializer);
1895 }
1896 
1897 /*
1898  * stge_stats_update:
1899  *
1900  *	Read the TC9021 statistics counters.
1901  */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * NOTE(review): the octet counters below are read and the values
	 * discarded -- presumably these hardware counters are
	 * clear-on-read and reading keeps them from saturating; confirm
	 * against the TC9021 documentation.
	 */
	CSR_READ_4(sc,STGE_OctetRcvOk);

	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
}
1926 
1927 /*
1928  * stge_reset:
1929  *
1930  *	Perform a soft reset on the TC9021.
1931  */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	/* dv is the per-iteration delay (us) while polling for completion. */
	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber.  We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	/* Poll until the chip clears AC_ResetBusy. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}
1994 
1995 /*
1996  * stge_init:		[ ifnet interface function ]
1997  *
1998  *	Initialize the interface.
1999  */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threhold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us. When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS.*/
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	/* Enable statistics and the Tx/Rx MACs. */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * Rx/Tx MAC wedge the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	/* Only the failed-Rx-ring path arrives here with error != 0. */
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}
2187 
2188 static void
2189 stge_vlan_setup(struct stge_softc *sc)
2190 {
2191 	struct ifnet *ifp = &sc->arpcom.ac_if;
2192 	uint32_t v;
2193 
2194 	/*
2195 	 * The NIC always copy a VLAN tag regardless of STGE_MACCtrl
2196 	 * MC_AutoVLANuntagging bit.
2197 	 * MC_AutoVLANtagging bit selects which VLAN source to use
2198 	 * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert
2199 	 * bit has priority over MC_AutoVLANtagging bit. So we always
2200 	 * use TFC instead of STGE_VLANTag register.
2201 	 */
2202 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2203 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2204 		v |= MC_AutoVLANuntagging;
2205 	else
2206 		v &= ~MC_AutoVLANuntagging;
2207 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2208 }
2209 
2210 /*
2211  *	Stop transmission on the interface.
2212  */
2213 static void
2214 stge_stop(struct stge_softc *sc)
2215 {
2216 	struct ifnet *ifp = &sc->arpcom.ac_if;
2217 	struct stge_txdesc *txd;
2218 	struct stge_rxdesc *rxd;
2219 	uint32_t v;
2220 	int i;
2221 
2222 	ASSERT_SERIALIZED(ifp->if_serializer);
2223 
2224 	/*
2225 	 * Stop the one second clock.
2226 	 */
2227 	callout_stop(&sc->sc_tick_ch);
2228 
2229 	/*
2230 	 * Reset the chip to a known state.
2231 	 */
2232 	stge_reset(sc, STGE_RESET_FULL);
2233 
2234 	/*
2235 	 * Disable interrupts.
2236 	 */
2237 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2238 
2239 	/*
2240 	 * Stop receiver, transmitter, and stats update.
2241 	 */
2242 	stge_stop_rx(sc);
2243 	stge_stop_tx(sc);
2244 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2245 	v |= MC_StatisticsDisable;
2246 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2247 
2248 	/*
2249 	 * Stop the transmit and receive DMA.
2250 	 */
2251 	stge_dma_wait(sc);
2252 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2253 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2254 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2255 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2256 
2257 	/*
2258 	 * Free RX and TX mbufs still in the queues.
2259 	 */
2260 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2261 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2262 		if (rxd->rx_m != NULL) {
2263 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2264 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2265 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2266 			    rxd->rx_dmamap);
2267 			m_freem(rxd->rx_m);
2268 			rxd->rx_m = NULL;
2269 		}
2270         }
2271 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2272 		txd = &sc->sc_cdata.stge_txdesc[i];
2273 		if (txd->tx_m != NULL) {
2274 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2275 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2276 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2277 			    txd->tx_dmamap);
2278 			m_freem(txd->tx_m);
2279 			txd->tx_m = NULL;
2280 		}
2281         }
2282 
2283 	/*
2284 	 * Mark the interface down and cancel the watchdog timer.
2285 	 */
2286 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2287 	ifp->if_timer = 0;
2288 }
2289 
2290 static void
2291 stge_start_tx(struct stge_softc *sc)
2292 {
2293 	uint32_t v;
2294 	int i;
2295 
2296 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2297 	if ((v & MC_TxEnabled) != 0)
2298 		return;
2299 	v |= MC_TxEnable;
2300 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2301 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2302 	for (i = STGE_TIMEOUT; i > 0; i--) {
2303 		DELAY(10);
2304 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2305 		if ((v & MC_TxEnabled) != 0)
2306 			break;
2307 	}
2308 	if (i == 0)
2309 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2310 }
2311 
2312 static void
2313 stge_start_rx(struct stge_softc *sc)
2314 {
2315 	uint32_t v;
2316 	int i;
2317 
2318 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2319 	if ((v & MC_RxEnabled) != 0)
2320 		return;
2321 	v |= MC_RxEnable;
2322 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2323 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2324 	for (i = STGE_TIMEOUT; i > 0; i--) {
2325 		DELAY(10);
2326 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2327 		if ((v & MC_RxEnabled) != 0)
2328 			break;
2329 	}
2330 	if (i == 0)
2331 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2332 }
2333 
2334 static void
2335 stge_stop_tx(struct stge_softc *sc)
2336 {
2337 	uint32_t v;
2338 	int i;
2339 
2340 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2341 	if ((v & MC_TxEnabled) == 0)
2342 		return;
2343 	v |= MC_TxDisable;
2344 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2345 	for (i = STGE_TIMEOUT; i > 0; i--) {
2346 		DELAY(10);
2347 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2348 		if ((v & MC_TxEnabled) == 0)
2349 			break;
2350 	}
2351 	if (i == 0)
2352 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2353 }
2354 
2355 static void
2356 stge_stop_rx(struct stge_softc *sc)
2357 {
2358 	uint32_t v;
2359 	int i;
2360 
2361 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2362 	if ((v & MC_RxEnabled) == 0)
2363 		return;
2364 	v |= MC_RxDisable;
2365 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2366 	for (i = STGE_TIMEOUT; i > 0; i--) {
2367 		DELAY(10);
2368 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2369 		if ((v & MC_RxEnabled) == 0)
2370 			break;
2371 	}
2372 	if (i == 0)
2373 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2374 }
2375 
2376 static void
2377 stge_init_tx_ring(struct stge_softc *sc)
2378 {
2379 	struct stge_ring_data *rd;
2380 	struct stge_txdesc *txd;
2381 	bus_addr_t addr;
2382 	int i;
2383 
2384 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2385 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2386 
2387 	sc->sc_cdata.stge_tx_prod = 0;
2388 	sc->sc_cdata.stge_tx_cons = 0;
2389 	sc->sc_cdata.stge_tx_cnt = 0;
2390 
2391 	rd = &sc->sc_rdata;
2392 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2393 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2394 		if (i == (STGE_TX_RING_CNT - 1))
2395 			addr = STGE_TX_RING_ADDR(sc, 0);
2396 		else
2397 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2398 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2399 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2400 		txd = &sc->sc_cdata.stge_txdesc[i];
2401 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2402 	}
2403 
2404 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2405 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
2406 }
2407 
2408 static int
2409 stge_init_rx_ring(struct stge_softc *sc)
2410 {
2411 	struct stge_ring_data *rd;
2412 	bus_addr_t addr;
2413 	int i;
2414 
2415 	sc->sc_cdata.stge_rx_cons = 0;
2416 	STGE_RXCHAIN_RESET(sc);
2417 
2418 	rd = &sc->sc_rdata;
2419 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2420 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2421 		if (stge_newbuf(sc, i, 1) != 0)
2422 			return (ENOBUFS);
2423 		if (i == (STGE_RX_RING_CNT - 1))
2424 			addr = STGE_RX_RING_ADDR(sc, 0);
2425 		else
2426 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2427 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2428 		rd->stge_rx_ring[i].rfd_status = 0;
2429 	}
2430 
2431 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2432 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREWRITE);
2433 
2434 	return (0);
2435 }
2436 
2437 /*
2438  * stge_newbuf:
2439  *
2440  *	Add a receive buffer to the indicated descriptor.
2441  */
2442 static int
2443 stge_newbuf(struct stge_softc *sc, int idx, int waitok)
2444 {
2445 	struct stge_rxdesc *rxd;
2446 	struct stge_rfd *rfd;
2447 	struct mbuf *m;
2448 	struct stge_mbuf_dmamap_arg arg;
2449 	bus_dma_segment_t segs[1];
2450 	bus_dmamap_t map;
2451 
2452 	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2453 	if (m == NULL)
2454 		return (ENOBUFS);
2455 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2456 	/*
2457 	 * The hardware requires 4bytes aligned DMA address when JUMBO
2458 	 * frame is used.
2459 	 */
2460 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2461 		m_adj(m, ETHER_ALIGN);
2462 
2463 	arg.segs = segs;
2464 	arg.nsegs = 1;
2465 	if (bus_dmamap_load_mbuf(sc->sc_cdata.stge_rx_tag,
2466 	    sc->sc_cdata.stge_rx_sparemap, m, stge_mbuf_dmamap_cb, &arg,
2467 	    waitok ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
2468 		m_freem(m);
2469 		return (ENOBUFS);
2470 	}
2471 
2472 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2473 	if (rxd->rx_m != NULL) {
2474 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2475 		    BUS_DMASYNC_POSTREAD);
2476 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2477 	}
2478 	map = rxd->rx_dmamap;
2479 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2480 	sc->sc_cdata.stge_rx_sparemap = map;
2481 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2482 	    BUS_DMASYNC_PREREAD);
2483 	rxd->rx_m = m;
2484 
2485 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2486 	rfd->rfd_frag.frag_word0 =
2487 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2488 	rfd->rfd_status = 0;
2489 
2490 	return (0);
2491 }
2492 
2493 /*
2494  * stge_set_filter:
2495  *
2496  *	Set up the receive filter.
2497  */
2498 static void
2499 stge_set_filter(struct stge_softc *sc)
2500 {
2501 	struct ifnet *ifp = &sc->arpcom.ac_if;
2502 	uint16_t mode;
2503 
2504 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2505 	mode |= RM_ReceiveUnicast;
2506 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2507 		mode |= RM_ReceiveBroadcast;
2508 	else
2509 		mode &= ~RM_ReceiveBroadcast;
2510 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2511 		mode |= RM_ReceiveAllFrames;
2512 	else
2513 		mode &= ~RM_ReceiveAllFrames;
2514 
2515 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2516 }
2517 
2518 static void
2519 stge_set_multi(struct stge_softc *sc)
2520 {
2521 	struct ifnet *ifp = &sc->arpcom.ac_if;
2522 	struct ifmultiaddr *ifma;
2523 	uint32_t crc;
2524 	uint32_t mchash[2];
2525 	uint16_t mode;
2526 	int count;
2527 
2528 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2529 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2530 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2531 			mode |= RM_ReceiveAllFrames;
2532 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2533 			mode |= RM_ReceiveMulticast;
2534 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2535 		return;
2536 	}
2537 
2538 	/* clear existing filters. */
2539 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2540 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2541 
2542 	/*
2543 	 * Set up the multicast address filter by passing all multicast
2544 	 * addresses through a CRC generator, and then using the low-order
2545 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2546 	 * high order bits select the register, while the rest of the bits
2547 	 * select the bit within the register.
2548 	 */
2549 
2550 	bzero(mchash, sizeof(mchash));
2551 
2552 	count = 0;
2553 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2554 		if (ifma->ifma_addr->sa_family != AF_LINK)
2555 			continue;
2556 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2557 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2558 
2559 		/* Just want the 6 least significant bits. */
2560 		crc &= 0x3f;
2561 
2562 		/* Set the corresponding bit in the hash table. */
2563 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2564 		count++;
2565 	}
2566 
2567 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2568 	if (count > 0)
2569 		mode |= RM_ReceiveMulticastHash;
2570 	else
2571 		mode &= ~RM_ReceiveMulticastHash;
2572 
2573 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2574 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2575 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2576 }
2577 
2578 static int
2579 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2580 {
2581 	int error, value;
2582 
2583 	if (!arg1)
2584 		return (EINVAL);
2585 	value = *(int *)arg1;
2586 	error = sysctl_handle_int(oidp, &value, 0, req);
2587 	if (error || !req->newptr)
2588 		return (error);
2589 	if (value < low || value > high)
2590 		return (EINVAL);
2591         *(int *)arg1 = value;
2592 
2593         return (0);
2594 }
2595 
/*
 * Sysctl handler for the Rx interrupt frame-count moderation knob;
 * constrains new values to [STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX].
 */
static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}
2602 
/*
 * Sysctl handler for the Rx interrupt DMA-wait moderation knob (us);
 * constrains new values to [STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX].
 */
static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
2609