xref: /openbsd/sys/dev/ic/gem.c (revision 17df1aa7)
1 /*	$OpenBSD: gem.c,v 1.96 2009/10/15 17:54:54 deraadt Exp $	*/
2 /*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */
3 
4 /*
5  *
6  * Copyright (C) 2001 Eduardo Horvath.
7  * All rights reserved.
8  *
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  */
32 
33 /*
34  * Driver for Sun GEM ethernet controllers.
35  */
36 
37 #include "bpfilter.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/timeout.h>
42 #include <sys/mbuf.h>
43 #include <sys/syslog.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/ioctl.h>
48 #include <sys/errno.h>
49 #include <sys/device.h>
50 
51 #include <machine/endian.h>
52 
53 #include <net/if.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 
57 #ifdef INET
58 #include <netinet/in.h>
59 #include <netinet/if_ether.h>
60 #endif
61 
62 #if NBPFILTER > 0
63 #include <net/bpf.h>
64 #endif
65 
66 #include <machine/bus.h>
67 #include <machine/intr.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 #include <dev/mii/mii_bitbang.h>
72 
73 #include <dev/ic/gemreg.h>
74 #include <dev/ic/gemvar.h>
75 
/* Maximum polling iterations in gem_bitwait() (one DELAY(100) each). */
#define TRIES	10000

/* Autoconf glue: driver name and device class for gem(4) interfaces. */
struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};
81 
/* ifnet entry points and chip-control helpers. */
void		gem_start(struct ifnet *);
void		gem_stop(struct ifnet *, int);
int		gem_ioctl(struct ifnet *, u_long, caddr_t);
void		gem_tick(void *);
void		gem_watchdog(struct ifnet *);
int		gem_init(struct ifnet *);
void		gem_init_regs(struct gem_softc *);
int		gem_ringsize(int);
int		gem_meminit(struct gem_softc *);
void		gem_mifinit(struct gem_softc *);
int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		gem_reset(struct gem_softc *);
int		gem_reset_rx(struct gem_softc *);
int		gem_reset_tx(struct gem_softc *);
int		gem_disable_rx(struct gem_softc *);
int		gem_disable_tx(struct gem_softc *);
void		gem_rx_watchdog(void *);
void		gem_rxdrain(struct gem_softc *);
void		gem_fill_rx_ring(struct gem_softc *);
int		gem_add_rxbuf(struct gem_softc *, int idx);
void		gem_iff(struct gem_softc *);

/* MII methods & callbacks */
int		gem_mii_readreg(struct device *, int, int);
void		gem_mii_writereg(struct device *, int, int, int);
void		gem_mii_statchg(struct device *);
int		gem_pcs_readreg(struct device *, int, int);
void		gem_pcs_writereg(struct device *, int, int, int);

/* Media selection and status reporting. */
int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

/* Interrupt service helpers: error, RX, TX and PCS events. */
int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
int		gem_pint(struct gem_softc *);

/* Debug printf, gated at runtime by the interface's IFF_DEBUG flag. */
#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif
126 
127 /*
128  * Attach a Gem interface to the system.
129  */
/*
 * gem_config: finish attaching a GEM interface.  Allocates and maps the
 * control data (descriptor rings), creates per-buffer DMA maps, probes
 * for a PHY (external MII, then internal MII, then the PCS SERDES) and
 * attaches the network interface.  On failure, everything allocated so
 * far is released via the fail_* labels, in reverse allocation order.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Hardware reads RX descriptors in multiples of four. */
	m_clsetwms(ifp, MCLBYTES, 4, GEM_NRXDESC - 4);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	/* Bad things will happen if we touch this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
	            GEM_MIF_CONFIG, sc->sc_mif_config);

		/* ERI's external PHY sits at a fixed address. */
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
	            GEM_MIF_CONFIG, sc->sc_mif_config);

		/* PHY address depends on the chip variant. */
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		     GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		/* The PCS is accessed through its own register methods. */
		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	/* The control map is loaded on every path reaching fail_5/fail_6. */
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}
379 
380 void
381 gem_unconfig(struct gem_softc *sc)
382 {
383 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
384 	int i;
385 
386 	gem_stop(ifp, 1);
387 
388 	for (i = 0; i < GEM_NTXDESC; i++) {
389 		if (sc->sc_txd[i].sd_map != NULL)
390 			bus_dmamap_destroy(sc->sc_dmatag,
391 			    sc->sc_txd[i].sd_map);
392 	}
393 	for (i = 0; i < GEM_NRXDESC; i++) {
394 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
395 			bus_dmamap_destroy(sc->sc_dmatag,
396 			    sc->sc_rxsoft[i].rxs_dmamap);
397 	}
398 	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
399 	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
400 	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
401 	    sizeof(struct gem_control_data));
402 	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
403 
404 	/* Detach all PHYs */
405 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
406 
407 	/* Delete all remaining media. */
408 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
409 
410 	ether_ifdetach(ifp);
411 	if_detach(ifp);
412 }
413 
414 
415 void
416 gem_tick(void *arg)
417 {
418 	struct gem_softc *sc = arg;
419 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
420 	bus_space_tag_t t = sc->sc_bustag;
421 	bus_space_handle_t mac = sc->sc_h1;
422 	int s;
423 	u_int32_t v;
424 
425 	/* unload collisions counters */
426 	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
427 	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
428 	ifp->if_collisions += v +
429 	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
430 	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
431 	ifp->if_oerrors += v;
432 
433 	/* read error counters */
434 	ifp->if_ierrors +=
435 	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
436 	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
437 	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
438 	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);
439 
440 	/* clear the hardware counters */
441 	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
442 	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
443 	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
444 	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
445 	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
446 	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
447 	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
448 	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);
449 
450 	s = splnet();
451 	mii_tick(&sc->sc_mii);
452 	splx(s);
453 
454 	timeout_add_sec(&sc->sc_tick_ch, 1);
455 }
456 
457 int
458 gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
459    u_int32_t clr, u_int32_t set)
460 {
461 	int i;
462 	u_int32_t reg;
463 
464 	for (i = TRIES; i--; DELAY(100)) {
465 		reg = bus_space_read_4(sc->sc_bustag, h, r);
466 		if ((reg & clr) == 0 && (reg & set) == set)
467 			return (1);
468 	}
469 
470 	return (0);
471 }
472 
473 void
474 gem_reset(struct gem_softc *sc)
475 {
476 	bus_space_tag_t t = sc->sc_bustag;
477 	bus_space_handle_t h = sc->sc_h2;
478 	int s;
479 
480 	s = splnet();
481 	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
482 	gem_reset_rx(sc);
483 	gem_reset_tx(sc);
484 
485 	/* Do a full reset */
486 	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
487 	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
488 		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
489 	splx(s);
490 }
491 
492 
493 /*
494  * Drain the receive queue.
495  */
496 void
497 gem_rxdrain(struct gem_softc *sc)
498 {
499 	struct gem_rxsoft *rxs;
500 	int i;
501 
502 	for (i = 0; i < GEM_NRXDESC; i++) {
503 		rxs = &sc->sc_rxsoft[i];
504 		if (rxs->rxs_mbuf != NULL) {
505 			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
506 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
507 			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
508 			m_freem(rxs->rxs_mbuf);
509 			rxs->rxs_mbuf = NULL;
510 		}
511 	}
512 	sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0;
513 }
514 
515 /*
516  * Reset the whole thing.
517  */
518 void
519 gem_stop(struct ifnet *ifp, int softonly)
520 {
521 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
522 	struct gem_sxd *sd;
523 	u_int32_t i;
524 
525 	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));
526 
527 	timeout_del(&sc->sc_tick_ch);
528 
529 	/*
530 	 * Mark the interface down and cancel the watchdog timer.
531 	 */
532 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
533 	ifp->if_timer = 0;
534 
535 	if (!softonly) {
536 		mii_down(&sc->sc_mii);
537 
538 		gem_reset_rx(sc);
539 		gem_reset_tx(sc);
540 	}
541 
542 	/*
543 	 * Release any queued transmit buffers.
544 	 */
545 	for (i = 0; i < GEM_NTXDESC; i++) {
546 		sd = &sc->sc_txd[i];
547 		if (sd->sd_mbuf != NULL) {
548 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
549 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
550 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
551 			m_freem(sd->sd_mbuf);
552 			sd->sd_mbuf = NULL;
553 		}
554 	}
555 	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
556 
557 	gem_rxdrain(sc);
558 }
559 
560 
561 /*
562  * Reset the receiver
563  */
564 int
565 gem_reset_rx(struct gem_softc *sc)
566 {
567 	bus_space_tag_t t = sc->sc_bustag;
568 	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
569 
570 	/*
571 	 * Resetting while DMA is in progress can cause a bus hang, so we
572 	 * disable DMA first.
573 	 */
574 	gem_disable_rx(sc);
575 	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
576 	/* Wait till it finishes */
577 	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
578 		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
579 	/* Wait 5ms extra. */
580 	delay(5000);
581 
582 	/* Finally, reset the ERX */
583 	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
584 	/* Wait till it finishes */
585 	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
586 		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
587 		return (1);
588 	}
589 	return (0);
590 }
591 
592 
593 /*
594  * Reset the transmitter
595  */
596 int
597 gem_reset_tx(struct gem_softc *sc)
598 {
599 	bus_space_tag_t t = sc->sc_bustag;
600 	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
601 
602 	/*
603 	 * Resetting while DMA is in progress can cause a bus hang, so we
604 	 * disable DMA first.
605 	 */
606 	gem_disable_tx(sc);
607 	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
608 	/* Wait till it finishes */
609 	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
610 		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
611 	/* Wait 5ms extra. */
612 	delay(5000);
613 
614 	/* Finally, reset the ETX */
615 	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
616 	/* Wait till it finishes */
617 	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
618 		printf("%s: cannot reset transmitter\n",
619 			sc->sc_dev.dv_xname);
620 		return (1);
621 	}
622 	return (0);
623 }
624 
625 /*
626  * Disable receiver.
627  */
628 int
629 gem_disable_rx(struct gem_softc *sc)
630 {
631 	bus_space_tag_t t = sc->sc_bustag;
632 	bus_space_handle_t h = sc->sc_h1;
633 	u_int32_t cfg;
634 
635 	/* Flip the enable bit */
636 	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
637 	cfg &= ~GEM_MAC_RX_ENABLE;
638 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
639 
640 	/* Wait for it to finish */
641 	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
642 }
643 
644 /*
645  * Disable transmitter.
646  */
647 int
648 gem_disable_tx(struct gem_softc *sc)
649 {
650 	bus_space_tag_t t = sc->sc_bustag;
651 	bus_space_handle_t h = sc->sc_h1;
652 	u_int32_t cfg;
653 
654 	/* Flip the enable bit */
655 	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
656 	cfg &= ~GEM_MAC_TX_ENABLE;
657 	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
658 
659 	/* Wait for it to finish */
660 	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
661 }
662 
663 /*
664  * Initialize interface.
665  */
666 int
667 gem_meminit(struct gem_softc *sc)
668 {
669 	int i;
670 
671 	/*
672 	 * Initialize the transmit descriptor ring.
673 	 */
674 	for (i = 0; i < GEM_NTXDESC; i++) {
675 		sc->sc_txdescs[i].gd_flags = 0;
676 		sc->sc_txdescs[i].gd_addr = 0;
677 	}
678 	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
679 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
680 
681 	/*
682 	 * Initialize the receive descriptor and receive job
683 	 * descriptor rings.
684 	 */
685 	for (i = 0; i < GEM_NRXDESC; i++) {
686 		sc->sc_rxdescs[i].gd_flags = 0;
687 		sc->sc_rxdescs[i].gd_addr = 0;
688 	}
689 	gem_fill_rx_ring(sc);
690 
691 	return (0);
692 }
693 
694 int
695 gem_ringsize(int sz)
696 {
697 	switch (sz) {
698 	case 32:
699 		return GEM_RING_SZ_32;
700 	case 64:
701 		return GEM_RING_SZ_64;
702 	case 128:
703 		return GEM_RING_SZ_128;
704 	case 256:
705 		return GEM_RING_SZ_256;
706 	case 512:
707 		return GEM_RING_SZ_512;
708 	case 1024:
709 		return GEM_RING_SZ_1024;
710 	case 2048:
711 		return GEM_RING_SZ_2048;
712 	case 4096:
713 		return GEM_RING_SZ_4096;
714 	case 8192:
715 		return GEM_RING_SZ_8192;
716 	default:
717 		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
718 		return GEM_RING_SZ_32;
719 	}
720 }
721 
722 /*
723  * Initialization of interface; set up initialization block
724  * and transmit/receive descriptor rings.
725  */
int
gem_init(struct ifnet *ifp)
{

	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	/*
	 * The complemented bits are the enabled interrupt sources;
	 * presumably all others remain masked -- see gemreg.h.
	 */
	bus_space_write_4(t, h, GEM_INTMASK,
		      ~(GEM_INTR_TX_INTME|
			GEM_INTR_TX_EMPTY|
			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
			GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	/* TX FIFO threshold: ERI gets a smaller value than GEM. */
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
		(0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return (0);
}
844 
/*
 * gem_init_regs: program MAC registers that survive a chip reset:
 * inter-packet gaps, frame size limits, collision parameters, the
 * station/control/filter addresses, and the DMA burst configuration.
 */
void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	/*
	 * NOTE(review): sc_inited is forced to 0 here, so the block
	 * below always runs; the flag only records that it ran once.
	 */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		/* Seed value derived from the low MAC address bytes. */
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks. This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
	if (sc->sc_pci)
		v |= GEM_CONFIG_BURST_INF;
	else
		v |= GEM_CONFIG_BURST_64;
	if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
		v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
	bus_space_write_4(t, h, GEM_CONFIG, v);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
		(sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
		(sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
		(sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}
935 
936 /*
937  * Receive interrupt.
938  */
/*
 * gem_rint: service the receive ring.  Walks from the consumer index,
 * hands completed packets to the stack, then refills the ring and
 * kicks the chip.  Always returns 1 (interrupt handled).
 */
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	for (i = sc->sc_rx_cons; sc->sc_rx_cnt > 0; i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/* We have processed all of the receive buffers. */
			break;
		}

		/* The chip is done with this buffer; detach the mbuf. */
		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		sc->sc_rx_cnt--;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
#ifdef GEM_DEBUG
			printf("%s: receive error: CRC error\n",
				sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rx_cons = i;
	/* Replenish the ring and tell the chip about the new buffers. */
	gem_fill_rx_ring(sc);
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n",
		sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}
1020 
1021 void
1022 gem_fill_rx_ring(struct gem_softc *sc)
1023 {
1024 	while (sc->sc_rx_cnt < (GEM_NRXDESC - 4)) {
1025 		if (gem_add_rxbuf(sc, sc->sc_rx_prod))
1026 			break;
1027 	}
1028 }
1029 
1030 /*
1031  * Add a receive buffer to the indicated descriptor.
1032  */
1033 int
1034 gem_add_rxbuf(struct gem_softc *sc, int idx)
1035 {
1036 	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1037 	struct mbuf *m;
1038 	int error;
1039 
1040 	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
1041 	if (!m)
1042 		return (ENOBUFS);
1043 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1044 
1045 #ifdef GEM_DEBUG
1046 /* bzero the packet to check dma */
1047 	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1048 #endif
1049 
1050 	rxs->rxs_mbuf = m;
1051 
1052 	error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
1053 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1054 	if (error) {
1055 		printf("%s: can't load rx DMA map %d, error = %d\n",
1056 		    sc->sc_dev.dv_xname, idx, error);
1057 		panic("gem_add_rxbuf");	/* XXX */
1058 	}
1059 
1060 	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1061 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1062 
1063 	GEM_INIT_RXDESC(sc, idx);
1064 
1065 	sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
1066 	sc->sc_rx_cnt++;
1067 
1068 	return (0);
1069 }
1070 
1071 int
1072 gem_eint(struct gem_softc *sc, u_int status)
1073 {
1074 	if ((status & GEM_INTR_MIF) != 0) {
1075 #ifdef GEM_DEBUG
1076 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1077 #endif
1078 		return (1);
1079 	}
1080 
1081 	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
1082 	return (1);
1083 }
1084 
1085 int
1086 gem_pint(struct gem_softc *sc)
1087 {
1088 	bus_space_tag_t t = sc->sc_bustag;
1089 	bus_space_handle_t seb = sc->sc_h1;
1090 	u_int32_t status;
1091 
1092 	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
1093 	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
1094 #ifdef GEM_DEBUG
1095 	if (status)
1096 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1097 #endif
1098 	return (1);
1099 }
1100 
1101 int
1102 gem_intr(void *v)
1103 {
1104 	struct gem_softc *sc = (struct gem_softc *)v;
1105 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1106 	bus_space_tag_t t = sc->sc_bustag;
1107 	bus_space_handle_t seb = sc->sc_h1;
1108 	u_int32_t status;
1109 	int r = 0;
1110 
1111 	status = bus_space_read_4(t, seb, GEM_STATUS);
1112 	DPRINTF(sc, ("%s: gem_intr: cplt %xstatus %b\n",
1113 		sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));
1114 
1115 	if (status == 0xffffffff)
1116 		return (0);
1117 
1118 	if ((status & GEM_INTR_PCS) != 0)
1119 		r |= gem_pint(sc);
1120 
1121 	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1122 		r |= gem_eint(sc, status);
1123 
1124 	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1125 		r |= gem_tint(sc, status);
1126 
1127 	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1128 		r |= gem_rint(sc);
1129 
1130 	/* We should eventually do more than just print out error stats. */
1131 	if (status & GEM_INTR_TX_MAC) {
1132 		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
1133 #ifdef GEM_DEBUG
1134 		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
1135 			printf("%s: MAC tx fault, status %x\n",
1136 			    sc->sc_dev.dv_xname, txstat);
1137 #endif
1138 		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
1139 			gem_init(ifp);
1140 	}
1141 	if (status & GEM_INTR_RX_MAC) {
1142 		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
1143 #ifdef GEM_DEBUG
1144  		if (rxstat & ~GEM_MAC_RX_DONE)
1145  			printf("%s: MAC rx fault, status %x\n",
1146  			    sc->sc_dev.dv_xname, rxstat);
1147 #endif
1148 		if (rxstat & GEM_MAC_RX_OVERFLOW) {
1149 			ifp->if_ierrors++;
1150 
1151 			/*
1152 			 * Apparently a silicon bug causes ERI to hang
1153 			 * from time to time.  So if we detect an RX
1154 			 * FIFO overflow, we fire off a timer, and
1155 			 * check whether we're still making progress
1156 			 * by looking at the RX FIFO write and read
1157 			 * pointers.
1158 			 */
1159 			sc->sc_rx_fifo_wr_ptr =
1160 				bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
1161 			sc->sc_rx_fifo_rd_ptr =
1162 				bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
1163 			timeout_add_msec(&sc->sc_rx_watchdog, 400);
1164 		}
1165 #ifdef GEM_DEBUG
1166 		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1167 			printf("%s: MAC rx fault, status %x\n",
1168 			    sc->sc_dev.dv_xname, rxstat);
1169 #endif
1170 	}
1171 	return (r);
1172 }
1173 
/*
 * Timeout handler armed by gem_intr() after an RX FIFO overflow.
 * Checks whether the receiver has made any progress since the overflow
 * was observed; if not, resets the chip (workaround for an apparent
 * ERI silicon hang -- see the comment in gem_intr()).
 */
void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t rx_fifo_wr_ptr;
	u_int32_t rx_fifo_rd_ptr;
	u_int32_t state;

	/* Nothing to do if the interface has been stopped meanwhile. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	/*
	 * Stuck means: still in overflow state AND either the FIFO is
	 * drained-but-idle (wr == rd) or neither pointer has moved since
	 * gem_intr() snapshotted them.
	 */
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
	    ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
	     ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
	      (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr)))) {
		/*
		 * The RX state machine is still in overflow state and
		 * the RX FIFO write and read pointers seem to be
		 * stuck.  Whack the chip over the head to get things
		 * going again.
		 */
		gem_init(ifp);
	}
}
1204 
1205 void
1206 gem_watchdog(struct ifnet *ifp)
1207 {
1208 	struct gem_softc *sc = ifp->if_softc;
1209 
1210 	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1211 		"GEM_MAC_RX_CONFIG %x\n",
1212 		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
1213 		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
1214 		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
1215 
1216 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1217 	++ifp->if_oerrors;
1218 
1219 	/* Try to get more packets going. */
1220 	gem_init(ifp);
1221 }
1222 
1223 /*
1224  * Initialize the MII Management Interface
1225  */
1226 void
1227 gem_mifinit(struct gem_softc *sc)
1228 {
1229 	bus_space_tag_t t = sc->sc_bustag;
1230 	bus_space_handle_t mif = sc->sc_h1;
1231 
1232 	/* Configure the MIF in frame mode */
1233 	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1234 	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1235 	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
1236 }
1237 
1238 /*
1239  * MII interface
1240  *
1241  * The GEM MII interface supports at least three different operating modes:
1242  *
1243  * Bitbang mode is implemented using data, clock and output enable registers.
1244  *
1245  * Frame mode is implemented by loading a complete frame into the frame
1246  * register and polling the valid bit for completion.
1247  *
1248  * Polling mode uses the frame register but completion is indicated by
1249  * an interrupt.
1250  *
1251  */
1252 int
1253 gem_mii_readreg(struct device *self, int phy, int reg)
1254 {
1255 	struct gem_softc *sc = (void *)self;
1256 	bus_space_tag_t t = sc->sc_bustag;
1257 	bus_space_handle_t mif = sc->sc_h1;
1258 	int n;
1259 	u_int32_t v;
1260 
1261 #ifdef GEM_DEBUG
1262 	if (sc->sc_debug)
1263 		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
1264 #endif
1265 
1266 	/* Construct the frame command */
1267 	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
1268 		GEM_MIF_FRAME_READ;
1269 
1270 	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1271 	for (n = 0; n < 100; n++) {
1272 		DELAY(1);
1273 		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1274 		if (v & GEM_MIF_FRAME_TA0)
1275 			return (v & GEM_MIF_FRAME_DATA);
1276 	}
1277 
1278 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1279 	return (0);
1280 }
1281 
1282 void
1283 gem_mii_writereg(struct device *self, int phy, int reg, int val)
1284 {
1285 	struct gem_softc *sc = (void *)self;
1286 	bus_space_tag_t t = sc->sc_bustag;
1287 	bus_space_handle_t mif = sc->sc_h1;
1288 	int n;
1289 	u_int32_t v;
1290 
1291 #ifdef GEM_DEBUG
1292 	if (sc->sc_debug)
1293 		printf("gem_mii_writereg: phy %d reg %d val %x\n",
1294 			phy, reg, val);
1295 #endif
1296 
1297 	/* Construct the frame command */
1298 	v = GEM_MIF_FRAME_WRITE			|
1299 	    (phy << GEM_MIF_PHY_SHIFT)		|
1300 	    (reg << GEM_MIF_REG_SHIFT)		|
1301 	    (val & GEM_MIF_FRAME_DATA);
1302 
1303 	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1304 	for (n = 0; n < 100; n++) {
1305 		DELAY(1);
1306 		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1307 		if (v & GEM_MIF_FRAME_TA0)
1308 			return;
1309 	}
1310 
1311 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1312 }
1313 
/*
 * MII status-change callback: reprogram the MAC TX and XIF config
 * registers to match the media (duplex, GMII mode) now reported by
 * the PHY.
 */
void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	/* In full duplex, carrier sense and collisions are meaningless. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	/* Select GMII mode only for gigabit media types. */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}
1362 
1363 int
1364 gem_pcs_readreg(struct device *self, int phy, int reg)
1365 {
1366 	struct gem_softc *sc = (void *)self;
1367 	bus_space_tag_t t = sc->sc_bustag;
1368 	bus_space_handle_t pcs = sc->sc_h1;
1369 
1370 #ifdef GEM_DEBUG
1371 	if (sc->sc_debug)
1372 		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
1373 #endif
1374 
1375 	if (phy != GEM_PHYAD_EXTERNAL)
1376 		return (0);
1377 
1378 	switch (reg) {
1379 	case MII_BMCR:
1380 		reg = GEM_MII_CONTROL;
1381 		break;
1382 	case MII_BMSR:
1383 		reg = GEM_MII_STATUS;
1384 		break;
1385 	case MII_ANAR:
1386 		reg = GEM_MII_ANAR;
1387 		break;
1388 	case MII_ANLPAR:
1389 		reg = GEM_MII_ANLPAR;
1390 		break;
1391 	case MII_EXTSR:
1392 		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
1393 	default:
1394 		return (0);
1395 	}
1396 
1397 	return bus_space_read_4(t, pcs, reg);
1398 }
1399 
/*
 * Write an MII register via the on-chip PCS block, translating generic
 * MII register numbers to PCS register offsets.  A BMCR reset is waited
 * out, and after a reset or an ANAR write the Serialink and PCS blocks
 * are re-enabled.  Writes to unknown registers or foreign PHY addresses
 * are silently discarded.
 */
void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
			phy, reg, val);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	/* Disable the PCS before changing advertised abilities. */
	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		/* Remember whether a reset was requested; see below. */
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	/* Wait for a requested reset to self-clear. */
	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	/* Re-enable the Serialink and the PCS after reset/ANAR change. */
	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}
1450 
1451 int
1452 gem_mediachange(struct ifnet *ifp)
1453 {
1454 	struct gem_softc *sc = ifp->if_softc;
1455 	struct mii_data *mii = &sc->sc_mii;
1456 
1457 	if (mii->mii_instance) {
1458 		struct mii_softc *miisc;
1459 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1460 			mii_phy_reset(miisc);
1461 	}
1462 
1463 	return (mii_mediachg(&sc->sc_mii));
1464 }
1465 
1466 void
1467 gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1468 {
1469 	struct gem_softc *sc = ifp->if_softc;
1470 
1471 	mii_pollstat(&sc->sc_mii);
1472 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1473 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1474 }
1475 
1476 /*
1477  * Process an ioctl request.
1478  */
1479 int
1480 gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1481 {
1482 	struct gem_softc *sc = ifp->if_softc;
1483 	struct ifaddr *ifa = (struct ifaddr *)data;
1484 	struct ifreq *ifr = (struct ifreq *)data;
1485 	int s, error = 0;
1486 
1487 	s = splnet();
1488 
1489 	switch (cmd) {
1490 	case SIOCSIFADDR:
1491 		ifp->if_flags |= IFF_UP;
1492 		if ((ifp->if_flags & IFF_RUNNING) == 0)
1493 			gem_init(ifp);
1494 #ifdef INET
1495 		if (ifa->ifa_addr->sa_family == AF_INET)
1496 			arp_ifinit(&sc->sc_arpcom, ifa);
1497 #endif
1498 		break;
1499 
1500 	case SIOCSIFFLAGS:
1501 		if (ifp->if_flags & IFF_UP) {
1502 			if (ifp->if_flags & IFF_RUNNING)
1503 				error = ENETRESET;
1504 			else
1505 				gem_init(ifp);
1506 		} else {
1507 			if (ifp->if_flags & IFF_RUNNING)
1508 				gem_stop(ifp, 0);
1509 		}
1510 #ifdef GEM_DEBUG
1511 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1512 #endif
1513 		break;
1514 
1515 	case SIOCGIFMEDIA:
1516 	case SIOCSIFMEDIA:
1517 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1518 		break;
1519 
1520 	default:
1521 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1522 	}
1523 
1524 	if (error == ENETRESET) {
1525 		if (ifp->if_flags & IFF_RUNNING)
1526 			gem_iff(sc);
1527 		error = 0;
1528 	}
1529 
1530 	splx(s);
1531 	return (error);
1532 }
1533 
1534 void
1535 gem_iff(struct gem_softc *sc)
1536 {
1537 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1538 	struct arpcom *ac = &sc->sc_arpcom;
1539 	struct ether_multi *enm;
1540 	struct ether_multistep step;
1541 	bus_space_tag_t t = sc->sc_bustag;
1542 	bus_space_handle_t h = sc->sc_h1;
1543 	u_int32_t crc, hash[16], rxcfg;
1544 	int i;
1545 
1546 	rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
1547 	rxcfg &= ~(GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_PROMISCUOUS |
1548 	    GEM_MAC_RX_PROMISC_GRP);
1549 	ifp->if_flags &= ~IFF_ALLMULTI;
1550 
1551 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1552 		ifp->if_flags |= IFF_ALLMULTI;
1553 		if (ifp->if_flags & IFF_PROMISC)
1554 			rxcfg |= GEM_MAC_RX_PROMISCUOUS;
1555 		else
1556 			rxcfg |= GEM_MAC_RX_PROMISC_GRP;
1557 	} else {
1558 		/*
1559 		 * Set up multicast address filter by passing all multicast
1560 		 * addresses through a crc generator, and then using the
1561 		 * high order 8 bits as an index into the 256 bit logical
1562 		 * address filter.  The high order 4 bits selects the word,
1563 		 * while the other 4 bits select the bit within the word
1564 		 * (where bit 0 is the MSB).
1565 		 */
1566 
1567 		rxcfg |= GEM_MAC_RX_HASH_FILTER;
1568 
1569 		/* Clear hash table */
1570 		for (i = 0; i < 16; i++)
1571 			hash[i] = 0;
1572 
1573 		ETHER_FIRST_MULTI(step, ac, enm);
1574 		while (enm != NULL) {
1575 			crc = ether_crc32_le(enm->enm_addrlo,
1576 			    ETHER_ADDR_LEN);
1577 
1578 			/* Just want the 8 most significant bits. */
1579 			crc >>= 24;
1580 
1581 			/* Set the corresponding bit in the filter. */
1582 			hash[crc >> 4] |= 1 << (15 - (crc & 15));
1583 
1584 			ETHER_NEXT_MULTI(step, enm);
1585 		}
1586 
1587 		/* Now load the hash table into the chip (if we are using it) */
1588 		for (i = 0; i < 16; i++) {
1589 			bus_space_write_4(t, h,
1590 			    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
1591 			    hash[i]);
1592 		}
1593 	}
1594 
1595 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg);
1596 }
1597 
1598 /*
1599  * Transmit interrupt.
1600  */
1601 int
1602 gem_tint(struct gem_softc *sc, u_int32_t status)
1603 {
1604 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1605 	struct gem_sxd *sd;
1606 	u_int32_t cons, hwcons;
1607 
1608 	hwcons = status >> 19;
1609 	cons = sc->sc_tx_cons;
1610 	while (cons != hwcons) {
1611 		sd = &sc->sc_txd[cons];
1612 		if (sd->sd_mbuf != NULL) {
1613 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
1614 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1615 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
1616 			m_freem(sd->sd_mbuf);
1617 			sd->sd_mbuf = NULL;
1618 			ifp->if_opackets++;
1619 		}
1620 		sc->sc_tx_cnt--;
1621 		if (++cons == GEM_NTXDESC)
1622 			cons = 0;
1623 	}
1624 	sc->sc_tx_cons = cons;
1625 
1626 	if (sc->sc_tx_cnt < GEM_NTXDESC - 2)
1627 		ifp->if_flags &= ~IFF_OACTIVE;
1628 	if (sc->sc_tx_cnt == 0)
1629 		ifp->if_timer = 0;
1630 
1631 	gem_start(ifp);
1632 
1633 	return (1);
1634 }
1635 
/*
 * Start output: drain the interface send queue into the TX descriptor
 * ring.  Packets with too many DMA segments are defragmented; if the
 * ring is too full for a packet, IFF_OACTIVE is set and output stops
 * until gem_tint() reclaims descriptors.
 */
void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int64_t flags;
	bus_dmamap_t map;
	u_int32_t cur, frag, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Keep going while the producer slot is free. */
	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		/* Peek only; we dequeue after we know the packet fits. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		/* Not enough free descriptors: back off until gem_tint(). */
		if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Fill one descriptor per DMA segment, flushing each to
		 * memory; `cur' tracks the last descriptor used. */
		for (i = 0; i < map->dm_nsegs; i++) {
			sc->sc_txdescs[frag].gd_addr =
			    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
			if (i == 0)
				flags |= GEM_TD_START_OF_PACKET;
			if (i == (map->dm_nsegs - 1))
				flags |= GEM_TD_END_OF_PACKET;
			sc->sc_txdescs[frag].gd_flags =
			    GEM_DMA_WRITE(sc, flags);
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
			    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
			    BUS_DMASYNC_PREWRITE);
			cur = frag;
			if (++frag == GEM_NTXDESC)
				frag = 0;
		}

		/* Swap the maps so the loaded one lives with the mbuf at
		 * the last descriptor, where gem_tint() will unload it. */
		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Tell the chip about the new descriptors. */
		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
		sc->sc_tx_prod = frag;

		/* Arm the transmit watchdog. */
		ifp->if_timer = 5;
	}

	return;

 drop:
	/* Unsendable packet: dequeue, free and count it. */
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}
1733