/*	$OpenBSD: gem.c,v 1.95 2009/08/10 20:29:54 deraadt Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define TRIES	10000

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void		gem_start(struct ifnet *);
void		gem_stop(struct ifnet *);
int		gem_ioctl(struct ifnet *, u_long, caddr_t);
void		gem_tick(void *);
void		gem_watchdog(struct ifnet *);
int		gem_init(struct ifnet *);
void		gem_init_regs(struct gem_softc *);
int		gem_ringsize(int);
int		gem_meminit(struct gem_softc *);
void		gem_mifinit(struct gem_softc *);
int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		gem_reset(struct gem_softc *);
int		gem_reset_rx(struct gem_softc *);
int		gem_reset_tx(struct gem_softc *);
int		gem_disable_rx(struct gem_softc *);
int		gem_disable_tx(struct gem_softc *);
void		gem_rx_watchdog(void *);
void		gem_rxdrain(struct gem_softc *);
void		gem_fill_rx_ring(struct gem_softc *);
int		gem_add_rxbuf(struct gem_softc *, int idx);
void		gem_iff(struct gem_softc *);

/* MII methods & callbacks */
int		gem_mii_readreg(struct device *, int, int);
void		gem_mii_writereg(struct device *, int, int, int);
void		gem_mii_statchg(struct device *);
int		gem_pcs_readreg(struct device *, int, int);
void		gem_pcs_writereg(struct device *, int, int, int);

int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
int		gem_pint(struct gem_softc *);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

/*
 * Attach a Gem interface to the system.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Hardware reads RX descriptors in multiples of four. */
	m_clsetwms(ifp, MCLBYTES, 4, GEM_NRXDESC - 4);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	/* Bad things will happen if we touch this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}

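/*
 * One-second timer: unload the MAC statistics counters into the
 * interface counters and tick the MII/PHY state machine.
 */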
void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

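/*
 * Wait for the bits in "clr" to clear and the bits in "set" to be set
 * in register "r", polling every 100us for up to TRIES iterations
 * (about one second).  Returns 1 on success, 0 on timeout.
 */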
int
gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
   u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h2;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}

/*
 * Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0;
}

/*
 * Stop the interface: reset the transmitter and receiver and release
 * any queued transmit and receive buffers.
 */
void
gem_stop(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	gem_rxdrain(sc);
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize the transmit and receive descriptor rings.
 */
int
gem_meminit(struct gem_softc *sc)
{
	int i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		sc->sc_rxdescs[i].gd_flags = 0;
		sc->sc_rxdescs[i].gd_addr = 0;
	}
	gem_fill_rx_ring(sc);

	return (0);
}

int
gem_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return GEM_RING_SZ_32;
	case 64:
		return GEM_RING_SZ_64;
	case 128:
		return GEM_RING_SZ_128;
	case 256:
		return GEM_RING_SZ_256;
	case 512:
		return GEM_RING_SZ_512;
	case 1024:
		return GEM_RING_SZ_1024;
	case 2048:
		return GEM_RING_SZ_2048;
	case 4096:
		return GEM_RING_SZ_4096;
	case 8192:
		return GEM_RING_SZ_8192;
	default:
		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
		return GEM_RING_SZ_32;
	}
}

/*
 * Initialize the interface: set up the transmit/receive descriptor
 * rings and the hardware registers, then start the chip.
 */
int
gem_init(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
		      ~(GEM_INTR_TX_INTME|
			GEM_INTR_TX_EMPTY|
			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
			GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
		(0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
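	/*
	 * The threshold fields are apparently in units of 64 bytes
	 * (sc_rxfifosize is in bytes, read as 64 * GEM_RX_FIFO_SIZE in
	 * gem_config()), so sc_rxfifosize / 256 is a quarter of the
	 * FIFO in those units.
	 */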
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return (0);
}

void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks. This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
	if (sc->sc_pci)
		v |= GEM_CONFIG_BURST_INF;
	else
		v |= GEM_CONFIG_BURST_64;
	if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
		v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
	bus_space_write_4(t, h, GEM_CONFIG, v);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
		(sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
		(sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
		(sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}

/*
 * Receive interrupt.
 */
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	for (i = sc->sc_rx_cons; sc->sc_rx_cnt > 0; i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/* We have processed all of the receive buffers. */
			break;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		sc->sc_rx_cnt--;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
#ifdef GEM_DEBUG
			printf("%s: receive error: CRC error\n",
				sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rx_cons = i;
	gem_fill_rx_ring(sc);
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n",
		sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}

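/*
 * Refill the receive ring.  At least four descriptors are always left
 * unfilled, since the hardware reads RX descriptors in multiples of
 * four.
 */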
void
gem_fill_rx_ring(struct gem_softc *sc)
{
	while (sc->sc_rx_cnt < (GEM_NRXDESC - 4)) {
		if (gem_add_rxbuf(sc, sc->sc_rx_prod))
			break;
	}
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

#ifdef GEM_DEBUG
	/* bzero the packet to check DMA */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
	sc->sc_rx_cnt++;

	return (0);
}

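/*
 * Error interrupt: a MIF (link status) change is only noted; any other
 * error condition gets its raw status bits printed.
 */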
int
gem_eint(struct gem_softc *sc, u_int status)
{
	if ((status & GEM_INTR_MIF) != 0) {
#ifdef GEM_DEBUG
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}

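/*
 * PCS (SERDES) link-status interrupt.  The PCS interrupt status
 * register is apparently clear-on-read; it is read twice and the
 * results merged so that a change latched between the reads is not
 * lost.
 */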
int
gem_pint(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
#ifdef GEM_DEBUG
	if (status)
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
	return (1);
}

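/*
 * Main interrupt handler: read GEM_STATUS once and dispatch to the
 * PCS, error, transmit and receive handlers as indicated.
 */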
int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
		sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(ifp);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;

			/*
			 * Apparently a silicon bug causes ERI to hang
			 * from time to time.  So if we detect an RX
			 * FIFO overflow, we fire off a timer, and
			 * check whether we're still making progress
			 * by looking at the RX FIFO write and read
			 * pointers.
			 */
			sc->sc_rx_fifo_wr_ptr =
				bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
			sc->sc_rx_fifo_rd_ptr =
				bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}

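/*
 * RX hang watchdog, armed by gem_intr() on an RX FIFO overflow: if the
 * MAC is still in the overflow state and the FIFO pointers have not
 * moved since the interrupt, reinitialize the chip.
 */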
void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t rx_fifo_wr_ptr;
	u_int32_t rx_fifo_rd_ptr;
	u_int32_t state;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
	    ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
	     ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
	      (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr)))) {
		/*
		 * The RX state machine is still in overflow state and
		 * the RX FIFO write and read pointers seem to be
		 * stuck.  Whack the chip over the head to get things
		 * going again.
		 */
		gem_init(ifp);
	}
}

void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
		"GEM_MAC_RX_CONFIG %x\n",
		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
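 * This driver runs the MIF in frame mode: gem_mifinit() clears
 * GEM_MIF_CONFIG_BB_ENA, and gem_mii_readreg()/gem_mii_writereg()
 * poll GEM_MIF_FRAME_TA0 for completion.
 *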
 */
int
gem_mii_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
		GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
gem_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
			phy, reg, val);
#endif

	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE			|
	    (phy << GEM_MIF_PHY_SHIFT)		|
	    (reg << GEM_MIF_REG_SHIFT)		|
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_pcs_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	case MII_EXTSR:
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
			phy, reg, val);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			gem_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				gem_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp);
		}
#ifdef GEM_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			gem_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

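/*
 * Program the receive filter: promiscuous mode, all-multicast, or the
 * 256-bit multicast hash filter.
 */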
void
gem_iff(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_PROMISCUOUS |
	    GEM_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= GEM_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= GEM_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits selects the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */
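		/*
		 * Worked example (illustrative value): an address whose
		 * little-endian CRC32 has 0x9e as its top byte gives
		 * crc >> 24 == 0x9e, selecting word 0x9e >> 4 == 9 and
		 * bit 15 - (0x9e & 15) == 1, i.e. hash[9] |= 1 << 1.
		 */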

		rxcfg |= GEM_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg);
}

/*
 * Transmit interrupt.
 */
int
gem_tint(struct gem_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, hwcons;

	hwcons = status >> 19;
	cons = sc->sc_tx_cons;
	while (cons != hwcons) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == GEM_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	if (sc->sc_tx_cnt < GEM_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	gem_start(ifp);

	return (1);
}

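/*
 * Start output: dequeue packets from the send queue, load them into
 * DMA maps, build transmit descriptors and kick the transmitter.
 */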
void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int64_t flags;
	bus_dmamap_t map;
	u_int32_t cur, frag, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (i = 0; i < map->dm_nsegs; i++) {
			sc->sc_txdescs[frag].gd_addr =
			    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
			if (i == 0)
				flags |= GEM_TD_START_OF_PACKET;
			if (i == (map->dm_nsegs - 1))
				flags |= GEM_TD_END_OF_PACKET;
			sc->sc_txdescs[frag].gd_flags =
			    GEM_DMA_WRITE(sc, flags);
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
			    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
			    BUS_DMASYNC_PREWRITE);
			cur = frag;
			if (++frag == GEM_NTXDESC)
				frag = 0;
		}

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}