xref: /netbsd/sys/dev/pci/if_wm.c (revision bf9ec67e)
1 /*	$NetBSD: if_wm.c,v 1.9 2002/05/09 01:00:12 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
40  * and i82544 (``Cordova'') Gigabit Ethernet chips.
41  *
42  * TODO (in order of importance):
43  *
44  *	- Fix hw VLAN assist.
45  *
46  *	- Make GMII work on the Livengood.
47  *
48  *	- Fix out-bound IP header checksums.
49  *
50  *	- Fix UDP checksums.
51  *
52  *	- Jumbo frames -- requires changes to network stack due to
53  *	  lame buffer length handling on chip.
54  *
55  * ...and, of course, performance tuning.
56  */
57 
58 #include "bpfilter.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/callout.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/socket.h>
67 #include <sys/ioctl.h>
68 #include <sys/errno.h>
69 #include <sys/device.h>
70 #include <sys/queue.h>
71 
72 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
73 
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_ether.h>
78 
79 #if NBPFILTER > 0
80 #include <net/bpf.h>
81 #endif
82 
83 #include <netinet/in.h>			/* XXX for struct ip */
84 #include <netinet/in_systm.h>		/* XXX for struct ip */
85 #include <netinet/ip.h>			/* XXX for struct ip */
86 
87 #include <machine/bus.h>
88 #include <machine/intr.h>
89 #include <machine/endian.h>
90 
91 #include <dev/mii/mii.h>
92 #include <dev/mii/miivar.h>
93 #include <dev/mii/mii_bitbang.h>
94 
95 #include <dev/pci/pcireg.h>
96 #include <dev/pci/pcivar.h>
97 #include <dev/pci/pcidevs.h>
98 
99 #include <dev/pci/if_wmreg.h>
100 
101 #ifdef WM_DEBUG
102 #define	WM_DEBUG_LINK		0x01
103 #define	WM_DEBUG_TX		0x02
104 #define	WM_DEBUG_RX		0x04
105 #define	WM_DEBUG_GMII		0x08
106 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
107 
108 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
109 #else
110 #define	DPRINTF(x, y)	/* nothing */
111 #endif /* WM_DEBUG */
112 
113 /*
114  * Transmit descriptor list size.  Due to errata, we can only have
115  * 256 hardware descriptors in the ring.  We tell the upper layers
116  * that they can queue a lot of packets, and we go ahead and mange
117  * up to 64 of them at a time.  We allow up to 16 DMA segments per
118  * packet.
119  */
120 #define	WM_NTXSEGS		16
121 #define	WM_IFQUEUELEN		256
122 #define	WM_TXQUEUELEN		64
123 #define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
124 #define	WM_NTXDESC		256
125 #define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
126 #define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
127 #define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
128 
129 /*
130  * Receive descriptor list size.  We have one Rx buffer for normal
131  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
132  * packet.  We allocate 128 receive descriptors, each with a 2k
133  * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
134  */
135 #define	WM_NRXDESC		128
136 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
137 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
138 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
139 
140 /*
141  * Control structures are DMA'd to the i82542 chip.  We allocate them in
142  * a single clump that maps to a single DMA segment to make serveral things
143  * easier.
144  */
145 struct wm_control_data {
146 	/*
147 	 * The transmit descriptors.
148 	 */
149 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
150 
151 	/*
152 	 * The receive descriptors.
153 	 */
154 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
155 };
156 
157 #define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
158 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
159 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
160 
161 /*
162  * Software state for transmit jobs.
163  */
164 struct wm_txsoft {
165 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
166 	bus_dmamap_t txs_dmamap;	/* our DMA map */
167 	int txs_firstdesc;		/* first descriptor in packet */
168 	int txs_lastdesc;		/* last descriptor in packet */
169 	int txs_ndesc;			/* # of descriptors used */
170 };
171 
172 /*
173  * Software state for receive buffers.  Each descriptor gets a
174  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
175  * more than one buffer, we chain them together.
176  */
177 struct wm_rxsoft {
178 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
179 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
180 };
181 
182 /*
183  * Software state per device.
184  */
185 struct wm_softc {
186 	struct device sc_dev;		/* generic device information */
187 	bus_space_tag_t sc_st;		/* bus space tag */
188 	bus_space_handle_t sc_sh;	/* bus space handle */
189 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
190 	struct ethercom sc_ethercom;	/* ethernet common data */
191 	void *sc_sdhook;		/* shutdown hook */
192 
193 	int sc_type;			/* chip type; see below */
194 	int sc_flags;			/* flags; see below */
195 
196 	void *sc_ih;			/* interrupt cookie */
197 
198 	struct mii_data sc_mii;		/* MII/media information */
199 
200 	struct callout sc_tick_ch;	/* tick callout */
201 
202 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
203 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
204 
205 	/*
206 	 * Software state for the transmit and receive descriptors.
207 	 */
208 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
209 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
210 
211 	/*
212 	 * Control data structures.
213 	 */
214 	struct wm_control_data *sc_control_data;
215 #define	sc_txdescs	sc_control_data->wcd_txdescs
216 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
217 
218 #ifdef WM_EVENT_COUNTERS
219 	/* Event counters. */
220 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
221 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
222 	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
223 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
224 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
225 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
226 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
227 
228 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
229 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
230 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
231 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
232 
233 	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
234 	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
235 	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */
236 
237 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
238 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
239 
240 	struct evcnt sc_ev_tu;		/* Tx underrun */
241 #endif /* WM_EVENT_COUNTERS */
242 
243 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
244 
245 	int	sc_txfree;		/* number of free Tx descriptors */
246 	int	sc_txnext;		/* next ready Tx descriptor */
247 	int	sc_txwin;		/* Tx descriptors since last Tx int */
248 
249 	int	sc_txsfree;		/* number of free Tx jobs */
250 	int	sc_txsnext;		/* next free Tx job */
251 	int	sc_txsdirty;		/* dirty Tx jobs */
252 
253 	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
254 	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */
255 
256 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
257 
258 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
259 	int	sc_rxdiscard;
260 	int	sc_rxlen;
261 	struct mbuf *sc_rxhead;
262 	struct mbuf *sc_rxtail;
263 	struct mbuf **sc_rxtailp;
264 
265 	uint32_t sc_ctrl;		/* prototype CTRL register */
266 #if 0
267 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
268 #endif
269 	uint32_t sc_icr;		/* prototype interrupt bits */
270 	uint32_t sc_tctl;		/* prototype TCTL register */
271 	uint32_t sc_rctl;		/* prototype RCTL register */
272 	uint32_t sc_txcw;		/* prototype TXCW register */
273 	uint32_t sc_tipg;		/* prototype TIPG register */
274 
275 	int sc_tbi_linkup;		/* TBI link status */
276 	int sc_tbi_anstate;		/* autonegotiation state */
277 
278 	int sc_mchash_type;		/* multicast filter offset */
279 };
280 
281 #define	WM_RXCHAIN_RESET(sc)						\
282 do {									\
283 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
284 	*(sc)->sc_rxtailp = NULL;					\
285 	(sc)->sc_rxlen = 0;						\
286 } while (/*CONSTCOND*/0)
287 
288 #define	WM_RXCHAIN_LINK(sc, m)						\
289 do {									\
290 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
291 	(sc)->sc_rxtailp = &(m)->m_next;				\
292 } while (/*CONSTCOND*/0)
293 
294 /* sc_type */
295 #define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
296 #define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
297 #define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
298 #define	WM_T_CORDOVA		3	/* Cordova (i82544) */
299 
300 /* sc_flags */
301 #define	WM_F_HAS_MII		0x01	/* has MII */
302 
303 #ifdef WM_EVENT_COUNTERS
304 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
305 #else
306 #define	WM_EVCNT_INCR(ev)	/* nothing */
307 #endif
308 
309 #define	CSR_READ(sc, reg)						\
310 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
311 #define	CSR_WRITE(sc, reg, val)						\
312 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
313 
314 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
315 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
316 
317 #define	WM_CDTXSYNC(sc, x, n, ops)					\
318 do {									\
319 	int __x, __n;							\
320 									\
321 	__x = (x);							\
322 	__n = (n);							\
323 									\
324 	/* If it will wrap around, sync to the end of the ring. */	\
325 	if ((__x + __n) > WM_NTXDESC) {					\
326 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
327 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
328 		    (WM_NTXDESC - __x), (ops));				\
329 		__n -= (WM_NTXDESC - __x);				\
330 		__x = 0;						\
331 	}								\
332 									\
333 	/* Now sync whatever is left. */				\
334 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
335 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
336 } while (/*CONSTCOND*/0)
337 
338 #define	WM_CDRXSYNC(sc, x, ops)						\
339 do {									\
340 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
341 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
342 } while (/*CONSTCOND*/0)
343 
344 #define	WM_INIT_RXDESC(sc, x)						\
345 do {									\
346 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
347 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
348 	struct mbuf *__m = __rxs->rxs_mbuf;				\
349 									\
350 	/*								\
351 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
352 	 * so that the payload after the Ethernet header is aligned	\
353 	 * to a 4-byte boundary.					\
354 	 *								\
355 	 * XXX BRAINDAMAGE ALERT!					\
356 	 * The stupid chip uses the same size for every buffer, which	\
357 	 * is set in the Receive Control register.  We are using the 2K	\
358 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
359 	 * reason, we can't accept packets longer than the standard	\
360 	 * Ethernet MTU, without incurring a big penalty to copy every	\
361 	 * incoming packet to a new, suitably aligned buffer.		\
362 	 *								\
363 	 * We'll need to make some changes to the layer 3/4 parts of	\
364 	 * the stack (to copy the headers to a new buffer if not	\
365 	 * aligned) in order to support large MTU on this chip.  Lame.	\
366 	 */								\
367 	__m->m_data = __m->m_ext.ext_buf + 2;				\
368 									\
369 	__rxd->wrx_addr.wa_low =					\
370 	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
371 	__rxd->wrx_addr.wa_high = 0;					\
372 	__rxd->wrx_len = 0;						\
373 	__rxd->wrx_cksum = 0;						\
374 	__rxd->wrx_status = 0;						\
375 	__rxd->wrx_errors = 0;						\
376 	__rxd->wrx_special = 0;						\
377 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
378 									\
379 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
380 } while (/*CONSTCOND*/0)
381 
382 void	wm_start(struct ifnet *);
383 void	wm_watchdog(struct ifnet *);
384 int	wm_ioctl(struct ifnet *, u_long, caddr_t);
385 int	wm_init(struct ifnet *);
386 void	wm_stop(struct ifnet *, int);
387 
388 void	wm_shutdown(void *);
389 
390 void	wm_reset(struct wm_softc *);
391 void	wm_rxdrain(struct wm_softc *);
392 int	wm_add_rxbuf(struct wm_softc *, int);
393 void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
394 void	wm_tick(void *);
395 
396 void	wm_set_filter(struct wm_softc *);
397 
398 int	wm_intr(void *);
399 void	wm_txintr(struct wm_softc *);
400 void	wm_rxintr(struct wm_softc *);
401 void	wm_linkintr(struct wm_softc *, uint32_t);
402 
403 void	wm_tbi_mediainit(struct wm_softc *);
404 int	wm_tbi_mediachange(struct ifnet *);
405 void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
406 
407 void	wm_tbi_set_linkled(struct wm_softc *);
408 void	wm_tbi_check_link(struct wm_softc *);
409 
410 void	wm_gmii_reset(struct wm_softc *);
411 
412 int	wm_gmii_livengood_readreg(struct device *, int, int);
413 void	wm_gmii_livengood_writereg(struct device *, int, int, int);
414 
415 int	wm_gmii_cordova_readreg(struct device *, int, int);
416 void	wm_gmii_cordova_writereg(struct device *, int, int, int);
417 
418 void	wm_gmii_statchg(struct device *);
419 
420 void	wm_gmii_mediainit(struct wm_softc *);
421 int	wm_gmii_mediachange(struct ifnet *);
422 void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
423 
424 int	wm_match(struct device *, struct cfdata *, void *);
425 void	wm_attach(struct device *, struct device *, void *);
426 
427 int	wm_copy_small = 0;
428 
429 struct cfattach wm_ca = {
430 	sizeof(struct wm_softc), wm_match, wm_attach,
431 };
432 
433 /*
434  * Devices supported by this driver.
435  */
436 const struct wm_product {
437 	pci_vendor_id_t		wmp_vendor;
438 	pci_product_id_t	wmp_product;
439 	const char		*wmp_name;
440 	int			wmp_type;
441 	int			wmp_flags;
442 #define	WMP_F_1000X		0x01
443 #define	WMP_F_1000T		0x02
444 } wm_products[] = {
445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
446 	  "Intel i82542 1000BASE-X Ethernet",
447 	  WM_T_WISEMAN_2_1,	WMP_F_1000X },
448 
449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
450 	  "Intel i82543 1000BASE-X Ethernet",
451 	  WM_T_LIVENGOOD,	WMP_F_1000X },
452 
453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
454 	  "Intel i82543-SC 1000BASE-X Ethernet",
455 	  WM_T_LIVENGOOD,	WMP_F_1000X },
456 
457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
458 	  "Intel i82543 1000BASE-T Ethernet",
459 	  WM_T_LIVENGOOD,	WMP_F_1000T },
460 
461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
462 	  "Intel i82544 1000BASE-T Ethernet",
463 	  WM_T_CORDOVA,		WMP_F_1000T },
464 
465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
466 	  "Intel i82544 1000BASE-X Ethernet",
467 	  WM_T_CORDOVA,		WMP_F_1000X },
468 
469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
470 	  "Intel i82544GC 1000BASE-T Ethernet",
471 	  WM_T_CORDOVA,		WMP_F_1000T },
472 
473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
474 	  "Intel i82544GC 1000BASE-T Ethernet",
475 	  WM_T_CORDOVA,		WMP_F_1000T },
476 
477 	{ 0,			0,
478 	  NULL,
479 	  0,			0 },
480 };
481 
482 #ifdef WM_EVENT_COUNTERS
483 #if WM_NTXSEGS != 16
484 #error Update wm_txseg_evcnt_names
485 #endif
486 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
487 	"txseg1",
488 	"txseg2",
489 	"txseg3",
490 	"txseg4",
491 	"txseg5",
492 	"txseg6",
493 	"txseg7",
494 	"txseg8",
495 	"txseg9",
496 	"txseg10",
497 	"txseg11",
498 	"txseg12",
499 	"txseg13",
500 	"txseg14",
501 	"txseg15",
502 	"txseg16",
503 };
504 #endif /* WM_EVENT_COUNTERS */
505 
506 static const struct wm_product *
507 wm_lookup(const struct pci_attach_args *pa)
508 {
509 	const struct wm_product *wmp;
510 
511 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
512 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
513 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
514 			return (wmp);
515 	}
516 	return (NULL);
517 }
518 
519 int
520 wm_match(struct device *parent, struct cfdata *cf, void *aux)
521 {
522 	struct pci_attach_args *pa = aux;
523 
524 	if (wm_lookup(pa) != NULL)
525 		return (1);
526 
527 	return (0);
528 }
529 
530 void
531 wm_attach(struct device *parent, struct device *self, void *aux)
532 {
533 	struct wm_softc *sc = (void *) self;
534 	struct pci_attach_args *pa = aux;
535 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
536 	pci_chipset_tag_t pc = pa->pa_pc;
537 	pci_intr_handle_t ih;
538 	const char *intrstr = NULL;
539 	bus_space_tag_t memt;
540 	bus_space_handle_t memh;
541 	bus_dma_segment_t seg;
542 	int memh_valid;
543 	int i, rseg, error;
544 	const struct wm_product *wmp;
545 	uint8_t enaddr[ETHER_ADDR_LEN];
546 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
547 	pcireg_t preg, memtype;
548 	int pmreg;
549 
550 	callout_init(&sc->sc_tick_ch);
551 
552 	wmp = wm_lookup(pa);
553 	if (wmp == NULL) {
554 		printf("\n");
555 		panic("wm_attach: impossible");
556 	}
557 
558 	sc->sc_dmat = pa->pa_dmat;
559 
560 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
561 	printf(": %s, rev. %d\n", wmp->wmp_name, preg);
562 
563 	sc->sc_type = wmp->wmp_type;
564 	if (sc->sc_type < WM_T_LIVENGOOD) {
565 		if (preg < 2) {
566 			printf("%s: Wiseman must be at least rev. 2\n",
567 			    sc->sc_dev.dv_xname);
568 			return;
569 		}
570 		if (preg < 3)
571 			sc->sc_type = WM_T_WISEMAN_2_0;
572 	}
573 
574 	/*
575 	 * Map the device.
576 	 */
577 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
578 	switch (memtype) {
579 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
580 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
581 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
582 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
583 		break;
584 	default:
585 		memh_valid = 0;
586 	}
587 
588 	if (memh_valid) {
589 		sc->sc_st = memt;
590 		sc->sc_sh = memh;
591 	} else {
592 		printf("%s: unable to map device registers\n",
593 		    sc->sc_dev.dv_xname);
594 		return;
595 	}
596 
597 	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
598 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
599 	preg |= PCI_COMMAND_MASTER_ENABLE;
600 	if (sc->sc_type < WM_T_WISEMAN_2_1)
601 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
602 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
603 
604 	/* Get it out of power save mode, if needed. */
605 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
606 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
607 		if (preg == 3) {
608 			/*
609 			 * The card has lost all configuration data in
610 			 * this state, so punt.
611 			 */
612 			printf("%s: unable to wake from power state D3\n",
613 			    sc->sc_dev.dv_xname);
614 			return;
615 		}
616 		if (preg != 0) {
617 			printf("%s: waking up from power state D%d\n",
618 			    sc->sc_dev.dv_xname, preg);
619 			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
620 		}
621 	}
622 
623 	/*
624 	 * Map and establish our interrupt.
625 	 */
626 	if (pci_intr_map(pa, &ih)) {
627 		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
628 		return;
629 	}
630 	intrstr = pci_intr_string(pc, ih);
631 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
632 	if (sc->sc_ih == NULL) {
633 		printf("%s: unable to establish interrupt",
634 		    sc->sc_dev.dv_xname);
635 		if (intrstr != NULL)
636 			printf(" at %s", intrstr);
637 		printf("\n");
638 		return;
639 	}
640 	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
641 
642 	/*
643 	 * Allocate the control data structures, and create and load the
644 	 * DMA map for it.
645 	 */
646 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
647 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
648 	    0)) != 0) {
649 		printf("%s: unable to allocate control data, error = %d\n",
650 		    sc->sc_dev.dv_xname, error);
651 		goto fail_0;
652 	}
653 
654 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
655 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
656 	    BUS_DMA_COHERENT)) != 0) {
657 		printf("%s: unable to map control data, error = %d\n",
658 		    sc->sc_dev.dv_xname, error);
659 		goto fail_1;
660 	}
661 
662 	if ((error = bus_dmamap_create(sc->sc_dmat,
663 	    sizeof(struct wm_control_data), 1,
664 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
665 		printf("%s: unable to create control data DMA map, "
666 		    "error = %d\n", sc->sc_dev.dv_xname, error);
667 		goto fail_2;
668 	}
669 
670 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
671 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
672 	    0)) != 0) {
673 		printf("%s: unable to load control data DMA map, error = %d\n",
674 		    sc->sc_dev.dv_xname, error);
675 		goto fail_3;
676 	}
677 
678 	/*
679 	 * Create the transmit buffer DMA maps.
680 	 */
681 	for (i = 0; i < WM_TXQUEUELEN; i++) {
682 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
683 		    WM_NTXSEGS, MCLBYTES, 0, 0,
684 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
685 			printf("%s: unable to create Tx DMA map %d, "
686 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
687 			goto fail_4;
688 		}
689 	}
690 
691 	/*
692 	 * Create the receive buffer DMA maps.
693 	 */
694 	for (i = 0; i < WM_NRXDESC; i++) {
695 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
696 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
697 			printf("%s: unable to create Rx DMA map %d, "
698 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
699 			goto fail_5;
700 		}
701 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
702 	}
703 
704 	/*
705 	 * Reset the chip to a known state.
706 	 */
707 	wm_reset(sc);
708 
709 	/*
710 	 * Read the Ethernet address from the EEPROM.
711 	 */
712 	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
713 	    sizeof(myea) / sizeof(myea[0]), myea);
714 	enaddr[0] = myea[0] & 0xff;
715 	enaddr[1] = myea[0] >> 8;
716 	enaddr[2] = myea[1] & 0xff;
717 	enaddr[3] = myea[1] >> 8;
718 	enaddr[4] = myea[2] & 0xff;
719 	enaddr[5] = myea[2] >> 8;
720 
721 	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
722 	    ether_sprintf(enaddr));
723 
724 	/*
725 	 * Read the config info from the EEPROM, and set up various
726 	 * bits in the control registers based on their contents.
727 	 */
728 	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
729 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
730 	if (sc->sc_type >= WM_T_CORDOVA)
731 		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
732 
733 	if (cfg1 & EEPROM_CFG1_ILOS)
734 		sc->sc_ctrl |= CTRL_ILOS;
735 	if (sc->sc_type >= WM_T_CORDOVA) {
736 		sc->sc_ctrl |=
737 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
738 		    CTRL_SWDPIO_SHIFT;
739 		sc->sc_ctrl |=
740 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
741 		    CTRL_SWDPINS_SHIFT;
742 	} else {
743 		sc->sc_ctrl |=
744 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
745 		    CTRL_SWDPIO_SHIFT;
746 	}
747 
748 #if 0
749 	if (sc->sc_type >= WM_T_CORDOVA) {
750 		if (cfg1 & EEPROM_CFG1_IPS0)
751 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
752 		if (cfg1 & EEPROM_CFG1_IPS1)
753 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
754 		sc->sc_ctrl_ext |=
755 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
756 		    CTRL_EXT_SWDPIO_SHIFT;
757 		sc->sc_ctrl_ext |=
758 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
759 		    CTRL_EXT_SWDPINS_SHIFT;
760 	} else {
761 		sc->sc_ctrl_ext |=
762 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
763 		    CTRL_EXT_SWDPIO_SHIFT;
764 	}
765 #endif
766 
767 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
768 #if 0
769 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
770 #endif
771 
772 	/*
773 	 * Set up some register offsets that are different between
774 	 * the Wiseman and the Livengood and later chips.
775 	 */
776 	if (sc->sc_type < WM_T_LIVENGOOD) {
777 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
778 		sc->sc_tdt_reg = WMREG_OLD_TDT;
779 	} else {
780 		sc->sc_rdt_reg = WMREG_RDT;
781 		sc->sc_tdt_reg = WMREG_TDT;
782 	}
783 
784 	/*
785 	 * Determine if we should use flow control.  We should
786 	 * always use it, unless we're on a Wiseman < 2.1.
787 	 */
788 	if (sc->sc_type >= WM_T_WISEMAN_2_1)
789 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
790 
791 	/*
792 	 * Determine if we're TBI or GMII mode, and initialize the
793 	 * media structures accordingly.
794 	 */
795 	if (sc->sc_type < WM_T_LIVENGOOD ||
796 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
797 		if (wmp->wmp_flags & WMP_F_1000T)
798 			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
799 			    "product!\n", sc->sc_dev.dv_xname);
800 		wm_tbi_mediainit(sc);
801 	} else {
802 		if (wmp->wmp_flags & WMP_F_1000X)
803 			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
804 			    "product!\n", sc->sc_dev.dv_xname);
805 		wm_gmii_mediainit(sc);
806 	}
807 
808 	ifp = &sc->sc_ethercom.ec_if;
809 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
810 	ifp->if_softc = sc;
811 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
812 	ifp->if_ioctl = wm_ioctl;
813 	ifp->if_start = wm_start;
814 	ifp->if_watchdog = wm_watchdog;
815 	ifp->if_init = wm_init;
816 	ifp->if_stop = wm_stop;
817 	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
818 	IFQ_SET_READY(&ifp->if_snd);
819 
820 	/*
821 	 * If we're a Livengood or greater, we can support VLANs.
822 	 */
823 	if (sc->sc_type >= WM_T_LIVENGOOD)
824 		sc->sc_ethercom.ec_capabilities |=
825 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
826 
827 	/*
828 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
829 	 * on Livengood and later.
830 	 */
831 	if (sc->sc_type >= WM_T_LIVENGOOD)
832 		ifp->if_capabilities |=
833 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
834 
835 	/*
836 	 * Attach the interface.
837 	 */
838 	if_attach(ifp);
839 	ether_ifattach(ifp, enaddr);
840 
841 #ifdef WM_EVENT_COUNTERS
842 	/* Attach event counters. */
843 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
844 	    NULL, sc->sc_dev.dv_xname, "txsstall");
845 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
846 	    NULL, sc->sc_dev.dv_xname, "txdstall");
847 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
848 	    NULL, sc->sc_dev.dv_xname, "txforceintr");
849 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
850 	    NULL, sc->sc_dev.dv_xname, "txdw");
851 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
852 	    NULL, sc->sc_dev.dv_xname, "txqe");
853 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
854 	    NULL, sc->sc_dev.dv_xname, "rxintr");
855 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
856 	    NULL, sc->sc_dev.dv_xname, "linkintr");
857 
858 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
859 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
860 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
861 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
862 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
863 	    NULL, sc->sc_dev.dv_xname, "txipsum");
864 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
865 	    NULL, sc->sc_dev.dv_xname, "txtusum");
866 
867 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
868 	    NULL, sc->sc_dev.dv_xname, "txctx init");
869 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
870 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
871 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
872 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
873 
874 	for (i = 0; i < WM_NTXSEGS; i++)
875 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
876 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
877 
878 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
879 	    NULL, sc->sc_dev.dv_xname, "txdrop");
880 
881 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
882 	    NULL, sc->sc_dev.dv_xname, "tu");
883 #endif /* WM_EVENT_COUNTERS */
884 
885 	/*
886 	 * Make sure the interface is shutdown during reboot.
887 	 */
888 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
889 	if (sc->sc_sdhook == NULL)
890 		printf("%s: WARNING: unable to establish shutdown hook\n",
891 		    sc->sc_dev.dv_xname);
892 	return;
893 
894 	/*
895 	 * Free any resources we've allocated during the failed attach
896 	 * attempt.  Do this in reverse order and fall through.
897 	 */
898  fail_5:
899 	for (i = 0; i < WM_NRXDESC; i++) {
900 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
901 			bus_dmamap_destroy(sc->sc_dmat,
902 			    sc->sc_rxsoft[i].rxs_dmamap);
903 	}
904  fail_4:
905 	for (i = 0; i < WM_TXQUEUELEN; i++) {
906 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
907 			bus_dmamap_destroy(sc->sc_dmat,
908 			    sc->sc_txsoft[i].txs_dmamap);
909 	}
910 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
911  fail_3:
912 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
913  fail_2:
914 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
915 	    sizeof(struct wm_control_data));
916  fail_1:
917 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
918  fail_0:
919 	return;
920 }
921 
922 /*
923  * wm_shutdown:
924  *
925  *	Make sure the interface is stopped at reboot time.
926  */
927 void
928 wm_shutdown(void *arg)
929 {
930 	struct wm_softc *sc = arg;
931 
932 	wm_stop(&sc->sc_ethercom.ec_if, 1);
933 }
934 
935 /*
936  * wm_tx_cksum:
937  *
938  *	Set up TCP/IP checksumming parameters for the
939  *	specified packet.
940  */
941 static int
942 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
943     uint32_t *fieldsp)
944 {
945 	struct mbuf *m0 = txs->txs_mbuf;
946 	struct livengood_tcpip_ctxdesc *t;
947 	uint32_t fields = 0, ipcs, tucs;
948 	struct ip *ip;
949 	int offset, iphl;
950 
951 	/*
952 	 * XXX It would be nice if the mbuf pkthdr had offset
953 	 * fields for the protocol headers.
954 	 */
955 
956 	/* XXX Assumes normal Ethernet encap. */
957 	offset = ETHER_HDR_LEN;
958 
959 	/* XXX */
960 	if (m0->m_len < (offset + sizeof(struct ip))) {
961 		printf("%s: wm_tx_cksum: need to m_pullup, "
962 		    "packet dropped\n", sc->sc_dev.dv_xname);
963 		return (EINVAL);
964 	}
965 
966 	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
967 	iphl = ip->ip_hl << 2;
968 
969 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
970 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
971 		fields |= htole32(WTX_IXSM);
972 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
973 		    WTX_TCPIP_IPCSO(offsetof(struct ip, ip_sum)) |
974 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
975 	} else
976 		ipcs = 0;
977 
978 	offset += iphl;
979 
980 	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
981 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
982 		fields |= htole32(WTX_TXSM);
983 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
984 		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
985 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
986 	} else
987 		tucs = 0;
988 
989 	if (sc->sc_txctx_ipcs == ipcs &&
990 	    sc->sc_txctx_tucs == tucs) {
991 		/* Cached context is fine. */
992 		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
993 	} else {
994 		/* Fill in the context descriptor. */
995 #ifdef WM_EVENT_COUNTERS
996 		if (sc->sc_txctx_ipcs == 0xffffffff &&
997 		    sc->sc_txctx_tucs == 0xffffffff)
998 			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
999 		else
1000 			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1001 #endif
1002 		t = (struct livengood_tcpip_ctxdesc *)
1003 		    &sc->sc_txdescs[sc->sc_txnext];
1004 		t->tcpip_ipcs = ipcs;
1005 		t->tcpip_tucs = tucs;
1006 		t->tcpip_cmdlen =
1007 		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1008 		t->tcpip_seg = 0;
1009 		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1010 
1011 		sc->sc_txctx_ipcs = ipcs;
1012 		sc->sc_txctx_tucs = tucs;
1013 
1014 		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1015 		txs->txs_ndesc++;
1016 	}
1017 
1018 	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1019 	*fieldsp = fields;
1020 
1021 	return (0);
1022 }
1023 
1024 /*
1025  * wm_start:		[ifnet interface function]
1026  *
1027  *	Start packet transmission on the interface.
1028  */
1029 void
1030 wm_start(struct ifnet *ifp)
1031 {
1032 	struct wm_softc *sc = ifp->if_softc;
1033 	struct mbuf *m0/*, *m*/;
1034 	struct wm_txsoft *txs;
1035 	bus_dmamap_t dmamap;
1036 	int error, nexttx, lasttx, ofree, seg;
1037 	uint32_t cksumcmd, cksumfields;
1038 
1039 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1040 		return;
1041 
1042 	/*
1043 	 * Remember the previous number of free descriptors.
1044 	 */
1045 	ofree = sc->sc_txfree;
1046 
1047 	/*
1048 	 * Loop through the send queue, setting up transmit descriptors
1049 	 * until we drain the queue, or use up all available transmit
1050 	 * descriptors.
1051 	 */
1052 	for (;;) {
1053 		/* Grab a packet off the queue. */
1054 		IFQ_POLL(&ifp->if_snd, m0);
1055 		if (m0 == NULL)
1056 			break;
1057 
1058 		DPRINTF(WM_DEBUG_TX,
1059 		    ("%s: TX: have packet to transmit: %p\n",
1060 		    sc->sc_dev.dv_xname, m0));
1061 
1062 		/* Get a work queue entry. */
1063 		if (sc->sc_txsfree == 0) {
1064 			DPRINTF(WM_DEBUG_TX,
1065 			    ("%s: TX: no free job descriptors\n",
1066 				sc->sc_dev.dv_xname));
1067 			WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1068 			break;
1069 		}
1070 
1071 		txs = &sc->sc_txsoft[sc->sc_txsnext];
1072 		dmamap = txs->txs_dmamap;
1073 
1074 		/*
1075 		 * Load the DMA map.  If this fails, the packet either
1076 		 * didn't fit in the allotted number of segments, or we
1077 		 * were short on resources.  For the too-many-segments
1078 		 * case, we simply report an error and drop the packet,
1079 		 * since we can't sanely copy a jumbo packet to a single
1080 		 * buffer.
1081 		 */
1082 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1083 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1084 		if (error) {
1085 			if (error == EFBIG) {
1086 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1087 				printf("%s: Tx packet consumes too many "
1088 				    "DMA segments, dropping...\n",
1089 				    sc->sc_dev.dv_xname);
1090 				IFQ_DEQUEUE(&ifp->if_snd, m0);
1091 				m_freem(m0);
1092 				continue;
1093 			}
1094 			/*
1095 			 * Short on resources, just stop for now.
1096 			 */
1097 			DPRINTF(WM_DEBUG_TX,
1098 			    ("%s: TX: dmamap load failed: %d\n",
1099 			    sc->sc_dev.dv_xname, error));
1100 			break;
1101 		}
1102 
1103 		/*
1104 		 * Ensure we have enough descriptors free to describe
1105 		 * the packet.  Note, we always reserve one descriptor
1106 		 * at the end of the ring due to the semantics of the
1107 		 * TDT register, plus one more in the event we need
1108 		 * to re-load checksum offload context.
1109 		 */
1110 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1111 			/*
1112 			 * Not enough free descriptors to transmit this
1113 			 * packet.  We haven't committed anything yet,
1114 			 * so just unload the DMA map, put the packet
1115 			 * pack on the queue, and punt.  Notify the upper
1116 			 * layer that there are no more slots left.
1117 			 */
1118 			DPRINTF(WM_DEBUG_TX,
1119 			    ("%s: TX: need %d descriptors, have %d\n",
1120 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1121 			    sc->sc_txfree - 1));
1122 			ifp->if_flags |= IFF_OACTIVE;
1123 			bus_dmamap_unload(sc->sc_dmat, dmamap);
1124 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1125 			break;
1126 		}
1127 
1128 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1129 
1130 		/*
1131 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1132 		 */
1133 
1134 		/* Sync the DMA map. */
1135 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1136 		    BUS_DMASYNC_PREWRITE);
1137 
1138 		DPRINTF(WM_DEBUG_TX,
1139 		    ("%s: TX: packet has %d DMA segments\n",
1140 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1141 
1142 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1143 
1144 		/*
1145 		 * Store a pointer to the packet so that we can free it
1146 		 * later.
1147 		 *
1148 		 * Initially, we consider the number of descriptors the
1149 		 * packet uses the number of DMA segments.  This may be
1150 		 * incremented by 1 if we do checksum offload (a descriptor
1151 		 * is used to set the checksum context).
1152 		 */
1153 		txs->txs_mbuf = m0;
1154 		txs->txs_firstdesc = sc->sc_txnext;
1155 		txs->txs_ndesc = dmamap->dm_nsegs;
1156 
1157 		/*
1158 		 * Set up checksum offload parameters for
1159 		 * this packet.
1160 		 */
1161 		if (m0->m_pkthdr.csum_flags &
1162 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1163 			if (wm_tx_cksum(sc, txs, &cksumcmd,
1164 					&cksumfields) != 0) {
1165 				/* Error message already displayed. */
1166 				m_freem(m0);
1167 				bus_dmamap_unload(sc->sc_dmat, dmamap);
1168 				txs->txs_mbuf = NULL;
1169 				continue;
1170 			}
1171 		} else {
1172 			cksumcmd = 0;
1173 			cksumfields = 0;
1174 		}
1175 
1176 		cksumcmd |= htole32(WTX_CMD_IDE);
1177 
1178 		/*
1179 		 * Initialize the transmit descriptor.
1180 		 */
1181 		for (nexttx = sc->sc_txnext, seg = 0;
1182 		     seg < dmamap->dm_nsegs;
1183 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
1184 			/*
1185 			 * Note: we currently only use 32-bit DMA
1186 			 * addresses.
1187 			 */
1188 			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1189 			    htole32(dmamap->dm_segs[seg].ds_addr);
1190 			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1191 			    htole32(dmamap->dm_segs[seg].ds_len);
1192 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1193 			    cksumfields;
1194 			lasttx = nexttx;
1195 
1196 			DPRINTF(WM_DEBUG_TX,
1197 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1198 			    sc->sc_dev.dv_xname, nexttx,
1199 			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
1200 			    (uint32_t) dmamap->dm_segs[seg].ds_len));
1201 		}
1202 
1203 		/*
1204 		 * Set up the command byte on the last descriptor of
1205 		 * the packet.  If we're in the interrupt delay window,
1206 		 * delay the interrupt.
1207 		 */
1208 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
1209 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1210 		if (++sc->sc_txwin >= (WM_TXQUEUELEN * 2 / 3)) {
1211 			WM_EVCNT_INCR(&sc->sc_ev_txforceintr);
1212 			sc->sc_txdescs[lasttx].wtx_cmdlen &=
1213 			    htole32(~WTX_CMD_IDE);
1214 			sc->sc_txwin = 0;
1215 		}
1216 
1217 #if 0 /* XXXJRT */
1218 		/*
1219 		 * If VLANs are enabled and the packet has a VLAN tag, set
1220 		 * up the descriptor to encapsulate the packet for us.
1221 		 *
1222 		 * This is only valid on the last descriptor of the packet.
1223 		 */
1224 		if (sc->sc_ethercom.ec_nvlans != 0 &&
1225 		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1226 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
1227 			    htole32(WTX_CMD_VLE);
1228 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1229 			    = htole16(*mtod(m, int *) & 0xffff);
1230 		}
1231 #endif /* XXXJRT */
1232 
1233 		txs->txs_lastdesc = lasttx;
1234 
1235 		DPRINTF(WM_DEBUG_TX,
1236 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1237 		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1238 
1239 		/* Sync the descriptors we're using. */
1240 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1241 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1242 
1243 		/* Give the packet to the chip. */
1244 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1245 
1246 		DPRINTF(WM_DEBUG_TX,
1247 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1248 
1249 		DPRINTF(WM_DEBUG_TX,
1250 		    ("%s: TX: finished transmitting packet, job %d\n",
1251 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
1252 
1253 		/* Advance the tx pointer. */
1254 		sc->sc_txfree -= txs->txs_ndesc;
1255 		sc->sc_txnext = nexttx;
1256 
1257 		sc->sc_txsfree--;
1258 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1259 
1260 #if NBPFILTER > 0
1261 		/* Pass the packet to any BPF listeners. */
1262 		if (ifp->if_bpf)
1263 			bpf_mtap(ifp->if_bpf, m0);
1264 #endif /* NBPFILTER > 0 */
1265 	}
1266 
1267 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1268 		/* No more slots; notify upper layer. */
1269 		ifp->if_flags |= IFF_OACTIVE;
1270 	}
1271 
1272 	if (sc->sc_txfree != ofree) {
1273 		/* Set a watchdog timer in case the chip flakes out. */
1274 		ifp->if_timer = 5;
1275 	}
1276 }
1277 
1278 /*
1279  * wm_watchdog:		[ifnet interface function]
1280  *
1281  *	Watchdog timer handler.
1282  */
1283 void
1284 wm_watchdog(struct ifnet *ifp)
1285 {
1286 	struct wm_softc *sc = ifp->if_softc;
1287 
1288 	/*
1289 	 * Since we're using delayed interrupts, sweep up
1290 	 * before we report an error.
1291 	 */
1292 	wm_txintr(sc);
1293 
1294 	if (sc->sc_txfree != WM_NTXDESC) {
1295 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1296 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1297 		    sc->sc_txnext);
1298 		ifp->if_oerrors++;
1299 
1300 		/* Reset the interface. */
1301 		(void) wm_init(ifp);
1302 	}
1303 
1304 	/* Try to get more packets going. */
1305 	wm_start(ifp);
1306 }
1307 
1308 /*
1309  * wm_ioctl:		[ifnet interface function]
1310  *
1311  *	Handle control requests from the operator.
1312  */
1313 int
1314 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1315 {
1316 	struct wm_softc *sc = ifp->if_softc;
1317 	struct ifreq *ifr = (struct ifreq *) data;
1318 	int s, error;
1319 
1320 	s = splnet();
1321 
1322 	switch (cmd) {
1323 	case SIOCSIFMEDIA:
1324 	case SIOCGIFMEDIA:
1325 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1326 		break;
1327 
1328 	default:
1329 		error = ether_ioctl(ifp, cmd, data);
1330 		if (error == ENETRESET) {
1331 			/*
1332 			 * Multicast list has changed; set the hardware filter
1333 			 * accordingly.
1334 			 */
1335 			wm_set_filter(sc);
1336 			error = 0;
1337 		}
1338 		break;
1339 	}
1340 
1341 	/* Try to get more packets going. */
1342 	wm_start(ifp);
1343 
1344 	splx(s);
1345 	return (error);
1346 }
1347 
1348 /*
1349  * wm_intr:
1350  *
1351  *	Interrupt service routine.
1352  */
1353 int
1354 wm_intr(void *arg)
1355 {
1356 	struct wm_softc *sc = arg;
1357 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1358 	uint32_t icr;
1359 	int wantinit, handled = 0;
1360 
1361 	for (wantinit = 0; wantinit == 0;) {
1362 		icr = CSR_READ(sc, WMREG_ICR);
1363 		if ((icr & sc->sc_icr) == 0)
1364 			break;
1365 
1366 		handled = 1;
1367 
1368 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1369 			DPRINTF(WM_DEBUG_RX,
1370 			    ("%s: RX: got Rx intr 0x%08x\n",
1371 			    sc->sc_dev.dv_xname,
1372 			    icr & (ICR_RXDMT0|ICR_RXT0)));
1373 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1374 			wm_rxintr(sc);
1375 		}
1376 
1377 		if (icr & (ICR_TXDW|ICR_TXQE)) {
1378 			DPRINTF(WM_DEBUG_TX,
1379 			    ("%s: TX: got TDXW|TXQE interrupt\n",
1380 			    sc->sc_dev.dv_xname));
1381 #ifdef WM_EVENT_COUNTERS
1382 			if (icr & ICR_TXDW)
1383 				WM_EVCNT_INCR(&sc->sc_ev_txdw);
1384 			else if (icr & ICR_TXQE)
1385 				WM_EVCNT_INCR(&sc->sc_ev_txqe);
1386 #endif
1387 			wm_txintr(sc);
1388 		}
1389 
1390 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1391 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1392 			wm_linkintr(sc, icr);
1393 		}
1394 
1395 		if (icr & ICR_RXO) {
1396 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1397 			wantinit = 1;
1398 		}
1399 	}
1400 
1401 	if (handled) {
1402 		if (wantinit)
1403 			wm_init(ifp);
1404 
1405 		/* Try to get more packets going. */
1406 		wm_start(ifp);
1407 	}
1408 
1409 	return (handled);
1410 }
1411 
1412 /*
1413  * wm_txintr:
1414  *
1415  *	Helper; handle transmit interrupts.
1416  */
1417 void
1418 wm_txintr(struct wm_softc *sc)
1419 {
1420 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1421 	struct wm_txsoft *txs;
1422 	uint8_t status;
1423 	int i;
1424 
1425 	ifp->if_flags &= ~IFF_OACTIVE;
1426 
1427 	/*
1428 	 * Go through the Tx list and free mbufs for those
1429 	 * frams which have been transmitted.
1430 	 */
1431 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1432 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1433 		txs = &sc->sc_txsoft[i];
1434 
1435 		DPRINTF(WM_DEBUG_TX,
1436 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1437 
1438 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1439 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1440 
1441 		status = le32toh(sc->sc_txdescs[
1442 		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
1443 		if ((status & WTX_ST_DD) == 0)
1444 			break;
1445 
1446 		DPRINTF(WM_DEBUG_TX,
1447 		    ("%s: TX: job %d done: descs %d..%d\n",
1448 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1449 		    txs->txs_lastdesc));
1450 
1451 		/*
1452 		 * XXX We should probably be using the statistics
1453 		 * XXX registers, but I don't know if they exist
1454 		 * XXX on chips before the Cordova.
1455 		 */
1456 
1457 #ifdef WM_EVENT_COUNTERS
1458 		if (status & WTX_ST_TU)
1459 			WM_EVCNT_INCR(&sc->sc_ev_tu);
1460 #endif /* WM_EVENT_COUNTERS */
1461 
1462 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
1463 			ifp->if_oerrors++;
1464 			if (status & WTX_ST_LC)
1465 				printf("%s: late collision\n",
1466 				    sc->sc_dev.dv_xname);
1467 			else if (status & WTX_ST_EC) {
1468 				ifp->if_collisions += 16;
1469 				printf("%s: excessive collisions\n",
1470 				    sc->sc_dev.dv_xname);
1471 			}
1472 		} else
1473 			ifp->if_opackets++;
1474 
1475 		sc->sc_txfree += txs->txs_ndesc;
1476 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1477 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1478 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1479 		m_freem(txs->txs_mbuf);
1480 		txs->txs_mbuf = NULL;
1481 	}
1482 
1483 	/* Update the dirty transmit buffer pointer. */
1484 	sc->sc_txsdirty = i;
1485 	DPRINTF(WM_DEBUG_TX,
1486 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1487 
1488 	/*
1489 	 * If there are no more pending transmissions, cancel the watchdog
1490 	 * timer.
1491 	 */
1492 	if (sc->sc_txsfree == WM_TXQUEUELEN) {
1493 		ifp->if_timer = 0;
1494 		sc->sc_txwin = 0;
1495 	}
1496 }
1497 
1498 /*
1499  * wm_rxintr:
1500  *
1501  *	Helper; handle receive interrupts.
1502  */
1503 void
1504 wm_rxintr(struct wm_softc *sc)
1505 {
1506 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1507 	struct wm_rxsoft *rxs;
1508 	struct mbuf *m;
1509 	int i, len;
1510 	uint8_t status, errors;
1511 
1512 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1513 		rxs = &sc->sc_rxsoft[i];
1514 
1515 		DPRINTF(WM_DEBUG_RX,
1516 		    ("%s: RX: checking descriptor %d\n",
1517 		    sc->sc_dev.dv_xname, i));
1518 
1519 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1520 
1521 		status = sc->sc_rxdescs[i].wrx_status;
1522 		errors = sc->sc_rxdescs[i].wrx_errors;
1523 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
1524 
1525 		if ((status & WRX_ST_DD) == 0) {
1526 			/*
1527 			 * We have processed all of the receive descriptors.
1528 			 */
1529 			break;
1530 		}
1531 
1532 		if (__predict_false(sc->sc_rxdiscard)) {
1533 			DPRINTF(WM_DEBUG_RX,
1534 			    ("%s: RX: discarding contents of descriptor %d\n",
1535 			    sc->sc_dev.dv_xname, i));
1536 			WM_INIT_RXDESC(sc, i);
1537 			if (status & WRX_ST_EOP) {
1538 				/* Reset our state. */
1539 				DPRINTF(WM_DEBUG_RX,
1540 				    ("%s: RX: resetting rxdiscard -> 0\n",
1541 				    sc->sc_dev.dv_xname));
1542 				sc->sc_rxdiscard = 0;
1543 			}
1544 			continue;
1545 		}
1546 
1547 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1548 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1549 
1550 		m = rxs->rxs_mbuf;
1551 
1552 		/*
1553 		 * Add a new receive buffer to the ring.
1554 		 */
1555 		if (wm_add_rxbuf(sc, i) != 0) {
1556 			/*
1557 			 * Failed, throw away what we've done so
1558 			 * far, and discard the rest of the packet.
1559 			 */
1560 			ifp->if_ierrors++;
1561 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1562 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1563 			WM_INIT_RXDESC(sc, i);
1564 			if ((status & WRX_ST_EOP) == 0)
1565 				sc->sc_rxdiscard = 1;
1566 			if (sc->sc_rxhead != NULL)
1567 				m_freem(sc->sc_rxhead);
1568 			WM_RXCHAIN_RESET(sc);
1569 			DPRINTF(WM_DEBUG_RX,
1570 			    ("%s: RX: Rx buffer allocation failed, "
1571 			    "dropping packet%s\n", sc->sc_dev.dv_xname,
1572 			    sc->sc_rxdiscard ? " (discard)" : ""));
1573 			continue;
1574 		}
1575 
1576 		WM_RXCHAIN_LINK(sc, m);
1577 
1578 		m->m_len = len;
1579 
1580 		DPRINTF(WM_DEBUG_RX,
1581 		    ("%s: RX: buffer at %p len %d\n",
1582 		    sc->sc_dev.dv_xname, m->m_data, len));
1583 
1584 		/*
1585 		 * If this is not the end of the packet, keep
1586 		 * looking.
1587 		 */
1588 		if ((status & WRX_ST_EOP) == 0) {
1589 			sc->sc_rxlen += len;
1590 			DPRINTF(WM_DEBUG_RX,
1591 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
1592 			    sc->sc_dev.dv_xname, sc->sc_rxlen));
1593 			continue;
1594 		}
1595 
1596 		/*
1597 		 * Okay, we have the entire packet now...
1598 		 */
1599 		*sc->sc_rxtailp = NULL;
1600 		m = sc->sc_rxhead;
1601 		len += sc->sc_rxlen;
1602 
1603 		WM_RXCHAIN_RESET(sc);
1604 
1605 		DPRINTF(WM_DEBUG_RX,
1606 		    ("%s: RX: have entire packet, len -> %d\n",
1607 		    sc->sc_dev.dv_xname, len));
1608 
1609 		/*
1610 		 * If an error occurred, update stats and drop the packet.
1611 		 */
1612 		if (errors &
1613 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1614 			ifp->if_ierrors++;
1615 			if (errors & WRX_ER_SE)
1616 				printf("%s: symbol error\n",
1617 				    sc->sc_dev.dv_xname);
1618 			else if (errors & WRX_ER_SEQ)
1619 				printf("%s: receive sequence error\n",
1620 				    sc->sc_dev.dv_xname);
1621 			else if (errors & WRX_ER_CE)
1622 				printf("%s: CRC error\n",
1623 				    sc->sc_dev.dv_xname);
1624 			m_freem(m);
1625 			continue;
1626 		}
1627 
1628 		/*
1629 		 * No errors.  Receive the packet.
1630 		 *
1631 		 * Note, we have configured the chip to include the
1632 		 * CRC with every packet.
1633 		 */
1634 		m->m_flags |= M_HASFCS;
1635 		m->m_pkthdr.rcvif = ifp;
1636 		m->m_pkthdr.len = len;
1637 
1638 #if 0 /* XXXJRT */
1639 		/*
1640 		 * If VLANs are enabled, VLAN packets have been unwrapped
1641 		 * for us.  Associate the tag with the packet.
1642 		 */
1643 		if (sc->sc_ethercom.ec_nvlans != 0 &&
1644 		    (status & WRX_ST_VP) != 0) {
1645 			struct mbuf *vtag;
1646 
1647 			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1648 			if (vtag == NULL) {
1649 				ifp->if_ierrors++;
1650 				printf("%s: unable to allocate VLAN tag\n",
1651 				    sc->sc_dev.dv_xname);
1652 				m_freem(m);
1653 				continue;
1654 			}
1655 
1656 			*mtod(m, int *) =
1657 			    le16toh(sc->sc_rxdescs[i].wrx_special);
1658 			vtag->m_len = sizeof(int);
1659 		}
1660 #endif /* XXXJRT */
1661 
1662 		/*
1663 		 * Set up checksum info for this packet.
1664 		 */
1665 		if (status & WRX_ST_IPCS) {
1666 			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1667 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1668 			if (errors & WRX_ER_IPE)
1669 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1670 		}
1671 		if (status & WRX_ST_TCPCS) {
1672 			/*
1673 			 * Note: we don't know if this was TCP or UDP,
1674 			 * so we just set both bits, and expect the
1675 			 * upper layers to deal.
1676 			 */
1677 			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1678 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1679 			if (errors & WRX_ER_TCPE)
1680 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1681 		}
1682 
1683 		ifp->if_ipackets++;
1684 
1685 #if NBPFILTER > 0
1686 		/* Pass this up to any BPF listeners. */
1687 		if (ifp->if_bpf)
1688 			bpf_mtap(ifp->if_bpf, m);
1689 #endif /* NBPFILTER > 0 */
1690 
1691 		/* Pass it on. */
1692 		(*ifp->if_input)(ifp, m);
1693 	}
1694 
1695 	/* Update the receive pointer. */
1696 	sc->sc_rxptr = i;
1697 
1698 	DPRINTF(WM_DEBUG_RX,
1699 	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1700 }
1701 
1702 /*
1703  * wm_linkintr:
1704  *
1705  *	Helper; handle link interrupts.
1706  */
1707 void
1708 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1709 {
1710 	uint32_t status;
1711 
1712 	/*
1713 	 * If we get a link status interrupt on a 1000BASE-T
1714 	 * device, just fall into the normal MII tick path.
1715 	 */
1716 	if (sc->sc_flags & WM_F_HAS_MII) {
1717 		if (icr & ICR_LSC) {
1718 			DPRINTF(WM_DEBUG_LINK,
1719 			    ("%s: LINK: LSC -> mii_tick\n",
1720 			    sc->sc_dev.dv_xname));
1721 			mii_tick(&sc->sc_mii);
1722 		} else if (icr & ICR_RXSEQ) {
1723 			DPRINTF(WM_DEBUG_LINK,
1724 			    ("%s: LINK Receive sequence error\n",
1725 			    sc->sc_dev.dv_xname));
1726 		}
1727 		return;
1728 	}
1729 
1730 	/*
1731 	 * If we are now receiving /C/, check for link again in
1732 	 * a couple of link clock ticks.
1733 	 */
1734 	if (icr & ICR_RXCFG) {
1735 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1736 		    sc->sc_dev.dv_xname));
1737 		sc->sc_tbi_anstate = 2;
1738 	}
1739 
1740 	if (icr & ICR_LSC) {
1741 		status = CSR_READ(sc, WMREG_STATUS);
1742 		if (status & STATUS_LU) {
1743 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1744 			    sc->sc_dev.dv_xname,
1745 			    (status & STATUS_FD) ? "FDX" : "HDX"));
1746 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1747 			if (status & STATUS_FD)
1748 				sc->sc_tctl |=
1749 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1750 			else
1751 				sc->sc_tctl |=
1752 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1753 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1754 			sc->sc_tbi_linkup = 1;
1755 		} else {
1756 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1757 			    sc->sc_dev.dv_xname));
1758 			sc->sc_tbi_linkup = 0;
1759 		}
1760 		sc->sc_tbi_anstate = 2;
1761 		wm_tbi_set_linkled(sc);
1762 	} else if (icr & ICR_RXSEQ) {
1763 		DPRINTF(WM_DEBUG_LINK,
1764 		    ("%s: LINK: Receive sequence error\n",
1765 		    sc->sc_dev.dv_xname));
1766 	}
1767 }
1768 
1769 /*
1770  * wm_tick:
1771  *
1772  *	One second timer, used to check link status, sweep up
1773  *	completed transmit jobs, etc.
1774  */
1775 void
1776 wm_tick(void *arg)
1777 {
1778 	struct wm_softc *sc = arg;
1779 	int s;
1780 
1781 	s = splnet();
1782 
1783 	if (sc->sc_flags & WM_F_HAS_MII)
1784 		mii_tick(&sc->sc_mii);
1785 	else
1786 		wm_tbi_check_link(sc);
1787 
1788 	splx(s);
1789 
1790 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1791 }
1792 
1793 /*
1794  * wm_reset:
1795  *
1796  *	Reset the i82542 chip.
1797  */
1798 void
1799 wm_reset(struct wm_softc *sc)
1800 {
1801 	int i;
1802 
1803 	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1804 	delay(10000);
1805 
1806 	for (i = 0; i < 1000; i++) {
1807 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1808 			return;
1809 		delay(20);
1810 	}
1811 
1812 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1813 		printf("%s: WARNING: reset failed to complete\n",
1814 		    sc->sc_dev.dv_xname);
1815 }
1816 
1817 /*
1818  * wm_init:		[ifnet interface function]
1819  *
1820  *	Initialize the interface.  Must be called at splnet().
1821  */
1822 int
1823 wm_init(struct ifnet *ifp)
1824 {
1825 	struct wm_softc *sc = ifp->if_softc;
1826 	struct wm_rxsoft *rxs;
1827 	int i, error = 0;
1828 	uint32_t reg;
1829 
1830 	/* Cancel any pending I/O. */
1831 	wm_stop(ifp, 0);
1832 
1833 	/* Reset the chip to a known state. */
1834 	wm_reset(sc);
1835 
1836 	/* Initialize the transmit descriptor ring. */
1837 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1838 	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1839 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1840 	sc->sc_txfree = WM_NTXDESC;
1841 	sc->sc_txnext = 0;
1842 	sc->sc_txwin = 0;
1843 
1844 	sc->sc_txctx_ipcs = 0xffffffff;
1845 	sc->sc_txctx_tucs = 0xffffffff;
1846 
1847 	if (sc->sc_type < WM_T_LIVENGOOD) {
1848 		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1849 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1850 		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1851 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1852 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1853 		CSR_WRITE(sc, WMREG_OLD_TIDV, 1024);
1854 	} else {
1855 		CSR_WRITE(sc, WMREG_TBDAH, 0);
1856 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1857 		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1858 		CSR_WRITE(sc, WMREG_TDH, 0);
1859 		CSR_WRITE(sc, WMREG_TDT, 0);
1860 		CSR_WRITE(sc, WMREG_TIDV, 1024);
1861 
1862 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1863 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1864 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1865 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1866 	}
1867 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1868 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1869 
1870 	/* Initialize the transmit job descriptors. */
1871 	for (i = 0; i < WM_TXQUEUELEN; i++)
1872 		sc->sc_txsoft[i].txs_mbuf = NULL;
1873 	sc->sc_txsfree = WM_TXQUEUELEN;
1874 	sc->sc_txsnext = 0;
1875 	sc->sc_txsdirty = 0;
1876 
1877 	/*
1878 	 * Initialize the receive descriptor and receive job
1879 	 * descriptor rings.
1880 	 */
1881 	if (sc->sc_type < WM_T_LIVENGOOD) {
1882 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1883 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1884 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1885 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1886 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1887 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);
1888 
1889 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1890 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1891 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1892 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1893 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1894 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1895 	} else {
1896 		CSR_WRITE(sc, WMREG_RDBAH, 0);
1897 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1898 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1899 		CSR_WRITE(sc, WMREG_RDH, 0);
1900 		CSR_WRITE(sc, WMREG_RDT, 0);
1901 		CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
1902 	}
1903 	for (i = 0; i < WM_NRXDESC; i++) {
1904 		rxs = &sc->sc_rxsoft[i];
1905 		if (rxs->rxs_mbuf == NULL) {
1906 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
1907 				printf("%s: unable to allocate or map rx "
1908 				    "buffer %d, error = %d\n",
1909 				    sc->sc_dev.dv_xname, i, error);
1910 				/*
1911 				 * XXX Should attempt to run with fewer receive
1912 				 * XXX buffers instead of just failing.
1913 				 */
1914 				wm_rxdrain(sc);
1915 				goto out;
1916 			}
1917 		} else
1918 			WM_INIT_RXDESC(sc, i);
1919 	}
1920 	sc->sc_rxptr = 0;
1921 	sc->sc_rxdiscard = 0;
1922 	WM_RXCHAIN_RESET(sc);
1923 
1924 	/*
1925 	 * Clear out the VLAN table -- we don't use it (yet).
1926 	 */
1927 	CSR_WRITE(sc, WMREG_VET, 0);
1928 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
1929 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1930 
1931 	/*
1932 	 * Set up flow-control parameters.
1933 	 *
1934 	 * XXX Values could probably stand some tuning.
1935 	 */
1936 	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1937 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1938 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1939 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1940 
1941 		if (sc->sc_type < WM_T_LIVENGOOD) {
1942 			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1943 			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1944 		} else {
1945 			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1946 			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1947 		}
1948 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1949 	}
1950 
1951 #if 0 /* XXXJRT */
1952 	/* Deal with VLAN enables. */
1953 	if (sc->sc_ethercom.ec_nvlans != 0)
1954 		sc->sc_ctrl |= CTRL_VME;
1955 	else
1956 #endif /* XXXJRT */
1957 		sc->sc_ctrl &= ~CTRL_VME;
1958 
1959 	/* Write the control registers. */
1960 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1961 #if 0
1962 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1963 #endif
1964 
1965 	/*
1966 	 * Set up checksum offload parameters.
1967 	 */
1968 	reg = CSR_READ(sc, WMREG_RXCSUM);
1969 	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1970 		reg |= RXCSUM_IPOFL;
1971 	else
1972 		reg &= ~RXCSUM_IPOFL;
1973 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
1974 		reg |= RXCSUM_TUOFL;
1975 	else
1976 		reg &= ~RXCSUM_TUOFL;
1977 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
1978 
1979 	/*
1980 	 * Set up the interrupt registers.
1981 	 */
1982 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
1983 	sc->sc_icr = ICR_TXDW | ICR_TXQE | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1984 	    ICR_RXO | ICR_RXT0;
1985 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
1986 		sc->sc_icr |= ICR_RXCFG;
1987 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
1988 
1989 	/* Set up the inter-packet gap. */
1990 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
1991 
1992 #if 0 /* XXXJRT */
1993 	/* Set the VLAN ethernetype. */
1994 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
1995 #endif
1996 
1997 	/*
1998 	 * Set up the transmit control register; we start out with
1999 	 * a collision distance suitable for FDX, but update it whe
2000 	 * we resolve the media type.
2001 	 */
2002 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2003 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2004 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2005 
2006 	/* Set the media. */
2007 	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2008 
2009 	/*
2010 	 * Set up the receive control register; we actually program
2011 	 * the register when we set the receive filter.  Use multicast
2012 	 * address offset type 0.
2013 	 *
2014 	 * Only the Cordova has the ability to strip the incoming
2015 	 * CRC, so we don't enable that feature.
2016 	 */
2017 	sc->sc_mchash_type = 0;
2018 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2019 	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2020 
2021 	/* Set the receive filter. */
2022 	wm_set_filter(sc);
2023 
2024 	/* Start the one second link check clock. */
2025 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2026 
2027 	/* ...all done! */
2028 	ifp->if_flags |= IFF_RUNNING;
2029 	ifp->if_flags &= ~IFF_OACTIVE;
2030 
2031  out:
2032 	if (error)
2033 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2034 	return (error);
2035 }
2036 
2037 /*
2038  * wm_rxdrain:
2039  *
2040  *	Drain the receive queue.
2041  */
2042 void
2043 wm_rxdrain(struct wm_softc *sc)
2044 {
2045 	struct wm_rxsoft *rxs;
2046 	int i;
2047 
2048 	for (i = 0; i < WM_NRXDESC; i++) {
2049 		rxs = &sc->sc_rxsoft[i];
2050 		if (rxs->rxs_mbuf != NULL) {
2051 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2052 			m_freem(rxs->rxs_mbuf);
2053 			rxs->rxs_mbuf = NULL;
2054 		}
2055 	}
2056 }
2057 
2058 /*
2059  * wm_stop:		[ifnet interface function]
2060  *
2061  *	Stop transmission on the interface.
2062  */
2063 void
2064 wm_stop(struct ifnet *ifp, int disable)
2065 {
2066 	struct wm_softc *sc = ifp->if_softc;
2067 	struct wm_txsoft *txs;
2068 	int i;
2069 
2070 	/* Stop the one second clock. */
2071 	callout_stop(&sc->sc_tick_ch);
2072 
2073 	if (sc->sc_flags & WM_F_HAS_MII) {
2074 		/* Down the MII. */
2075 		mii_down(&sc->sc_mii);
2076 	}
2077 
2078 	/* Stop the transmit and receive processes. */
2079 	CSR_WRITE(sc, WMREG_TCTL, 0);
2080 	CSR_WRITE(sc, WMREG_RCTL, 0);
2081 
2082 	/* Release any queued transmit buffers. */
2083 	for (i = 0; i < WM_TXQUEUELEN; i++) {
2084 		txs = &sc->sc_txsoft[i];
2085 		if (txs->txs_mbuf != NULL) {
2086 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2087 			m_freem(txs->txs_mbuf);
2088 			txs->txs_mbuf = NULL;
2089 		}
2090 	}
2091 
2092 	if (disable)
2093 		wm_rxdrain(sc);
2094 
2095 	/* Mark the interface as down and cancel the watchdog timer. */
2096 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2097 	ifp->if_timer = 0;
2098 }
2099 
2100 /*
2101  * wm_read_eeprom:
2102  *
2103  *	Read data from the serial EEPROM.
2104  */
2105 void
2106 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2107 {
2108 	uint32_t reg;
2109 	int i, x;
2110 
2111 	for (i = 0; i < wordcnt; i++) {
2112 		/* Send CHIP SELECT for one clock tick. */
2113 		CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2114 		delay(2);
2115 
2116 		/* Shift in the READ command. */
2117 		for (x = 3; x > 0; x--) {
2118 			reg = EECD_CS;
2119 			if (UWIRE_OPC_READ & (1 << (x - 1)))
2120 				reg |= EECD_DI;
2121 			CSR_WRITE(sc, WMREG_EECD, reg);
2122 			delay(2);
2123 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2124 			delay(2);
2125 			CSR_WRITE(sc, WMREG_EECD, reg);
2126 			delay(2);
2127 		}
2128 
2129 		/* Shift in address. */
2130 		for (x = 6; x > 0; x--) {
2131 			reg = EECD_CS;
2132 			if ((word + i) & (1 << (x - 1)))
2133 				reg |= EECD_DI;
2134 			CSR_WRITE(sc, WMREG_EECD, reg);
2135 			delay(2);
2136 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2137 			delay(2);
2138 			CSR_WRITE(sc, WMREG_EECD, reg);
2139 			delay(2);
2140 		}
2141 
2142 		/* Shift out the data. */
2143 		reg = EECD_CS;
2144 		data[i] = 0;
2145 		for (x = 16; x > 0; x--) {
2146 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2147 			delay(2);
2148 			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2149 				data[i] |= (1 << (x - 1));
2150 			CSR_WRITE(sc, WMREG_EECD, reg);
2151 			delay(2);
2152 		}
2153 
2154 		/* Clear CHIP SELECT. */
2155 		CSR_WRITE(sc, WMREG_EECD, 0);
2156 	}
2157 }
2158 
2159 /*
2160  * wm_add_rxbuf:
2161  *
2162  *	Add a receive buffer to the indiciated descriptor.
2163  */
2164 int
2165 wm_add_rxbuf(struct wm_softc *sc, int idx)
2166 {
2167 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2168 	struct mbuf *m;
2169 	int error;
2170 
2171 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2172 	if (m == NULL)
2173 		return (ENOBUFS);
2174 
2175 	MCLGET(m, M_DONTWAIT);
2176 	if ((m->m_flags & M_EXT) == 0) {
2177 		m_freem(m);
2178 		return (ENOBUFS);
2179 	}
2180 
2181 	if (rxs->rxs_mbuf != NULL)
2182 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2183 
2184 	rxs->rxs_mbuf = m;
2185 
2186 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2187 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2188 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
2189 	if (error) {
2190 		printf("%s: unable to load rx DMA map %d, error = %d\n",
2191 		    sc->sc_dev.dv_xname, idx, error);
2192 		panic("wm_add_rxbuf");	/* XXX XXX XXX */
2193 	}
2194 
2195 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2196 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2197 
2198 	WM_INIT_RXDESC(sc, idx);
2199 
2200 	return (0);
2201 }
2202 
2203 /*
2204  * wm_set_ral:
2205  *
2206  *	Set an entery in the receive address list.
2207  */
2208 static void
2209 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2210 {
2211 	uint32_t ral_lo, ral_hi;
2212 
2213 	if (enaddr != NULL) {
2214 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2215 		    (enaddr[3] << 24);
2216 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2217 		ral_hi |= RAL_AV;
2218 	} else {
2219 		ral_lo = 0;
2220 		ral_hi = 0;
2221 	}
2222 
2223 	if (sc->sc_type >= WM_T_CORDOVA) {
2224 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2225 		    ral_lo);
2226 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2227 		    ral_hi);
2228 	} else {
2229 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2230 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2231 	}
2232 }
2233 
2234 /*
2235  * wm_mchash:
2236  *
2237  *	Compute the hash of the multicast address for the 4096-bit
2238  *	multicast filter.
2239  */
2240 static uint32_t
2241 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2242 {
2243 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2244 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2245 	uint32_t hash;
2246 
2247 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2248 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2249 
2250 	return (hash & 0xfff);
2251 }
2252 
2253 /*
2254  * wm_set_filter:
2255  *
2256  *	Set up the receive filter.
2257  */
2258 void
2259 wm_set_filter(struct wm_softc *sc)
2260 {
2261 	struct ethercom *ec = &sc->sc_ethercom;
2262 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2263 	struct ether_multi *enm;
2264 	struct ether_multistep step;
2265 	bus_addr_t mta_reg;
2266 	uint32_t hash, reg, bit;
2267 	int i;
2268 
2269 	if (sc->sc_type >= WM_T_CORDOVA)
2270 		mta_reg = WMREG_CORDOVA_MTA;
2271 	else
2272 		mta_reg = WMREG_MTA;
2273 
2274 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2275 
2276 	if (ifp->if_flags & IFF_BROADCAST)
2277 		sc->sc_rctl |= RCTL_BAM;
2278 	if (ifp->if_flags & IFF_PROMISC) {
2279 		sc->sc_rctl |= RCTL_UPE;
2280 		goto allmulti;
2281 	}
2282 
2283 	/*
2284 	 * Set the station address in the first RAL slot, and
2285 	 * clear the remaining slots.
2286 	 */
2287 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2288 	for (i = 1; i < WM_RAL_TABSIZE; i++)
2289 		wm_set_ral(sc, NULL, i);
2290 
2291 	/* Clear out the multicast table. */
2292 	for (i = 0; i < WM_MC_TABSIZE; i++)
2293 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
2294 
2295 	ETHER_FIRST_MULTI(step, ec, enm);
2296 	while (enm != NULL) {
2297 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2298 			/*
2299 			 * We must listen to a range of multicast addresses.
2300 			 * For now, just accept all multicasts, rather than
2301 			 * trying to set only those filter bits needed to match
2302 			 * the range.  (At this time, the only use of address
2303 			 * ranges is for IP multicast routing, for which the
2304 			 * range is big enough to require all bits set.)
2305 			 */
2306 			goto allmulti;
2307 		}
2308 
2309 		hash = wm_mchash(sc, enm->enm_addrlo);
2310 
2311 		reg = (hash >> 5) & 0x7f;
2312 		bit = hash & 0x1f;
2313 
2314 		hash = CSR_READ(sc, mta_reg + (reg << 2));
2315 		hash |= 1U << bit;
2316 
2317 		/* XXX Hardware bug?? */
2318 		if (sc->sc_type == WM_T_CORDOVA && (reg & 0xe) == 1) {
2319 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2320 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2321 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2322 		} else
2323 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2324 
2325 		ETHER_NEXT_MULTI(step, enm);
2326 	}
2327 
2328 	ifp->if_flags &= ~IFF_ALLMULTI;
2329 	goto setit;
2330 
2331  allmulti:
2332 	ifp->if_flags |= IFF_ALLMULTI;
2333 	sc->sc_rctl |= RCTL_MPE;
2334 
2335  setit:
2336 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2337 }
2338 
2339 /*
2340  * wm_tbi_mediainit:
2341  *
2342  *	Initialize media for use on 1000BASE-X devices.
2343  */
2344 void
2345 wm_tbi_mediainit(struct wm_softc *sc)
2346 {
2347 	const char *sep = "";
2348 
2349 	if (sc->sc_type < WM_T_LIVENGOOD)
2350 		sc->sc_tipg = TIPG_WM_DFLT;
2351 	else
2352 		sc->sc_tipg = TIPG_LG_DFLT;
2353 
2354 	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2355 	    wm_tbi_mediastatus);
2356 
2357 	/*
2358 	 * SWD Pins:
2359 	 *
2360 	 *	0 = Link LED (output)
2361 	 *	1 = Loss Of Signal (input)
2362 	 */
2363 	sc->sc_ctrl |= CTRL_SWDPIO(0);
2364 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2365 
2366 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2367 
2368 #define	ADD(s, m, d)							\
2369 do {									\
2370 	printf("%s%s", sep, s);						\
2371 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
2372 	sep = ", ";							\
2373 } while (/*CONSTCOND*/0)
2374 
2375 	printf("%s: ", sc->sc_dev.dv_xname);
2376 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2377 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2378 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2379 	printf("\n");
2380 
2381 #undef ADD
2382 
2383 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2384 }
2385 
2386 /*
2387  * wm_tbi_mediastatus:	[ifmedia interface function]
2388  *
2389  *	Get the current interface media status on a 1000BASE-X device.
2390  */
2391 void
2392 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2393 {
2394 	struct wm_softc *sc = ifp->if_softc;
2395 
2396 	ifmr->ifm_status = IFM_AVALID;
2397 	ifmr->ifm_active = IFM_ETHER;
2398 
2399 	if (sc->sc_tbi_linkup == 0) {
2400 		ifmr->ifm_active |= IFM_NONE;
2401 		return;
2402 	}
2403 
2404 	ifmr->ifm_status |= IFM_ACTIVE;
2405 	ifmr->ifm_active |= IFM_1000_SX;
2406 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2407 		ifmr->ifm_active |= IFM_FDX;
2408 }
2409 
2410 /*
2411  * wm_tbi_mediachange:	[ifmedia interface function]
2412  *
2413  *	Set hardware to newly-selected media on a 1000BASE-X device.
2414  */
2415 int
2416 wm_tbi_mediachange(struct ifnet *ifp)
2417 {
2418 	struct wm_softc *sc = ifp->if_softc;
2419 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2420 	uint32_t status;
2421 	int i;
2422 
2423 	sc->sc_txcw = ife->ifm_data;
2424 	if (sc->sc_ctrl & CTRL_RFCE)
2425 		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2426 	if (sc->sc_ctrl & CTRL_TFCE)
2427 		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2428 	sc->sc_txcw |= TXCW_ANE;
2429 
2430 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2431 	delay(10000);
2432 
2433 	sc->sc_tbi_anstate = 0;
2434 
2435 	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2436 		/* Have signal; wait for the link to come up. */
2437 		for (i = 0; i < 50; i++) {
2438 			delay(10000);
2439 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2440 				break;
2441 		}
2442 
2443 		status = CSR_READ(sc, WMREG_STATUS);
2444 		if (status & STATUS_LU) {
2445 			/* Link is up. */
2446 			DPRINTF(WM_DEBUG_LINK,
2447 			    ("%s: LINK: set media -> link up %s\n",
2448 			    sc->sc_dev.dv_xname,
2449 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2450 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2451 			if (status & STATUS_FD)
2452 				sc->sc_tctl |=
2453 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2454 			else
2455 				sc->sc_tctl |=
2456 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2457 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2458 			sc->sc_tbi_linkup = 1;
2459 		} else {
2460 			/* Link is down. */
2461 			DPRINTF(WM_DEBUG_LINK,
2462 			    ("%s: LINK: set media -> link down\n",
2463 			    sc->sc_dev.dv_xname));
2464 			sc->sc_tbi_linkup = 0;
2465 		}
2466 	} else {
2467 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2468 		    sc->sc_dev.dv_xname));
2469 		sc->sc_tbi_linkup = 0;
2470 	}
2471 
2472 	wm_tbi_set_linkled(sc);
2473 
2474 	return (0);
2475 }
2476 
2477 /*
2478  * wm_tbi_set_linkled:
2479  *
2480  *	Update the link LED on 1000BASE-X devices.
2481  */
2482 void
2483 wm_tbi_set_linkled(struct wm_softc *sc)
2484 {
2485 
2486 	if (sc->sc_tbi_linkup)
2487 		sc->sc_ctrl |= CTRL_SWDPIN(0);
2488 	else
2489 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2490 
2491 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2492 }
2493 
2494 /*
2495  * wm_tbi_check_link:
2496  *
2497  *	Check the link on 1000BASE-X devices.
2498  */
2499 void
2500 wm_tbi_check_link(struct wm_softc *sc)
2501 {
2502 	uint32_t rxcw, ctrl, status;
2503 
2504 	if (sc->sc_tbi_anstate == 0)
2505 		return;
2506 	else if (sc->sc_tbi_anstate > 1) {
2507 		DPRINTF(WM_DEBUG_LINK,
2508 		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2509 		    sc->sc_tbi_anstate));
2510 		sc->sc_tbi_anstate--;
2511 		return;
2512 	}
2513 
2514 	sc->sc_tbi_anstate = 0;
2515 
2516 	rxcw = CSR_READ(sc, WMREG_RXCW);
2517 	ctrl = CSR_READ(sc, WMREG_CTRL);
2518 	status = CSR_READ(sc, WMREG_STATUS);
2519 
2520 	if ((status & STATUS_LU) == 0) {
2521 		DPRINTF(WM_DEBUG_LINK,
2522 		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2523 		sc->sc_tbi_linkup = 0;
2524 	} else {
2525 		DPRINTF(WM_DEBUG_LINK,
2526 		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2527 		    (status & STATUS_FD) ? "FDX" : "HDX"));
2528 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2529 		if (status & STATUS_FD)
2530 			sc->sc_tctl |=
2531 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2532 		else
2533 			sc->sc_tctl |=
2534 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2535 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2536 		sc->sc_tbi_linkup = 1;
2537 	}
2538 
2539 	wm_tbi_set_linkled(sc);
2540 }
2541 
2542 /*
2543  * wm_gmii_reset:
2544  *
2545  *	Reset the PHY.
2546  */
2547 void
2548 wm_gmii_reset(struct wm_softc *sc)
2549 {
2550 	uint32_t reg;
2551 
2552 	if (sc->sc_type >= WM_T_CORDOVA) {
2553 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2554 		delay(20000);
2555 
2556 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2557 		delay(20000);
2558 	} else {
2559 		/* The PHY reset pin is active-low. */
2560 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2561 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2562 		    CTRL_EXT_SWDPIN(4));
2563 		reg |= CTRL_EXT_SWDPIO(4);
2564 
2565 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2566 		delay(10);
2567 
2568 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2569 		delay(10);
2570 
2571 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2572 		delay(10);
2573 #if 0
2574 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2575 #endif
2576 	}
2577 }
2578 
2579 /*
2580  * wm_gmii_mediainit:
2581  *
2582  *	Initialize media for use on 1000BASE-T devices.
2583  */
2584 void
2585 wm_gmii_mediainit(struct wm_softc *sc)
2586 {
2587 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2588 
2589 	/* We have MII. */
2590 	sc->sc_flags |= WM_F_HAS_MII;
2591 
2592 	sc->sc_tipg = TIPG_1000T_DFLT;
2593 
2594 	/*
2595 	 * Let the chip set speed/duplex on its own based on
2596 	 * signals from the PHY.
2597 	 */
2598 	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2599 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2600 
2601 	/* Initialize our media structures and probe the GMII. */
2602 	sc->sc_mii.mii_ifp = ifp;
2603 
2604 	if (sc->sc_type >= WM_T_CORDOVA) {
2605 		sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2606 		sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2607 	} else {
2608 		sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2609 		sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2610 	}
2611 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
2612 
2613 	wm_gmii_reset(sc);
2614 
2615 	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2616 	    wm_gmii_mediastatus);
2617 
2618 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2619 	    MII_OFFSET_ANY, 0);
2620 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2621 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2622 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2623 	} else
2624 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2625 }
2626 
2627 /*
2628  * wm_gmii_mediastatus:	[ifmedia interface function]
2629  *
2630  *	Get the current interface media status on a 1000BASE-T device.
2631  */
2632 void
2633 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2634 {
2635 	struct wm_softc *sc = ifp->if_softc;
2636 
2637 	mii_pollstat(&sc->sc_mii);
2638 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
2639 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
2640 }
2641 
2642 /*
2643  * wm_gmii_mediachange:	[ifmedia interface function]
2644  *
2645  *	Set hardware to newly-selected media on a 1000BASE-T device.
2646  */
2647 int
2648 wm_gmii_mediachange(struct ifnet *ifp)
2649 {
2650 	struct wm_softc *sc = ifp->if_softc;
2651 
2652 	if (ifp->if_flags & IFF_UP)
2653 		mii_mediachg(&sc->sc_mii);
2654 	return (0);
2655 }
2656 
2657 #define	MDI_IO		CTRL_SWDPIN(2)
2658 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
2659 #define	MDI_CLK		CTRL_SWDPIN(3)
2660 
2661 static void
2662 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2663 {
2664 	uint32_t i, v;
2665 
2666 	v = CSR_READ(sc, WMREG_CTRL);
2667 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2668 	v |= MDI_DIR | CTRL_SWDPIO(3);
2669 
2670 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2671 		if (data & i)
2672 			v |= MDI_IO;
2673 		else
2674 			v &= ~MDI_IO;
2675 		CSR_WRITE(sc, WMREG_CTRL, v);
2676 		delay(10);
2677 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2678 		delay(10);
2679 		CSR_WRITE(sc, WMREG_CTRL, v);
2680 		delay(10);
2681 	}
2682 }
2683 
2684 static uint32_t
2685 livengood_mii_recvbits(struct wm_softc *sc)
2686 {
2687 	uint32_t v, i, data = 0;
2688 
2689 	v = CSR_READ(sc, WMREG_CTRL);
2690 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2691 	v |= CTRL_SWDPIO(3);
2692 
2693 	CSR_WRITE(sc, WMREG_CTRL, v);
2694 	delay(10);
2695 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2696 	delay(10);
2697 	CSR_WRITE(sc, WMREG_CTRL, v);
2698 	delay(10);
2699 
2700 	for (i = 0; i < 16; i++) {
2701 		data <<= 1;
2702 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2703 		delay(10);
2704 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2705 			data |= 1;
2706 		CSR_WRITE(sc, WMREG_CTRL, v);
2707 		delay(10);
2708 	}
2709 
2710 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2711 	delay(10);
2712 	CSR_WRITE(sc, WMREG_CTRL, v);
2713 	delay(10);
2714 
2715 	return (data);
2716 }
2717 
2718 #undef MDI_IO
2719 #undef MDI_DIR
2720 #undef MDI_CLK
2721 
2722 /*
2723  * wm_gmii_livengood_readreg:	[mii interface function]
2724  *
2725  *	Read a PHY register on the GMII (Livengood version).
2726  */
2727 int
2728 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2729 {
2730 	struct wm_softc *sc = (void *) self;
2731 	int rv;
2732 
2733 	livengood_mii_sendbits(sc, 0xffffffffU, 32);
2734 	livengood_mii_sendbits(sc, reg | (phy << 5) |
2735 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2736 	rv = livengood_mii_recvbits(sc) & 0xffff;
2737 
2738 	DPRINTF(WM_DEBUG_GMII,
2739 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2740 	    sc->sc_dev.dv_xname, phy, reg, rv));
2741 
2742 	return (rv);
2743 }
2744 
2745 /*
2746  * wm_gmii_livengood_writereg:	[mii interface function]
2747  *
2748  *	Write a PHY register on the GMII (Livengood version).
2749  */
2750 void
2751 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2752 {
2753 	struct wm_softc *sc = (void *) self;
2754 
2755 	livengood_mii_sendbits(sc, 0xffffffffU, 32);
2756 	livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2757 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2758 	    (MII_COMMAND_START << 30), 32);
2759 }
2760 
2761 /*
2762  * wm_gmii_cordova_readreg:	[mii interface function]
2763  *
2764  *	Read a PHY register on the GMII.
2765  */
2766 int
2767 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2768 {
2769 	struct wm_softc *sc = (void *) self;
2770 	uint32_t mdic;
2771 	int i, rv;
2772 
2773 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2774 	    MDIC_REGADD(reg));
2775 
2776 	for (i = 0; i < 100; i++) {
2777 		mdic = CSR_READ(sc, WMREG_MDIC);
2778 		if (mdic & MDIC_READY)
2779 			break;
2780 		delay(10);
2781 	}
2782 
2783 	if ((mdic & MDIC_READY) == 0) {
2784 		printf("%s: MDIC read timed out: phy %d reg %d\n",
2785 		    sc->sc_dev.dv_xname, phy, reg);
2786 		rv = 0;
2787 	} else if (mdic & MDIC_E) {
2788 #if 0 /* This is normal if no PHY is present. */
2789 		printf("%s: MDIC read error: phy %d reg %d\n",
2790 		    sc->sc_dev.dv_xname, phy, reg);
2791 #endif
2792 		rv = 0;
2793 	} else {
2794 		rv = MDIC_DATA(mdic);
2795 		if (rv == 0xffff)
2796 			rv = 0;
2797 	}
2798 
2799 	return (rv);
2800 }
2801 
2802 /*
2803  * wm_gmii_cordova_writereg:	[mii interface function]
2804  *
2805  *	Write a PHY register on the GMII.
2806  */
2807 void
2808 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2809 {
2810 	struct wm_softc *sc = (void *) self;
2811 	uint32_t mdic;
2812 	int i;
2813 
2814 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2815 	    MDIC_REGADD(reg) | MDIC_DATA(val));
2816 
2817 	for (i = 0; i < 100; i++) {
2818 		mdic = CSR_READ(sc, WMREG_MDIC);
2819 		if (mdic & MDIC_READY)
2820 			break;
2821 		delay(10);
2822 	}
2823 
2824 	if ((mdic & MDIC_READY) == 0)
2825 		printf("%s: MDIC write timed out: phy %d reg %d\n",
2826 		    sc->sc_dev.dv_xname, phy, reg);
2827 	else if (mdic & MDIC_E)
2828 		printf("%s: MDIC write error: phy %d reg %d\n",
2829 		    sc->sc_dev.dv_xname, phy, reg);
2830 }
2831 
2832 /*
2833  * wm_gmii_statchg:	[mii interface function]
2834  *
2835  *	Callback from MII layer when media changes.
2836  */
2837 void
2838 wm_gmii_statchg(struct device *self)
2839 {
2840 	struct wm_softc *sc = (void *) self;
2841 
2842 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2843 
2844 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2845 		DPRINTF(WM_DEBUG_LINK,
2846 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2847 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2848 	} else  {
2849 		DPRINTF(WM_DEBUG_LINK,
2850 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2851 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2852 	}
2853 
2854 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2855 }
2856