1 /*	$NetBSD: if_wm.c,v 1.27 2002/10/23 01:34:58 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40  *
41  * TODO (in order of importance):
42  *
43  *	- Make GMII work on the i82543.
44  *
45  *	- Fix hw VLAN assist.
46  *
47  *	- Jumbo frames -- requires changes to network stack due to
48  *	  lame buffer length handling on chip.
49  */
50 
51 #include "bpfilter.h"
52 #include "rnd.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/callout.h>
57 #include <sys/mbuf.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61 #include <sys/ioctl.h>
62 #include <sys/errno.h>
63 #include <sys/device.h>
64 #include <sys/queue.h>
65 
66 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
67 
68 #if NRND > 0
69 #include <sys/rnd.h>
70 #endif
71 
72 #include <net/if.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 #include <net/if_ether.h>
76 
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80 
81 #include <netinet/in.h>			/* XXX for struct ip */
82 #include <netinet/in_systm.h>		/* XXX for struct ip */
83 #include <netinet/ip.h>			/* XXX for struct ip */
84 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
85 
86 #include <machine/bus.h>
87 #include <machine/intr.h>
88 #include <machine/endian.h>
89 
90 #include <dev/mii/mii.h>
91 #include <dev/mii/miivar.h>
92 #include <dev/mii/mii_bitbang.h>
93 
94 #include <dev/pci/pcireg.h>
95 #include <dev/pci/pcivar.h>
96 #include <dev/pci/pcidevs.h>
97 
98 #include <dev/pci/if_wmreg.h>
99 
100 #ifdef WM_DEBUG
101 #define	WM_DEBUG_LINK		0x01
102 #define	WM_DEBUG_TX		0x02
103 #define	WM_DEBUG_RX		0x04
104 #define	WM_DEBUG_GMII		0x08
105 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
106 
107 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
108 #else
109 #define	DPRINTF(x, y)	/* nothing */
110 #endif /* WM_DEBUG */
111 
112 /*
113  * Transmit descriptor list size.  Due to errata, we can only have
114  * 256 hardware descriptors in the ring.  We tell the upper layers
115  * that they can queue a lot of packets, and we go ahead and manage
116  * up to 64 of them at a time.  We allow up to 16 DMA segments per
117  * packet.
118  */
119 #define	WM_NTXSEGS		16
120 #define	WM_IFQUEUELEN		256
121 #define	WM_TXQUEUELEN		64
122 #define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
123 #define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
124 #define	WM_NTXDESC		256
125 #define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
126 #define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
127 #define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
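/*
 * Note: the WM_NEXT*() macros only work because the ring and queue
 * sizes are powers of two; e.g. WM_NEXTTX(255) == (255 + 1) & 255 == 0.
 */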
128 
129 /*
130  * Receive descriptor list size.  We have one Rx buffer for normal
131  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
132  * packet.  We allocate 256 receive descriptors, each with a 2k
133  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
134  */
135 #define	WM_NRXDESC		256
136 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
137 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
138 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
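/*
 * E.g. a 9000-byte jumbo frame spans ceil(9000 / 2048) == 5 buffers,
 * which is presumably where the "50 jumbo packets" figure above comes
 * from (256 descriptors / 5 buffers, give or take rounding).
 */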
139 
140 /*
141  * Control structures are DMA'd to the i82542 chip.  We allocate them in
142  * a single clump that maps to a single DMA segment to make several things
143  * easier.
144  */
145 struct wm_control_data {
146 	/*
147 	 * The transmit descriptors.
148 	 */
149 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
150 
151 	/*
152 	 * The receive descriptors.
153 	 */
154 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
155 };
156 
157 #define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
158 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
159 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
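/*
 * WM_CDTXOFF()/WM_CDRXOFF() give the byte offset of a descriptor within
 * the control data block; they bound the partial bus_dmamap_sync()
 * calls and feed the WM_CDTXADDR()/WM_CDRXADDR() macros below.
 */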
160 
161 /*
162  * Software state for transmit jobs.
163  */
164 struct wm_txsoft {
165 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
166 	bus_dmamap_t txs_dmamap;	/* our DMA map */
167 	int txs_firstdesc;		/* first descriptor in packet */
168 	int txs_lastdesc;		/* last descriptor in packet */
169 	int txs_ndesc;			/* # of descriptors used */
170 };
171 
172 /*
173  * Software state for receive buffers.  Each descriptor gets a
174  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
175  * more than one buffer, we chain them together.
176  */
177 struct wm_rxsoft {
178 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
179 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
180 };
181 
182 /*
183  * Software state per device.
184  */
185 struct wm_softc {
186 	struct device sc_dev;		/* generic device information */
187 	bus_space_tag_t sc_st;		/* bus space tag */
188 	bus_space_handle_t sc_sh;	/* bus space handle */
189 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
190 	struct ethercom sc_ethercom;	/* ethernet common data */
191 	void *sc_sdhook;		/* shutdown hook */
192 
193 	int sc_type;			/* chip type; see below */
194 	int sc_flags;			/* flags; see below */
195 
196 	void *sc_ih;			/* interrupt cookie */
197 
198 	struct mii_data sc_mii;		/* MII/media information */
199 
200 	struct callout sc_tick_ch;	/* tick callout */
201 
202 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
203 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
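	/*
	 * The single-segment shorthand above is valid because the
	 * control data DMA map is created with exactly one segment
	 * (see wm_attach()).
	 */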
204 
205 	/*
206 	 * Software state for the transmit and receive descriptors.
207 	 */
208 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
209 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
210 
211 	/*
212 	 * Control data structures.
213 	 */
214 	struct wm_control_data *sc_control_data;
215 #define	sc_txdescs	sc_control_data->wcd_txdescs
216 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
217 
218 #ifdef WM_EVENT_COUNTERS
219 	/* Event counters. */
220 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
221 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
222 	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
223 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
224 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
225 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
226 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
227 
228 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
229 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
230 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
231 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
232 
233 	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
234 	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
235 	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */
236 
237 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
238 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
239 
240 	struct evcnt sc_ev_tu;		/* Tx underrun */
241 #endif /* WM_EVENT_COUNTERS */
242 
243 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
244 
245 	int	sc_txfree;		/* number of free Tx descriptors */
246 	int	sc_txnext;		/* next ready Tx descriptor */
247 
248 	int	sc_txsfree;		/* number of free Tx jobs */
249 	int	sc_txsnext;		/* next free Tx job */
250 	int	sc_txsdirty;		/* dirty Tx jobs */
251 
252 	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
253 	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */
254 
255 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
256 
257 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
258 	int	sc_rxdiscard;		/* discarding rest of current packet */
259 	int	sc_rxlen;		/* bytes chained so far for this packet */
260 	struct mbuf *sc_rxhead;		/* head of Rx mbuf chain */
261 	struct mbuf *sc_rxtail;		/* tail of Rx mbuf chain */
262 	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */
263 
264 	uint32_t sc_ctrl;		/* prototype CTRL register */
265 #if 0
266 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
267 #endif
268 	uint32_t sc_icr;		/* prototype interrupt bits */
269 	uint32_t sc_tctl;		/* prototype TCTL register */
270 	uint32_t sc_rctl;		/* prototype RCTL register */
271 	uint32_t sc_txcw;		/* prototype TXCW register */
272 	uint32_t sc_tipg;		/* prototype TIPG register */
273 
274 	int sc_tbi_linkup;		/* TBI link status */
275 	int sc_tbi_anstate;		/* autonegotiation state */
276 
277 	int sc_mchash_type;		/* multicast filter offset */
278 
279 #if NRND > 0
280 	rndsource_element_t rnd_source;	/* random source */
281 #endif
282 };
283 
284 #define	WM_RXCHAIN_RESET(sc)						\
285 do {									\
286 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
287 	*(sc)->sc_rxtailp = NULL;					\
288 	(sc)->sc_rxlen = 0;						\
289 } while (/*CONSTCOND*/0)
290 
291 #define	WM_RXCHAIN_LINK(sc, m)						\
292 do {									\
293 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
294 	(sc)->sc_rxtailp = &(m)->m_next;				\
295 } while (/*CONSTCOND*/0)
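/*
 * sc_rxtailp always points at the location where the next mbuf should
 * be linked (the tail's m_next, or sc_rxhead when the chain is empty),
 * so appending a buffer is O(1) with no list walk.
 */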
296 
297 /* sc_type */
298 #define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
299 #define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
300 #define	WM_T_82543		2	/* i82543 */
301 #define	WM_T_82544		3	/* i82544 */
302 #define	WM_T_82540		4	/* i82540 */
303 #define	WM_T_82545		5	/* i82545 */
304 #define	WM_T_82546		6	/* i82546 */
305 
306 /* sc_flags */
307 #define	WM_F_HAS_MII		0x01	/* has MII */
308 #define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
309 
310 #ifdef WM_EVENT_COUNTERS
311 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
312 #else
313 #define	WM_EVCNT_INCR(ev)	/* nothing */
314 #endif
315 
316 #define	CSR_READ(sc, reg)						\
317 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
318 #define	CSR_WRITE(sc, reg, val)						\
319 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
320 
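/*
 * Bus (DMA) addresses of the Tx/Rx descriptors, suitable for loading
 * into the chip's descriptor base registers.
 */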
321 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
322 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
323 
324 #define	WM_CDTXSYNC(sc, x, n, ops)					\
325 do {									\
326 	int __x, __n;							\
327 									\
328 	__x = (x);							\
329 	__n = (n);							\
330 									\
331 	/* If it will wrap around, sync to the end of the ring. */	\
332 	if ((__x + __n) > WM_NTXDESC) {					\
333 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
334 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
335 		    (WM_NTXDESC - __x), (ops));				\
336 		__n -= (WM_NTXDESC - __x);				\
337 		__x = 0;						\
338 	}								\
339 									\
340 	/* Now sync whatever is left. */				\
341 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
342 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
343 } while (/*CONSTCOND*/0)
344 
345 #define	WM_CDRXSYNC(sc, x, ops)						\
346 do {									\
347 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
348 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
349 } while (/*CONSTCOND*/0)
350 
351 #define	WM_INIT_RXDESC(sc, x)						\
352 do {									\
353 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
354 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
355 	struct mbuf *__m = __rxs->rxs_mbuf;				\
356 									\
357 	/*								\
358 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
359 	 * so that the payload after the Ethernet header is aligned	\
360 	 * to a 4-byte boundary.					\
361 	 *								\
362 	 * XXX BRAINDAMAGE ALERT!					\
363 	 * The stupid chip uses the same size for every buffer, which	\
364 	 * is set in the Receive Control register.  We are using the 2K	\
365 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
366 	 * reason, we can't accept packets longer than the standard	\
367 	 * Ethernet MTU, without incurring a big penalty to copy every	\
368 	 * incoming packet to a new, suitably aligned buffer.		\
369 	 *								\
370 	 * We'll need to make some changes to the layer 3/4 parts of	\
371 	 * the stack (to copy the headers to a new buffer if not	\
372 	 * aligned) in order to support large MTU on this chip.  Lame.	\
373 	 */								\
374 	__m->m_data = __m->m_ext.ext_buf + 2;				\
375 									\
376 	__rxd->wrx_addr.wa_low =					\
377 	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
378 	__rxd->wrx_addr.wa_high = 0;					\
379 	__rxd->wrx_len = 0;						\
380 	__rxd->wrx_cksum = 0;						\
381 	__rxd->wrx_status = 0;						\
382 	__rxd->wrx_errors = 0;						\
383 	__rxd->wrx_special = 0;						\
384 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
385 									\
386 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
387 } while (/*CONSTCOND*/0)
388 
389 void	wm_start(struct ifnet *);
390 void	wm_watchdog(struct ifnet *);
391 int	wm_ioctl(struct ifnet *, u_long, caddr_t);
392 int	wm_init(struct ifnet *);
393 void	wm_stop(struct ifnet *, int);
394 
395 void	wm_shutdown(void *);
396 
397 void	wm_reset(struct wm_softc *);
398 void	wm_rxdrain(struct wm_softc *);
399 int	wm_add_rxbuf(struct wm_softc *, int);
400 void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
401 void	wm_tick(void *);
402 
403 void	wm_set_filter(struct wm_softc *);
404 
405 int	wm_intr(void *);
406 void	wm_txintr(struct wm_softc *);
407 void	wm_rxintr(struct wm_softc *);
408 void	wm_linkintr(struct wm_softc *, uint32_t);
409 
410 void	wm_tbi_mediainit(struct wm_softc *);
411 int	wm_tbi_mediachange(struct ifnet *);
412 void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
413 
414 void	wm_tbi_set_linkled(struct wm_softc *);
415 void	wm_tbi_check_link(struct wm_softc *);
416 
417 void	wm_gmii_reset(struct wm_softc *);
418 
419 int	wm_gmii_i82543_readreg(struct device *, int, int);
420 void	wm_gmii_i82543_writereg(struct device *, int, int, int);
421 
422 int	wm_gmii_i82544_readreg(struct device *, int, int);
423 void	wm_gmii_i82544_writereg(struct device *, int, int, int);
424 
425 void	wm_gmii_statchg(struct device *);
426 
427 void	wm_gmii_mediainit(struct wm_softc *);
428 int	wm_gmii_mediachange(struct ifnet *);
429 void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
430 
431 int	wm_match(struct device *, struct cfdata *, void *);
432 void	wm_attach(struct device *, struct device *, void *);
433 
434 int	wm_copy_small = 0;
435 
436 CFATTACH_DECL(wm, sizeof(struct wm_softc),
437     wm_match, wm_attach, NULL, NULL);
438 
439 /*
440  * Devices supported by this driver.
441  */
442 const struct wm_product {
443 	pci_vendor_id_t		wmp_vendor;
444 	pci_product_id_t	wmp_product;
445 	const char		*wmp_name;
446 	int			wmp_type;
447 	int			wmp_flags;
448 #define	WMP_F_1000X		0x01
449 #define	WMP_F_1000T		0x02
450 } wm_products[] = {
451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
452 	  "Intel i82542 1000BASE-X Ethernet",
453 	  WM_T_82542_2_1,	WMP_F_1000X },
454 
455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
456 	  "Intel i82543GC 1000BASE-X Ethernet",
457 	  WM_T_82543,		WMP_F_1000X },
458 
459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
460 	  "Intel i82543GC 1000BASE-T Ethernet",
461 	  WM_T_82543,		WMP_F_1000T },
462 
463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
464 	  "Intel i82544EI 1000BASE-T Ethernet",
465 	  WM_T_82544,		WMP_F_1000T },
466 
467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
468 	  "Intel i82544EI 1000BASE-X Ethernet",
469 	  WM_T_82544,		WMP_F_1000X },
470 
471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
472 	  "Intel i82544GC 1000BASE-T Ethernet",
473 	  WM_T_82544,		WMP_F_1000T },
474 
475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
476 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
477 	  WM_T_82544,		WMP_F_1000T },
478 
479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
480 	  "Intel i82540EM 1000BASE-T Ethernet",
481 	  WM_T_82540,		WMP_F_1000T },
482 
483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
484 	  "Intel i82545EM 1000BASE-T Ethernet",
485 	  WM_T_82545,		WMP_F_1000T },
486 
487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
488 	  "Intel i82546EB 1000BASE-T Ethernet",
489 	  WM_T_82546,		WMP_F_1000T },
490 
491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
492 	  "Intel i82545EM 1000BASE-X Ethernet",
493 	  WM_T_82545,		WMP_F_1000X },
494 
495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
496 	  "Intel i82546EB 1000BASE-X Ethernet",
497 	  WM_T_82546,		WMP_F_1000X },
498 
499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
500 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
501 	  WM_T_82540,		WMP_F_1000T },
502 
503 	{ 0,			0,
504 	  NULL,
505 	  0,			0 },
506 };
507 
508 #ifdef WM_EVENT_COUNTERS
509 #if WM_NTXSEGS != 16
510 #error Update wm_txseg_evcnt_names
511 #endif
512 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
513 	"txseg1",
514 	"txseg2",
515 	"txseg3",
516 	"txseg4",
517 	"txseg5",
518 	"txseg6",
519 	"txseg7",
520 	"txseg8",
521 	"txseg9",
522 	"txseg10",
523 	"txseg11",
524 	"txseg12",
525 	"txseg13",
526 	"txseg14",
527 	"txseg15",
528 	"txseg16",
529 };
530 #endif /* WM_EVENT_COUNTERS */
531 
532 static const struct wm_product *
533 wm_lookup(const struct pci_attach_args *pa)
534 {
535 	const struct wm_product *wmp;
536 
537 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
538 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
539 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
540 			return (wmp);
541 	}
542 	return (NULL);
543 }
544 
545 int
546 wm_match(struct device *parent, struct cfdata *cf, void *aux)
547 {
548 	struct pci_attach_args *pa = aux;
549 
550 	if (wm_lookup(pa) != NULL)
551 		return (1);
552 
553 	return (0);
554 }
555 
556 void
557 wm_attach(struct device *parent, struct device *self, void *aux)
558 {
559 	struct wm_softc *sc = (void *) self;
560 	struct pci_attach_args *pa = aux;
561 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
562 	pci_chipset_tag_t pc = pa->pa_pc;
563 	pci_intr_handle_t ih;
564 	const char *intrstr = NULL;
565 	bus_space_tag_t memt;
566 	bus_space_handle_t memh;
567 	bus_dma_segment_t seg;
568 	int memh_valid;
569 	int i, rseg, error;
570 	const struct wm_product *wmp;
571 	uint8_t enaddr[ETHER_ADDR_LEN];
572 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
573 	pcireg_t preg, memtype;
574 	int pmreg;
575 
576 	callout_init(&sc->sc_tick_ch);
577 
578 	wmp = wm_lookup(pa);
579 	if (wmp == NULL) {
580 		printf("\n");
581 		panic("wm_attach: impossible");
582 	}
583 
584 	sc->sc_dmat = pa->pa_dmat;
585 
586 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
587 	printf(": %s, rev. %d\n", wmp->wmp_name, preg);
588 
589 	sc->sc_type = wmp->wmp_type;
590 	if (sc->sc_type < WM_T_82543) {
591 		if (preg < 2) {
592 			printf("%s: i82542 must be at least rev. 2\n",
593 			    sc->sc_dev.dv_xname);
594 			return;
595 		}
596 		if (preg < 3)
597 			sc->sc_type = WM_T_82542_2_0;
598 	}
599 
600 	/*
601 	 * Some chips require a handshake to access the EEPROM.
602 	 */
603 	if (sc->sc_type >= WM_T_82540)
604 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
605 
606 	/*
607 	 * Map the device.
608 	 */
609 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
610 	switch (memtype) {
611 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
612 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
613 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
614 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
615 		break;
616 	default:
617 		memh_valid = 0;
618 	}
619 
620 	if (memh_valid) {
621 		sc->sc_st = memt;
622 		sc->sc_sh = memh;
623 	} else {
624 		printf("%s: unable to map device registers\n",
625 		    sc->sc_dev.dv_xname);
626 		return;
627 	}
628 
629 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
630 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
631 	preg |= PCI_COMMAND_MASTER_ENABLE;
632 	if (sc->sc_type < WM_T_82542_2_1)
633 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
634 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
635 
636 	/* Get it out of power save mode, if needed. */
637 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
638 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
639 		if (preg == 3) {
640 			/*
641 			 * The card has lost all configuration data in
642 			 * this state, so punt.
643 			 */
644 			printf("%s: unable to wake from power state D3\n",
645 			    sc->sc_dev.dv_xname);
646 			return;
647 		}
648 		if (preg != 0) {
649 			printf("%s: waking up from power state D%d\n",
650 			    sc->sc_dev.dv_xname, preg);
651 			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
652 		}
653 	}
654 
655 	/*
656 	 * Map and establish our interrupt.
657 	 */
658 	if (pci_intr_map(pa, &ih)) {
659 		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
660 		return;
661 	}
662 	intrstr = pci_intr_string(pc, ih);
663 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
664 	if (sc->sc_ih == NULL) {
665 		printf("%s: unable to establish interrupt",
666 		    sc->sc_dev.dv_xname);
667 		if (intrstr != NULL)
668 			printf(" at %s", intrstr);
669 		printf("\n");
670 		return;
671 	}
672 	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
673 
674 	/*
675 	 * Allocate the control data structures, and create and load the
676 	 * DMA map for it.
677 	 */
678 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
679 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
680 	    0)) != 0) {
681 		printf("%s: unable to allocate control data, error = %d\n",
682 		    sc->sc_dev.dv_xname, error);
683 		goto fail_0;
684 	}
685 
686 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
687 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
688 	    0)) != 0) {
689 		printf("%s: unable to map control data, error = %d\n",
690 		    sc->sc_dev.dv_xname, error);
691 		goto fail_1;
692 	}
693 
694 	if ((error = bus_dmamap_create(sc->sc_dmat,
695 	    sizeof(struct wm_control_data), 1,
696 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
697 		printf("%s: unable to create control data DMA map, "
698 		    "error = %d\n", sc->sc_dev.dv_xname, error);
699 		goto fail_2;
700 	}
701 
702 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
703 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
704 	    0)) != 0) {
705 		printf("%s: unable to load control data DMA map, error = %d\n",
706 		    sc->sc_dev.dv_xname, error);
707 		goto fail_3;
708 	}
709 
710 	/*
711 	 * Create the transmit buffer DMA maps.
712 	 */
713 	for (i = 0; i < WM_TXQUEUELEN; i++) {
714 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
715 		    WM_NTXSEGS, MCLBYTES, 0, 0,
716 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
717 			printf("%s: unable to create Tx DMA map %d, "
718 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
719 			goto fail_4;
720 		}
721 	}
722 
723 	/*
724 	 * Create the receive buffer DMA maps.
725 	 */
726 	for (i = 0; i < WM_NRXDESC; i++) {
727 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
728 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
729 			printf("%s: unable to create Rx DMA map %d, "
730 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
731 			goto fail_5;
732 		}
733 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
734 	}
735 
736 	/*
737 	 * Reset the chip to a known state.
738 	 */
739 	wm_reset(sc);
740 
741 	/*
742 	 * Read the Ethernet address from the EEPROM.
743 	 */
744 	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
745 	    sizeof(myea) / sizeof(myea[0]), myea);
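	/* Each 16-bit EEPROM word holds two address bytes, low byte first. */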
746 	enaddr[0] = myea[0] & 0xff;
747 	enaddr[1] = myea[0] >> 8;
748 	enaddr[2] = myea[1] & 0xff;
749 	enaddr[3] = myea[1] >> 8;
750 	enaddr[4] = myea[2] & 0xff;
751 	enaddr[5] = myea[2] >> 8;
752 
753 	/*
754 	 * Toggle the LSB of the MAC address on the second port
755 	 * of the i82546.
756 	 */
757 	if (sc->sc_type == WM_T_82546) {
758 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
759 			enaddr[5] ^= 1;
760 	}
761 
762 	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
763 	    ether_sprintf(enaddr));
764 
765 	/*
766 	 * Read the config info from the EEPROM, and set up various
767 	 * bits in the control registers based on their contents.
768 	 */
769 	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
770 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
771 	if (sc->sc_type >= WM_T_82544)
772 		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
773 
774 	if (cfg1 & EEPROM_CFG1_ILOS)
775 		sc->sc_ctrl |= CTRL_ILOS;
776 	if (sc->sc_type >= WM_T_82544) {
777 		sc->sc_ctrl |=
778 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
779 		    CTRL_SWDPIO_SHIFT;
780 		sc->sc_ctrl |=
781 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
782 		    CTRL_SWDPINS_SHIFT;
783 	} else {
784 		sc->sc_ctrl |=
785 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
786 		    CTRL_SWDPIO_SHIFT;
787 	}
788 
789 #if 0
790 	if (sc->sc_type >= WM_T_82544) {
791 		if (cfg1 & EEPROM_CFG1_IPS0)
792 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
793 		if (cfg1 & EEPROM_CFG1_IPS1)
794 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
795 		sc->sc_ctrl_ext |=
796 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
797 		    CTRL_EXT_SWDPIO_SHIFT;
798 		sc->sc_ctrl_ext |=
799 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
800 		    CTRL_EXT_SWDPINS_SHIFT;
801 	} else {
802 		sc->sc_ctrl_ext |=
803 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
804 		    CTRL_EXT_SWDPIO_SHIFT;
805 	}
806 #endif
807 
808 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
809 #if 0
810 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
811 #endif
812 
813 	/*
814 	 * Set up some register offsets that are different between
815 	 * the i82542 and the i82543 and later chips.
816 	 */
817 	if (sc->sc_type < WM_T_82543) {
818 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
819 		sc->sc_tdt_reg = WMREG_OLD_TDT;
820 	} else {
821 		sc->sc_rdt_reg = WMREG_RDT;
822 		sc->sc_tdt_reg = WMREG_TDT;
823 	}
824 
825 	/*
826 	 * Determine if we should use flow control.  We should
827 	 * always use it, unless we're on an i82542 < 2.1.
828 	 */
829 	if (sc->sc_type >= WM_T_82542_2_1)
830 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
831 
832 	/*
833 	 * Determine whether we're in TBI or GMII mode, and initialize the
834 	 * media structures accordingly.
835 	 */
836 	if (sc->sc_type < WM_T_82543 ||
837 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
838 		if (wmp->wmp_flags & WMP_F_1000T)
839 			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
840 			    "product!\n", sc->sc_dev.dv_xname);
841 		wm_tbi_mediainit(sc);
842 	} else {
843 		if (wmp->wmp_flags & WMP_F_1000X)
844 			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
845 			    "product!\n", sc->sc_dev.dv_xname);
846 		wm_gmii_mediainit(sc);
847 	}
848 
849 	ifp = &sc->sc_ethercom.ec_if;
850 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
851 	ifp->if_softc = sc;
852 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
853 	ifp->if_ioctl = wm_ioctl;
854 	ifp->if_start = wm_start;
855 	ifp->if_watchdog = wm_watchdog;
856 	ifp->if_init = wm_init;
857 	ifp->if_stop = wm_stop;
858 	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
859 	IFQ_SET_READY(&ifp->if_snd);
860 
861 	/*
862 	 * If we're an i82543 or greater, we can support VLANs.
863 	 */
864 	if (sc->sc_type >= WM_T_82543)
865 		sc->sc_ethercom.ec_capabilities |=
866 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
867 
868 	/*
869 	 * We can perform IPv4, TCPv4, and UDPv4 checksums in-bound,
870 	 * but only on the i82543 and later.
871 	 */
872 	if (sc->sc_type >= WM_T_82543)
873 		ifp->if_capabilities |=
874 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
875 
876 	/*
877 	 * Attach the interface.
878 	 */
879 	if_attach(ifp);
880 	ether_ifattach(ifp, enaddr);
881 #if NRND > 0
882 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
883 	    RND_TYPE_NET, 0);
884 #endif
885 
886 #ifdef WM_EVENT_COUNTERS
887 	/* Attach event counters. */
888 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
889 	    NULL, sc->sc_dev.dv_xname, "txsstall");
890 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
891 	    NULL, sc->sc_dev.dv_xname, "txdstall");
892 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
893 	    NULL, sc->sc_dev.dv_xname, "txforceintr");
894 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
895 	    NULL, sc->sc_dev.dv_xname, "txdw");
896 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
897 	    NULL, sc->sc_dev.dv_xname, "txqe");
898 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
899 	    NULL, sc->sc_dev.dv_xname, "rxintr");
900 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
901 	    NULL, sc->sc_dev.dv_xname, "linkintr");
902 
903 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
904 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
905 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
906 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
907 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
908 	    NULL, sc->sc_dev.dv_xname, "txipsum");
909 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
910 	    NULL, sc->sc_dev.dv_xname, "txtusum");
911 
912 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
913 	    NULL, sc->sc_dev.dv_xname, "txctx init");
914 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
915 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
916 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
917 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
918 
919 	for (i = 0; i < WM_NTXSEGS; i++)
920 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
921 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
922 
923 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
924 	    NULL, sc->sc_dev.dv_xname, "txdrop");
925 
926 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
927 	    NULL, sc->sc_dev.dv_xname, "tu");
928 #endif /* WM_EVENT_COUNTERS */
929 
930 	/*
931 	 * Make sure the interface is shutdown during reboot.
932 	 */
933 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
934 	if (sc->sc_sdhook == NULL)
935 		printf("%s: WARNING: unable to establish shutdown hook\n",
936 		    sc->sc_dev.dv_xname);
937 	return;
938 
939 	/*
940 	 * Free any resources we've allocated during the failed attach
941 	 * attempt.  Do this in reverse order and fall through.
942 	 */
943  fail_5:
944 	for (i = 0; i < WM_NRXDESC; i++) {
945 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
946 			bus_dmamap_destroy(sc->sc_dmat,
947 			    sc->sc_rxsoft[i].rxs_dmamap);
948 	}
949  fail_4:
950 	for (i = 0; i < WM_TXQUEUELEN; i++) {
951 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
952 			bus_dmamap_destroy(sc->sc_dmat,
953 			    sc->sc_txsoft[i].txs_dmamap);
954 	}
955 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
956  fail_3:
957 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
958  fail_2:
959 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
960 	    sizeof(struct wm_control_data));
961  fail_1:
962 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
963  fail_0:
964 	return;
965 }
966 
967 /*
968  * wm_shutdown:
969  *
970  *	Make sure the interface is stopped at reboot time.
971  */
972 void
973 wm_shutdown(void *arg)
974 {
975 	struct wm_softc *sc = arg;
976 
977 	wm_stop(&sc->sc_ethercom.ec_if, 1);
978 }
979 
980 /*
981  * wm_tx_cksum:
982  *
983  *	Set up TCP/IP checksumming parameters for the
984  *	specified packet.
985  */
986 static int
987 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
988     uint32_t *fieldsp)
989 {
990 	struct mbuf *m0 = txs->txs_mbuf;
991 	struct livengood_tcpip_ctxdesc *t;
992 	uint32_t fields = 0, ipcs, tucs;
993 	struct ip *ip;
994 	struct ether_header *eh;
995 	int offset, iphl;
996 
997 	/*
998 	 * XXX It would be nice if the mbuf pkthdr had offset
999 	 * fields for the protocol headers.
1000 	 */
1001 
1002 	eh = mtod(m0, struct ether_header *);
1003 	switch (htons(eh->ether_type)) {
1004 	case ETHERTYPE_IP:
1005 		iphl = sizeof(struct ip);
1006 		offset = ETHER_HDR_LEN;
1007 		break;
1008 
1009 	default:
1010 		/*
1011 		 * We don't support this protocol or encapsulation.
1012 		 */
1013 		*fieldsp = 0;
1014 		*cmdp = 0;
1015 		return (0);
1016 	}
1017 
1018 	/* XXX */
1019 	if (m0->m_len < (offset + iphl)) {
1020 		printf("%s: wm_tx_cksum: need to m_pullup, "
1021 		    "packet dropped\n", sc->sc_dev.dv_xname);
1022 		return (EINVAL);
1023 	}
1024 
1025 	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
1026 	iphl = ip->ip_hl << 2;
1027 
1028 	/*
1029 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1030 	 * offload feature, if we load the context descriptor, we
1031 	 * MUST provide valid values for IPCSS and TUCSS fields.
1032 	 */
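	/*
	 * The chip retains the offsets from the most recently loaded
	 * context descriptor, so we cache the last IPCS/TUCS values
	 * written (sc_txctx_*; 0xffffffff means "no cached context",
	 * set in wm_init()) and skip re-loading a matching context.
	 */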
1033 
1034 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1035 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1036 		fields |= htole32(WTX_IXSM);
1037 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1038 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1039 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
1040 	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1041 		/* Use the cached value. */
1042 		ipcs = sc->sc_txctx_ipcs;
1043 	} else {
1044 		/* Just initialize it to the likely value anyway. */
1045 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1046 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1047 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
1048 	}
1049 
1050 	offset += iphl;
1051 
1052 	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1053 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1054 		fields |= htole32(WTX_TXSM);
1055 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1056 		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
1057 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
1058 	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1059 		/* Use the cached value. */
1060 		tucs = sc->sc_txctx_tucs;
1061 	} else {
1062 		/* Just initialize it to a valid TCP context. */
1063 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1064 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1065 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
1066 	}
1067 
1068 	if (sc->sc_txctx_ipcs == ipcs &&
1069 	    sc->sc_txctx_tucs == tucs) {
1070 		/* Cached context is fine. */
1071 		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1072 	} else {
1073 		/* Fill in the context descriptor. */
1074 #ifdef WM_EVENT_COUNTERS
1075 		if (sc->sc_txctx_ipcs == 0xffffffff &&
1076 		    sc->sc_txctx_tucs == 0xffffffff)
1077 			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
1078 		else
1079 			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1080 #endif
1081 		t = (struct livengood_tcpip_ctxdesc *)
1082 		    &sc->sc_txdescs[sc->sc_txnext];
1083 		t->tcpip_ipcs = ipcs;
1084 		t->tcpip_tucs = tucs;
1085 		t->tcpip_cmdlen =
1086 		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1087 		t->tcpip_seg = 0;
1088 		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1089 
1090 		sc->sc_txctx_ipcs = ipcs;
1091 		sc->sc_txctx_tucs = tucs;
1092 
1093 		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1094 		txs->txs_ndesc++;
1095 	}
1096 
1097 	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1098 	*fieldsp = fields;
1099 
1100 	return (0);
1101 }
1102 
1103 /*
1104  * wm_start:		[ifnet interface function]
1105  *
1106  *	Start packet transmission on the interface.
1107  */
1108 void
1109 wm_start(struct ifnet *ifp)
1110 {
1111 	struct wm_softc *sc = ifp->if_softc;
1112 	struct mbuf *m0/*, *m*/;
1113 	struct wm_txsoft *txs;
1114 	bus_dmamap_t dmamap;
1115 	int error, nexttx, lasttx, ofree, seg;
1116 	uint32_t cksumcmd, cksumfields;
1117 
1118 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1119 		return;
1120 
1121 	/*
1122 	 * Remember the previous number of free descriptors.
1123 	 */
1124 	ofree = sc->sc_txfree;
1125 
1126 	/*
1127 	 * Loop through the send queue, setting up transmit descriptors
1128 	 * until we drain the queue, or use up all available transmit
1129 	 * descriptors.
1130 	 */
1131 	for (;;) {
1132 		/* Grab a packet off the queue. */
1133 		IFQ_POLL(&ifp->if_snd, m0);
1134 		if (m0 == NULL)
1135 			break;
1136 
1137 		DPRINTF(WM_DEBUG_TX,
1138 		    ("%s: TX: have packet to transmit: %p\n",
1139 		    sc->sc_dev.dv_xname, m0));
1140 
1141 		/* Get a work queue entry. */
1142 		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
1143 			wm_txintr(sc);
1144 			if (sc->sc_txsfree == 0) {
1145 				DPRINTF(WM_DEBUG_TX,
1146 				    ("%s: TX: no free job descriptors\n",
1147 					sc->sc_dev.dv_xname));
1148 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1149 				break;
1150 			}
1151 		}
1152 
1153 		txs = &sc->sc_txsoft[sc->sc_txsnext];
1154 		dmamap = txs->txs_dmamap;
1155 
1156 		/*
1157 		 * Load the DMA map.  If this fails, the packet either
1158 		 * didn't fit in the allotted number of segments, or we
1159 		 * were short on resources.  For the too-many-segments
1160 		 * case, we simply report an error and drop the packet,
1161 		 * since we can't sanely copy a jumbo packet to a single
1162 		 * buffer.
1163 		 */
1164 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1165 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1166 		if (error) {
1167 			if (error == EFBIG) {
1168 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1169 				printf("%s: Tx packet consumes too many "
1170 				    "DMA segments, dropping...\n",
1171 				    sc->sc_dev.dv_xname);
1172 				IFQ_DEQUEUE(&ifp->if_snd, m0);
1173 				m_freem(m0);
1174 				continue;
1175 			}
1176 			/*
1177 			 * Short on resources, just stop for now.
1178 			 */
1179 			DPRINTF(WM_DEBUG_TX,
1180 			    ("%s: TX: dmamap load failed: %d\n",
1181 			    sc->sc_dev.dv_xname, error));
1182 			break;
1183 		}
1184 
1185 		/*
1186 		 * Ensure we have enough descriptors free to describe
1187 		 * the packet.  Note, we always reserve one descriptor
1188 		 * at the end of the ring due to the semantics of the
1189 		 * TDT register, plus one more in the event we need
1190 		 * to re-load checksum offload context.
1191 		 */
1192 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1193 			/*
1194 			 * Not enough free descriptors to transmit this
1195 			 * packet.  We haven't committed anything yet,
1196 			 * so just unload the DMA map, put the packet
1197 			 * back on the queue, and punt.  Notify the upper
1198 			 * layer that there are no more slots left.
1199 			 */
1200 			DPRINTF(WM_DEBUG_TX,
1201 			    ("%s: TX: need %d descriptors, have %d\n",
1202 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1203 			    sc->sc_txfree - 1));
1204 			ifp->if_flags |= IFF_OACTIVE;
1205 			bus_dmamap_unload(sc->sc_dmat, dmamap);
1206 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1207 			break;
1208 		}
1209 
1210 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1211 
1212 		/*
1213 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1214 		 */
1215 
1216 		/* Sync the DMA map. */
1217 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1218 		    BUS_DMASYNC_PREWRITE);
1219 
1220 		DPRINTF(WM_DEBUG_TX,
1221 		    ("%s: TX: packet has %d DMA segments\n",
1222 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1223 
1224 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1225 
1226 		/*
1227 		 * Store a pointer to the packet so that we can free it
1228 		 * later.
1229 		 *
1230 		 * Initially, we consider the number of descriptors the
1231 		 * packet uses to be the number of DMA segments.  This may be
1232 		 * incremented by 1 if we do checksum offload (a descriptor
1233 		 * is used to set the checksum context).
1234 		 */
1235 		txs->txs_mbuf = m0;
1236 		txs->txs_firstdesc = sc->sc_txnext;
1237 		txs->txs_ndesc = dmamap->dm_nsegs;
1238 
1239 		/*
1240 		 * Set up checksum offload parameters for
1241 		 * this packet.
1242 		 */
1243 		if (m0->m_pkthdr.csum_flags &
1244 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1245 			if (wm_tx_cksum(sc, txs, &cksumcmd,
1246 					&cksumfields) != 0) {
1247 				/* Error message already displayed. */
1248 				m_freem(m0);
1249 				bus_dmamap_unload(sc->sc_dmat, dmamap);
1250 				txs->txs_mbuf = NULL;
1251 				continue;
1252 			}
1253 		} else {
1254 			cksumcmd = 0;
1255 			cksumfields = 0;
1256 		}
1257 
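		/*
		 * WTX_CMD_IDE requests a delayed Tx interrupt for this
		 * packet, letting completions coalesce.
		 */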
1258 		cksumcmd |= htole32(WTX_CMD_IDE);
1259 
1260 		/*
1261 		 * Initialize the transmit descriptor.
1262 		 */
1263 		for (nexttx = sc->sc_txnext, seg = 0;
1264 		     seg < dmamap->dm_nsegs;
1265 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
1266 			/*
1267 			 * Note: we currently only use 32-bit DMA
1268 			 * addresses.
1269 			 */
1270 			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
1271 			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1272 			    htole32(dmamap->dm_segs[seg].ds_addr);
1273 			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1274 			    htole32(dmamap->dm_segs[seg].ds_len);
1275 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1276 			    cksumfields;
1277 			lasttx = nexttx;
1278 
1279 			DPRINTF(WM_DEBUG_TX,
1280 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1281 			    sc->sc_dev.dv_xname, nexttx,
1282 			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
1283 			    (uint32_t) dmamap->dm_segs[seg].ds_len));
1284 		}
1285 
1286 		/*
1287 		 * Set up the command byte on the last descriptor of
1288 		 * the packet.  If we're in the interrupt delay window,
1289 		 * delay the interrupt.
1290 		 */
1291 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
1292 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1293 
1294 #if 0 /* XXXJRT */
1295 		/*
1296 		 * If VLANs are enabled and the packet has a VLAN tag, set
1297 		 * up the descriptor to encapsulate the packet for us.
1298 		 *
1299 		 * This is only valid on the last descriptor of the packet.
1300 		 */
1301 		if (sc->sc_ethercom.ec_nvlans != 0 &&
1302 		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1303 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
1304 			    htole32(WTX_CMD_VLE);
1305 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1306 			    = htole16(*mtod(m, int *) & 0xffff);
1307 		}
1308 #endif /* XXXJRT */
1309 
1310 		txs->txs_lastdesc = lasttx;
1311 
1312 		DPRINTF(WM_DEBUG_TX,
1313 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1314 		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1315 
1316 		/* Sync the descriptors we're using. */
1317 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1318 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1319 
1320 		/* Give the packet to the chip. */
1321 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1322 
1323 		DPRINTF(WM_DEBUG_TX,
1324 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1325 
1326 		DPRINTF(WM_DEBUG_TX,
1327 		    ("%s: TX: finished transmitting packet, job %d\n",
1328 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
1329 
1330 		/* Advance the tx pointer. */
1331 		sc->sc_txfree -= txs->txs_ndesc;
1332 		sc->sc_txnext = nexttx;
1333 
1334 		sc->sc_txsfree--;
1335 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1336 
1337 #if NBPFILTER > 0
1338 		/* Pass the packet to any BPF listeners. */
1339 		if (ifp->if_bpf)
1340 			bpf_mtap(ifp->if_bpf, m0);
1341 #endif /* NBPFILTER > 0 */
1342 	}
1343 
1344 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1345 		/* No more slots; notify upper layer. */
1346 		ifp->if_flags |= IFF_OACTIVE;
1347 	}
1348 
1349 	if (sc->sc_txfree != ofree) {
1350 		/* Set a watchdog timer in case the chip flakes out. */
1351 		ifp->if_timer = 5;
1352 	}
1353 }
1354 
1355 /*
1356  * wm_watchdog:		[ifnet interface function]
1357  *
1358  *	Watchdog timer handler.
1359  */
1360 void
1361 wm_watchdog(struct ifnet *ifp)
1362 {
1363 	struct wm_softc *sc = ifp->if_softc;
1364 
1365 	/*
1366 	 * Since we're using delayed interrupts, sweep up
1367 	 * before we report an error.
1368 	 */
1369 	wm_txintr(sc);
1370 
1371 	if (sc->sc_txfree != WM_NTXDESC) {
1372 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1373 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1374 		    sc->sc_txnext);
1375 		ifp->if_oerrors++;
1376 
1377 		/* Reset the interface. */
1378 		(void) wm_init(ifp);
1379 	}
1380 
1381 	/* Try to get more packets going. */
1382 	wm_start(ifp);
1383 }
1384 
1385 /*
1386  * wm_ioctl:		[ifnet interface function]
1387  *
1388  *	Handle control requests from the operator.
1389  */
1390 int
1391 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1392 {
1393 	struct wm_softc *sc = ifp->if_softc;
1394 	struct ifreq *ifr = (struct ifreq *) data;
1395 	int s, error;
1396 
1397 	s = splnet();
1398 
1399 	switch (cmd) {
1400 	case SIOCSIFMEDIA:
1401 	case SIOCGIFMEDIA:
1402 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1403 		break;
1404 
1405 	default:
1406 		error = ether_ioctl(ifp, cmd, data);
1407 		if (error == ENETRESET) {
1408 			/*
1409 			 * Multicast list has changed; set the hardware filter
1410 			 * accordingly.
1411 			 */
1412 			wm_set_filter(sc);
1413 			error = 0;
1414 		}
1415 		break;
1416 	}
1417 
1418 	/* Try to get more packets going. */
1419 	wm_start(ifp);
1420 
1421 	splx(s);
1422 	return (error);
1423 }
1424 
1425 /*
1426  * wm_intr:
1427  *
1428  *	Interrupt service routine.
1429  */
1430 int
1431 wm_intr(void *arg)
1432 {
1433 	struct wm_softc *sc = arg;
1434 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1435 	uint32_t icr;
1436 	int wantinit, handled = 0;
1437 
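	/*
	 * Reading ICR acknowledges (clears) the pending interrupt
	 * causes, so loop until no cause we care about remains, or
	 * until an Rx overrun forces a full reinitialization.
	 */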
1438 	for (wantinit = 0; wantinit == 0;) {
1439 		icr = CSR_READ(sc, WMREG_ICR);
1440 		if ((icr & sc->sc_icr) == 0)
1441 			break;
1442 
1443 #if 0 /*NRND > 0*/
1444 		if (RND_ENABLED(&sc->rnd_source))
1445 			rnd_add_uint32(&sc->rnd_source, icr);
1446 #endif
1447 
1448 		handled = 1;
1449 
1450 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1451 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1452 			DPRINTF(WM_DEBUG_RX,
1453 			    ("%s: RX: got Rx intr 0x%08x\n",
1454 			    sc->sc_dev.dv_xname,
1455 			    icr & (ICR_RXDMT0|ICR_RXT0)));
1456 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1457 		}
1458 #endif
1459 		wm_rxintr(sc);
1460 
1461 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1462 		if (icr & ICR_TXDW) {
1463 			DPRINTF(WM_DEBUG_TX,
1464 			    ("%s: TX: got TXDW interrupt\n",
1465 			    sc->sc_dev.dv_xname));
1466 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
1467 		}
1468 #endif
1469 		wm_txintr(sc);
1470 
1471 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1472 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1473 			wm_linkintr(sc, icr);
1474 		}
1475 
1476 		if (icr & ICR_RXO) {
1477 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1478 			wantinit = 1;
1479 		}
1480 	}
1481 
1482 	if (handled) {
1483 		if (wantinit)
1484 			wm_init(ifp);
1485 
1486 		/* Try to get more packets going. */
1487 		wm_start(ifp);
1488 	}
1489 
1490 	return (handled);
1491 }
1492 
1493 /*
1494  * wm_txintr:
1495  *
1496  *	Helper; handle transmit interrupts.
1497  */
1498 void
1499 wm_txintr(struct wm_softc *sc)
1500 {
1501 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1502 	struct wm_txsoft *txs;
1503 	uint8_t status;
1504 	int i;
1505 
1506 	ifp->if_flags &= ~IFF_OACTIVE;
1507 
1508 	/*
1509 	 * Go through the Tx list and free mbufs for those
1510 	 * frames which have been transmitted.
1511 	 */
1512 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1513 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1514 		txs = &sc->sc_txsoft[i];
1515 
1516 		DPRINTF(WM_DEBUG_TX,
1517 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1518 
1519 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1520 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1521 
1522 		status = le32toh(sc->sc_txdescs[
1523 		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
1524 		if ((status & WTX_ST_DD) == 0) {
1525 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1526 			    BUS_DMASYNC_PREREAD);
1527 			break;
1528 		}
1529 
1530 		DPRINTF(WM_DEBUG_TX,
1531 		    ("%s: TX: job %d done: descs %d..%d\n",
1532 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1533 		    txs->txs_lastdesc));
1534 
1535 		/*
1536 		 * XXX We should probably be using the statistics
1537 		 * XXX registers, but I don't know if they exist
1538 		 * XXX on chips before the i82544.
1539 		 */
1540 
1541 #ifdef WM_EVENT_COUNTERS
1542 		if (status & WTX_ST_TU)
1543 			WM_EVCNT_INCR(&sc->sc_ev_tu);
1544 #endif /* WM_EVENT_COUNTERS */
1545 
1546 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
1547 			ifp->if_oerrors++;
1548 			if (status & WTX_ST_LC)
1549 				printf("%s: late collision\n",
1550 				    sc->sc_dev.dv_xname);
1551 			else if (status & WTX_ST_EC) {
1552 				ifp->if_collisions += 16;
1553 				printf("%s: excessive collisions\n",
1554 				    sc->sc_dev.dv_xname);
1555 			}
1556 		} else
1557 			ifp->if_opackets++;
1558 
1559 		sc->sc_txfree += txs->txs_ndesc;
1560 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1561 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1562 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1563 		m_freem(txs->txs_mbuf);
1564 		txs->txs_mbuf = NULL;
1565 	}
1566 
1567 	/* Update the dirty transmit buffer pointer. */
1568 	sc->sc_txsdirty = i;
1569 	DPRINTF(WM_DEBUG_TX,
1570 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1571 
1572 	/*
1573 	 * If there are no more pending transmissions, cancel the watchdog
1574 	 * timer.
1575 	 */
1576 	if (sc->sc_txsfree == WM_TXQUEUELEN)
1577 		ifp->if_timer = 0;
1578 }
1579 
1580 /*
1581  * wm_rxintr:
1582  *
1583  *	Helper; handle receive interrupts.
1584  */
1585 void
1586 wm_rxintr(struct wm_softc *sc)
1587 {
1588 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1589 	struct wm_rxsoft *rxs;
1590 	struct mbuf *m;
1591 	int i, len;
1592 	uint8_t status, errors;
1593 
1594 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1595 		rxs = &sc->sc_rxsoft[i];
1596 
1597 		DPRINTF(WM_DEBUG_RX,
1598 		    ("%s: RX: checking descriptor %d\n",
1599 		    sc->sc_dev.dv_xname, i));
1600 
1601 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1602 
1603 		status = sc->sc_rxdescs[i].wrx_status;
1604 		errors = sc->sc_rxdescs[i].wrx_errors;
1605 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
1606 
1607 		if ((status & WRX_ST_DD) == 0) {
1608 			/*
1609 			 * We have processed all of the receive descriptors.
1610 			 */
1611 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1612 			break;
1613 		}
1614 
1615 		if (__predict_false(sc->sc_rxdiscard)) {
1616 			DPRINTF(WM_DEBUG_RX,
1617 			    ("%s: RX: discarding contents of descriptor %d\n",
1618 			    sc->sc_dev.dv_xname, i));
1619 			WM_INIT_RXDESC(sc, i);
1620 			if (status & WRX_ST_EOP) {
1621 				/* Reset our state. */
1622 				DPRINTF(WM_DEBUG_RX,
1623 				    ("%s: RX: resetting rxdiscard -> 0\n",
1624 				    sc->sc_dev.dv_xname));
1625 				sc->sc_rxdiscard = 0;
1626 			}
1627 			continue;
1628 		}
1629 
1630 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1631 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1632 
1633 		m = rxs->rxs_mbuf;
1634 
1635 		/*
1636 		 * Add a new receive buffer to the ring.
1637 		 */
1638 		if (wm_add_rxbuf(sc, i) != 0) {
1639 			/*
1640 			 * Failed, throw away what we've done so
1641 			 * far, and discard the rest of the packet.
1642 			 */
1643 			ifp->if_ierrors++;
1644 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1645 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1646 			WM_INIT_RXDESC(sc, i);
1647 			if ((status & WRX_ST_EOP) == 0)
1648 				sc->sc_rxdiscard = 1;
1649 			if (sc->sc_rxhead != NULL)
1650 				m_freem(sc->sc_rxhead);
1651 			WM_RXCHAIN_RESET(sc);
1652 			DPRINTF(WM_DEBUG_RX,
1653 			    ("%s: RX: Rx buffer allocation failed, "
1654 			    "dropping packet%s\n", sc->sc_dev.dv_xname,
1655 			    sc->sc_rxdiscard ? " (discard)" : ""));
1656 			continue;
1657 		}
1658 
1659 		WM_RXCHAIN_LINK(sc, m);
1660 
1661 		m->m_len = len;
1662 
1663 		DPRINTF(WM_DEBUG_RX,
1664 		    ("%s: RX: buffer at %p len %d\n",
1665 		    sc->sc_dev.dv_xname, m->m_data, len));
1666 
1667 		/*
1668 		 * If this is not the end of the packet, keep
1669 		 * looking.
1670 		 */
1671 		if ((status & WRX_ST_EOP) == 0) {
1672 			sc->sc_rxlen += len;
1673 			DPRINTF(WM_DEBUG_RX,
1674 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
1675 			    sc->sc_dev.dv_xname, sc->sc_rxlen));
1676 			continue;
1677 		}
1678 
1679 		/*
1680 		 * Okay, we have the entire packet now...
1681 		 */
1682 		*sc->sc_rxtailp = NULL;
1683 		m = sc->sc_rxhead;
1684 		len += sc->sc_rxlen;
1685 
1686 		WM_RXCHAIN_RESET(sc);
1687 
1688 		DPRINTF(WM_DEBUG_RX,
1689 		    ("%s: RX: have entire packet, len -> %d\n",
1690 		    sc->sc_dev.dv_xname, len));
1691 
1692 		/*
1693 		 * If an error occurred, update stats and drop the packet.
1694 		 */
1695 		if (errors &
1696 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1697 			ifp->if_ierrors++;
1698 			if (errors & WRX_ER_SE)
1699 				printf("%s: symbol error\n",
1700 				    sc->sc_dev.dv_xname);
1701 			else if (errors & WRX_ER_SEQ)
1702 				printf("%s: receive sequence error\n",
1703 				    sc->sc_dev.dv_xname);
1704 			else if (errors & WRX_ER_CE)
1705 				printf("%s: CRC error\n",
1706 				    sc->sc_dev.dv_xname);
1707 			m_freem(m);
1708 			continue;
1709 		}
1710 
1711 		/*
1712 		 * No errors.  Receive the packet.
1713 		 *
1714 		 * Note, we have configured the chip to include the
1715 		 * CRC with every packet.
1716 		 */
1717 		m->m_flags |= M_HASFCS;
1718 		m->m_pkthdr.rcvif = ifp;
1719 		m->m_pkthdr.len = len;
1720 
1721 #if 0 /* XXXJRT */
1722 		/*
1723 		 * If VLANs are enabled, VLAN packets have been unwrapped
1724 		 * for us.  Associate the tag with the packet.
1725 		 */
1726 		if (sc->sc_ethercom.ec_nvlans != 0 &&
1727 		    (status & WRX_ST_VP) != 0) {
1728 			struct mbuf *vtag;
1729 
1730 			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1731 			if (vtag == NULL) {
1732 				ifp->if_ierrors++;
1733 				printf("%s: unable to allocate VLAN tag\n",
1734 				    sc->sc_dev.dv_xname);
1735 				m_freem(m);
1736 				continue;
1737 			}
1738 
1739 			*mtod(m, int *) =
1740 			    le16toh(sc->sc_rxdescs[i].wrx_special);
1741 			vtag->m_len = sizeof(int);
1742 		}
1743 #endif /* XXXJRT */
1744 
1745 		/*
1746 		 * Set up checksum info for this packet.
1747 		 */
1748 		if (status & WRX_ST_IPCS) {
1749 			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1750 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1751 			if (errors & WRX_ER_IPE)
1752 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1753 		}
1754 		if (status & WRX_ST_TCPCS) {
1755 			/*
1756 			 * Note: we don't know if this was TCP or UDP,
1757 			 * so we just set both bits, and expect the
1758 			 * upper layers to deal.
1759 			 */
1760 			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1761 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1762 			if (errors & WRX_ER_TCPE)
1763 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1764 		}
1765 
1766 		ifp->if_ipackets++;
1767 
1768 #if NBPFILTER > 0
1769 		/* Pass this up to any BPF listeners. */
1770 		if (ifp->if_bpf)
1771 			bpf_mtap(ifp->if_bpf, m);
1772 #endif /* NBPFILTER > 0 */
1773 
1774 		/* Pass it on. */
1775 		(*ifp->if_input)(ifp, m);
1776 	}
1777 
1778 	/* Update the receive pointer. */
1779 	sc->sc_rxptr = i;
1780 
1781 	DPRINTF(WM_DEBUG_RX,
1782 	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1783 }
1784 
1785 /*
1786  * wm_linkintr:
1787  *
1788  *	Helper; handle link interrupts.
1789  */
1790 void
1791 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1792 {
1793 	uint32_t status;
1794 
1795 	/*
1796 	 * If we get a link status interrupt on a 1000BASE-T
1797 	 * device, just fall into the normal MII tick path.
1798 	 */
1799 	if (sc->sc_flags & WM_F_HAS_MII) {
1800 		if (icr & ICR_LSC) {
1801 			DPRINTF(WM_DEBUG_LINK,
1802 			    ("%s: LINK: LSC -> mii_tick\n",
1803 			    sc->sc_dev.dv_xname));
1804 			mii_tick(&sc->sc_mii);
1805 		} else if (icr & ICR_RXSEQ) {
1806 			DPRINTF(WM_DEBUG_LINK,
1807 			    ("%s: LINK Receive sequence error\n",
1808 			    sc->sc_dev.dv_xname));
1809 		}
1810 		return;
1811 	}
1812 
1813 	/*
1814 	 * If we are now receiving /C/, check for link again in
1815 	 * a couple of link clock ticks.
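	 * (/C/ is the 1000BASE-X configuration ordered set used by
	 * IEEE 802.3 clause 37 autonegotiation.)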
1816 	 */
1817 	if (icr & ICR_RXCFG) {
1818 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1819 		    sc->sc_dev.dv_xname));
1820 		sc->sc_tbi_anstate = 2;
1821 	}
1822 
1823 	if (icr & ICR_LSC) {
1824 		status = CSR_READ(sc, WMREG_STATUS);
1825 		if (status & STATUS_LU) {
1826 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1827 			    sc->sc_dev.dv_xname,
1828 			    (status & STATUS_FD) ? "FDX" : "HDX"));
1829 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1830 			if (status & STATUS_FD)
1831 				sc->sc_tctl |=
1832 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1833 			else
1834 				sc->sc_tctl |=
1835 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1836 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1837 			sc->sc_tbi_linkup = 1;
1838 		} else {
1839 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1840 			    sc->sc_dev.dv_xname));
1841 			sc->sc_tbi_linkup = 0;
1842 		}
1843 		sc->sc_tbi_anstate = 2;
1844 		wm_tbi_set_linkled(sc);
1845 	} else if (icr & ICR_RXSEQ) {
1846 		DPRINTF(WM_DEBUG_LINK,
1847 		    ("%s: LINK: Receive sequence error\n",
1848 		    sc->sc_dev.dv_xname));
1849 	}
1850 }
1851 
1852 /*
1853  * wm_tick:
1854  *
1855  *	One second timer, used to check link status, sweep up
1856  *	completed transmit jobs, etc.
1857  */
1858 void
1859 wm_tick(void *arg)
1860 {
1861 	struct wm_softc *sc = arg;
1862 	int s;
1863 
1864 	s = splnet();
1865 
1866 	if (sc->sc_flags & WM_F_HAS_MII)
1867 		mii_tick(&sc->sc_mii);
1868 	else
1869 		wm_tbi_check_link(sc);
1870 
1871 	splx(s);
1872 
1873 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1874 }
1875 
1876 /*
1877  * wm_reset:
1878  *
 *	Reset the i8254x chip.
1880  */
1881 void
1882 wm_reset(struct wm_softc *sc)
1883 {
1884 	int i;
1885 
1886 	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1887 	delay(10000);
1888 
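	/* Wait up to 20 ms (1000 * 20 us) for RST to self-clear. */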
1889 	for (i = 0; i < 1000; i++) {
1890 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1891 			return;
1892 		delay(20);
1893 	}
1894 
1895 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1896 		printf("%s: WARNING: reset failed to complete\n",
1897 		    sc->sc_dev.dv_xname);
1898 }
1899 
1900 /*
1901  * wm_init:		[ifnet interface function]
1902  *
1903  *	Initialize the interface.  Must be called at splnet().
1904  */
1905 int
1906 wm_init(struct ifnet *ifp)
1907 {
1908 	struct wm_softc *sc = ifp->if_softc;
1909 	struct wm_rxsoft *rxs;
1910 	int i, error = 0;
1911 	uint32_t reg;
1912 
1913 	/* Cancel any pending I/O. */
1914 	wm_stop(ifp, 0);
1915 
1916 	/* Reset the chip to a known state. */
1917 	wm_reset(sc);
1918 
1919 	/* Initialize the transmit descriptor ring. */
1920 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1921 	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1922 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1923 	sc->sc_txfree = WM_NTXDESC;
1924 	sc->sc_txnext = 0;
1925 
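	/*
	 * Invalidate the cached Tx checksum context; 0xffffffff
	 * matches no real context, so the first transmit will load
	 * a fresh context descriptor.
	 */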
1926 	sc->sc_txctx_ipcs = 0xffffffff;
1927 	sc->sc_txctx_tucs = 0xffffffff;
1928 
1929 	if (sc->sc_type < WM_T_82543) {
1930 		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1931 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1932 		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1933 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1934 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1935 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1936 	} else {
1937 		CSR_WRITE(sc, WMREG_TBDAH, 0);
1938 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1939 		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1940 		CSR_WRITE(sc, WMREG_TDH, 0);
1941 		CSR_WRITE(sc, WMREG_TDT, 0);
1942 		CSR_WRITE(sc, WMREG_TIDV, 128);
1943 
1944 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1945 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1946 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1947 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1948 	}
1949 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1950 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1951 
1952 	/* Initialize the transmit job descriptors. */
1953 	for (i = 0; i < WM_TXQUEUELEN; i++)
1954 		sc->sc_txsoft[i].txs_mbuf = NULL;
1955 	sc->sc_txsfree = WM_TXQUEUELEN;
1956 	sc->sc_txsnext = 0;
1957 	sc->sc_txsdirty = 0;
1958 
1959 	/*
1960 	 * Initialize the receive descriptor and receive job
1961 	 * descriptor rings.
1962 	 */
1963 	if (sc->sc_type < WM_T_82543) {
1964 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1965 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1966 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1967 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1968 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1969 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1970 
1971 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1972 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1973 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1974 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1975 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1976 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1977 	} else {
1978 		CSR_WRITE(sc, WMREG_RDBAH, 0);
1979 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1980 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1981 		CSR_WRITE(sc, WMREG_RDH, 0);
1982 		CSR_WRITE(sc, WMREG_RDT, 0);
1983 		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
1984 	}
1985 	for (i = 0; i < WM_NRXDESC; i++) {
1986 		rxs = &sc->sc_rxsoft[i];
1987 		if (rxs->rxs_mbuf == NULL) {
1988 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
1989 				printf("%s: unable to allocate or map rx "
1990 				    "buffer %d, error = %d\n",
1991 				    sc->sc_dev.dv_xname, i, error);
1992 				/*
1993 				 * XXX Should attempt to run with fewer receive
1994 				 * XXX buffers instead of just failing.
1995 				 */
1996 				wm_rxdrain(sc);
1997 				goto out;
1998 			}
1999 		} else
2000 			WM_INIT_RXDESC(sc, i);
2001 	}
2002 	sc->sc_rxptr = 0;
2003 	sc->sc_rxdiscard = 0;
2004 	WM_RXCHAIN_RESET(sc);
2005 
2006 	/*
2007 	 * Clear out the VLAN table -- we don't use it (yet).
2008 	 */
2009 	CSR_WRITE(sc, WMREG_VET, 0);
2010 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
2011 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2012 
2013 	/*
2014 	 * Set up flow-control parameters.
2015 	 *
2016 	 * XXX Values could probably stand some tuning.
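	 *
	 * FCAL/FCAH hold the 802.3x PAUSE multicast address
	 * (01:80:c2:00:00:01) and FCT the MAC control ethertype
	 * (0x8808) that the receiver matches on.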
2017 	 */
2018 	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2019 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2020 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2021 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2022 
2023 		if (sc->sc_type < WM_T_82543) {
2024 			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2025 			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2026 		} else {
2027 			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2028 			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2029 		}
2030 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2031 	}
2032 
2033 #if 0 /* XXXJRT */
2034 	/* Deal with VLAN enables. */
2035 	if (sc->sc_ethercom.ec_nvlans != 0)
2036 		sc->sc_ctrl |= CTRL_VME;
2037 	else
2038 #endif /* XXXJRT */
2039 		sc->sc_ctrl &= ~CTRL_VME;
2040 
2041 	/* Write the control registers. */
2042 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2043 #if 0
2044 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2045 #endif
2046 
2047 	/*
2048 	 * Set up checksum offload parameters.
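	 *
	 * TUOFL apparently requires IPOFL as well, which is why
	 * IPOFL is forced on whenever TCP/UDP receive checksumming
	 * is enabled.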
2049 	 */
2050 	reg = CSR_READ(sc, WMREG_RXCSUM);
2051 	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2052 		reg |= RXCSUM_IPOFL;
2053 	else
2054 		reg &= ~RXCSUM_IPOFL;
2055 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2056 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2057 	else {
2058 		reg &= ~RXCSUM_TUOFL;
2059 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2060 			reg &= ~RXCSUM_IPOFL;
2061 	}
2062 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
2063 
2064 	/*
2065 	 * Set up the interrupt registers.
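	 *
	 * IMC first masks all interrupt sources; IMS then enables
	 * only the ones the interrupt handler services.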
2066 	 */
2067 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2068 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2069 	    ICR_RXO | ICR_RXT0;
2070 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2071 		sc->sc_icr |= ICR_RXCFG;
2072 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2073 
2074 	/* Set up the inter-packet gap. */
2075 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2076 
2077 #if 0 /* XXXJRT */
2078 	/* Set the VLAN ethernetype. */
2079 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2080 #endif
2081 
2082 	/*
2083 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
2085 	 * we resolve the media type.
2086 	 */
2087 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2088 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2089 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2090 
2091 	/* Set the media. */
2092 	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2093 
2094 	/*
2095 	 * Set up the receive control register; we actually program
2096 	 * the register when we set the receive filter.  Use multicast
2097 	 * address offset type 0.
2098 	 *
2099 	 * Only the i82544 has the ability to strip the incoming
2100 	 * CRC, so we don't enable that feature.
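	 *
	 * RCTL_2k selects 2 KB receive buffers, matching the mbuf
	 * clusters loaded by wm_add_rxbuf().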
2101 	 */
2102 	sc->sc_mchash_type = 0;
2103 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2104 	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2105 
2106 	/* Set the receive filter. */
2107 	wm_set_filter(sc);
2108 
2109 	/* Start the one second link check clock. */
2110 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2111 
2112 	/* ...all done! */
2113 	ifp->if_flags |= IFF_RUNNING;
2114 	ifp->if_flags &= ~IFF_OACTIVE;
2115 
2116  out:
2117 	if (error)
2118 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2119 	return (error);
2120 }
2121 
2122 /*
2123  * wm_rxdrain:
2124  *
2125  *	Drain the receive queue.
2126  */
2127 void
2128 wm_rxdrain(struct wm_softc *sc)
2129 {
2130 	struct wm_rxsoft *rxs;
2131 	int i;
2132 
2133 	for (i = 0; i < WM_NRXDESC; i++) {
2134 		rxs = &sc->sc_rxsoft[i];
2135 		if (rxs->rxs_mbuf != NULL) {
2136 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2137 			m_freem(rxs->rxs_mbuf);
2138 			rxs->rxs_mbuf = NULL;
2139 		}
2140 	}
2141 }
2142 
2143 /*
2144  * wm_stop:		[ifnet interface function]
2145  *
 *	Stop transmission and reception on the interface.
2147  */
2148 void
2149 wm_stop(struct ifnet *ifp, int disable)
2150 {
2151 	struct wm_softc *sc = ifp->if_softc;
2152 	struct wm_txsoft *txs;
2153 	int i;
2154 
2155 	/* Stop the one second clock. */
2156 	callout_stop(&sc->sc_tick_ch);
2157 
2158 	if (sc->sc_flags & WM_F_HAS_MII) {
2159 		/* Down the MII. */
2160 		mii_down(&sc->sc_mii);
2161 	}
2162 
2163 	/* Stop the transmit and receive processes. */
2164 	CSR_WRITE(sc, WMREG_TCTL, 0);
2165 	CSR_WRITE(sc, WMREG_RCTL, 0);
2166 
2167 	/* Release any queued transmit buffers. */
2168 	for (i = 0; i < WM_TXQUEUELEN; i++) {
2169 		txs = &sc->sc_txsoft[i];
2170 		if (txs->txs_mbuf != NULL) {
2171 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2172 			m_freem(txs->txs_mbuf);
2173 			txs->txs_mbuf = NULL;
2174 		}
2175 	}
2176 
2177 	if (disable)
2178 		wm_rxdrain(sc);
2179 
2180 	/* Mark the interface as down and cancel the watchdog timer. */
2181 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2182 	ifp->if_timer = 0;
2183 }
2184 
2185 /*
2186  * wm_read_eeprom:
2187  *
2188  *	Read data from the serial EEPROM.
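 *
 *	The EEPROM is a Microwire device bit-banged through EECD:
 *	for each word, assert chip select, shift out the 3-bit READ
 *	opcode and the 6- or 8-bit word address MSB-first on DI,
 *	then clock 16 data bits back in on DO.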
2189  */
2190 void
2191 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2192 {
2193 	uint32_t reg;
2194 	int i, x, addrbits = 6;
2195 
2196 	for (i = 0; i < wordcnt; i++) {
2197 		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2198 			reg = CSR_READ(sc, WMREG_EECD);
2199 
2200 			/* Get number of address bits. */
2201 			if (reg & EECD_EE_SIZE)
2202 				addrbits = 8;
2203 
2204 			/* Request EEPROM access. */
2205 			reg |= EECD_EE_REQ;
2206 			CSR_WRITE(sc, WMREG_EECD, reg);
2207 
			/* ...and wait for it to be granted. */
2209 			for (x = 0; x < 100; x++) {
2210 				reg = CSR_READ(sc, WMREG_EECD);
2211 				if (reg & EECD_EE_GNT)
2212 					break;
2213 				delay(5);
2214 			}
2215 			if ((reg & EECD_EE_GNT) == 0) {
2216 				printf("%s: could not acquire EEPROM GNT\n",
2217 				    sc->sc_dev.dv_xname);
				data[i] = 0xffff;
2219 				reg &= ~EECD_EE_REQ;
2220 				CSR_WRITE(sc, WMREG_EECD, reg);
2221 				continue;
2222 			}
2223 		} else
2224 			reg = 0;
2225 
2226 		/* Clear SK and DI. */
2227 		reg &= ~(EECD_SK | EECD_DI);
2228 		CSR_WRITE(sc, WMREG_EECD, reg);
2229 
2230 		/* Set CHIP SELECT. */
2231 		reg |= EECD_CS;
2232 		CSR_WRITE(sc, WMREG_EECD, reg);
2233 		delay(2);
2234 
2235 		/* Shift in the READ command. */
2236 		for (x = 3; x > 0; x--) {
2237 			if (UWIRE_OPC_READ & (1 << (x - 1)))
2238 				reg |= EECD_DI;
2239 			else
2240 				reg &= ~EECD_DI;
2241 			CSR_WRITE(sc, WMREG_EECD, reg);
2242 			delay(2);
2243 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2244 			delay(2);
2245 			CSR_WRITE(sc, WMREG_EECD, reg);
2246 			delay(2);
2247 		}
2248 
2249 		/* Shift in address. */
2250 		for (x = addrbits; x > 0; x--) {
2251 			if ((word + i) & (1 << (x - 1)))
2252 				reg |= EECD_DI;
2253 			else
2254 				reg &= ~EECD_DI;
2255 			CSR_WRITE(sc, WMREG_EECD, reg);
2256 			delay(2);
2257 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2258 			delay(2);
2259 			CSR_WRITE(sc, WMREG_EECD, reg);
2260 			delay(2);
2261 		}
2262 
2263 		/* Shift out the data. */
2264 		reg &= ~EECD_DI;
2265 		data[i] = 0;
2266 		for (x = 16; x > 0; x--) {
2267 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2268 			delay(2);
2269 			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2270 				data[i] |= (1 << (x - 1));
2271 			CSR_WRITE(sc, WMREG_EECD, reg);
2272 			delay(2);
2273 		}
2274 
2275 		/* Clear CHIP SELECT. */
2276 		reg &= ~EECD_CS;
2277 		CSR_WRITE(sc, WMREG_EECD, reg);
2278 		delay(2);
2279 
2280 		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2281 			/* Release the EEPROM. */
2282 			reg &= ~EECD_EE_REQ;
2283 			CSR_WRITE(sc, WMREG_EECD, reg);
2284 		}
2285 	}
2286 }
2287 
2288 /*
2289  * wm_add_rxbuf:
2290  *
 *	Add a receive buffer to the indicated descriptor.
2292  */
2293 int
2294 wm_add_rxbuf(struct wm_softc *sc, int idx)
2295 {
2296 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2297 	struct mbuf *m;
2298 	int error;
2299 
2300 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2301 	if (m == NULL)
2302 		return (ENOBUFS);
2303 
2304 	MCLGET(m, M_DONTWAIT);
2305 	if ((m->m_flags & M_EXT) == 0) {
2306 		m_freem(m);
2307 		return (ENOBUFS);
2308 	}
2309 
2310 	if (rxs->rxs_mbuf != NULL)
2311 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2312 
2313 	rxs->rxs_mbuf = m;
2314 
2315 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2316 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2317 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
2318 	if (error) {
2319 		printf("%s: unable to load rx DMA map %d, error = %d\n",
2320 		    sc->sc_dev.dv_xname, idx, error);
2321 		panic("wm_add_rxbuf");	/* XXX XXX XXX */
2322 	}
2323 
2324 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2325 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2326 
2327 	WM_INIT_RXDESC(sc, idx);
2328 
2329 	return (0);
2330 }
2331 
2332 /*
2333  * wm_set_ral:
2334  *
 *	Set an entry in the receive address list.
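 *
 *	The low register gets address bytes 0-3; the high register
 *	gets bytes 4-5 plus RAL_AV (address valid).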
2336  */
2337 static void
2338 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2339 {
2340 	uint32_t ral_lo, ral_hi;
2341 
2342 	if (enaddr != NULL) {
2343 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2344 		    (enaddr[3] << 24);
2345 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2346 		ral_hi |= RAL_AV;
2347 	} else {
2348 		ral_lo = 0;
2349 		ral_hi = 0;
2350 	}
2351 
2352 	if (sc->sc_type >= WM_T_82544) {
2353 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2354 		    ral_lo);
2355 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2356 		    ral_hi);
2357 	} else {
2358 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2359 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2360 	}
2361 }
2362 
2363 /*
2364  * wm_mchash:
2365  *
2366  *	Compute the hash of the multicast address for the 4096-bit
2367  *	multicast filter.
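 *
 *	The 12-bit hash is built from the last two address bytes,
 *	using the shift pair selected by sc_mchash_type.  For
 *	example, with type 0 the address 01:00:5e:00:00:01 hashes
 *	to (0x00 >> 4) | (0x01 << 4) = 0x010.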
2368  */
2369 static uint32_t
2370 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2371 {
2372 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2373 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2374 	uint32_t hash;
2375 
2376 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2377 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2378 
2379 	return (hash & 0xfff);
2380 }
2381 
2382 /*
2383  * wm_set_filter:
2384  *
2385  *	Set up the receive filter.
2386  */
2387 void
2388 wm_set_filter(struct wm_softc *sc)
2389 {
2390 	struct ethercom *ec = &sc->sc_ethercom;
2391 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2392 	struct ether_multi *enm;
2393 	struct ether_multistep step;
2394 	bus_addr_t mta_reg;
2395 	uint32_t hash, reg, bit;
2396 	int i;
2397 
2398 	if (sc->sc_type >= WM_T_82544)
2399 		mta_reg = WMREG_CORDOVA_MTA;
2400 	else
2401 		mta_reg = WMREG_MTA;
2402 
2403 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2404 
2405 	if (ifp->if_flags & IFF_BROADCAST)
2406 		sc->sc_rctl |= RCTL_BAM;
2407 	if (ifp->if_flags & IFF_PROMISC) {
2408 		sc->sc_rctl |= RCTL_UPE;
2409 		goto allmulti;
2410 	}
2411 
2412 	/*
2413 	 * Set the station address in the first RAL slot, and
2414 	 * clear the remaining slots.
2415 	 */
2416 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2417 	for (i = 1; i < WM_RAL_TABSIZE; i++)
2418 		wm_set_ral(sc, NULL, i);
2419 
2420 	/* Clear out the multicast table. */
2421 	for (i = 0; i < WM_MC_TABSIZE; i++)
2422 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
2423 
2424 	ETHER_FIRST_MULTI(step, ec, enm);
2425 	while (enm != NULL) {
2426 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2427 			/*
2428 			 * We must listen to a range of multicast addresses.
2429 			 * For now, just accept all multicasts, rather than
2430 			 * trying to set only those filter bits needed to match
2431 			 * the range.  (At this time, the only use of address
2432 			 * ranges is for IP multicast routing, for which the
2433 			 * range is big enough to require all bits set.)
2434 			 */
2435 			goto allmulti;
2436 		}
2437 
2438 		hash = wm_mchash(sc, enm->enm_addrlo);
2439 
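		/*
		 * The 4096-bit filter is 128 32-bit MTA registers;
		 * the upper 7 bits of the hash select the register,
		 * the low 5 bits the bit within it.
		 */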
2440 		reg = (hash >> 5) & 0x7f;
2441 		bit = hash & 0x1f;
2442 
2443 		hash = CSR_READ(sc, mta_reg + (reg << 2));
2444 		hash |= 1U << bit;
2445 
		/*
		 * XXX Hardware bug?  On the 82544, writing an
		 * odd-numbered MTA register can clobber its even
		 * neighbor, so read the previous register back and
		 * rewrite it after updating ours.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
2448 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2449 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2450 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2451 		} else
2452 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2453 
2454 		ETHER_NEXT_MULTI(step, enm);
2455 	}
2456 
2457 	ifp->if_flags &= ~IFF_ALLMULTI;
2458 	goto setit;
2459 
2460  allmulti:
2461 	ifp->if_flags |= IFF_ALLMULTI;
2462 	sc->sc_rctl |= RCTL_MPE;
2463 
2464  setit:
2465 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2466 }
2467 
2468 /*
2469  * wm_tbi_mediainit:
2470  *
2471  *	Initialize media for use on 1000BASE-X devices.
2472  */
2473 void
2474 wm_tbi_mediainit(struct wm_softc *sc)
2475 {
2476 	const char *sep = "";
2477 
2478 	if (sc->sc_type < WM_T_82543)
2479 		sc->sc_tipg = TIPG_WM_DFLT;
2480 	else
2481 		sc->sc_tipg = TIPG_LG_DFLT;
2482 
2483 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2484 	    wm_tbi_mediastatus);
2485 
2486 	/*
2487 	 * SWD Pins:
2488 	 *
2489 	 *	0 = Link LED (output)
2490 	 *	1 = Loss Of Signal (input)
2491 	 */
2492 	sc->sc_ctrl |= CTRL_SWDPIO(0);
2493 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2494 
2495 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2496 
2497 #define	ADD(ss, mm, dd)							\
2498 do {									\
2499 	printf("%s%s", sep, ss);					\
2500 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
2501 	sep = ", ";							\
2502 } while (/*CONSTCOND*/0)
2503 
2504 	printf("%s: ", sc->sc_dev.dv_xname);
2505 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2506 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2507 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2508 	printf("\n");
2509 
2510 #undef ADD
2511 
2512 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2513 }
2514 
2515 /*
2516  * wm_tbi_mediastatus:	[ifmedia interface function]
2517  *
2518  *	Get the current interface media status on a 1000BASE-X device.
2519  */
2520 void
2521 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2522 {
2523 	struct wm_softc *sc = ifp->if_softc;
2524 
2525 	ifmr->ifm_status = IFM_AVALID;
2526 	ifmr->ifm_active = IFM_ETHER;
2527 
2528 	if (sc->sc_tbi_linkup == 0) {
2529 		ifmr->ifm_active |= IFM_NONE;
2530 		return;
2531 	}
2532 
2533 	ifmr->ifm_status |= IFM_ACTIVE;
2534 	ifmr->ifm_active |= IFM_1000_SX;
2535 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2536 		ifmr->ifm_active |= IFM_FDX;
2537 }
2538 
2539 /*
2540  * wm_tbi_mediachange:	[ifmedia interface function]
2541  *
2542  *	Set hardware to newly-selected media on a 1000BASE-X device.
2543  */
2544 int
2545 wm_tbi_mediachange(struct ifnet *ifp)
2546 {
2547 	struct wm_softc *sc = ifp->if_softc;
2548 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2549 	uint32_t status;
2550 	int i;
2551 
2552 	sc->sc_txcw = ife->ifm_data;
2553 	if (sc->sc_ctrl & CTRL_RFCE)
2554 		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2555 	if (sc->sc_ctrl & CTRL_TFCE)
2556 		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2557 	sc->sc_txcw |= TXCW_ANE;
2558 
2559 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2560 	delay(10000);
2561 
2562 	sc->sc_tbi_anstate = 0;
2563 
2564 	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait up to 500 ms for the link to come up. */
2566 		for (i = 0; i < 50; i++) {
2567 			delay(10000);
2568 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2569 				break;
2570 		}
2571 
2572 		status = CSR_READ(sc, WMREG_STATUS);
2573 		if (status & STATUS_LU) {
2574 			/* Link is up. */
2575 			DPRINTF(WM_DEBUG_LINK,
2576 			    ("%s: LINK: set media -> link up %s\n",
2577 			    sc->sc_dev.dv_xname,
2578 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2579 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2580 			if (status & STATUS_FD)
2581 				sc->sc_tctl |=
2582 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2583 			else
2584 				sc->sc_tctl |=
2585 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2586 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2587 			sc->sc_tbi_linkup = 1;
2588 		} else {
2589 			/* Link is down. */
2590 			DPRINTF(WM_DEBUG_LINK,
2591 			    ("%s: LINK: set media -> link down\n",
2592 			    sc->sc_dev.dv_xname));
2593 			sc->sc_tbi_linkup = 0;
2594 		}
2595 	} else {
2596 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2597 		    sc->sc_dev.dv_xname));
2598 		sc->sc_tbi_linkup = 0;
2599 	}
2600 
2601 	wm_tbi_set_linkled(sc);
2602 
2603 	return (0);
2604 }
2605 
2606 /*
2607  * wm_tbi_set_linkled:
2608  *
2609  *	Update the link LED on 1000BASE-X devices.
2610  */
2611 void
2612 wm_tbi_set_linkled(struct wm_softc *sc)
2613 {
2614 
2615 	if (sc->sc_tbi_linkup)
2616 		sc->sc_ctrl |= CTRL_SWDPIN(0);
2617 	else
2618 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2619 
2620 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2621 }
2622 
2623 /*
2624  * wm_tbi_check_link:
2625  *
2626  *	Check the link on 1000BASE-X devices.
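 *
 *	sc_tbi_anstate is a per-tick countdown armed (to 2) by
 *	wm_linkintr(); the link is re-sampled only when it expires,
 *	giving autonegotiation a couple of ticks to settle.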
2627  */
2628 void
2629 wm_tbi_check_link(struct wm_softc *sc)
2630 {
2631 	uint32_t rxcw, ctrl, status;
2632 
2633 	if (sc->sc_tbi_anstate == 0)
2634 		return;
2635 	else if (sc->sc_tbi_anstate > 1) {
2636 		DPRINTF(WM_DEBUG_LINK,
2637 		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2638 		    sc->sc_tbi_anstate));
2639 		sc->sc_tbi_anstate--;
2640 		return;
2641 	}
2642 
2643 	sc->sc_tbi_anstate = 0;
2644 
2645 	rxcw = CSR_READ(sc, WMREG_RXCW);
2646 	ctrl = CSR_READ(sc, WMREG_CTRL);
2647 	status = CSR_READ(sc, WMREG_STATUS);
2648 
2649 	if ((status & STATUS_LU) == 0) {
2650 		DPRINTF(WM_DEBUG_LINK,
2651 		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2652 		sc->sc_tbi_linkup = 0;
2653 	} else {
2654 		DPRINTF(WM_DEBUG_LINK,
2655 		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2656 		    (status & STATUS_FD) ? "FDX" : "HDX"));
2657 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2658 		if (status & STATUS_FD)
2659 			sc->sc_tctl |=
2660 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2661 		else
2662 			sc->sc_tctl |=
2663 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2664 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2665 		sc->sc_tbi_linkup = 1;
2666 	}
2667 
2668 	wm_tbi_set_linkled(sc);
2669 }
2670 
2671 /*
2672  * wm_gmii_reset:
2673  *
2674  *	Reset the PHY.
2675  */
2676 void
2677 wm_gmii_reset(struct wm_softc *sc)
2678 {
2679 	uint32_t reg;
2680 
2681 	if (sc->sc_type >= WM_T_82544) {
2682 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2683 		delay(20000);
2684 
2685 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2686 		delay(20000);
2687 	} else {
2688 		/* The PHY reset pin is active-low. */
2689 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2690 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2691 		    CTRL_EXT_SWDPIN(4));
2692 		reg |= CTRL_EXT_SWDPIO(4);
2693 
2694 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2695 		delay(10);
2696 
2697 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2698 		delay(10);
2699 
2700 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2701 		delay(10);
2702 #if 0
2703 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2704 #endif
2705 	}
2706 }
2707 
2708 /*
2709  * wm_gmii_mediainit:
2710  *
2711  *	Initialize media for use on 1000BASE-T devices.
2712  */
2713 void
2714 wm_gmii_mediainit(struct wm_softc *sc)
2715 {
2716 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2717 
2718 	/* We have MII. */
2719 	sc->sc_flags |= WM_F_HAS_MII;
2720 
2721 	sc->sc_tipg = TIPG_1000T_DFLT;
2722 
2723 	/*
2724 	 * Let the chip set speed/duplex on its own based on
2725 	 * signals from the PHY.
2726 	 */
2727 	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2728 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2729 
2730 	/* Initialize our media structures and probe the GMII. */
2731 	sc->sc_mii.mii_ifp = ifp;
2732 
2733 	if (sc->sc_type >= WM_T_82544) {
2734 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2735 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2736 	} else {
2737 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2738 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2739 	}
2740 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
2741 
2742 	wm_gmii_reset(sc);
2743 
2744 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2745 	    wm_gmii_mediastatus);
2746 
2747 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2748 	    MII_OFFSET_ANY, 0);
2749 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2750 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2751 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2752 	} else
2753 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2754 }
2755 
2756 /*
2757  * wm_gmii_mediastatus:	[ifmedia interface function]
2758  *
2759  *	Get the current interface media status on a 1000BASE-T device.
2760  */
2761 void
2762 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2763 {
2764 	struct wm_softc *sc = ifp->if_softc;
2765 
2766 	mii_pollstat(&sc->sc_mii);
2767 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
2768 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
2769 }
2770 
2771 /*
2772  * wm_gmii_mediachange:	[ifmedia interface function]
2773  *
2774  *	Set hardware to newly-selected media on a 1000BASE-T device.
2775  */
2776 int
2777 wm_gmii_mediachange(struct ifnet *ifp)
2778 {
2779 	struct wm_softc *sc = ifp->if_softc;
2780 
2781 	if (ifp->if_flags & IFF_UP)
2782 		mii_mediachg(&sc->sc_mii);
2783 	return (0);
2784 }
2785 
2786 #define	MDI_IO		CTRL_SWDPIN(2)
2787 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
2788 #define	MDI_CLK		CTRL_SWDPIN(3)
2789 
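/*
 * Bit-bang MDIO via the software-definable pins: MDI_IO carries
 * the data (MSB first) and MDI_CLK supplies the clock.  A read
 * sends a 32-bit preamble of ones plus a 14-bit command (start,
 * opcode, PHY and register address) and then clocks 16 data bits
 * back in; a write shifts the entire 32-bit frame out.
 */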
2790 static void
2791 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2792 {
2793 	uint32_t i, v;
2794 
2795 	v = CSR_READ(sc, WMREG_CTRL);
2796 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2797 	v |= MDI_DIR | CTRL_SWDPIO(3);
2798 
2799 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2800 		if (data & i)
2801 			v |= MDI_IO;
2802 		else
2803 			v &= ~MDI_IO;
2804 		CSR_WRITE(sc, WMREG_CTRL, v);
2805 		delay(10);
2806 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2807 		delay(10);
2808 		CSR_WRITE(sc, WMREG_CTRL, v);
2809 		delay(10);
2810 	}
2811 }
2812 
2813 static uint32_t
2814 i82543_mii_recvbits(struct wm_softc *sc)
2815 {
2816 	uint32_t v, i, data = 0;
2817 
2818 	v = CSR_READ(sc, WMREG_CTRL);
2819 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2820 	v |= CTRL_SWDPIO(3);
2821 
2822 	CSR_WRITE(sc, WMREG_CTRL, v);
2823 	delay(10);
2824 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2825 	delay(10);
2826 	CSR_WRITE(sc, WMREG_CTRL, v);
2827 	delay(10);
2828 
2829 	for (i = 0; i < 16; i++) {
2830 		data <<= 1;
2831 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2832 		delay(10);
2833 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2834 			data |= 1;
2835 		CSR_WRITE(sc, WMREG_CTRL, v);
2836 		delay(10);
2837 	}
2838 
2839 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2840 	delay(10);
2841 	CSR_WRITE(sc, WMREG_CTRL, v);
2842 	delay(10);
2843 
2844 	return (data);
2845 }
2846 
2847 #undef MDI_IO
2848 #undef MDI_DIR
2849 #undef MDI_CLK
2850 
2851 /*
2852  * wm_gmii_i82543_readreg:	[mii interface function]
2853  *
2854  *	Read a PHY register on the GMII (i82543 version).
2855  */
2856 int
2857 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2858 {
2859 	struct wm_softc *sc = (void *) self;
2860 	int rv;
2861 
2862 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
2863 	i82543_mii_sendbits(sc, reg | (phy << 5) |
2864 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2865 	rv = i82543_mii_recvbits(sc) & 0xffff;
2866 
2867 	DPRINTF(WM_DEBUG_GMII,
2868 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2869 	    sc->sc_dev.dv_xname, phy, reg, rv));
2870 
2871 	return (rv);
2872 }
2873 
2874 /*
2875  * wm_gmii_i82543_writereg:	[mii interface function]
2876  *
2877  *	Write a PHY register on the GMII (i82543 version).
2878  */
2879 void
2880 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2881 {
2882 	struct wm_softc *sc = (void *) self;
2883 
2884 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
2885 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2886 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2887 	    (MII_COMMAND_START << 30), 32);
2888 }
2889 
2890 /*
2891  * wm_gmii_i82544_readreg:	[mii interface function]
2892  *
2893  *	Read a PHY register on the GMII.
2894  */
2895 int
2896 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2897 {
2898 	struct wm_softc *sc = (void *) self;
2899 	uint32_t mdic;
2900 	int i, rv;
2901 
2902 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2903 	    MDIC_REGADD(reg));
2904 
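	/* Poll up to 1 ms (100 * 10 us) for the MDIC ready bit. */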
2905 	for (i = 0; i < 100; i++) {
2906 		mdic = CSR_READ(sc, WMREG_MDIC);
2907 		if (mdic & MDIC_READY)
2908 			break;
2909 		delay(10);
2910 	}
2911 
2912 	if ((mdic & MDIC_READY) == 0) {
2913 		printf("%s: MDIC read timed out: phy %d reg %d\n",
2914 		    sc->sc_dev.dv_xname, phy, reg);
2915 		rv = 0;
2916 	} else if (mdic & MDIC_E) {
2917 #if 0 /* This is normal if no PHY is present. */
2918 		printf("%s: MDIC read error: phy %d reg %d\n",
2919 		    sc->sc_dev.dv_xname, phy, reg);
2920 #endif
2921 		rv = 0;
2922 	} else {
2923 		rv = MDIC_DATA(mdic);
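		/* All ones usually means no PHY responded. */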
2924 		if (rv == 0xffff)
2925 			rv = 0;
2926 	}
2927 
2928 	return (rv);
2929 }
2930 
2931 /*
2932  * wm_gmii_i82544_writereg:	[mii interface function]
2933  *
2934  *	Write a PHY register on the GMII.
2935  */
2936 void
2937 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2938 {
2939 	struct wm_softc *sc = (void *) self;
2940 	uint32_t mdic;
2941 	int i;
2942 
2943 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2944 	    MDIC_REGADD(reg) | MDIC_DATA(val));
2945 
2946 	for (i = 0; i < 100; i++) {
2947 		mdic = CSR_READ(sc, WMREG_MDIC);
2948 		if (mdic & MDIC_READY)
2949 			break;
2950 		delay(10);
2951 	}
2952 
2953 	if ((mdic & MDIC_READY) == 0)
2954 		printf("%s: MDIC write timed out: phy %d reg %d\n",
2955 		    sc->sc_dev.dv_xname, phy, reg);
2956 	else if (mdic & MDIC_E)
2957 		printf("%s: MDIC write error: phy %d reg %d\n",
2958 		    sc->sc_dev.dv_xname, phy, reg);
2959 }
2960 
2961 /*
2962  * wm_gmii_statchg:	[mii interface function]
2963  *
2964  *	Callback from MII layer when media changes.
2965  */
2966 void
2967 wm_gmii_statchg(struct device *self)
2968 {
2969 	struct wm_softc *sc = (void *) self;
2970 
2971 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2972 
2973 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2974 		DPRINTF(WM_DEBUG_LINK,
2975 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2976 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
2978 		DPRINTF(WM_DEBUG_LINK,
2979 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2980 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2981 	}
2982 
2983 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2984 }
2985