xref: /netbsd/sys/dev/pci/if_wm.c (revision cd475a12)
1 /*	$NetBSD: if_wm.c,v 1.782 2023/06/23 05:36:28 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficient Ethernet) for I354
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  */
83 
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.782 2023/06/23 05:36:28 msaitoh Exp $");
86 
87 #ifdef _KERNEL_OPT
88 #include "opt_if_wm.h"
89 #endif
90 
91 #include <sys/param.h>
92 
93 #include <sys/atomic.h>
94 #include <sys/callout.h>
95 #include <sys/cpu.h>
96 #include <sys/device.h>
97 #include <sys/errno.h>
98 #include <sys/interrupt.h>
99 #include <sys/ioctl.h>
100 #include <sys/kernel.h>
101 #include <sys/kmem.h>
102 #include <sys/mbuf.h>
103 #include <sys/pcq.h>
104 #include <sys/queue.h>
105 #include <sys/rndsource.h>
106 #include <sys/socket.h>
107 #include <sys/sysctl.h>
108 #include <sys/syslog.h>
109 #include <sys/systm.h>
110 #include <sys/workqueue.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <net/rss_config.h>
120 
121 #include <netinet/in.h>			/* XXX for struct ip */
122 #include <netinet/in_systm.h>		/* XXX for struct ip */
123 #include <netinet/ip.h>			/* XXX for struct ip */
124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
126 
127 #include <sys/bus.h>
128 #include <sys/intr.h>
129 #include <machine/endian.h>
130 
131 #include <dev/mii/mii.h>
132 #include <dev/mii/mdio.h>
133 #include <dev/mii/miivar.h>
134 #include <dev/mii/miidevs.h>
135 #include <dev/mii/mii_bitbang.h>
136 #include <dev/mii/ikphyreg.h>
137 #include <dev/mii/igphyreg.h>
138 #include <dev/mii/igphyvar.h>
139 #include <dev/mii/inbmphyreg.h>
140 #include <dev/mii/ihphyreg.h>
141 #include <dev/mii/makphyreg.h>
142 
143 #include <dev/pci/pcireg.h>
144 #include <dev/pci/pcivar.h>
145 #include <dev/pci/pcidevs.h>
146 
147 #include <dev/pci/if_wmreg.h>
148 #include <dev/pci/if_wmvar.h>
149 
150 #ifdef WM_DEBUG
151 #define	WM_DEBUG_LINK		__BIT(0)
152 #define	WM_DEBUG_TX		__BIT(1)
153 #define	WM_DEBUG_RX		__BIT(2)
154 #define	WM_DEBUG_GMII		__BIT(3)
155 #define	WM_DEBUG_MANAGE		__BIT(4)
156 #define	WM_DEBUG_NVM		__BIT(5)
157 #define	WM_DEBUG_INIT		__BIT(6)
158 #define	WM_DEBUG_LOCK		__BIT(7)
159 
160 #if 0
161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
163 	WM_DEBUG_LOCK
164 #endif
165 
166 #define	DPRINTF(sc, x, y)			  \
167 	do {					  \
168 		if ((sc)->sc_debug & (x))	  \
169 			printf y;		  \
170 	} while (0)
171 #else
172 #define	DPRINTF(sc, x, y)	__nothing
173 #endif /* WM_DEBUG */
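/*
 * Illustrative example (editor's sketch, not compiled): DPRINTF() takes
 * the debug class in its second argument and a fully parenthesized
 * printf() argument list in its third, so it survives the macro
 * expansion above.  A typical call looks like:
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link is up\n", device_xname(sc->sc_dev)));
#endif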
174 
175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
176 
177 /*
178  * The maximum number of interrupts supported by this device driver.
179  */
180 #define WM_MAX_NQUEUEINTR	16
181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
182 
183 #ifndef WM_DISABLE_MSI
184 #define	WM_DISABLE_MSI 0
185 #endif
186 #ifndef WM_DISABLE_MSIX
187 #define	WM_DISABLE_MSIX 0
188 #endif
189 
190 int wm_disable_msi = WM_DISABLE_MSI;
191 int wm_disable_msix = WM_DISABLE_MSIX;
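/*
 * A nonzero value in either variable above prevents the driver from
 * using MSI or MSI-X respectively; the defaults can be overridden at
 * build time via the WM_DISABLE_MSI/WM_DISABLE_MSIX options.
 */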
192 
193 #ifndef WM_WATCHDOG_TIMEOUT
194 #define WM_WATCHDOG_TIMEOUT 5
195 #endif
196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
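/* The watchdog timeout above is in seconds. */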
197 
198 /*
199  * Transmit descriptor list size.  Due to errata, we can only have
200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
201  * on >= 82544. We tell the upper layers that they can queue a lot
202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
203  * of them at a time.
204  *
205  * We allow up to 64 DMA segments per packet.  Pathological packet
206  * chains containing many small mbufs have been observed in zero-copy
207  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
208  * segments, m_defrag() is called to coalesce it into fewer segments.
209  */
210 #define	WM_NTXSEGS		64
211 #define	WM_IFQUEUELEN		256
212 #define	WM_TXQUEUELEN_MAX	64
213 #define	WM_TXQUEUELEN_MAX_82547	16
214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
217 #define	WM_NTXDESC_82542	256
218 #define	WM_NTXDESC_82544	4096
219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
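/*
 * Note: the "& MASK" in WM_NEXTTX()/WM_NEXTTXS() relies on the
 * descriptor and job counts being powers of two, so the AND is a
 * cheap "% n"; e.g. with 4096 descriptors, WM_NEXTTX(txq, 4095)
 * wraps back to 0.
 */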
224 
225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
226 
227 #define	WM_TXINTERQSIZE		256
228 
229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
231 #endif
232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
234 #endif
235 
236 /*
237  * Receive descriptor list size.  We have one Rx buffer for a normal-
238  * sized packet.  Jumbo packets consume 5 Rx buffers for a full-sized
239  * packet.  We allocate 256 receive descriptors, each with a 2k
240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
241  */
242 #define	WM_NRXDESC		256U
243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
246 
247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
249 #endif
250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
252 #endif
253 
254 typedef union txdescs {
255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
257 } txdescs_t;
258 
259 typedef union rxdescs {
260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
263 } rxdescs_t;
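/*
 * The unions above only describe the worst-case descriptor layout;
 * the descriptor format and size actually used are selected at
 * runtime through txq_descsize/rxq_descsize and the per-queue
 * descriptor counts.
 */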
264 
265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
267 
268 /*
269  * Software state for transmit jobs.
270  */
271 struct wm_txsoft {
272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
274 	int txs_firstdesc;		/* first descriptor in packet */
275 	int txs_lastdesc;		/* last descriptor in packet */
276 	int txs_ndesc;			/* # of descriptors used */
277 };
278 
279 /*
280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
282  * them together.
283  */
284 struct wm_rxsoft {
285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
287 };
288 
289 #define WM_LINKUP_TIMEOUT	50
290 
291 static uint16_t swfwphysem[] = {
292 	SWFW_PHY0_SM,
293 	SWFW_PHY1_SM,
294 	SWFW_PHY2_SM,
295 	SWFW_PHY3_SM
296 };
297 
298 static const uint32_t wm_82580_rxpbs_table[] = {
299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
300 };
301 
302 struct wm_softc;
303 
304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
305 #if !defined(WM_EVENT_COUNTERS)
306 #define WM_EVENT_COUNTERS 1
307 #endif
308 #endif
309 
310 #ifdef WM_EVENT_COUNTERS
311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
313 	struct evcnt qname##_ev_##evname
314 
315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
316 	do {								\
317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
319 		    "%s%02d%s", #qname, (qnum), #evname);		\
320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
321 		    (evtype), NULL, (xname),				\
322 		    (q)->qname##_##evname##_evcnt_name);		\
323 	} while (0)
324 
325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
327 
328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
330 
331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
332 	evcnt_detach(&(q)->qname##_ev_##evname)
333 #endif /* WM_EVENT_COUNTERS */
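/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * in a queue structure produces
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() then snprintf()s a name such as "txq00txdw"
 * into that buffer (the placeholder string merely sizes it) before
 * registering the counter with evcnt_attach_dynamic().
 */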
334 
335 struct wm_txqueue {
336 	kmutex_t *txq_lock;		/* lock for tx operations */
337 
338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
339 
340 	/* Software state for the transmit descriptors. */
341 	int txq_num;			/* must be a power of two */
342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
343 
344 	/* TX control data structures. */
345 	int txq_ndesc;			/* must be a power of two */
346 	size_t txq_descsize;		/* size of a Tx descriptor */
347 	txdescs_t *txq_descs_u;
348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
350 	int txq_desc_rseg;		/* real number of control segments */
351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
352 #define	txq_descs	txq_descs_u->sctxu_txdescs
353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
354 
355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
356 
357 	int txq_free;			/* number of free Tx descriptors */
358 	int txq_next;			/* next ready Tx descriptor */
359 
360 	int txq_sfree;			/* number of free Tx jobs */
361 	int txq_snext;			/* next free Tx job */
362 	int txq_sdirty;			/* dirty Tx jobs */
363 
364 	/* These 4 variables are used only on the 82547. */
365 	int txq_fifo_size;		/* Tx FIFO size */
366 	int txq_fifo_head;		/* current head of FIFO */
367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
369 
370 	/*
371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
372 	 * CPUs.  This queue mediates between them without blocking.
373 	 */
374 	pcq_t *txq_interq;
375 
376 	/*
377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
378 	 * to manage the Tx H/W queue's busy flag.
379 	 */
380 	int txq_flags;			/* flags for H/W queue, see below */
381 #define	WM_TXQ_NO_SPACE		0x1
382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
383 
384 	bool txq_stopping;
385 
386 	bool txq_sending;
387 	time_t txq_lastsent;
388 
389 	/* Checksum flags used for previous packet */
390 	uint32_t	txq_last_hw_cmd;
391 	uint8_t		txq_last_hw_fields;
392 	uint16_t	txq_last_hw_ipcs;
393 	uint16_t	txq_last_hw_tucs;
394 
395 	uint32_t txq_packets;		/* for AIM */
396 	uint32_t txq_bytes;		/* for AIM */
397 #ifdef WM_EVENT_COUNTERS
398 	/* TX event counters */
399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
404 					    /* XXX not used? */
405 
406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
414 					    /* other than toomanyseg */
415 
416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
420 
421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
423 #endif /* WM_EVENT_COUNTERS */
424 };
425 
426 struct wm_rxqueue {
427 	kmutex_t *rxq_lock;		/* lock for rx operations */
428 
429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
430 
431 	/* Software state for the receive descriptors. */
432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
433 
434 	/* RX control data structures. */
435 	int rxq_ndesc;			/* must be a power of two */
436 	size_t rxq_descsize;		/* size of an Rx descriptor */
437 	rxdescs_t *rxq_descs_u;
438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
440 	int rxq_desc_rseg;		/* real number of control segments */
441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
445 
446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
447 
448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
449 	int rxq_discard;
450 	int rxq_len;
451 	struct mbuf *rxq_head;
452 	struct mbuf *rxq_tail;
453 	struct mbuf **rxq_tailp;
454 
455 	bool rxq_stopping;
456 
457 	uint32_t rxq_packets;		/* for AIM */
458 	uint32_t rxq_bytes;		/* for AIM */
459 #ifdef WM_EVENT_COUNTERS
460 	/* RX event counters */
461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
463 
464 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
465 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
466 #endif
467 };
468 
469 struct wm_queue {
470 	int wmq_id;			/* index of TX/RX queues */
471 	int wmq_intr_idx;		/* index of MSI-X tables */
472 
473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
474 	bool wmq_set_itr;
475 
476 	struct wm_txqueue wmq_txq;
477 	struct wm_rxqueue wmq_rxq;
478 	char sysctlname[32];		/* Name for sysctl */
479 
480 	bool wmq_txrx_use_workqueue;
481 	bool wmq_wq_enqueued;
482 	struct work wmq_cookie;
483 	void *wmq_si;
484 };
485 
486 struct wm_phyop {
487 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
488 	void (*release)(struct wm_softc *);
489 	int (*readreg_locked)(device_t, int, int, uint16_t *);
490 	int (*writereg_locked)(device_t, int, int, uint16_t);
491 	int reset_delay_us;
492 	bool no_errprint;
493 };
494 
495 struct wm_nvmop {
496 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
497 	void (*release)(struct wm_softc *);
498 	int (*read)(struct wm_softc *, int, int, uint16_t *);
499 };
500 
501 /*
502  * Software state per device.
503  */
504 struct wm_softc {
505 	device_t sc_dev;		/* generic device information */
506 	bus_space_tag_t sc_st;		/* bus space tag */
507 	bus_space_handle_t sc_sh;	/* bus space handle */
508 	bus_size_t sc_ss;		/* bus space size */
509 	bus_space_tag_t sc_iot;		/* I/O space tag */
510 	bus_space_handle_t sc_ioh;	/* I/O space handle */
511 	bus_size_t sc_ios;		/* I/O space size */
512 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
513 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
514 	bus_size_t sc_flashs;		/* flash registers space size */
515 	off_t sc_flashreg_offset;	/*
516 					 * offset to flash registers from
517 					 * start of BAR
518 					 */
519 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
520 
521 	struct ethercom sc_ethercom;	/* Ethernet common data */
522 	struct mii_data sc_mii;		/* MII/media information */
523 
524 	pci_chipset_tag_t sc_pc;
525 	pcitag_t sc_pcitag;
526 	int sc_bus_speed;		/* PCI/PCIX bus speed */
527 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
528 
529 	uint16_t sc_pcidevid;		/* PCI device ID */
530 	wm_chip_type sc_type;		/* MAC type */
531 	int sc_rev;			/* MAC revision */
532 	wm_phy_type sc_phytype;		/* PHY type */
533 	uint8_t sc_sfptype;		/* SFP type */
534 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
535 #define	WM_MEDIATYPE_UNKNOWN		0x00
536 #define	WM_MEDIATYPE_FIBER		0x01
537 #define	WM_MEDIATYPE_COPPER		0x02
538 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
539 	int sc_funcid;			/* unit number of the chip (0 to 3) */
540 	int sc_flags;			/* flags; see below */
541 	u_short sc_if_flags;		/* last if_flags */
542 	int sc_ec_capenable;		/* last ec_capenable */
543 	int sc_flowflags;		/* 802.3x flow control flags */
544 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
545 	int sc_align_tweak;
546 
547 	void *sc_ihs[WM_MAX_NINTR];	/*
548 					 * interrupt cookie.
549 					 * - legacy and msi use sc_ihs[0] only
550 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
551 					 */
552 	pci_intr_handle_t *sc_intrs;	/*
553 					 * legacy and msi use sc_intrs[0] only
554 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
555 					 */
556 	int sc_nintrs;			/* number of interrupts */
557 
558 	int sc_link_intr_idx;		/* index of MSI-X tables */
559 
560 	callout_t sc_tick_ch;		/* tick callout */
561 	bool sc_core_stopping;
562 
563 	int sc_nvm_ver_major;
564 	int sc_nvm_ver_minor;
565 	int sc_nvm_ver_build;
566 	int sc_nvm_addrbits;		/* NVM address bits */
567 	unsigned int sc_nvm_wordsize;	/* NVM word size */
568 	int sc_ich8_flash_base;
569 	int sc_ich8_flash_bank_size;
570 	int sc_nvm_k1_enabled;
571 
572 	int sc_nqueues;
573 	struct wm_queue *sc_queue;
574 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
575 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
576 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
577 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
578 	struct workqueue *sc_queue_wq;
579 	bool sc_txrx_use_workqueue;
580 
581 	int sc_affinity_offset;
582 
583 #ifdef WM_EVENT_COUNTERS
584 	/* Event counters. */
585 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
586 
587 	/* >= WM_T_82542_2_1 */
588 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
589 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
590 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
591 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
592 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
593 
594 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
595 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
596 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
597 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
598 	struct evcnt sc_ev_mpc;		/* Missed Packets */
599 	struct evcnt sc_ev_scc;		/* Single Collision */
600 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
601 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
602 	struct evcnt sc_ev_latecol;	/* Late Collision */
603 	struct evcnt sc_ev_colc;	/* Collision */
604 	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
605 	struct evcnt sc_ev_dc;		/* Defer */
606 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
607 	struct evcnt sc_ev_sec;		/* Sequence Error */
608 
609 	/* Old */
610 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
611 	/* New */
612 	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */
613 
614 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
615 	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
616 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
617 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
618 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
619 	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
620 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
621 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
622 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
623 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
624 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
625 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
626 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
627 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
628 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
629 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
630 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
631 	struct evcnt sc_ev_roc;		/* Rx Oversize */
632 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
633 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
634 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
635 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
636 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
637 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
638 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
639 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
640 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
641 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
642 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
643 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
644 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
645 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
646 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
647 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
648 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
649 
650 	/* Old */
651 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
652 	/* New */
653 	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */
654 
655 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
656 
657 	/* Old */
658 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
659 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
660 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
661 	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
662 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
663 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
664 	/*
665 	 * sc_ev_rxdmtc is shared by both the "Intr. Cause" and the
666 	 * non-"Intr. Cause" registers.
667 	 */
668 	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
669 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
670 	/* New */
671 	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
672 	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
673 	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
674 	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
675 	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
676 	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
677 	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
678 	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
679 	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
680 	struct evcnt sc_ev_lenerrs;	/* Length Error */
681 	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
682 	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
683 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
684 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
685 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
686 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
687 	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
688 	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
689 #endif /* WM_EVENT_COUNTERS */
690 
691 	struct sysctllog *sc_sysctllog;
692 
693 	/* This variable is used only on the 82547. */
694 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
695 
696 	uint32_t sc_ctrl;		/* prototype CTRL register */
697 #if 0
698 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
699 #endif
700 	uint32_t sc_icr;		/* prototype interrupt bits */
701 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
702 	uint32_t sc_tctl;		/* prototype TCTL register */
703 	uint32_t sc_rctl;		/* prototype RCTL register */
704 	uint32_t sc_txcw;		/* prototype TXCW register */
705 	uint32_t sc_tipg;		/* prototype TIPG register */
706 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
707 	uint32_t sc_pba;		/* prototype PBA register */
708 
709 	int sc_tbi_linkup;		/* TBI link status */
710 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
711 	int sc_tbi_serdes_ticks;	/* tbi ticks */
712 
713 	int sc_mchash_type;		/* multicast filter offset */
714 
715 	krndsource_t rnd_source;	/* random source */
716 
717 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
718 
719 	kmutex_t *sc_core_lock;		/* lock for softc operations */
720 	kmutex_t *sc_ich_phymtx;	/*
721 					 * 82574/82583/ICH/PCH specific PHY
722 					 * mutex. For 82574/82583, the mutex
723 					 * is used for both PHY and NVM.
724 					 */
725 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
726 
727 	struct wm_phyop phy;
728 	struct wm_nvmop nvm;
729 
730 	struct workqueue *sc_reset_wq;
731 	struct work sc_reset_work;
732 	volatile unsigned sc_reset_pending;
733 
734 	bool sc_dying;
735 
736 #ifdef WM_DEBUG
737 	uint32_t sc_debug;
738 	bool sc_trigger_reset;
739 #endif
740 };
741 
742 #define	WM_RXCHAIN_RESET(rxq)						\
743 do {									\
744 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
745 	*(rxq)->rxq_tailp = NULL;					\
746 	(rxq)->rxq_len = 0;						\
747 } while (/*CONSTCOND*/0)
748 
749 #define	WM_RXCHAIN_LINK(rxq, m)						\
750 do {									\
751 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
752 	(rxq)->rxq_tailp = &(m)->m_next;				\
753 } while (/*CONSTCOND*/0)
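/*
 * rxq_tailp always points at the location (rxq_head or the previous
 * mbuf's m_next) where the next fragment should be linked, so
 * WM_RXCHAIN_LINK() appends to the chain in constant time.
 */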
754 
755 #ifdef WM_EVENT_COUNTERS
756 #ifdef __HAVE_ATOMIC64_LOADSTORE
757 #define	WM_EVCNT_INCR(ev)						\
758 	atomic_store_relaxed(&((ev)->ev_count),				\
759 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
760 #define	WM_EVCNT_STORE(ev, val)						\
761 	atomic_store_relaxed(&((ev)->ev_count), (val))
762 #define	WM_EVCNT_ADD(ev, val)						\
763 	atomic_store_relaxed(&((ev)->ev_count),				\
764 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
765 #else
766 #define	WM_EVCNT_INCR(ev)						\
767 	((ev)->ev_count)++
768 #define	WM_EVCNT_STORE(ev, val)						\
769 	((ev)->ev_count = (val))
770 #define	WM_EVCNT_ADD(ev, val)						\
771 	(ev)->ev_count += (val)
772 #endif
773 
774 #define WM_Q_EVCNT_INCR(qname, evname)			\
775 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
776 #define WM_Q_EVCNT_STORE(qname, evname, val)		\
777 	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
778 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
779 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
780 #else /* !WM_EVENT_COUNTERS */
781 #define	WM_EVCNT_INCR(ev)	/* nothing */
782 #define	WM_EVCNT_STORE(ev, val)	/* nothing */
783 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
784 
785 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
786 #define WM_Q_EVCNT_STORE(qname, evname, val)	/* nothing */
787 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
788 #endif /* !WM_EVENT_COUNTERS */
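/*
 * On __HAVE_ATOMIC64_LOADSTORE platforms the WM_EVCNT_*() macros use
 * relaxed atomic load/store pairs.  This is not an atomic increment;
 * it assumes each counter has a single updater and guarantees only
 * that concurrent readers never observe a torn 64-bit value.
 */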
789 
790 #define	CSR_READ(sc, reg)						\
791 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
792 #define	CSR_WRITE(sc, reg, val)						\
793 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
794 #define	CSR_WRITE_FLUSH(sc)						\
795 	(void)CSR_READ((sc), WMREG_STATUS)
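/*
 * CSR_WRITE_FLUSH() pushes any posted PCI writes out to the device by
 * issuing a harmless read of the STATUS register.
 */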
796 
797 #define ICH8_FLASH_READ32(sc, reg)					\
798 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
799 	    (reg) + sc->sc_flashreg_offset)
800 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
801 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
802 	    (reg) + sc->sc_flashreg_offset, (data))
803 
804 #define ICH8_FLASH_READ16(sc, reg)					\
805 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
806 	    (reg) + sc->sc_flashreg_offset)
807 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
808 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
809 	    (reg) + sc->sc_flashreg_offset, (data))
810 
811 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
812 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
813 
814 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
815 #define	WM_CDTXADDR_HI(txq, x)						\
816 	(sizeof(bus_addr_t) == 8 ?					\
817 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
818 
819 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
820 #define	WM_CDRXADDR_HI(rxq, x)						\
821 	(sizeof(bus_addr_t) == 8 ?					\
822 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
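/*
 * The _LO/_HI pairs split a descriptor ring's DMA address into the
 * two 32-bit halves the hardware registers expect; when bus_addr_t
 * is 32 bits wide, the high half is always 0.
 */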
823 
824 /*
825  * Register read/write functions.
826  * Other than CSR_{READ|WRITE}().
827  */
828 #if 0
829 static inline uint32_t wm_io_read(struct wm_softc *, int);
830 #endif
831 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
832 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
833     uint32_t, uint32_t);
834 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
835 
836 /*
837  * Descriptor sync/init functions.
838  */
839 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
840 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
841 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
842 
843 /*
844  * Device driver interface functions and commonly used functions.
845  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
846  */
847 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
848 static int	wm_match(device_t, cfdata_t, void *);
849 static void	wm_attach(device_t, device_t, void *);
850 static int	wm_detach(device_t, int);
851 static bool	wm_suspend(device_t, const pmf_qual_t *);
852 static bool	wm_resume(device_t, const pmf_qual_t *);
853 static bool	wm_watchdog(struct ifnet *);
854 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
855     uint16_t *);
856 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
857     uint16_t *);
858 static void	wm_tick(void *);
859 static int	wm_ifflags_cb(struct ethercom *);
860 static int	wm_ioctl(struct ifnet *, u_long, void *);
861 /* MAC address related */
862 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
863 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
864 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
865 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
866 static int	wm_rar_count(struct wm_softc *);
867 static void	wm_set_filter(struct wm_softc *);
868 /* Reset and init related */
869 static void	wm_set_vlan(struct wm_softc *);
870 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
871 static void	wm_get_auto_rd_done(struct wm_softc *);
872 static void	wm_lan_init_done(struct wm_softc *);
873 static void	wm_get_cfg_done(struct wm_softc *);
874 static int	wm_phy_post_reset(struct wm_softc *);
875 static int	wm_write_smbus_addr(struct wm_softc *);
876 static int	wm_init_lcd_from_nvm(struct wm_softc *);
877 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
878 static void	wm_initialize_hardware_bits(struct wm_softc *);
879 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
880 static int	wm_reset_phy(struct wm_softc *);
881 static void	wm_flush_desc_rings(struct wm_softc *);
882 static void	wm_reset(struct wm_softc *);
883 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
884 static void	wm_rxdrain(struct wm_rxqueue *);
885 static void	wm_init_rss(struct wm_softc *);
886 static void	wm_adjust_qnum(struct wm_softc *, int);
887 static inline bool	wm_is_using_msix(struct wm_softc *);
888 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
889 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
890 static int	wm_setup_legacy(struct wm_softc *);
891 static int	wm_setup_msix(struct wm_softc *);
892 static int	wm_init(struct ifnet *);
893 static int	wm_init_locked(struct ifnet *);
894 static void	wm_init_sysctls(struct wm_softc *);
895 static void	wm_update_stats(struct wm_softc *);
896 static void	wm_clear_evcnt(struct wm_softc *);
897 static void	wm_unset_stopping_flags(struct wm_softc *);
898 static void	wm_set_stopping_flags(struct wm_softc *);
899 static void	wm_stop(struct ifnet *, int);
900 static void	wm_stop_locked(struct ifnet *, bool, bool);
901 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
902 static void	wm_82547_txfifo_stall(void *);
903 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
904 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
905 /* DMA related */
906 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
907 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
908 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
909 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
910     struct wm_txqueue *);
911 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
912 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
913 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
914     struct wm_rxqueue *);
915 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
916 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
917 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
918 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
919 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
920 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
921 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
922     struct wm_txqueue *);
923 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
924     struct wm_rxqueue *);
925 static int	wm_alloc_txrx_queues(struct wm_softc *);
926 static void	wm_free_txrx_queues(struct wm_softc *);
927 static int	wm_init_txrx_queues(struct wm_softc *);
928 /* Start */
929 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
930     struct wm_txsoft *, uint32_t *, uint8_t *);
931 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
932 static void	wm_start(struct ifnet *);
933 static void	wm_start_locked(struct ifnet *);
934 static int	wm_transmit(struct ifnet *, struct mbuf *);
935 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
936 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
937     bool);
938 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
939     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
940 static void	wm_nq_start(struct ifnet *);
941 static void	wm_nq_start_locked(struct ifnet *);
942 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
943 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
944 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
945     bool);
946 static void	wm_deferred_start_locked(struct wm_txqueue *);
947 static void	wm_handle_queue(void *);
948 static void	wm_handle_queue_work(struct work *, void *);
949 static void	wm_handle_reset_work(struct work *, void *);
950 /* Interrupt */
951 static bool	wm_txeof(struct wm_txqueue *, u_int);
952 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
953 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
954 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
955 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
956 static void	wm_linkintr(struct wm_softc *, uint32_t);
957 static int	wm_intr_legacy(void *);
958 static inline void	wm_txrxintr_disable(struct wm_queue *);
959 static inline void	wm_txrxintr_enable(struct wm_queue *);
960 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
961 static int	wm_txrxintr_msix(void *);
962 static int	wm_linkintr_msix(void *);
963 
964 /*
965  * Media related.
966  * GMII, SGMII, TBI, SERDES and SFP.
967  */
968 /* Common */
969 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
970 /* GMII related */
971 static void	wm_gmii_reset(struct wm_softc *);
972 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
973 static int	wm_get_phy_id_82575(struct wm_softc *);
974 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
975 static int	wm_gmii_mediachange(struct ifnet *);
976 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
977 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
978 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
979 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
980 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
981 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
982 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
983 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
984 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
985 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
986 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
987 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
988 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
989 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
990 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
991 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
992 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
993 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
994 	bool);
995 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
996 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
997 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
998 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
999 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
1000 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
1001 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
1002 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
1003 static void	wm_gmii_statchg(struct ifnet *);
1004 /*
1005  * Kumeran related (80003, ICH* and PCH*).
1006  * These functions are not for accessing MII registers but for accessing
1007  * Kumeran-specific registers.
1008  */
1009 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
1010 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
1011 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
1012 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
1013 /* EMI register related */
1014 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
1015 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
1016 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
1017 /* SGMII */
1018 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
1019 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
1020 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
1021 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
1022 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
1023 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
1024 /* TBI related */
1025 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
1026 static void	wm_tbi_mediainit(struct wm_softc *);
1027 static int	wm_tbi_mediachange(struct ifnet *);
1028 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
1029 static int	wm_check_for_link(struct wm_softc *);
1030 static void	wm_tbi_tick(struct wm_softc *);
1031 /* SERDES related */
1032 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
1033 static int	wm_serdes_mediachange(struct ifnet *);
1034 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
1035 static void	wm_serdes_tick(struct wm_softc *);
1036 /* SFP related */
1037 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
1038 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
1039 
1040 /*
1041  * NVM related.
1042  * Microwire, SPI (w/wo EERD) and Flash.
1043  */
1044 /* Misc functions */
1045 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
1046 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
1047 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
1048 /* Microwire */
1049 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
1050 /* SPI */
1051 static int	wm_nvm_ready_spi(struct wm_softc *);
1052 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
1053 /* Using with EERD */
1054 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
1055 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
1056 /* Flash */
1057 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
1058     unsigned int *);
1059 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
1060 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
1061 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
1062     uint32_t *);
1063 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
1064 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
1065 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
1066 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
1067 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
1068 /* iNVM */
1069 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
1070 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
1071 /* Lock, detecting NVM type, validate checksum and read */
1072 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
1073 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
1074 static int	wm_nvm_validate_checksum(struct wm_softc *);
1075 static void	wm_nvm_version_invm(struct wm_softc *);
1076 static void	wm_nvm_version(struct wm_softc *);
1077 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
1078 
1079 /*
1080  * Hardware semaphores.
1081  * Very complex...
1082  */
1083 static int	wm_get_null(struct wm_softc *);
1084 static void	wm_put_null(struct wm_softc *);
1085 static int	wm_get_eecd(struct wm_softc *);
1086 static void	wm_put_eecd(struct wm_softc *);
1087 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
1088 static void	wm_put_swsm_semaphore(struct wm_softc *);
1089 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
1090 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
1091 static int	wm_get_nvm_80003(struct wm_softc *);
1092 static void	wm_put_nvm_80003(struct wm_softc *);
1093 static int	wm_get_nvm_82571(struct wm_softc *);
1094 static void	wm_put_nvm_82571(struct wm_softc *);
1095 static int	wm_get_phy_82575(struct wm_softc *);
1096 static void	wm_put_phy_82575(struct wm_softc *);
1097 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
1098 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
1099 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
1100 static void	wm_put_swflag_ich8lan(struct wm_softc *);
1101 static int	wm_get_nvm_ich8lan(struct wm_softc *);
1102 static void	wm_put_nvm_ich8lan(struct wm_softc *);
1103 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
1104 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
1105 
1106 /*
1107  * Management mode and power management related subroutines.
1108  * BMC, AMT, suspend/resume and EEE.
1109  */
1110 #if 0
1111 static int	wm_check_mng_mode(struct wm_softc *);
1112 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
1113 static int	wm_check_mng_mode_82574(struct wm_softc *);
1114 static int	wm_check_mng_mode_generic(struct wm_softc *);
1115 #endif
1116 static int	wm_enable_mng_pass_thru(struct wm_softc *);
1117 static bool	wm_phy_resetisblocked(struct wm_softc *);
1118 static void	wm_get_hw_control(struct wm_softc *);
1119 static void	wm_release_hw_control(struct wm_softc *);
1120 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1121 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
1122 static void	wm_init_manageability(struct wm_softc *);
1123 static void	wm_release_manageability(struct wm_softc *);
1124 static void	wm_get_wakeup(struct wm_softc *);
1125 static int	wm_ulp_disable(struct wm_softc *);
1126 static int	wm_enable_phy_wakeup(struct wm_softc *);
1127 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1128 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
1129 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
1130 static void	wm_enable_wakeup(struct wm_softc *);
1131 static void	wm_disable_aspm(struct wm_softc *);
1132 /* LPLU (Low Power Link Up) */
1133 static void	wm_lplu_d0_disable(struct wm_softc *);
1134 /* EEE */
1135 static int	wm_set_eee_i350(struct wm_softc *);
1136 static int	wm_set_eee_pchlan(struct wm_softc *);
1137 static int	wm_set_eee(struct wm_softc *);
1138 
1139 /*
1140  * Workarounds (mainly PHY related).
1141  * Basically, PHY workarounds are in the PHY drivers.
1142  */
1143 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1144 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1145 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1146 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1147 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1148 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1149 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1150 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1151 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
1152 static int	wm_k1_workaround_lv(struct wm_softc *);
1153 static int	wm_link_stall_workaround_hv(struct wm_softc *);
1154 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
1155 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
1156 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
1157 static void	wm_reset_init_script_82575(struct wm_softc *);
1158 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
1159 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
1160 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1161 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1162 static int	wm_pll_workaround_i210(struct wm_softc *);
1163 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
1164 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
1165 static void	wm_set_linkdown_discard(struct wm_softc *);
1166 static void	wm_clear_linkdown_discard(struct wm_softc *);
1167 
1168 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
1169 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
1170 #ifdef WM_DEBUG
1171 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
1172 #endif
1173 
1174 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1175     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1176 
1177 /*
1178  * Devices supported by this driver.
1179  */
1180 static const struct wm_product {
1181 	pci_vendor_id_t		wmp_vendor;
1182 	pci_product_id_t	wmp_product;
1183 	const char		*wmp_name;
1184 	wm_chip_type		wmp_type;
1185 	uint32_t		wmp_flags;
1186 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
1187 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
1188 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
1189 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
1190 #define WMP_MEDIATYPE(x)	((x) & 0x03)
1191 } wm_products[] = {
1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
1193 	  "Intel i82542 1000BASE-X Ethernet",
1194 	  WM_T_82542_2_1,	WMP_F_FIBER },
1195 
1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
1197 	  "Intel i82543GC 1000BASE-X Ethernet",
1198 	  WM_T_82543,		WMP_F_FIBER },
1199 
1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
1201 	  "Intel i82543GC 1000BASE-T Ethernet",
1202 	  WM_T_82543,		WMP_F_COPPER },
1203 
1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1205 	  "Intel i82544EI 1000BASE-T Ethernet",
1206 	  WM_T_82544,		WMP_F_COPPER },
1207 
1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1209 	  "Intel i82544EI 1000BASE-X Ethernet",
1210 	  WM_T_82544,		WMP_F_FIBER },
1211 
1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1213 	  "Intel i82544GC 1000BASE-T Ethernet",
1214 	  WM_T_82544,		WMP_F_COPPER },
1215 
1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1217 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1218 	  WM_T_82544,		WMP_F_COPPER },
1219 
1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1221 	  "Intel i82540EM 1000BASE-T Ethernet",
1222 	  WM_T_82540,		WMP_F_COPPER },
1223 
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1225 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1226 	  WM_T_82540,		WMP_F_COPPER },
1227 
1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1229 	  "Intel i82540EP 1000BASE-T Ethernet",
1230 	  WM_T_82540,		WMP_F_COPPER },
1231 
1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1233 	  "Intel i82540EP 1000BASE-T Ethernet",
1234 	  WM_T_82540,		WMP_F_COPPER },
1235 
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1237 	  "Intel i82540EP 1000BASE-T Ethernet",
1238 	  WM_T_82540,		WMP_F_COPPER },
1239 
1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1241 	  "Intel i82545EM 1000BASE-T Ethernet",
1242 	  WM_T_82545,		WMP_F_COPPER },
1243 
1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1245 	  "Intel i82545GM 1000BASE-T Ethernet",
1246 	  WM_T_82545_3,		WMP_F_COPPER },
1247 
1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1249 	  "Intel i82545GM 1000BASE-X Ethernet",
1250 	  WM_T_82545_3,		WMP_F_FIBER },
1251 
1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1253 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1254 	  WM_T_82545_3,		WMP_F_SERDES },
1255 
1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1257 	  "Intel i82546EB 1000BASE-T Ethernet",
1258 	  WM_T_82546,		WMP_F_COPPER },
1259 
1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1261 	  "Intel i82546EB 1000BASE-T Ethernet",
1262 	  WM_T_82546,		WMP_F_COPPER },
1263 
1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1265 	  "Intel i82545EM 1000BASE-X Ethernet",
1266 	  WM_T_82545,		WMP_F_FIBER },
1267 
1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1269 	  "Intel i82546EB 1000BASE-X Ethernet",
1270 	  WM_T_82546,		WMP_F_FIBER },
1271 
1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1273 	  "Intel i82546GB 1000BASE-T Ethernet",
1274 	  WM_T_82546_3,		WMP_F_COPPER },
1275 
1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1277 	  "Intel i82546GB 1000BASE-X Ethernet",
1278 	  WM_T_82546_3,		WMP_F_FIBER },
1279 
1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1281 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1282 	  WM_T_82546_3,		WMP_F_SERDES },
1283 
1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1285 	  "i82546GB quad-port Gigabit Ethernet",
1286 	  WM_T_82546_3,		WMP_F_COPPER },
1287 
1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1289 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1290 	  WM_T_82546_3,		WMP_F_COPPER },
1291 
1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1293 	  "Intel PRO/1000MT (82546GB)",
1294 	  WM_T_82546_3,		WMP_F_COPPER },
1295 
1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1297 	  "Intel i82541EI 1000BASE-T Ethernet",
1298 	  WM_T_82541,		WMP_F_COPPER },
1299 
1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1301 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1302 	  WM_T_82541,		WMP_F_COPPER },
1303 
1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1305 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1306 	  WM_T_82541,		WMP_F_COPPER },
1307 
1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1309 	  "Intel i82541ER 1000BASE-T Ethernet",
1310 	  WM_T_82541_2,		WMP_F_COPPER },
1311 
1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1313 	  "Intel i82541GI 1000BASE-T Ethernet",
1314 	  WM_T_82541_2,		WMP_F_COPPER },
1315 
1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1317 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1318 	  WM_T_82541_2,		WMP_F_COPPER },
1319 
1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1321 	  "Intel i82541PI 1000BASE-T Ethernet",
1322 	  WM_T_82541_2,		WMP_F_COPPER },
1323 
1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1325 	  "Intel i82547EI 1000BASE-T Ethernet",
1326 	  WM_T_82547,		WMP_F_COPPER },
1327 
1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1329 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1330 	  WM_T_82547,		WMP_F_COPPER },
1331 
1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1333 	  "Intel i82547GI 1000BASE-T Ethernet",
1334 	  WM_T_82547_2,		WMP_F_COPPER },
1335 
1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1337 	  "Intel PRO/1000 PT (82571EB)",
1338 	  WM_T_82571,		WMP_F_COPPER },
1339 
1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1341 	  "Intel PRO/1000 PF (82571EB)",
1342 	  WM_T_82571,		WMP_F_FIBER },
1343 
1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1345 	  "Intel PRO/1000 PB (82571EB)",
1346 	  WM_T_82571,		WMP_F_SERDES },
1347 
1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1349 	  "Intel PRO/1000 QT (82571EB)",
1350 	  WM_T_82571,		WMP_F_COPPER },
1351 
1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1353 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1354 	  WM_T_82571,		WMP_F_COPPER },
1355 
1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1357 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1358 	  WM_T_82571,		WMP_F_COPPER },
1359 
1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1361 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1362 	  WM_T_82571,		WMP_F_SERDES },
1363 
1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1365 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1366 	  WM_T_82571,		WMP_F_SERDES },
1367 
1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1369 	  "Intel 82571EB Quad 1000baseX Ethernet",
1370 	  WM_T_82571,		WMP_F_FIBER },
1371 
1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1373 	  "Intel i82572EI 1000baseT Ethernet",
1374 	  WM_T_82572,		WMP_F_COPPER },
1375 
1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1377 	  "Intel i82572EI 1000baseX Ethernet",
1378 	  WM_T_82572,		WMP_F_FIBER },
1379 
1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1381 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1382 	  WM_T_82572,		WMP_F_SERDES },
1383 
1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1385 	  "Intel i82572EI 1000baseT Ethernet",
1386 	  WM_T_82572,		WMP_F_COPPER },
1387 
1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1389 	  "Intel i82573E",
1390 	  WM_T_82573,		WMP_F_COPPER },
1391 
1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1393 	  "Intel i82573E IAMT",
1394 	  WM_T_82573,		WMP_F_COPPER },
1395 
1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1397 	  "Intel i82573L Gigabit Ethernet",
1398 	  WM_T_82573,		WMP_F_COPPER },
1399 
1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1401 	  "Intel i82574L",
1402 	  WM_T_82574,		WMP_F_COPPER },
1403 
1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1405 	  "Intel i82574L",
1406 	  WM_T_82574,		WMP_F_COPPER },
1407 
1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1409 	  "Intel i82583V",
1410 	  WM_T_82583,		WMP_F_COPPER },
1411 
1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1413 	  "i80003 dual 1000baseT Ethernet",
1414 	  WM_T_80003,		WMP_F_COPPER },
1415 
1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1417 	  "i80003 dual 1000baseX Ethernet",
1418 	  WM_T_80003,		WMP_F_COPPER },
1419 
1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1421 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1422 	  WM_T_80003,		WMP_F_SERDES },
1423 
1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1425 	  "Intel i80003 1000baseT Ethernet",
1426 	  WM_T_80003,		WMP_F_COPPER },
1427 
1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1429 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1430 	  WM_T_80003,		WMP_F_SERDES },
1431 
1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1433 	  "Intel i82801H (M_AMT) LAN Controller",
1434 	  WM_T_ICH8,		WMP_F_COPPER },
1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1436 	  "Intel i82801H (AMT) LAN Controller",
1437 	  WM_T_ICH8,		WMP_F_COPPER },
1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1439 	  "Intel i82801H LAN Controller",
1440 	  WM_T_ICH8,		WMP_F_COPPER },
1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1442 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1443 	  WM_T_ICH8,		WMP_F_COPPER },
1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1445 	  "Intel i82801H (M) LAN Controller",
1446 	  WM_T_ICH8,		WMP_F_COPPER },
1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1448 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1449 	  WM_T_ICH8,		WMP_F_COPPER },
1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1451 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1452 	  WM_T_ICH8,		WMP_F_COPPER },
1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1454 	  "82567V-3 LAN Controller",
1455 	  WM_T_ICH8,		WMP_F_COPPER },
1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1457 	  "82801I (AMT) LAN Controller",
1458 	  WM_T_ICH9,		WMP_F_COPPER },
1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1460 	  "82801I 10/100 LAN Controller",
1461 	  WM_T_ICH9,		WMP_F_COPPER },
1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1463 	  "82801I (G) 10/100 LAN Controller",
1464 	  WM_T_ICH9,		WMP_F_COPPER },
1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1466 	  "82801I (GT) 10/100 LAN Controller",
1467 	  WM_T_ICH9,		WMP_F_COPPER },
1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1469 	  "82801I (C) LAN Controller",
1470 	  WM_T_ICH9,		WMP_F_COPPER },
1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1472 	  "82801I mobile LAN Controller",
1473 	  WM_T_ICH9,		WMP_F_COPPER },
1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1475 	  "82801I mobile (V) LAN Controller",
1476 	  WM_T_ICH9,		WMP_F_COPPER },
1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1478 	  "82801I mobile (AMT) LAN Controller",
1479 	  WM_T_ICH9,		WMP_F_COPPER },
1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1481 	  "82567LM-4 LAN Controller",
1482 	  WM_T_ICH9,		WMP_F_COPPER },
1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1484 	  "82567LM-2 LAN Controller",
1485 	  WM_T_ICH10,		WMP_F_COPPER },
1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1487 	  "82567LF-2 LAN Controller",
1488 	  WM_T_ICH10,		WMP_F_COPPER },
1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1490 	  "82567LM-3 LAN Controller",
1491 	  WM_T_ICH10,		WMP_F_COPPER },
1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1493 	  "82567LF-3 LAN Controller",
1494 	  WM_T_ICH10,		WMP_F_COPPER },
1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1496 	  "82567V-2 LAN Controller",
1497 	  WM_T_ICH10,		WMP_F_COPPER },
1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1499 	  "82567V-3? LAN Controller",
1500 	  WM_T_ICH10,		WMP_F_COPPER },
1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1502 	  "HANKSVILLE LAN Controller",
1503 	  WM_T_ICH10,		WMP_F_COPPER },
1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1505 	  "PCH LAN (82577LM) Controller",
1506 	  WM_T_PCH,		WMP_F_COPPER },
1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1508 	  "PCH LAN (82577LC) Controller",
1509 	  WM_T_PCH,		WMP_F_COPPER },
1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1511 	  "PCH LAN (82578DM) Controller",
1512 	  WM_T_PCH,		WMP_F_COPPER },
1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1514 	  "PCH LAN (82578DC) Controller",
1515 	  WM_T_PCH,		WMP_F_COPPER },
1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1517 	  "PCH2 LAN (82579LM) Controller",
1518 	  WM_T_PCH2,		WMP_F_COPPER },
1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1520 	  "PCH2 LAN (82579V) Controller",
1521 	  WM_T_PCH2,		WMP_F_COPPER },
1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1523 	  "82575EB dual-1000baseT Ethernet",
1524 	  WM_T_82575,		WMP_F_COPPER },
1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1526 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1527 	  WM_T_82575,		WMP_F_SERDES },
1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1529 	  "82575GB quad-1000baseT Ethernet",
1530 	  WM_T_82575,		WMP_F_COPPER },
1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1532 	  "82575GB quad-1000baseT Ethernet (PM)",
1533 	  WM_T_82575,		WMP_F_COPPER },
1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1535 	  "82576 1000BaseT Ethernet",
1536 	  WM_T_82576,		WMP_F_COPPER },
1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1538 	  "82576 1000BaseX Ethernet",
1539 	  WM_T_82576,		WMP_F_FIBER },
1540 
1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1542 	  "82576 gigabit Ethernet (SERDES)",
1543 	  WM_T_82576,		WMP_F_SERDES },
1544 
1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1546 	  "82576 quad-1000BaseT Ethernet",
1547 	  WM_T_82576,		WMP_F_COPPER },
1548 
1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1550 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1551 	  WM_T_82576,		WMP_F_COPPER },
1552 
1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1554 	  "82576 gigabit Ethernet",
1555 	  WM_T_82576,		WMP_F_COPPER },
1556 
1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1558 	  "82576 gigabit Ethernet (SERDES)",
1559 	  WM_T_82576,		WMP_F_SERDES },
1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1561 	  "82576 quad-gigabit Ethernet (SERDES)",
1562 	  WM_T_82576,		WMP_F_SERDES },
1563 
1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1565 	  "82580 1000BaseT Ethernet",
1566 	  WM_T_82580,		WMP_F_COPPER },
1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1568 	  "82580 1000BaseX Ethernet",
1569 	  WM_T_82580,		WMP_F_FIBER },
1570 
1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1572 	  "82580 1000BaseT Ethernet (SERDES)",
1573 	  WM_T_82580,		WMP_F_SERDES },
1574 
1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1576 	  "82580 gigabit Ethernet (SGMII)",
1577 	  WM_T_82580,		WMP_F_COPPER },
1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1579 	  "82580 dual-1000BaseT Ethernet",
1580 	  WM_T_82580,		WMP_F_COPPER },
1581 
1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1583 	  "82580 quad-1000BaseX Ethernet",
1584 	  WM_T_82580,		WMP_F_FIBER },
1585 
1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1587 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1588 	  WM_T_82580,		WMP_F_COPPER },
1589 
1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1591 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1592 	  WM_T_82580,		WMP_F_SERDES },
1593 
1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1595 	  "DH89XXCC 1000BASE-KX Ethernet",
1596 	  WM_T_82580,		WMP_F_SERDES },
1597 
1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1599 	  "DH89XXCC Gigabit Ethernet (SFP)",
1600 	  WM_T_82580,		WMP_F_SERDES },
1601 
1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1603 	  "I350 Gigabit Network Connection",
1604 	  WM_T_I350,		WMP_F_COPPER },
1605 
1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1607 	  "I350 Gigabit Fiber Network Connection",
1608 	  WM_T_I350,		WMP_F_FIBER },
1609 
1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1611 	  "I350 Gigabit Backplane Connection",
1612 	  WM_T_I350,		WMP_F_SERDES },
1613 
1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1615 	  "I350 Quad Port Gigabit Ethernet",
1616 	  WM_T_I350,		WMP_F_SERDES },
1617 
1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1619 	  "I350 Gigabit Connection",
1620 	  WM_T_I350,		WMP_F_COPPER },
1621 
1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1623 	  "I354 Gigabit Ethernet (KX)",
1624 	  WM_T_I354,		WMP_F_SERDES },
1625 
1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1627 	  "I354 Gigabit Ethernet (SGMII)",
1628 	  WM_T_I354,		WMP_F_COPPER },
1629 
1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1631 	  "I354 Gigabit Ethernet (2.5G)",
1632 	  WM_T_I354,		WMP_F_COPPER },
1633 
1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1635 	  "I210-T1 Ethernet Server Adapter",
1636 	  WM_T_I210,		WMP_F_COPPER },
1637 
1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1639 	  "I210 Ethernet (Copper OEM)",
1640 	  WM_T_I210,		WMP_F_COPPER },
1641 
1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1643 	  "I210 Ethernet (Copper IT)",
1644 	  WM_T_I210,		WMP_F_COPPER },
1645 
1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1647 	  "I210 Ethernet (Copper, FLASH less)",
1648 	  WM_T_I210,		WMP_F_COPPER },
1649 
1650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1651 	  "I210 Gigabit Ethernet (Fiber)",
1652 	  WM_T_I210,		WMP_F_FIBER },
1653 
1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1655 	  "I210 Gigabit Ethernet (SERDES)",
1656 	  WM_T_I210,		WMP_F_SERDES },
1657 
1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1659 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1660 	  WM_T_I210,		WMP_F_SERDES },
1661 
1662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1663 	  "I210 Gigabit Ethernet (SGMII)",
1664 	  WM_T_I210,		WMP_F_COPPER },
1665 
1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1667 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1668 	  WM_T_I210,		WMP_F_COPPER },
1669 
1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1671 	  "I211 Ethernet (COPPER)",
1672 	  WM_T_I211,		WMP_F_COPPER },
1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1674 	  "I217 V Ethernet Connection",
1675 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1677 	  "I217 LM Ethernet Connection",
1678 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1680 	  "I218 V Ethernet Connection",
1681 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1683 	  "I218 V Ethernet Connection",
1684 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1686 	  "I218 V Ethernet Connection",
1687 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1689 	  "I218 LM Ethernet Connection",
1690 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1692 	  "I218 LM Ethernet Connection",
1693 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1695 	  "I218 LM Ethernet Connection",
1696 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1698 	  "I219 LM Ethernet Connection",
1699 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1701 	  "I219 LM (2) Ethernet Connection",
1702 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1704 	  "I219 LM (3) Ethernet Connection",
1705 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1707 	  "I219 LM (4) Ethernet Connection",
1708 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1710 	  "I219 LM (5) Ethernet Connection",
1711 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1713 	  "I219 LM (6) Ethernet Connection",
1714 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1716 	  "I219 LM (7) Ethernet Connection",
1717 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1719 	  "I219 LM (8) Ethernet Connection",
1720 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1722 	  "I219 LM (9) Ethernet Connection",
1723 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1725 	  "I219 LM (10) Ethernet Connection",
1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1728 	  "I219 LM (11) Ethernet Connection",
1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1731 	  "I219 LM (12) Ethernet Connection",
1732 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1734 	  "I219 LM (13) Ethernet Connection",
1735 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1737 	  "I219 LM (14) Ethernet Connection",
1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1740 	  "I219 LM (15) Ethernet Connection",
1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
1743 	  "I219 LM (16) Ethernet Connection",
1744 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
1746 	  "I219 LM (17) Ethernet Connection",
1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
1749 	  "I219 LM (18) Ethernet Connection",
1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
1752 	  "I219 LM (19) Ethernet Connection",
1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1755 	  "I219 V Ethernet Connection",
1756 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1758 	  "I219 V (2) Ethernet Connection",
1759 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1761 	  "I219 V (4) Ethernet Connection",
1762 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1764 	  "I219 V (5) Ethernet Connection",
1765 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1767 	  "I219 V (6) Ethernet Connection",
1768 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1770 	  "I219 V (7) Ethernet Connection",
1771 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1773 	  "I219 V (8) Ethernet Connection",
1774 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1775 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1776 	  "I219 V (9) Ethernet Connection",
1777 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1779 	  "I219 V (10) Ethernet Connection",
1780 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1782 	  "I219 V (11) Ethernet Connection",
1783 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1785 	  "I219 V (12) Ethernet Connection",
1786 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1787 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1788 	  "I219 V (13) Ethernet Connection",
1789 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1791 	  "I219 V (14) Ethernet Connection",
1792 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
1794 	  "I219 V (15) Ethernet Connection",
1795 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
1797 	  "I219 V (16) Ethernet Connection",
1798 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1799 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
1800 	  "I219 V (17) Ethernet Connection",
1801 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
1803 	  "I219 V (18) Ethernet Connection",
1804 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1805 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
1806 	  "I219 V (19) Ethernet Connection",
1807 	  WM_T_PCH_CNP,		WMP_F_COPPER },
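	/* Sentinel entry: wm_lookup() stops at the NULL product name. */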
1808 	{ 0,			0,
1809 	  NULL,
1810 	  0,			0 },
1811 };
1812 
1813 /*
1814  * Register read/write functions.
1815  * Other than CSR_{READ|WRITE}().
1816  */
1817 
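/*
 * The I/O-mapped register space is an indirect window: the register
 * offset is written at offset 0 of the I/O BAR and the data is then
 * read or written at offset 4, as the two helpers below do.
 */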
1818 #if 0 /* Not currently used */
1819 static inline uint32_t
1820 wm_io_read(struct wm_softc *sc, int reg)
1821 {
1822 
1823 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1824 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1825 }
1826 #endif
1827 
1828 static inline void
1829 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1830 {
1831 
1832 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1833 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1834 }
1835 
1836 static inline void
1837 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1838     uint32_t data)
1839 {
1840 	uint32_t regval;
1841 	int i;
1842 
1843 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1844 
1845 	CSR_WRITE(sc, reg, regval);
1846 
1847 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1848 		delay(5);
1849 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1850 			break;
1851 	}
1852 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1853 		aprint_error("%s: WARNING:"
1854 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1855 		    device_xname(sc->sc_dev), reg);
1856 	}
1857 }
1858 
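/*
 * Split a DMA bus address into the low/high 32-bit halves of a
 * descriptor address field, stored little-endian so 64-bit addresses
 * work regardless of host byte order.
 */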
1859 static inline void
1860 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1861 {
1862 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
1863 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
1864 }
1865 
1866 /*
1867  * Descriptor sync/init functions.
1868  */
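/*
 * "ops" below is a mask of BUS_DMASYNC_{PRE,POST}{READ,WRITE} flags
 * passed straight through to bus_dmamap_sync(); the TX variant also
 * handles a sync range that wraps past the end of the ring.
 */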
1869 static inline void
1870 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1871 {
1872 	struct wm_softc *sc = txq->txq_sc;
1873 
1874 	/* If it will wrap around, sync to the end of the ring. */
1875 	if ((start + num) > WM_NTXDESC(txq)) {
1876 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1877 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1878 		    (WM_NTXDESC(txq) - start), ops);
1879 		num -= (WM_NTXDESC(txq) - start);
1880 		start = 0;
1881 	}
1882 
1883 	/* Now sync whatever is left. */
1884 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1885 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1886 }
1887 
1888 static inline void
1889 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1890 {
1891 	struct wm_softc *sc = rxq->rxq_sc;
1892 
1893 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1894 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1895 }
1896 
1897 static inline void
1898 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1899 {
1900 	struct wm_softc *sc = rxq->rxq_sc;
1901 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1902 	struct mbuf *m = rxs->rxs_mbuf;
1903 
1904 	/*
1905 	 * Note: We scoot the packet forward 2 bytes in the buffer
1906 	 * so that the payload after the Ethernet header is aligned
1907 	 * to a 4-byte boundary.
1908 	 *
1909 	 * XXX BRAINDAMAGE ALERT!
1910 	 * The stupid chip uses the same size for every buffer, which
1911 	 * is set in the Receive Control register.  We are using the 2K
1912 	 * size option, but what we REALLY want is (2K - 2)!  For this
1913 	 * reason, we can't "scoot" packets longer than the standard
1914 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1915 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1916 	 * the upper layer copy the headers.
1917 	 */
1918 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1919 
1920 	if (sc->sc_type == WM_T_82574) {
1921 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1922 		rxd->erx_data.erxd_addr =
1923 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1924 		rxd->erx_data.erxd_dd = 0;
1925 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1926 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1927 
1928 		rxd->nqrx_data.nrxd_paddr =
1929 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1930 		/* Currently, split header is not supported. */
1931 		rxd->nqrx_data.nrxd_haddr = 0;
1932 	} else {
1933 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1934 
1935 		wm_set_dma_addr(&rxd->wrx_addr,
1936 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1937 		rxd->wrx_len = 0;
1938 		rxd->wrx_cksum = 0;
1939 		rxd->wrx_status = 0;
1940 		rxd->wrx_errors = 0;
1941 		rxd->wrx_special = 0;
1942 	}
1943 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1944 
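	/* Pass ownership to the chip by updating the receive descriptor tail. */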
1945 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1946 }
1947 
1948 /*
1949  * Device driver interface functions and commonly used functions.
1950  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1951  */
1952 
1953 /* Lookup supported device table */
1954 static const struct wm_product *
1955 wm_lookup(const struct pci_attach_args *pa)
1956 {
1957 	const struct wm_product *wmp;
1958 
1959 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1960 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1961 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1962 			return wmp;
1963 	}
1964 	return NULL;
1965 }
1966 
1967 /* The match function (ca_match) */
1968 static int
1969 wm_match(device_t parent, cfdata_t cf, void *aux)
1970 {
1971 	struct pci_attach_args *pa = aux;
1972 
1973 	if (wm_lookup(pa) != NULL)
1974 		return 1;
1975 
1976 	return 0;
1977 }
1978 
1979 /* The attach function (ca_attach) */
1980 static void
1981 wm_attach(device_t parent, device_t self, void *aux)
1982 {
1983 	struct wm_softc *sc = device_private(self);
1984 	struct pci_attach_args *pa = aux;
1985 	prop_dictionary_t dict;
1986 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1987 	pci_chipset_tag_t pc = pa->pa_pc;
1988 	int counts[PCI_INTR_TYPE_SIZE];
1989 	pci_intr_type_t max_type;
1990 	const char *eetype, *xname;
1991 	bus_space_tag_t memt;
1992 	bus_space_handle_t memh;
1993 	bus_size_t memsize;
1994 	int memh_valid;
1995 	int i, error;
1996 	const struct wm_product *wmp;
1997 	prop_data_t ea;
1998 	prop_number_t pn;
1999 	uint8_t enaddr[ETHER_ADDR_LEN];
2000 	char buf[256];
2001 	char wqname[MAXCOMLEN];
2002 	uint16_t cfg1, cfg2, swdpin, nvmword;
2003 	pcireg_t preg, memtype;
2004 	uint16_t eeprom_data, apme_mask;
2005 	bool force_clear_smbi;
2006 	uint32_t link_mode;
2007 	uint32_t reg;
2008 
2009 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
2010 	sc->sc_debug = WM_DEBUG_DEFAULT;
2011 #endif
2012 	sc->sc_dev = self;
2013 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
2014 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
2015 	sc->sc_core_stopping = false;
2016 
2017 	wmp = wm_lookup(pa);
2018 #ifdef DIAGNOSTIC
2019 	if (wmp == NULL) {
2020 		printf("\n");
2021 		panic("wm_attach: impossible");
2022 	}
2023 #endif
2024 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
2025 
2026 	sc->sc_pc = pa->pa_pc;
2027 	sc->sc_pcitag = pa->pa_tag;
2028 
2029 	if (pci_dma64_available(pa)) {
2030 		aprint_verbose(", 64-bit DMA");
2031 		sc->sc_dmat = pa->pa_dmat64;
2032 	} else {
2033 		aprint_verbose(", 32-bit DMA");
2034 		sc->sc_dmat = pa->pa_dmat;
2035 	}
2036 
2037 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2038 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
2039 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2040 
2041 	sc->sc_type = wmp->wmp_type;
2042 
2043 	/* Set default function pointers */
2044 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2045 	sc->phy.release = sc->nvm.release = wm_put_null;
2046 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2047 
2048 	if (sc->sc_type < WM_T_82543) {
2049 		if (sc->sc_rev < 2) {
2050 			aprint_error_dev(sc->sc_dev,
2051 			    "i82542 must be at least rev. 2\n");
2052 			return;
2053 		}
2054 		if (sc->sc_rev < 3)
2055 			sc->sc_type = WM_T_82542_2_0;
2056 	}
2057 
2058 	/*
2059 	 * Disable MSI for Errata:
2060 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2061 	 *
2062 	 *  82544: Errata 25
2063 	 *  82540: Errata  6 (easy to reproduce device timeout)
2064 	 *  82545: Errata  4 (easy to reproduce device timeout)
2065 	 *  82546: Errata 26 (easy to reproduce device timeout)
2066 	 *  82541: Errata  7 (easy to reproduce device timeout)
2067 	 *
2068 	 * "Byte Enables 2 and 3 are not set on MSI writes"
2069 	 *
2070 	 *  82571 & 82572: Errata 63
2071 	 */
2072 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2073 	    || (sc->sc_type == WM_T_82572))
2074 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2075 
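	/*
	 * 82575 and newer use the new-style (advanced) descriptor
	 * formats; the WM_F_NEWQUEUE flag selects them elsewhere
	 * (see wm_init_rxdesc()).
	 */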
2076 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2077 	    || (sc->sc_type == WM_T_82580)
2078 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2079 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2080 		sc->sc_flags |= WM_F_NEWQUEUE;
2081 
2082 	/* Set device properties (mactype) */
2083 	dict = device_properties(sc->sc_dev);
2084 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2085 
2086 	/*
2087 	 * Map the device.  All devices support memory-mapped access,
2088 	 * and it is really required for normal operation.
2089 	 */
2090 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2091 	switch (memtype) {
2092 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2093 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2094 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2095 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2096 		break;
2097 	default:
2098 		memh_valid = 0;
2099 		break;
2100 	}
2101 
2102 	if (memh_valid) {
2103 		sc->sc_st = memt;
2104 		sc->sc_sh = memh;
2105 		sc->sc_ss = memsize;
2106 	} else {
2107 		aprint_error_dev(sc->sc_dev,
2108 		    "unable to map device registers\n");
2109 		return;
2110 	}
2111 
2112 	/*
2113 	 * In addition, i82544 and later support I/O mapped indirect
2114 	 * register access.  It is not desirable (nor supported in
2115 	 * this driver) to use it for normal operation, though it is
2116 	 * required to work around bugs in some chip versions.
2117 	 */
2118 	switch (sc->sc_type) {
2119 	case WM_T_82544:
2120 	case WM_T_82541:
2121 	case WM_T_82541_2:
2122 	case WM_T_82547:
2123 	case WM_T_82547_2:
2124 		/* First we have to find the I/O BAR. */
2125 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2126 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2127 			if (memtype == PCI_MAPREG_TYPE_IO)
2128 				break;
2129 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
2130 			    PCI_MAPREG_MEM_TYPE_64BIT)
2131 				i += 4;	/* skip high bits, too */
2132 		}
2133 		if (i < PCI_MAPREG_END) {
2134 			/*
2135 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2136 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
2137 			 * that's not a problem, because those newer chips
2138 			 * don't have this bug.
2139 			 *
2140 			 * The i8254x apparently doesn't respond when the
2141 			 * I/O BAR is 0, which looks somewhat like it hasn't
2142 			 * been configured.
2143 			 */
2144 			preg = pci_conf_read(pc, pa->pa_tag, i);
2145 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2146 				aprint_error_dev(sc->sc_dev,
2147 				    "WARNING: I/O BAR at zero.\n");
2148 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2149 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2150 			    == 0) {
2151 				sc->sc_flags |= WM_F_IOH_VALID;
2152 			} else
2153 				aprint_error_dev(sc->sc_dev,
2154 				    "WARNING: unable to map I/O space\n");
2155 		}
2156 		break;
2157 	default:
2158 		break;
2159 	}
2160 
2161 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
2162 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2163 	preg |= PCI_COMMAND_MASTER_ENABLE;
2164 	if (sc->sc_type < WM_T_82542_2_1)
2165 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2166 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2167 
2168 	/* Power up chip */
2169 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2170 	    && error != EOPNOTSUPP) {
2171 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2172 		return;
2173 	}
2174 
2175 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2176 	/*
2177 	 * Don't use MSI-X if we can use only one queue, to save
2178 	 * interrupt resources.
2179 	 */
2180 	if (sc->sc_nqueues > 1) {
2181 		max_type = PCI_INTR_TYPE_MSIX;
2182 		/*
2183 		 * The 82583 has an MSI-X capability in the PCI configuration
2184 		 * space, but it doesn't support it. At least the documentation
2185 		 * doesn't say anything about MSI-X.
2186 		 */
2187 		counts[PCI_INTR_TYPE_MSIX]
2188 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2189 	} else {
2190 		max_type = PCI_INTR_TYPE_MSI;
2191 		counts[PCI_INTR_TYPE_MSIX] = 0;
2192 	}
2193 
2194 	/* Allocation settings */
2195 	counts[PCI_INTR_TYPE_MSI] = 1;
2196 	counts[PCI_INTR_TYPE_INTX] = 1;
2197 	/* overridden by disable flags */
2198 	if (wm_disable_msi != 0) {
2199 		counts[PCI_INTR_TYPE_MSI] = 0;
2200 		if (wm_disable_msix != 0) {
2201 			max_type = PCI_INTR_TYPE_INTX;
2202 			counts[PCI_INTR_TYPE_MSIX] = 0;
2203 		}
2204 	} else if (wm_disable_msix != 0) {
2205 		max_type = PCI_INTR_TYPE_MSI;
2206 		counts[PCI_INTR_TYPE_MSIX] = 0;
2207 	}
2208 
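	/*
	 * Allocate interrupts, falling back MSI-X -> MSI -> INTx: if
	 * setup of the allocated type fails, release it and retry with
	 * the next weaker type via the alloc_retry label.
	 */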
2209 alloc_retry:
2210 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2211 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2212 		return;
2213 	}
2214 
2215 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2216 		error = wm_setup_msix(sc);
2217 		if (error) {
2218 			pci_intr_release(pc, sc->sc_intrs,
2219 			    counts[PCI_INTR_TYPE_MSIX]);
2220 
2221 			/* Setup for MSI: Disable MSI-X */
2222 			max_type = PCI_INTR_TYPE_MSI;
2223 			counts[PCI_INTR_TYPE_MSI] = 1;
2224 			counts[PCI_INTR_TYPE_INTX] = 1;
2225 			goto alloc_retry;
2226 		}
2227 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2228 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2229 		error = wm_setup_legacy(sc);
2230 		if (error) {
2231 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2232 			    counts[PCI_INTR_TYPE_MSI]);
2233 
2234 			/* The next try is for INTx: Disable MSI */
2235 			max_type = PCI_INTR_TYPE_INTX;
2236 			counts[PCI_INTR_TYPE_INTX] = 1;
2237 			goto alloc_retry;
2238 		}
2239 	} else {
2240 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2241 		error = wm_setup_legacy(sc);
2242 		if (error) {
2243 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2244 			    counts[PCI_INTR_TYPE_INTX]);
2245 			return;
2246 		}
2247 	}
2248 
2249 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2250 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2251 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2252 	    WQ_PERCPU | WQ_MPSAFE);
2253 	if (error) {
2254 		aprint_error_dev(sc->sc_dev,
2255 		    "unable to create TxRx workqueue\n");
2256 		goto out;
2257 	}
2258 
2259 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2260 	error = workqueue_create(&sc->sc_reset_wq, wqname,
2261 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2262 	    WQ_MPSAFE);
2263 	if (error) {
2264 		workqueue_destroy(sc->sc_queue_wq);
2265 		aprint_error_dev(sc->sc_dev,
2266 		    "unable to create reset workqueue\n");
2267 		goto out;
2268 	}
2269 
2270 	/*
2271 	 * Check the function ID (unit number of the chip).
2272 	 */
2273 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2274 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2275 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2276 	    || (sc->sc_type == WM_T_82580)
2277 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2278 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2279 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2280 	else
2281 		sc->sc_funcid = 0;
2282 
2283 	/*
2284 	 * Determine a few things about the bus we're connected to.
2285 	 */
2286 	if (sc->sc_type < WM_T_82543) {
2287 		/* We don't really know the bus characteristics here. */
2288 		sc->sc_bus_speed = 33;
2289 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2290 		/*
2291 		 * CSA (Communication Streaming Architecture) is about as
2292 		 * fast as a 32-bit 66MHz PCI bus.
2293 		 */
2294 		sc->sc_flags |= WM_F_CSA;
2295 		sc->sc_bus_speed = 66;
2296 		aprint_verbose_dev(sc->sc_dev,
2297 		    "Communication Streaming Architecture\n");
2298 		if (sc->sc_type == WM_T_82547) {
2299 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2300 			callout_setfunc(&sc->sc_txfifo_ch,
2301 			    wm_82547_txfifo_stall, sc);
2302 			aprint_verbose_dev(sc->sc_dev,
2303 			    "using 82547 Tx FIFO stall work-around\n");
2304 		}
2305 	} else if (sc->sc_type >= WM_T_82571) {
2306 		sc->sc_flags |= WM_F_PCIE;
2307 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2308 		    && (sc->sc_type != WM_T_ICH10)
2309 		    && (sc->sc_type != WM_T_PCH)
2310 		    && (sc->sc_type != WM_T_PCH2)
2311 		    && (sc->sc_type != WM_T_PCH_LPT)
2312 		    && (sc->sc_type != WM_T_PCH_SPT)
2313 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2314 			/* ICH* and PCH* have no PCIe capability registers */
2315 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2316 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2317 				NULL) == 0)
2318 				aprint_error_dev(sc->sc_dev,
2319 				    "unable to find PCIe capability\n");
2320 		}
2321 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2322 	} else {
2323 		reg = CSR_READ(sc, WMREG_STATUS);
2324 		if (reg & STATUS_BUS64)
2325 			sc->sc_flags |= WM_F_BUS64;
2326 		if ((reg & STATUS_PCIX_MODE) != 0) {
2327 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2328 
2329 			sc->sc_flags |= WM_F_PCIX;
2330 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2331 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2332 				aprint_error_dev(sc->sc_dev,
2333 				    "unable to find PCIX capability\n");
2334 			else if (sc->sc_type != WM_T_82545_3 &&
2335 			    sc->sc_type != WM_T_82546_3) {
2336 				/*
2337 				 * Work around a problem caused by the BIOS
2338 				 * setting the max memory read byte count
2339 				 * incorrectly.
2340 				 */
2341 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2342 				    sc->sc_pcixe_capoff + PCIX_CMD);
2343 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2344 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2345 
2346 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2347 				    PCIX_CMD_BYTECNT_SHIFT;
2348 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2349 				    PCIX_STATUS_MAXB_SHIFT;
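				/* Both fields encode 512 << n bytes. */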
2350 				if (bytecnt > maxb) {
2351 					aprint_verbose_dev(sc->sc_dev,
2352 					    "resetting PCI-X MMRBC: %d -> %d\n",
2353 					    512 << bytecnt, 512 << maxb);
2354 					pcix_cmd = (pcix_cmd &
2355 					    ~PCIX_CMD_BYTECNT_MASK) |
2356 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2357 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2358 					    sc->sc_pcixe_capoff + PCIX_CMD,
2359 					    pcix_cmd);
2360 				}
2361 			}
2362 		}
2363 		/*
2364 		 * The quad port adapter is special; it has a PCIX-PCIX
2365 		 * bridge on the board, and can run the secondary bus at
2366 		 * a higher speed.
2367 		 */
2368 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2369 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2370 								      : 66;
2371 		} else if (sc->sc_flags & WM_F_PCIX) {
2372 			switch (reg & STATUS_PCIXSPD_MASK) {
2373 			case STATUS_PCIXSPD_50_66:
2374 				sc->sc_bus_speed = 66;
2375 				break;
2376 			case STATUS_PCIXSPD_66_100:
2377 				sc->sc_bus_speed = 100;
2378 				break;
2379 			case STATUS_PCIXSPD_100_133:
2380 				sc->sc_bus_speed = 133;
2381 				break;
2382 			default:
2383 				aprint_error_dev(sc->sc_dev,
2384 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2385 				    reg & STATUS_PCIXSPD_MASK);
2386 				sc->sc_bus_speed = 66;
2387 				break;
2388 			}
2389 		} else
2390 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2391 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2392 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2393 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2394 	}
2395 
2396 	/* clear interesting stat counters */
2397 	CSR_READ(sc, WMREG_COLC);
2398 	CSR_READ(sc, WMREG_RXERRC);
2399 
2400 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2401 	    || (sc->sc_type >= WM_T_ICH8))
2402 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2403 	if (sc->sc_type >= WM_T_ICH8)
2404 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2405 
2406 	/* Set NVM access functions, word size, and PHY/NVM lock functions */
2407 	switch (sc->sc_type) {
2408 	case WM_T_82542_2_0:
2409 	case WM_T_82542_2_1:
2410 	case WM_T_82543:
2411 	case WM_T_82544:
2412 		/* Microwire */
2413 		sc->nvm.read = wm_nvm_read_uwire;
2414 		sc->sc_nvm_wordsize = 64;
2415 		sc->sc_nvm_addrbits = 6;
2416 		break;
2417 	case WM_T_82540:
2418 	case WM_T_82545:
2419 	case WM_T_82545_3:
2420 	case WM_T_82546:
2421 	case WM_T_82546_3:
2422 		/* Microwire */
2423 		sc->nvm.read = wm_nvm_read_uwire;
2424 		reg = CSR_READ(sc, WMREG_EECD);
2425 		if (reg & EECD_EE_SIZE) {
2426 			sc->sc_nvm_wordsize = 256;
2427 			sc->sc_nvm_addrbits = 8;
2428 		} else {
2429 			sc->sc_nvm_wordsize = 64;
2430 			sc->sc_nvm_addrbits = 6;
2431 		}
2432 		sc->sc_flags |= WM_F_LOCK_EECD;
2433 		sc->nvm.acquire = wm_get_eecd;
2434 		sc->nvm.release = wm_put_eecd;
2435 		break;
2436 	case WM_T_82541:
2437 	case WM_T_82541_2:
2438 	case WM_T_82547:
2439 	case WM_T_82547_2:
2440 		reg = CSR_READ(sc, WMREG_EECD);
2441 		/*
2442 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
2443 		 * 8254[17], so set the flags and functions before calling it.
2444 		 */
2445 		sc->sc_flags |= WM_F_LOCK_EECD;
2446 		sc->nvm.acquire = wm_get_eecd;
2447 		sc->nvm.release = wm_put_eecd;
2448 		if (reg & EECD_EE_TYPE) {
2449 			/* SPI */
2450 			sc->nvm.read = wm_nvm_read_spi;
2451 			sc->sc_flags |= WM_F_EEPROM_SPI;
2452 			wm_nvm_set_addrbits_size_eecd(sc);
2453 		} else {
2454 			/* Microwire */
2455 			sc->nvm.read = wm_nvm_read_uwire;
2456 			if ((reg & EECD_EE_ABITS) != 0) {
2457 				sc->sc_nvm_wordsize = 256;
2458 				sc->sc_nvm_addrbits = 8;
2459 			} else {
2460 				sc->sc_nvm_wordsize = 64;
2461 				sc->sc_nvm_addrbits = 6;
2462 			}
2463 		}
2464 		break;
2465 	case WM_T_82571:
2466 	case WM_T_82572:
2467 		/* SPI */
2468 		sc->nvm.read = wm_nvm_read_eerd;
2469 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2470 		sc->sc_flags |= WM_F_EEPROM_SPI;
2471 		wm_nvm_set_addrbits_size_eecd(sc);
2472 		sc->phy.acquire = wm_get_swsm_semaphore;
2473 		sc->phy.release = wm_put_swsm_semaphore;
2474 		sc->nvm.acquire = wm_get_nvm_82571;
2475 		sc->nvm.release = wm_put_nvm_82571;
2476 		break;
2477 	case WM_T_82573:
2478 	case WM_T_82574:
2479 	case WM_T_82583:
2480 		sc->nvm.read = wm_nvm_read_eerd;
2481 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2482 		if (sc->sc_type == WM_T_82573) {
2483 			sc->phy.acquire = wm_get_swsm_semaphore;
2484 			sc->phy.release = wm_put_swsm_semaphore;
2485 			sc->nvm.acquire = wm_get_nvm_82571;
2486 			sc->nvm.release = wm_put_nvm_82571;
2487 		} else {
2488 			/* Both PHY and NVM use the same semaphore. */
2489 			sc->phy.acquire = sc->nvm.acquire
2490 			    = wm_get_swfwhw_semaphore;
2491 			sc->phy.release = sc->nvm.release
2492 			    = wm_put_swfwhw_semaphore;
2493 		}
2494 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2495 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2496 			sc->sc_nvm_wordsize = 2048;
2497 		} else {
2498 			/* SPI */
2499 			sc->sc_flags |= WM_F_EEPROM_SPI;
2500 			wm_nvm_set_addrbits_size_eecd(sc);
2501 		}
2502 		break;
2503 	case WM_T_82575:
2504 	case WM_T_82576:
2505 	case WM_T_82580:
2506 	case WM_T_I350:
2507 	case WM_T_I354:
2508 	case WM_T_80003:
2509 		/* SPI */
2510 		sc->sc_flags |= WM_F_EEPROM_SPI;
2511 		wm_nvm_set_addrbits_size_eecd(sc);
2512 		if ((sc->sc_type == WM_T_80003)
2513 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2514 			sc->nvm.read = wm_nvm_read_eerd;
2515 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2516 		} else {
2517 			sc->nvm.read = wm_nvm_read_spi;
2518 			sc->sc_flags |= WM_F_LOCK_EECD;
2519 		}
2520 		sc->phy.acquire = wm_get_phy_82575;
2521 		sc->phy.release = wm_put_phy_82575;
2522 		sc->nvm.acquire = wm_get_nvm_80003;
2523 		sc->nvm.release = wm_put_nvm_80003;
2524 		break;
2525 	case WM_T_ICH8:
2526 	case WM_T_ICH9:
2527 	case WM_T_ICH10:
2528 	case WM_T_PCH:
2529 	case WM_T_PCH2:
2530 	case WM_T_PCH_LPT:
2531 		sc->nvm.read = wm_nvm_read_ich8;
2532 		/* FLASH */
2533 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2534 		sc->sc_nvm_wordsize = 2048;
2535 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2536 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2537 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2538 			aprint_error_dev(sc->sc_dev,
2539 			    "can't map FLASH registers\n");
2540 			goto out;
2541 		}
2542 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
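		/*
		 * GFPREG holds the flash region base (low bits) and limit
		 * (from bit 16) in sector units.  The byte span covers two
		 * NVM banks, so dividing by 2 * sizeof(uint16_t) gives the
		 * per-bank size in 16-bit words.
		 */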
2543 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2544 		    ICH_FLASH_SECTOR_SIZE;
2545 		sc->sc_ich8_flash_bank_size =
2546 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2547 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2548 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2549 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2550 		sc->sc_flashreg_offset = 0;
2551 		sc->phy.acquire = wm_get_swflag_ich8lan;
2552 		sc->phy.release = wm_put_swflag_ich8lan;
2553 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2554 		sc->nvm.release = wm_put_nvm_ich8lan;
2555 		break;
2556 	case WM_T_PCH_SPT:
2557 	case WM_T_PCH_CNP:
2558 		sc->nvm.read = wm_nvm_read_spt;
2559 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2560 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2561 		sc->sc_flasht = sc->sc_st;
2562 		sc->sc_flashh = sc->sc_sh;
2563 		sc->sc_ich8_flash_base = 0;
2564 		sc->sc_nvm_wordsize =
2565 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2566 		    * NVM_SIZE_MULTIPLIER;
2567 		/* It is the size in bytes; we want words */
2568 		sc->sc_nvm_wordsize /= 2;
2569 		/* Assume 2 banks */
2570 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2571 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2572 		sc->phy.acquire = wm_get_swflag_ich8lan;
2573 		sc->phy.release = wm_put_swflag_ich8lan;
2574 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2575 		sc->nvm.release = wm_put_nvm_ich8lan;
2576 		break;
2577 	case WM_T_I210:
2578 	case WM_T_I211:
2579 		/* Allow a single clear of the SW semaphore on I210 and newer */
2580 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2581 		if (wm_nvm_flash_presence_i210(sc)) {
2582 			sc->nvm.read = wm_nvm_read_eerd;
2583 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2584 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2585 			wm_nvm_set_addrbits_size_eecd(sc);
2586 		} else {
2587 			sc->nvm.read = wm_nvm_read_invm;
2588 			sc->sc_flags |= WM_F_EEPROM_INVM;
2589 			sc->sc_nvm_wordsize = INVM_SIZE;
2590 		}
2591 		sc->phy.acquire = wm_get_phy_82575;
2592 		sc->phy.release = wm_put_phy_82575;
2593 		sc->nvm.acquire = wm_get_nvm_80003;
2594 		sc->nvm.release = wm_put_nvm_80003;
2595 		break;
2596 	default:
2597 		break;
2598 	}
2599 
2600 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2601 	switch (sc->sc_type) {
2602 	case WM_T_82571:
2603 	case WM_T_82572:
2604 		reg = CSR_READ(sc, WMREG_SWSM2);
2605 		if ((reg & SWSM2_LOCK) == 0) {
2606 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2607 			force_clear_smbi = true;
2608 		} else
2609 			force_clear_smbi = false;
2610 		break;
2611 	case WM_T_82573:
2612 	case WM_T_82574:
2613 	case WM_T_82583:
2614 		force_clear_smbi = true;
2615 		break;
2616 	default:
2617 		force_clear_smbi = false;
2618 		break;
2619 	}
2620 	if (force_clear_smbi) {
2621 		reg = CSR_READ(sc, WMREG_SWSM);
2622 		if ((reg & SWSM_SMBI) != 0)
2623 			aprint_error_dev(sc->sc_dev,
2624 			    "Please update the Bootagent\n");
2625 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2626 	}
2627 
2628 	/*
2629 	 * Defer printing the EEPROM type until after verifying the checksum.
2630 	 * This allows the EEPROM type to be printed correctly in the case
2631 	 * that no EEPROM is attached.
2632 	 */
2633 	/*
2634 	 * Validate the EEPROM checksum. If the checksum fails, flag
2635 	 * this for later, so we can fail future reads from the EEPROM.
2636 	 */
2637 	if (wm_nvm_validate_checksum(sc)) {
2638 		/*
2639 		 * Check again, because some PCIe parts fail the first
2640 		 * check due to the link being in a sleep state.
2641 		 */
2642 		if (wm_nvm_validate_checksum(sc))
2643 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2644 	}
2645 
2646 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2647 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2648 	else {
2649 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2650 		    sc->sc_nvm_wordsize);
2651 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2652 			aprint_verbose("iNVM");
2653 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2654 			aprint_verbose("FLASH(HW)");
2655 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2656 			aprint_verbose("FLASH");
2657 		else {
2658 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2659 				eetype = "SPI";
2660 			else
2661 				eetype = "MicroWire";
2662 			aprint_verbose("(%d address bits) %s EEPROM",
2663 			    sc->sc_nvm_addrbits, eetype);
2664 		}
2665 	}
2666 	wm_nvm_version(sc);
2667 	aprint_verbose("\n");
2668 
2669 	/*
2670 	 * XXX The first call to wm_gmii_setup_phytype. The result might be
2671 	 * incorrect.
2672 	 */
2673 	wm_gmii_setup_phytype(sc, 0, 0);
2674 
2675 	/* Check for WM_F_WOL on some chips before wm_reset() */
2676 	switch (sc->sc_type) {
2677 	case WM_T_ICH8:
2678 	case WM_T_ICH9:
2679 	case WM_T_ICH10:
2680 	case WM_T_PCH:
2681 	case WM_T_PCH2:
2682 	case WM_T_PCH_LPT:
2683 	case WM_T_PCH_SPT:
2684 	case WM_T_PCH_CNP:
2685 		apme_mask = WUC_APME;
2686 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2687 		if ((eeprom_data & apme_mask) != 0)
2688 			sc->sc_flags |= WM_F_WOL;
2689 		break;
2690 	default:
2691 		break;
2692 	}
2693 
2694 	/* Reset the chip to a known state. */
2695 	wm_reset(sc);
2696 
2697 	/*
2698 	 * Check for I21[01] PLL workaround.
2699 	 *
2700 	 * Three cases:
2701 	 * a) Chip is I211.
2702 	 * b) Chip is I210 and it uses INVM (not FLASH).
2703 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2704 	 */
2705 	if (sc->sc_type == WM_T_I211)
2706 		sc->sc_flags |= WM_F_PLL_WA_I210;
2707 	if (sc->sc_type == WM_T_I210) {
2708 		if (!wm_nvm_flash_presence_i210(sc))
2709 			sc->sc_flags |= WM_F_PLL_WA_I210;
2710 		else if ((sc->sc_nvm_ver_major < 3)
2711 		    || ((sc->sc_nvm_ver_major == 3)
2712 			&& (sc->sc_nvm_ver_minor < 25))) {
2713 			aprint_verbose_dev(sc->sc_dev,
2714 			    "ROM image version %d.%d is older than 3.25\n",
2715 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2716 			sc->sc_flags |= WM_F_PLL_WA_I210;
2717 		}
2718 	}
2719 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2720 		wm_pll_workaround_i210(sc);
2721 
2722 	wm_get_wakeup(sc);
2723 
2724 	/* Non-AMT based hardware can now take control from firmware */
2725 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2726 		wm_get_hw_control(sc);
2727 
2728 	/*
2729 	 * Read the Ethernet address from the EEPROM, unless it was
2730 	 * already found in the device properties.
2731 	 */
2732 	ea = prop_dictionary_get(dict, "mac-address");
2733 	if (ea != NULL) {
2734 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2735 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2736 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2737 	} else {
2738 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2739 			aprint_error_dev(sc->sc_dev,
2740 			    "unable to read Ethernet address\n");
2741 			goto out;
2742 		}
2743 	}
2744 
2745 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2746 	    ether_sprintf(enaddr));
2747 
2748 	/*
2749 	 * Read the config info from the EEPROM, and set up various
2750 	 * bits in the control registers based on their contents.
2751 	 */
2752 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2753 	if (pn != NULL) {
2754 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2755 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2756 	} else {
2757 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2758 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2759 			goto out;
2760 		}
2761 	}
2762 
2763 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2764 	if (pn != NULL) {
2765 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2766 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2767 	} else {
2768 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2769 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2770 			goto out;
2771 		}
2772 	}
2773 
2774 	/* check for WM_F_WOL */
2775 	switch (sc->sc_type) {
2776 	case WM_T_82542_2_0:
2777 	case WM_T_82542_2_1:
2778 	case WM_T_82543:
2779 		/* dummy? */
2780 		eeprom_data = 0;
2781 		apme_mask = NVM_CFG3_APME;
2782 		break;
2783 	case WM_T_82544:
2784 		apme_mask = NVM_CFG2_82544_APM_EN;
2785 		eeprom_data = cfg2;
2786 		break;
2787 	case WM_T_82546:
2788 	case WM_T_82546_3:
2789 	case WM_T_82571:
2790 	case WM_T_82572:
2791 	case WM_T_82573:
2792 	case WM_T_82574:
2793 	case WM_T_82583:
2794 	case WM_T_80003:
2795 	case WM_T_82575:
2796 	case WM_T_82576:
2797 		apme_mask = NVM_CFG3_APME;
2798 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2799 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2800 		break;
2801 	case WM_T_82580:
2802 	case WM_T_I350:
2803 	case WM_T_I354:
2804 	case WM_T_I210:
2805 	case WM_T_I211:
2806 		apme_mask = NVM_CFG3_APME;
2807 		wm_nvm_read(sc,
2808 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2809 		    1, &eeprom_data);
2810 		break;
2811 	case WM_T_ICH8:
2812 	case WM_T_ICH9:
2813 	case WM_T_ICH10:
2814 	case WM_T_PCH:
2815 	case WM_T_PCH2:
2816 	case WM_T_PCH_LPT:
2817 	case WM_T_PCH_SPT:
2818 	case WM_T_PCH_CNP:
2819 		/* Already checked before wm_reset() */
2820 		apme_mask = eeprom_data = 0;
2821 		break;
2822 	default: /* XXX 82540 */
2823 		apme_mask = NVM_CFG3_APME;
2824 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2825 		break;
2826 	}
2827 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2828 	if ((eeprom_data & apme_mask) != 0)
2829 		sc->sc_flags |= WM_F_WOL;
2830 
2831 	/*
2832 	 * We have the EEPROM settings; now apply the special cases
2833 	 * where the EEPROM may be wrong or the board won't support
2834 	 * wake-on-LAN on a particular port.
2835 	 */
2836 	switch (sc->sc_pcidevid) {
2837 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2838 		sc->sc_flags &= ~WM_F_WOL;
2839 		break;
2840 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2841 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2842 		/* Wake events are only supported on port A for dual-fiber
2843 		 * adapters, regardless of the EEPROM setting. */
2844 		if (sc->sc_funcid == 1)
2845 			sc->sc_flags &= ~WM_F_WOL;
2846 		break;
2847 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2848 		/* If quad port adapter, disable WoL on all but port A */
2849 		if (sc->sc_funcid != 0)
2850 			sc->sc_flags &= ~WM_F_WOL;
2851 		break;
2852 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2853 		/* Wake events are only supported on port A for dual-fiber
2854 		 * adapters, regardless of the EEPROM setting. */
2855 		if (sc->sc_funcid == 1)
2856 			sc->sc_flags &= ~WM_F_WOL;
2857 		break;
2858 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2859 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2860 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2861 		/* If quad port adapter, disable WoL on all but port A */
2862 		if (sc->sc_funcid != 0)
2863 			sc->sc_flags &= ~WM_F_WOL;
2864 		break;
2865 	}
2866 
2867 	if (sc->sc_type >= WM_T_82575) {
2868 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2869 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2870 			    nvmword);
2871 			if ((sc->sc_type == WM_T_82575) ||
2872 			    (sc->sc_type == WM_T_82576)) {
2873 				/* Check NVM for autonegotiation */
2874 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2875 				    != 0)
2876 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2877 			}
2878 			if ((sc->sc_type == WM_T_82575) ||
2879 			    (sc->sc_type == WM_T_I350)) {
2880 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2881 					sc->sc_flags |= WM_F_MAS;
2882 			}
2883 		}
2884 	}
2885 
2886 	/*
2887 	 * XXX need special handling for some multiple-port cards
2888 	 * to disable a particular port.
2889 	 */
2890 
2891 	if (sc->sc_type >= WM_T_82544) {
2892 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2893 		if (pn != NULL) {
2894 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2895 			swdpin = (uint16_t) prop_number_signed_value(pn);
2896 		} else {
2897 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2898 				aprint_error_dev(sc->sc_dev,
2899 				    "unable to read SWDPIN\n");
2900 				goto out;
2901 			}
2902 		}
2903 	}
2904 
2905 	if (cfg1 & NVM_CFG1_ILOS)
2906 		sc->sc_ctrl |= CTRL_ILOS;
2907 
2908 	/*
2909 	 * XXX
2910 	 * This code isn't correct because pins 2 and 3 are located in
2911 	 * different positions on newer chips. Check all the datasheets.
2912 	 *
2913 	 * Until this problem is resolved, only do this for chips <= 82580.
2914 	 */
2915 	if (sc->sc_type <= WM_T_82580) {
2916 		if (sc->sc_type >= WM_T_82544) {
2917 			sc->sc_ctrl |=
2918 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2919 			    CTRL_SWDPIO_SHIFT;
2920 			sc->sc_ctrl |=
2921 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2922 			    CTRL_SWDPINS_SHIFT;
2923 		} else {
2924 			sc->sc_ctrl |=
2925 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2926 			    CTRL_SWDPIO_SHIFT;
2927 		}
2928 	}
2929 
2930 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2931 		wm_nvm_read(sc,
2932 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2933 		    1, &nvmword);
2934 		if (nvmword & NVM_CFG3_ILOS)
2935 			sc->sc_ctrl |= CTRL_ILOS;
2936 	}
2937 
2938 #if 0
2939 	if (sc->sc_type >= WM_T_82544) {
2940 		if (cfg1 & NVM_CFG1_IPS0)
2941 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2942 		if (cfg1 & NVM_CFG1_IPS1)
2943 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2944 		sc->sc_ctrl_ext |=
2945 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2946 		    CTRL_EXT_SWDPIO_SHIFT;
2947 		sc->sc_ctrl_ext |=
2948 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2949 		    CTRL_EXT_SWDPINS_SHIFT;
2950 	} else {
2951 		sc->sc_ctrl_ext |=
2952 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2953 		    CTRL_EXT_SWDPIO_SHIFT;
2954 	}
2955 #endif
2956 
2957 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2958 #if 0
2959 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2960 #endif
2961 
2962 	if (sc->sc_type == WM_T_PCH) {
2963 		uint16_t val;
2964 
2965 		/* Save the NVM K1 bit setting */
2966 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2967 
2968 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2969 			sc->sc_nvm_k1_enabled = 1;
2970 		else
2971 			sc->sc_nvm_k1_enabled = 0;
2972 	}
2973 
2974 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
2975 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2976 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2977 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2978 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2979 	    || sc->sc_type == WM_T_82573
2980 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2981 		/* Copper only */
2982 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2983 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2984 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2985 	    || (sc->sc_type == WM_T_I211)) {
2986 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2987 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
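		/*
		 * The CTRL_EXT link-mode field selects among GMII/SGMII
		 * copper, 1000KX and PCIe SerDes (SFP) attachments;
		 * dispatch on it to choose the media type.
		 */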
2988 		switch (link_mode) {
2989 		case CTRL_EXT_LINK_MODE_1000KX:
2990 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
2991 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2992 			break;
2993 		case CTRL_EXT_LINK_MODE_SGMII:
2994 			if (wm_sgmii_uses_mdio(sc)) {
2995 				aprint_normal_dev(sc->sc_dev,
2996 				    "SGMII(MDIO)\n");
2997 				sc->sc_flags |= WM_F_SGMII;
2998 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2999 				break;
3000 			}
3001 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
3002 			/*FALLTHROUGH*/
3003 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
3004 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
3005 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
3006 				if (link_mode
3007 				    == CTRL_EXT_LINK_MODE_SGMII) {
3008 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3009 					sc->sc_flags |= WM_F_SGMII;
3010 					aprint_verbose_dev(sc->sc_dev,
3011 					    "SGMII\n");
3012 				} else {
3013 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3014 					aprint_verbose_dev(sc->sc_dev,
3015 					    "SERDES\n");
3016 				}
3017 				break;
3018 			}
3019 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
3020 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
3021 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3022 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
3023 				sc->sc_flags |= WM_F_SGMII;
3024 			}
3025 			/* Do not change link mode for 100BaseFX */
3026 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
3027 				break;
3028 
3029 			/* Change current link mode setting */
3030 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
3031 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3032 				reg |= CTRL_EXT_LINK_MODE_SGMII;
3033 			else
3034 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
3035 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3036 			break;
3037 		case CTRL_EXT_LINK_MODE_GMII:
3038 		default:
3039 			aprint_normal_dev(sc->sc_dev, "Copper\n");
3040 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3041 			break;
3042 		}
3043 
3044 		/* Enable the I2C interface only in SGMII mode. */
3045 		if ((sc->sc_flags & WM_F_SGMII) != 0)
3046 			reg |= CTRL_EXT_I2C_ENA;
3047 		else
3048 			reg &= ~CTRL_EXT_I2C_ENA;
3049 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3050 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
3051 			if (!wm_sgmii_uses_mdio(sc))
3052 				wm_gmii_setup_phytype(sc, 0, 0);
3053 			wm_reset_mdicnfg_82580(sc);
3054 		}
3055 	} else if (sc->sc_type < WM_T_82543 ||
3056 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
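		/*
		 * Pre-82543 chips and chips with STATUS_TBIMODE set are
		 * fiber (TBI); warn if that contradicts the product table.
		 */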
3057 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3058 			aprint_error_dev(sc->sc_dev,
3059 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
3060 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3061 		}
3062 	} else {
3063 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3064 			aprint_error_dev(sc->sc_dev,
3065 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3066 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3067 		}
3068 	}
3069 
3070 	if (sc->sc_type >= WM_T_PCH2)
3071 		sc->sc_flags |= WM_F_EEE;
3072 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3073 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3074 		/* XXX: Need special handling for I354. (not yet) */
3075 		if (sc->sc_type != WM_T_I354)
3076 			sc->sc_flags |= WM_F_EEE;
3077 	}
3078 
3079 	/*
3080 	 * The I350 has a bug where it always strips the CRC whether
3081 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
3082 	 */
3083 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3084 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3085 		sc->sc_flags |= WM_F_CRC_STRIP;
3086 
3087 	/* Set device properties (macflags) */
3088 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3089 
3090 	if (sc->sc_flags != 0) {
3091 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3092 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3093 	}
3094 
3095 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3096 
3097 	/* Initialize the media structures accordingly. */
3098 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3099 		wm_gmii_mediainit(sc, wmp->wmp_product);
3100 	else
3101 		wm_tbi_mediainit(sc); /* All others */
3102 
3103 	ifp = &sc->sc_ethercom.ec_if;
3104 	xname = device_xname(sc->sc_dev);
3105 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3106 	ifp->if_softc = sc;
3107 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3108 	ifp->if_extflags = IFEF_MPSAFE;
3109 	ifp->if_ioctl = wm_ioctl;
3110 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3111 		ifp->if_start = wm_nq_start;
3112 		/*
3113 		 * When the number of CPUs is one and the controller can use
3114 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3115 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3116 		 * the other for link status changes.
3117 		 * In this situation, wm_nq_transmit() is disadvantageous
3118 		 * because of wm_select_txqueue() and pcq(9) overhead.
3119 		 */
3120 		if (wm_is_using_multiqueue(sc))
3121 			ifp->if_transmit = wm_nq_transmit;
3122 	} else {
3123 		ifp->if_start = wm_start;
3124 		/*
3125 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3126 		 * described above.
3127 		 */
3128 		if (wm_is_using_multiqueue(sc))
3129 			ifp->if_transmit = wm_transmit;
3130 	}
3131 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
3132 	ifp->if_init = wm_init;
3133 	ifp->if_stop = wm_stop;
3134 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3135 	IFQ_SET_READY(&ifp->if_snd);
3136 
3137 	/* Check for jumbo frame */
3138 	switch (sc->sc_type) {
3139 	case WM_T_82573:
3140 		/* XXX limited to 9234 if ASPM is disabled */
3141 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3142 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3143 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3144 		break;
3145 	case WM_T_82571:
3146 	case WM_T_82572:
3147 	case WM_T_82574:
3148 	case WM_T_82583:
3149 	case WM_T_82575:
3150 	case WM_T_82576:
3151 	case WM_T_82580:
3152 	case WM_T_I350:
3153 	case WM_T_I354:
3154 	case WM_T_I210:
3155 	case WM_T_I211:
3156 	case WM_T_80003:
3157 	case WM_T_ICH9:
3158 	case WM_T_ICH10:
3159 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
3160 	case WM_T_PCH_LPT:
3161 	case WM_T_PCH_SPT:
3162 	case WM_T_PCH_CNP:
3163 		/* XXX limited to 9234 */
3164 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3165 		break;
3166 	case WM_T_PCH:
3167 		/* XXX limited to 4096 */
3168 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3169 		break;
3170 	case WM_T_82542_2_0:
3171 	case WM_T_82542_2_1:
3172 	case WM_T_ICH8:
3173 		/* No support for jumbo frame */
3174 		break;
3175 	default:
3176 		/* ETHER_MAX_LEN_JUMBO */
3177 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3178 		break;
3179 	}
3180 
3181 	/* If we're an i82543 or greater, we can support VLANs. */
3182 	if (sc->sc_type >= WM_T_82543) {
3183 		sc->sc_ethercom.ec_capabilities |=
3184 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3185 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3186 	}
3187 
3188 	if ((sc->sc_flags & WM_F_EEE) != 0)
3189 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3190 
3191 	/*
3192 	 * We can perform IPv4, TCPv4 and UDPv4 checksum offload, but
3193 	 * only on the i82543 and later.
3194 	 */
3195 	if (sc->sc_type >= WM_T_82543) {
3196 		ifp->if_capabilities |=
3197 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3198 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3199 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3200 		    IFCAP_CSUM_TCPv6_Tx |
3201 		    IFCAP_CSUM_UDPv6_Tx;
3202 	}
3203 
3204 	/*
3205 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3206 	 *
3207 	 *	82541GI (8086:1076) ... no
3208 	 *	82572EI (8086:10b9) ... yes
3209 	 */
3210 	if (sc->sc_type >= WM_T_82571) {
3211 		ifp->if_capabilities |=
3212 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3213 	}
3214 
3215 	/*
3216 	 * If we're an i82544 or greater (except the i82547), we can do
3217 	 * TCP segmentation offload.
3218 	 */
3219 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3220 		ifp->if_capabilities |= IFCAP_TSOv4;
3221 
3222 	if (sc->sc_type >= WM_T_82571)
3223 		ifp->if_capabilities |= IFCAP_TSOv6;
3224 
3225 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3226 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3227 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3228 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3229 
3230 	/* Attach the interface. */
3231 	if_initialize(ifp);
3232 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3233 	ether_ifattach(ifp, enaddr);
3234 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3235 	if_register(ifp);
3236 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3237 	    RND_FLAG_DEFAULT);
3238 
3239 #ifdef WM_EVENT_COUNTERS
3240 	/* Attach event counters. */
3241 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3242 	    NULL, xname, "linkintr");
3243 
3244 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3245 	    NULL, xname, "CRC Error");
3246 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3247 	    NULL, xname, "Symbol Error");
3248 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3249 	    NULL, xname, "Missed Packets");
3250 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3251 	    NULL, xname, "Collision");
3252 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3253 	    NULL, xname, "Sequence Error");
3254 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3255 	    NULL, xname, "Receive Length Error");
3256 
3257 	if (sc->sc_type >= WM_T_82543) {
3258 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3259 		    NULL, xname, "Alignment Error");
3260 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3261 		    NULL, xname, "Receive Error");
3262 		/* XXX Does 82575 have HTDPMC? */
3263 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3264 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
3265 			    EVCNT_TYPE_MISC, NULL, xname,
3266 			    "Carrier Extension Error");
3267 		else
3268 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
3269 			    EVCNT_TYPE_MISC, NULL, xname,
3270 			    "Host Transmit Discarded Packets by MAC");
3271 
3272 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3273 		    NULL, xname, "Tx with No CRS");
3274 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3275 		    NULL, xname, "TCP Segmentation Context Tx");
3276 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3277 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
3278 			    EVCNT_TYPE_MISC, NULL, xname,
3279 			    "TCP Segmentation Context Tx Fail");
3280 		else {
3281 			/* XXX Is the circuit breaker only for 82576? */
3282 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
3283 			    EVCNT_TYPE_MISC, NULL, xname,
3284 			    "Circuit Breaker Rx Dropped Packet");
3285 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
3286 			    EVCNT_TYPE_MISC, NULL, xname,
3287 			    "Circuit Breaker Rx Manageability Packet");
3288 		}
3289 	}
3290 
3291 	if (sc->sc_type >= WM_T_82542_2_1) {
3292 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3293 		    NULL, xname, "tx_xoff");
3294 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3295 		    NULL, xname, "tx_xon");
3296 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3297 		    NULL, xname, "rx_xoff");
3298 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3299 		    NULL, xname, "rx_xon");
3300 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3301 		    NULL, xname, "rx_macctl");
3302 	}
3303 
3304 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3305 	    NULL, xname, "Single Collision");
3306 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3307 	    NULL, xname, "Excessive Collisions");
3308 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3309 	    NULL, xname, "Multiple Collision");
3310 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3311 	    NULL, xname, "Late Collisions");
3312 
3313 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3314 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
3315 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
3316 
3317 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3318 	    NULL, xname, "Defer");
3319 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3320 	    NULL, xname, "Packets Rx (64 bytes)");
3321 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3322 	    NULL, xname, "Packets Rx (65-127 bytes)");
3323 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3324 	    NULL, xname, "Packets Rx (128-255 bytes)");
3325 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3326 	    NULL, xname, "Packets Rx (256-511 bytes)");
3327 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3328 	    NULL, xname, "Packets Rx (512-1023 bytes)");
3329 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3330 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
3331 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3332 	    NULL, xname, "Good Packets Rx");
3333 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3334 	    NULL, xname, "Broadcast Packets Rx");
3335 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3336 	    NULL, xname, "Multicast Packets Rx");
3337 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3338 	    NULL, xname, "Good Packets Tx");
3339 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3340 	    NULL, xname, "Good Octets Rx");
3341 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3342 	    NULL, xname, "Good Octets Tx");
3343 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3344 	    NULL, xname, "Rx No Buffers");
3345 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3346 	    NULL, xname, "Rx Undersize");
3347 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3348 	    NULL, xname, "Rx Fragment");
3349 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3350 	    NULL, xname, "Rx Oversize");
3351 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3352 	    NULL, xname, "Rx Jabber");
3353 	if (sc->sc_type >= WM_T_82540) {
3354 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3355 		    NULL, xname, "Management Packets RX");
3356 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3357 		    NULL, xname, "Management Packets Dropped");
3358 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3359 		    NULL, xname, "Management Packets TX");
3360 	}
3361 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3362 	    NULL, xname, "Total Octets Rx");
3363 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3364 	    NULL, xname, "Total Octets Tx");
3365 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3366 	    NULL, xname, "Total Packets Rx");
3367 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3368 	    NULL, xname, "Total Packets Tx");
3369 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3370 	    NULL, xname, "Packets Tx (64 bytes)");
3371 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3372 	    NULL, xname, "Packets Tx (65-127 bytes)");
3373 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3374 	    NULL, xname, "Packets Tx (128-255 bytes)");
3375 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3376 	    NULL, xname, "Packets Tx (256-511 bytes)");
3377 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3378 	    NULL, xname, "Packets Tx (512-1023 bytes)");
3379 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3380 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
3381 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3382 	    NULL, xname, "Multicast Packets Tx");
3383 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3384 	    NULL, xname, "Broadcast Packets Tx");
3385 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3386 	    NULL, xname, "Interrupt Assertion");
3387 	if (sc->sc_type < WM_T_82575) {
3388 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3389 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3390 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3391 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3392 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3393 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3394 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
3395 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3396 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3397 		    NULL, xname, "Intr. Cause Tx Queue Empty");
3398 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3399 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3400 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3401 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3402 
3403 		/* XXX 82575 document says it has ICRXOC. Is that right? */
3404 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3405 		    NULL, xname, "Interrupt Cause Receiver Overrun");
3406 	} else if (!WM_IS_ICHPCH(sc)) {
3407 		/*
3408 		 * For 82575 and newer.
3409 		 *
3410 		 * On 80003, ICHs and PCHs, it seems all of the following
3411 		 * registers are zero.
3412 		 */
3413 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
3414 		    NULL, xname, "Rx Packets To Host");
3415 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
3416 		    NULL, xname, "Debug Counter 1");
3417 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
3418 		    NULL, xname, "Debug Counter 2");
3419 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
3420 		    NULL, xname, "Debug Counter 3");
3421 
3422 		/*
3423 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
3424 		 * Empty). I think it's wrong. The real count I observed is
3425 		 * the same as GPTC (Good Packets Tx) and TPT (Total Packets
3426 		 * Tx). It's HGPTC (Host Good Packets Tx), which is described
3427 		 * in the 82576 datasheet.
3428 		 */
3429 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
3430 		    NULL, xname, "Host Good Packets TX");
3431 
3432 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
3433 		    NULL, xname, "Debug Counter 4");
3434 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3435 		    NULL, xname, "Rx Desc Min Thresh");
3436 		/* XXX Is the circuit breaker only for 82576? */
3437 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
3438 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
3439 
3440 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
3441 		    NULL, xname, "Host Good Octets Rx");
3442 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
3443 		    NULL, xname, "Host Good Octets Tx");
3444 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
3445 		    NULL, xname, "Length Errors");
3446 	}
3447 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3448 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
3449 		    NULL, xname, "EEE Tx LPI");
3450 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
3451 		    NULL, xname, "EEE Rx LPI");
3452 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3453 		    NULL, xname, "BMC2OS Packets received by host");
3454 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3455 		    NULL, xname, "OS2BMC Packets transmitted by host");
3456 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3457 		    NULL, xname, "BMC2OS Packets sent by BMC");
3458 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3459 		    NULL, xname, "OS2BMC Packets received by BMC");
3460 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
3461 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
3462 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
3463 		    NULL, xname, "Header Redirection Missed Packet");
3464 	}
3465 #endif /* WM_EVENT_COUNTERS */
3466 
3467 	sc->sc_txrx_use_workqueue = false;
3468 
3469 	if (wm_phy_need_linkdown_discard(sc)) {
3470 		DPRINTF(sc, WM_DEBUG_LINK,
3471 		    ("%s: %s: Set linkdown discard flag\n",
3472 			device_xname(sc->sc_dev), __func__));
3473 		wm_set_linkdown_discard(sc);
3474 	}
3475 
3476 	wm_init_sysctls(sc);
3477 
3478 	if (pmf_device_register(self, wm_suspend, wm_resume))
3479 		pmf_class_network_register(self, ifp);
3480 	else
3481 		aprint_error_dev(self, "couldn't establish power handler\n");
3482 
3483 	sc->sc_flags |= WM_F_ATTACHED;
3484 out:
3485 	return;
3486 }
3487 
3488 /* The detach function (ca_detach) */
3489 static int
3490 wm_detach(device_t self, int flags __unused)
3491 {
3492 	struct wm_softc *sc = device_private(self);
3493 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3494 	int i;
3495 
3496 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3497 		return 0;
3498 
3499 	/* Stop the interface. Callouts are stopped in it. */
3500 	IFNET_LOCK(ifp);
3501 	sc->sc_dying = true;
3502 	wm_stop(ifp, 1);
3503 	IFNET_UNLOCK(ifp);
3504 
3505 	pmf_device_deregister(self);
3506 
3507 	sysctl_teardown(&sc->sc_sysctllog);
3508 
3509 #ifdef WM_EVENT_COUNTERS
3510 	evcnt_detach(&sc->sc_ev_linkintr);
3511 
3512 	evcnt_detach(&sc->sc_ev_crcerrs);
3513 	evcnt_detach(&sc->sc_ev_symerrc);
3514 	evcnt_detach(&sc->sc_ev_mpc);
3515 	evcnt_detach(&sc->sc_ev_colc);
3516 	evcnt_detach(&sc->sc_ev_sec);
3517 	evcnt_detach(&sc->sc_ev_rlec);
3518 
3519 	if (sc->sc_type >= WM_T_82543) {
3520 		evcnt_detach(&sc->sc_ev_algnerrc);
3521 		evcnt_detach(&sc->sc_ev_rxerrc);
3522 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3523 			evcnt_detach(&sc->sc_ev_cexterr);
3524 		else
3525 			evcnt_detach(&sc->sc_ev_htdpmc);
3526 
3527 		evcnt_detach(&sc->sc_ev_tncrs);
3528 		evcnt_detach(&sc->sc_ev_tsctc);
3529 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3530 			evcnt_detach(&sc->sc_ev_tsctfc);
3531 		else {
3532 			evcnt_detach(&sc->sc_ev_cbrdpc);
3533 			evcnt_detach(&sc->sc_ev_cbrmpc);
3534 		}
3535 	}
3536 
3537 	if (sc->sc_type >= WM_T_82542_2_1) {
3538 		evcnt_detach(&sc->sc_ev_tx_xoff);
3539 		evcnt_detach(&sc->sc_ev_tx_xon);
3540 		evcnt_detach(&sc->sc_ev_rx_xoff);
3541 		evcnt_detach(&sc->sc_ev_rx_xon);
3542 		evcnt_detach(&sc->sc_ev_rx_macctl);
3543 	}
3544 
3545 	evcnt_detach(&sc->sc_ev_scc);
3546 	evcnt_detach(&sc->sc_ev_ecol);
3547 	evcnt_detach(&sc->sc_ev_mcc);
3548 	evcnt_detach(&sc->sc_ev_latecol);
3549 
3550 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3551 		evcnt_detach(&sc->sc_ev_cbtmpc);
3552 
3553 	evcnt_detach(&sc->sc_ev_dc);
3554 	evcnt_detach(&sc->sc_ev_prc64);
3555 	evcnt_detach(&sc->sc_ev_prc127);
3556 	evcnt_detach(&sc->sc_ev_prc255);
3557 	evcnt_detach(&sc->sc_ev_prc511);
3558 	evcnt_detach(&sc->sc_ev_prc1023);
3559 	evcnt_detach(&sc->sc_ev_prc1522);
3560 	evcnt_detach(&sc->sc_ev_gprc);
3561 	evcnt_detach(&sc->sc_ev_bprc);
3562 	evcnt_detach(&sc->sc_ev_mprc);
3563 	evcnt_detach(&sc->sc_ev_gptc);
3564 	evcnt_detach(&sc->sc_ev_gorc);
3565 	evcnt_detach(&sc->sc_ev_gotc);
3566 	evcnt_detach(&sc->sc_ev_rnbc);
3567 	evcnt_detach(&sc->sc_ev_ruc);
3568 	evcnt_detach(&sc->sc_ev_rfc);
3569 	evcnt_detach(&sc->sc_ev_roc);
3570 	evcnt_detach(&sc->sc_ev_rjc);
3571 	if (sc->sc_type >= WM_T_82540) {
3572 		evcnt_detach(&sc->sc_ev_mgtprc);
3573 		evcnt_detach(&sc->sc_ev_mgtpdc);
3574 		evcnt_detach(&sc->sc_ev_mgtptc);
3575 	}
3576 	evcnt_detach(&sc->sc_ev_tor);
3577 	evcnt_detach(&sc->sc_ev_tot);
3578 	evcnt_detach(&sc->sc_ev_tpr);
3579 	evcnt_detach(&sc->sc_ev_tpt);
3580 	evcnt_detach(&sc->sc_ev_ptc64);
3581 	evcnt_detach(&sc->sc_ev_ptc127);
3582 	evcnt_detach(&sc->sc_ev_ptc255);
3583 	evcnt_detach(&sc->sc_ev_ptc511);
3584 	evcnt_detach(&sc->sc_ev_ptc1023);
3585 	evcnt_detach(&sc->sc_ev_ptc1522);
3586 	evcnt_detach(&sc->sc_ev_mptc);
3587 	evcnt_detach(&sc->sc_ev_bptc);
3588 	evcnt_detach(&sc->sc_ev_iac);
3589 	if (sc->sc_type < WM_T_82575) {
3590 		evcnt_detach(&sc->sc_ev_icrxptc);
3591 		evcnt_detach(&sc->sc_ev_icrxatc);
3592 		evcnt_detach(&sc->sc_ev_ictxptc);
3593 		evcnt_detach(&sc->sc_ev_ictxatc);
3594 		evcnt_detach(&sc->sc_ev_ictxqec);
3595 		evcnt_detach(&sc->sc_ev_ictxqmtc);
3596 		evcnt_detach(&sc->sc_ev_rxdmtc);
3597 		evcnt_detach(&sc->sc_ev_icrxoc);
3598 	} else if (!WM_IS_ICHPCH(sc)) {
3599 		evcnt_detach(&sc->sc_ev_rpthc);
3600 		evcnt_detach(&sc->sc_ev_debug1);
3601 		evcnt_detach(&sc->sc_ev_debug2);
3602 		evcnt_detach(&sc->sc_ev_debug3);
3603 		evcnt_detach(&sc->sc_ev_hgptc);
3604 		evcnt_detach(&sc->sc_ev_debug4);
3605 		evcnt_detach(&sc->sc_ev_rxdmtc);
3606 		evcnt_detach(&sc->sc_ev_htcbdpc);
3607 
3608 		evcnt_detach(&sc->sc_ev_hgorc);
3609 		evcnt_detach(&sc->sc_ev_hgotc);
3610 		evcnt_detach(&sc->sc_ev_lenerrs);
3611 	}
3612 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3613 		evcnt_detach(&sc->sc_ev_tlpic);
3614 		evcnt_detach(&sc->sc_ev_rlpic);
3615 		evcnt_detach(&sc->sc_ev_b2ogprc);
3616 		evcnt_detach(&sc->sc_ev_o2bspc);
3617 		evcnt_detach(&sc->sc_ev_b2ospc);
3618 		evcnt_detach(&sc->sc_ev_o2bgptc);
3619 		evcnt_detach(&sc->sc_ev_scvpc);
3620 		evcnt_detach(&sc->sc_ev_hrmpc);
3621 	}
3622 #endif /* WM_EVENT_COUNTERS */
3623 
3624 	rnd_detach_source(&sc->rnd_source);
3625 
3626 	/* Tell the firmware about the release */
3627 	mutex_enter(sc->sc_core_lock);
3628 	wm_release_manageability(sc);
3629 	wm_release_hw_control(sc);
3630 	wm_enable_wakeup(sc);
3631 	mutex_exit(sc->sc_core_lock);
3632 
3633 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3634 
3635 	ether_ifdetach(ifp);
3636 	if_detach(ifp);
3637 	if_percpuq_destroy(sc->sc_ipq);
3638 
3639 	/* Delete all remaining media. */
3640 	ifmedia_fini(&sc->sc_mii.mii_media);
3641 
3642 	/* Unload RX dmamaps and free mbufs */
3643 	for (i = 0; i < sc->sc_nqueues; i++) {
3644 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3645 		mutex_enter(rxq->rxq_lock);
3646 		wm_rxdrain(rxq);
3647 		mutex_exit(rxq->rxq_lock);
3648 	}
3649 	/* Must unlock here */
3650 
3651 	/* Disestablish the interrupt handler */
3652 	for (i = 0; i < sc->sc_nintrs; i++) {
3653 		if (sc->sc_ihs[i] != NULL) {
3654 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3655 			sc->sc_ihs[i] = NULL;
3656 		}
3657 	}
3658 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3659 
3660 	/* wm_stop() ensured that the workqueues are stopped. */
3661 	workqueue_destroy(sc->sc_queue_wq);
3662 	workqueue_destroy(sc->sc_reset_wq);
3663 
3664 	for (i = 0; i < sc->sc_nqueues; i++)
3665 		softint_disestablish(sc->sc_queue[i].wmq_si);
3666 
3667 	wm_free_txrx_queues(sc);
3668 
3669 	/* Unmap the registers */
3670 	if (sc->sc_ss) {
3671 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3672 		sc->sc_ss = 0;
3673 	}
3674 	if (sc->sc_ios) {
3675 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3676 		sc->sc_ios = 0;
3677 	}
3678 	if (sc->sc_flashs) {
3679 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3680 		sc->sc_flashs = 0;
3681 	}
3682 
3683 	if (sc->sc_core_lock)
3684 		mutex_obj_free(sc->sc_core_lock);
3685 	if (sc->sc_ich_phymtx)
3686 		mutex_obj_free(sc->sc_ich_phymtx);
3687 	if (sc->sc_ich_nvmmtx)
3688 		mutex_obj_free(sc->sc_ich_nvmmtx);
3689 
3690 	return 0;
3691 }
3692 
3693 static bool
3694 wm_suspend(device_t self, const pmf_qual_t *qual)
3695 {
3696 	struct wm_softc *sc = device_private(self);
3697 
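	/* Hand the hardware back to the firmware and arm wakeup (WoL). */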
3698 	wm_release_manageability(sc);
3699 	wm_release_hw_control(sc);
3700 	wm_enable_wakeup(sc);
3701 
3702 	return true;
3703 }
3704 
3705 static bool
3706 wm_resume(device_t self, const pmf_qual_t *qual)
3707 {
3708 	struct wm_softc *sc = device_private(self);
3709 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3710 	pcireg_t reg;
3711 	char buf[256];
3712 
3713 	reg = CSR_READ(sc, WMREG_WUS);
3714 	if (reg != 0) {
3715 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3716 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3717 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3718 	}
3719 
3720 	if (sc->sc_type >= WM_T_PCH2)
3721 		wm_resume_workarounds_pchlan(sc);
3722 	IFNET_LOCK(ifp);
3723 	if ((ifp->if_flags & IFF_UP) == 0) {
3724 		/* >= PCH_SPT hardware workaround before reset. */
3725 		if (sc->sc_type >= WM_T_PCH_SPT)
3726 			wm_flush_desc_rings(sc);
3727 
3728 		wm_reset(sc);
3729 		/* Non-AMT based hardware can now take control from firmware */
3730 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3731 			wm_get_hw_control(sc);
3732 		wm_init_manageability(sc);
3733 	} else {
3734 		/*
3735 		 * We called pmf_class_network_register(), so if_init() is
3736 		 * automatically called when IFF_UP is set. wm_reset(),
3737 		 * wm_get_hw_control() and wm_init_manageability() are called
3738 		 * via wm_init().
3739 		 */
3740 	}
3741 	IFNET_UNLOCK(ifp);
3742 
3743 	return true;
3744 }
3745 
3746 /*
3747  * wm_watchdog:
3748  *
3749  *	Watchdog checker.
3750  */
3751 static bool
3752 wm_watchdog(struct ifnet *ifp)
3753 {
3754 	int qid;
3755 	struct wm_softc *sc = ifp->if_softc;
3756 	uint16_t hang_queue = 0; /* Bitmask of hung queues; wm(4)'s max is the 82576's 16. */
3757 
3758 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3759 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3760 
3761 		wm_watchdog_txq(ifp, txq, &hang_queue);
3762 	}
3763 
3764 #ifdef WM_DEBUG
3765 	if (sc->sc_trigger_reset) {
3766 		/* debug operation, no need for atomicity or reliability */
3767 		sc->sc_trigger_reset = 0;
3768 		hang_queue++;
3769 	}
3770 #endif
3771 
3772 	if (hang_queue == 0)
3773 		return true;
3774 
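	/* Ensure only one reset work item is queued at a time. */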
3775 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3776 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3777 
3778 	return false;
3779 }
3780 
3781 /*
3782  * Perform an interface watchdog reset.
3783  */
3784 static void
3785 wm_handle_reset_work(struct work *work, void *arg)
3786 {
3787 	struct wm_softc * const sc = arg;
3788 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3789 
3790 	/* Don't want ioctl operations to happen */
3791 	IFNET_LOCK(ifp);
3792 
3793 	/* Reset the interface. */
3794 	wm_init(ifp);
3795 
3796 	IFNET_UNLOCK(ifp);
3797 
3798 	/*
3799 	 * There is still some upper-layer processing which calls
3800 	 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
3801 	 */
3802 	/* Try to get more packets going. */
3803 	ifp->if_start(ifp);
3804 
3805 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
3806 }
3807 
3808 
3809 static void
3810 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3811 {
3812 
3813 	mutex_enter(txq->txq_lock);
3814 	if (txq->txq_sending &&
3815 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3816 		wm_watchdog_txq_locked(ifp, txq, hang);
3817 
3818 	mutex_exit(txq->txq_lock);
3819 }
3820 
3821 static void
3822 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3823     uint16_t *hang)
3824 {
3825 	struct wm_softc *sc = ifp->if_softc;
3826 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3827 
3828 	KASSERT(mutex_owned(txq->txq_lock));
3829 
3830 	/*
3831 	 * Since we're using delayed interrupts, sweep up
3832 	 * before we report an error.
3833 	 */
3834 	wm_txeof(txq, UINT_MAX);
3835 
3836 	if (txq->txq_sending)
3837 		*hang |= __BIT(wmq->wmq_id);
3838 
3839 	if (txq->txq_free == WM_NTXDESC(txq)) {
3840 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3841 		    device_xname(sc->sc_dev));
3842 	} else {
3843 #ifdef WM_DEBUG
3844 		int i, j;
3845 		struct wm_txsoft *txs;
3846 #endif
3847 		log(LOG_ERR,
3848 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3849 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3850 		    txq->txq_next);
3851 		if_statinc(ifp, if_oerrors);
3852 #ifdef WM_DEBUG
3853 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3854 		     i = WM_NEXTTXS(txq, i)) {
3855 			txs = &txq->txq_soft[i];
3856 			printf("txs %d tx %d -> %d\n",
3857 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3858 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3859 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3860 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3861 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3862 					printf("\t %#08x%08x\n",
3863 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3864 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3865 				} else {
3866 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3867 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3868 					    txq->txq_descs[j].wtx_addr.wa_low);
3869 					printf("\t %#04x%02x%02x%08x\n",
3870 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3871 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3872 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3873 					    txq->txq_descs[j].wtx_cmdlen);
3874 				}
3875 				if (j == txs->txs_lastdesc)
3876 					break;
3877 			}
3878 		}
3879 #endif
3880 	}
3881 }
3882 
3883 /*
3884  * wm_tick:
3885  *
3886  *	One second timer, used to check link status, sweep up
3887  *	completed transmit jobs, etc.
3888  */
3889 static void
3890 wm_tick(void *arg)
3891 {
3892 	struct wm_softc *sc = arg;
3893 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3894 
3895 	mutex_enter(sc->sc_core_lock);
3896 
3897 	if (sc->sc_core_stopping) {
3898 		mutex_exit(sc->sc_core_lock);
3899 		return;
3900 	}
3901 
3902 	wm_update_stats(sc);
3903 
3904 	if (sc->sc_flags & WM_F_HAS_MII)
3905 		mii_tick(&sc->sc_mii);
3906 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3907 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3908 		wm_serdes_tick(sc);
3909 	else
3910 		wm_tbi_tick(sc);
3911 
3912 	mutex_exit(sc->sc_core_lock);
3913 
3914 	if (wm_watchdog(ifp))
3915 		callout_schedule(&sc->sc_tick_ch, hz);
3916 }
3917 
3918 static int
3919 wm_ifflags_cb(struct ethercom *ec)
3920 {
3921 	struct ifnet *ifp = &ec->ec_if;
3922 	struct wm_softc *sc = ifp->if_softc;
3923 	u_short iffchange;
3924 	int ecchange;
3925 	bool needreset = false;
3926 	int rc = 0;
3927 
3928 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3929 		device_xname(sc->sc_dev), __func__));
3930 
3931 	KASSERT(IFNET_LOCKED(ifp));
3932 
3933 	mutex_enter(sc->sc_core_lock);
3934 
3935 	/*
3936 	 * Check for if_flags.
3937 	 * The main use is to avoid resetting (and a link down) when bpf is opened.
3938 	 */
3939 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3940 	sc->sc_if_flags = ifp->if_flags;
3941 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3942 		needreset = true;
3943 		goto ec;
3944 	}
3945 
3946 	/* iff related updates */
3947 	if ((iffchange & IFF_PROMISC) != 0)
3948 		wm_set_filter(sc);
3949 
3950 	wm_set_vlan(sc);
3951 
3952 ec:
3953 	/* Check for ec_capenable. */
3954 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3955 	sc->sc_ec_capenable = ec->ec_capenable;
3956 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3957 		needreset = true;
3958 		goto out;
3959 	}
3960 
3961 	/* ec related updates */
3962 	wm_set_eee(sc);
3963 
3964 out:
3965 	if (needreset)
3966 		rc = ENETRESET;
3967 	mutex_exit(sc->sc_core_lock);
3968 
3969 	return rc;
3970 }
3971 
3972 static bool
3973 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3974 {
3975 
3976 	switch (sc->sc_phytype) {
3977 	case WMPHY_82577: /* ihphy */
3978 	case WMPHY_82578: /* atphy */
3979 	case WMPHY_82579: /* ihphy */
3980 	case WMPHY_I217: /* ihphy */
3981 	case WMPHY_82580: /* ihphy */
3982 	case WMPHY_I350: /* ihphy */
3983 		return true;
3984 	default:
3985 		return false;
3986 	}
3987 }
3988 
3989 static void
3990 wm_set_linkdown_discard(struct wm_softc *sc)
3991 {
3992 
3993 	for (int i = 0; i < sc->sc_nqueues; i++) {
3994 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3995 
3996 		mutex_enter(txq->txq_lock);
3997 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3998 		mutex_exit(txq->txq_lock);
3999 	}
4000 }
4001 
4002 static void
4003 wm_clear_linkdown_discard(struct wm_softc *sc)
4004 {
4005 
4006 	for (int i = 0; i < sc->sc_nqueues; i++) {
4007 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4008 
4009 		mutex_enter(txq->txq_lock);
4010 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
4011 		mutex_exit(txq->txq_lock);
4012 	}
4013 }
4014 
4015 /*
4016  * wm_ioctl:		[ifnet interface function]
4017  *
4018  *	Handle control requests from the operator.
4019  */
4020 static int
4021 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4022 {
4023 	struct wm_softc *sc = ifp->if_softc;
4024 	struct ifreq *ifr = (struct ifreq *)data;
4025 	struct ifaddr *ifa = (struct ifaddr *)data;
4026 	struct sockaddr_dl *sdl;
4027 	int error;
4028 
4029 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4030 		device_xname(sc->sc_dev), __func__));
4031 
4032 	switch (cmd) {
4033 	case SIOCADDMULTI:
4034 	case SIOCDELMULTI:
4035 		break;
4036 	default:
4037 		KASSERT(IFNET_LOCKED(ifp));
4038 	}
4039 
4040 	if (cmd == SIOCZIFDATA) {
4041 		/*
4042 		 * Special handling for SIOCZIFDATA.
4043 		 * Copying and clearing the if_data structure is done with
4044 		 * ether_ioctl() below.
4045 		 */
4046 		mutex_enter(sc->sc_core_lock);
4047 		wm_update_stats(sc);
4048 		wm_clear_evcnt(sc);
4049 		mutex_exit(sc->sc_core_lock);
4050 	}
4051 
4052 	switch (cmd) {
4053 	case SIOCSIFMEDIA:
4054 		mutex_enter(sc->sc_core_lock);
4055 		/* Flow control requires full-duplex mode. */
4056 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4057 		    (ifr->ifr_media & IFM_FDX) == 0)
4058 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4059 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4060 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4061 				/* We can do both TXPAUSE and RXPAUSE. */
4062 				ifr->ifr_media |=
4063 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4064 			}
4065 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4066 		}
4067 		mutex_exit(sc->sc_core_lock);
4068 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4069 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4070 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4071 				DPRINTF(sc, WM_DEBUG_LINK,
4072 				    ("%s: %s: Set linkdown discard flag\n",
4073 					device_xname(sc->sc_dev), __func__));
4074 				wm_set_linkdown_discard(sc);
4075 			}
4076 		}
4077 		break;
4078 	case SIOCINITIFADDR:
4079 		mutex_enter(sc->sc_core_lock);
4080 		if (ifa->ifa_addr->sa_family == AF_LINK) {
4081 			sdl = satosdl(ifp->if_dl->ifa_addr);
4082 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4083 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4084 			/* Unicast address is the first multicast entry */
4085 			wm_set_filter(sc);
4086 			error = 0;
4087 			mutex_exit(sc->sc_core_lock);
4088 			break;
4089 		}
4090 		mutex_exit(sc->sc_core_lock);
4091 		/*FALLTHROUGH*/
4092 	default:
4093 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4094 			if (((ifp->if_flags & IFF_UP) != 0) &&
4095 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
4096 				DPRINTF(sc, WM_DEBUG_LINK,
4097 				    ("%s: %s: Set linkdown discard flag\n",
4098 					device_xname(sc->sc_dev), __func__));
4099 				wm_set_linkdown_discard(sc);
4100 			}
4101 		}
4102 		const int s = splnet();
4103 		/* It may call wm_start, so unlock here */
4104 		error = ether_ioctl(ifp, cmd, data);
4105 		splx(s);
4106 		if (error != ENETRESET)
4107 			break;
4108 
4109 		error = 0;
4110 
4111 		if (cmd == SIOCSIFCAP)
4112 			error = if_init(ifp);
4113 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4114 			mutex_enter(sc->sc_core_lock);
4115 			if (sc->sc_if_flags & IFF_RUNNING) {
4116 				/*
4117 				 * Multicast list has changed; set the
4118 				 * hardware filter accordingly.
4119 				 */
4120 				wm_set_filter(sc);
4121 			}
4122 			mutex_exit(sc->sc_core_lock);
4123 		}
4124 		break;
4125 	}
4126 
4127 	return error;
4128 }
4129 
4130 /* MAC address related */
4131 
4132 /*
4133  * Get the offset of the MAC address and return it.
4134  * If an error occurs, use offset 0.
4135  */
4136 static uint16_t
4137 wm_check_alt_mac_addr(struct wm_softc *sc)
4138 {
4139 	uint16_t myea[ETHER_ADDR_LEN / 2];
4140 	uint16_t offset = NVM_OFF_MACADDR;
4141 
4142 	/* Try to read alternative MAC address pointer */
4143 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4144 		return 0;
4145 
4146 	/* Check whether the pointer is valid. */
4147 	if ((offset == 0x0000) || (offset == 0xffff))
4148 		return 0;
4149 
4150 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4151 	/*
4152 	 * Check whether the alternative MAC address is valid. Some
4153 	 * cards have a non-0xffff pointer but don't actually use an
4154 	 * alternative MAC address.
4155 	 *
4156 	 * A valid address must have the multicast/broadcast bit clear.
4157 	 */
4158 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
4159 		if (((myea[0] & 0xff) & 0x01) == 0)
4160 			return offset; /* Found */
4161 
4162 	/* Not found */
4163 	return 0;
4164 }
4165 
4166 static int
4167 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4168 {
4169 	uint16_t myea[ETHER_ADDR_LEN / 2];
4170 	uint16_t offset = NVM_OFF_MACADDR;
4171 	int do_invert = 0;
4172 
4173 	switch (sc->sc_type) {
4174 	case WM_T_82580:
4175 	case WM_T_I350:
4176 	case WM_T_I354:
4177 		/* EEPROM Top Level Partitioning */
4178 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4179 		break;
4180 	case WM_T_82571:
4181 	case WM_T_82575:
4182 	case WM_T_82576:
4183 	case WM_T_80003:
4184 	case WM_T_I210:
4185 	case WM_T_I211:
4186 		offset = wm_check_alt_mac_addr(sc);
4187 		if (offset == 0)
4188 			if ((sc->sc_funcid & 0x01) == 1)
4189 				do_invert = 1;
4190 		break;
4191 	default:
4192 		if ((sc->sc_funcid & 0x01) == 1)
4193 			do_invert = 1;
4194 		break;
4195 	}
4196 
4197 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4198 		goto bad;
4199 
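	/* NVM words are little-endian: the low byte comes first. */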
4200 	enaddr[0] = myea[0] & 0xff;
4201 	enaddr[1] = myea[0] >> 8;
4202 	enaddr[2] = myea[1] & 0xff;
4203 	enaddr[3] = myea[1] >> 8;
4204 	enaddr[4] = myea[2] & 0xff;
4205 	enaddr[5] = myea[2] >> 8;
4206 
4207 	/*
4208 	 * Toggle the LSB of the MAC address on the second port
4209 	 * of some dual port cards.
4210 	 */
4211 	if (do_invert != 0)
4212 		enaddr[5] ^= 1;
4213 
4214 	return 0;
4215 
4216 bad:
4217 	return -1;
4218 }
4219 
4220 /*
4221  * wm_set_ral:
4222  *
4223  *	Set an entry in the receive address list.
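 *
 *	RAL(idx) holds the first four octets of the address; RAH(idx)
 *	holds the last two octets together with the Address Valid bit.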
4224  */
4225 static void
4226 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4227 {
4228 	uint32_t ral_lo, ral_hi, addrl, addrh;
4229 	uint32_t wlock_mac;
4230 	int rv;
4231 
4232 	if (enaddr != NULL) {
4233 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4234 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4235 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4236 		ral_hi |= RAL_AV;
4237 	} else {
4238 		ral_lo = 0;
4239 		ral_hi = 0;
4240 	}
4241 
4242 	switch (sc->sc_type) {
4243 	case WM_T_82542_2_0:
4244 	case WM_T_82542_2_1:
4245 	case WM_T_82543:
4246 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4247 		CSR_WRITE_FLUSH(sc);
4248 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4249 		CSR_WRITE_FLUSH(sc);
4250 		break;
4251 	case WM_T_PCH2:
4252 	case WM_T_PCH_LPT:
4253 	case WM_T_PCH_SPT:
4254 	case WM_T_PCH_CNP:
4255 		if (idx == 0) {
4256 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4257 			CSR_WRITE_FLUSH(sc);
4258 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4259 			CSR_WRITE_FLUSH(sc);
4260 			return;
4261 		}
4262 		if (sc->sc_type != WM_T_PCH2) {
4263 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4264 			    FWSM_WLOCK_MAC);
4265 			addrl = WMREG_SHRAL(idx - 1);
4266 			addrh = WMREG_SHRAH(idx - 1);
4267 		} else {
4268 			wlock_mac = 0;
4269 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4270 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4271 		}
4272 
4273 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4274 			rv = wm_get_swflag_ich8lan(sc);
4275 			if (rv != 0)
4276 				return;
4277 			CSR_WRITE(sc, addrl, ral_lo);
4278 			CSR_WRITE_FLUSH(sc);
4279 			CSR_WRITE(sc, addrh, ral_hi);
4280 			CSR_WRITE_FLUSH(sc);
4281 			wm_put_swflag_ich8lan(sc);
4282 		}
4283 
4284 		break;
4285 	default:
4286 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4287 		CSR_WRITE_FLUSH(sc);
4288 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4289 		CSR_WRITE_FLUSH(sc);
4290 		break;
4291 	}
4292 }
4293 
4294 /*
4295  * wm_mchash:
4296  *
4297  *	Compute the hash of the multicast address for the 4096-bit
4298  *	multicast filter.
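 *
 *	E.g. with sc_mchash_type 0 on non-ICH/PCH parts the 12-bit hash
 *	is ((enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4)) & 0xfff, i.e.
 *	the upper four bits of enaddr[4] plus all of enaddr[5].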
4299  */
4300 static uint32_t
4301 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4302 {
4303 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4304 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4305 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4306 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4307 	uint32_t hash;
4308 
4309 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4310 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4311 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4312 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4313 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4314 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4315 		return (hash & 0x3ff);
4316 	}
4317 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4318 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4319 
4320 	return (hash & 0xfff);
4321 }
4322 
4323 /*
4324  * wm_rar_count:
4325  *	Return the number of entries in the receive address table.
4326  */
4327 static int
4328 wm_rar_count(struct wm_softc *sc)
4329 {
4330 	int size;
4331 
4332 	switch (sc->sc_type) {
4333 	case WM_T_ICH8:
4334 		size = WM_RAL_TABSIZE_ICH8 - 1;
4335 		break;
4336 	case WM_T_ICH9:
4337 	case WM_T_ICH10:
4338 	case WM_T_PCH:
4339 		size = WM_RAL_TABSIZE_ICH8;
4340 		break;
4341 	case WM_T_PCH2:
4342 		size = WM_RAL_TABSIZE_PCH2;
4343 		break;
4344 	case WM_T_PCH_LPT:
4345 	case WM_T_PCH_SPT:
4346 	case WM_T_PCH_CNP:
4347 		size = WM_RAL_TABSIZE_PCH_LPT;
4348 		break;
4349 	case WM_T_82575:
4350 	case WM_T_I210:
4351 	case WM_T_I211:
4352 		size = WM_RAL_TABSIZE_82575;
4353 		break;
4354 	case WM_T_82576:
4355 	case WM_T_82580:
4356 		size = WM_RAL_TABSIZE_82576;
4357 		break;
4358 	case WM_T_I350:
4359 	case WM_T_I354:
4360 		size = WM_RAL_TABSIZE_I350;
4361 		break;
4362 	default:
4363 		size = WM_RAL_TABSIZE;
4364 	}
4365 
4366 	return size;
4367 }
4368 
4369 /*
4370  * wm_set_filter:
4371  *
4372  *	Set up the receive filter.
4373  */
4374 static void
4375 wm_set_filter(struct wm_softc *sc)
4376 {
4377 	struct ethercom *ec = &sc->sc_ethercom;
4378 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4379 	struct ether_multi *enm;
4380 	struct ether_multistep step;
4381 	bus_addr_t mta_reg;
4382 	uint32_t hash, reg, bit;
4383 	int i, size, ralmax, rv;
4384 
4385 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4386 		device_xname(sc->sc_dev), __func__));
4387 	KASSERT(mutex_owned(sc->sc_core_lock));
4388 
4389 	if (sc->sc_type >= WM_T_82544)
4390 		mta_reg = WMREG_CORDOVA_MTA;
4391 	else
4392 		mta_reg = WMREG_MTA;
4393 
4394 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4395 
4396 	if (sc->sc_if_flags & IFF_BROADCAST)
4397 		sc->sc_rctl |= RCTL_BAM;
4398 	if (sc->sc_if_flags & IFF_PROMISC) {
4399 		sc->sc_rctl |= RCTL_UPE;
4400 		ETHER_LOCK(ec);
4401 		ec->ec_flags |= ETHER_F_ALLMULTI;
4402 		ETHER_UNLOCK(ec);
4403 		goto allmulti;
4404 	}
4405 
4406 	/*
4407 	 * Set the station address in the first RAL slot, and
4408 	 * clear the remaining slots.
4409 	 */
4410 	size = wm_rar_count(sc);
4411 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4412 
4413 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4414 	    || (sc->sc_type == WM_T_PCH_CNP)) {
4415 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4416 		switch (i) {
4417 		case 0:
4418 			/* We can use all entries */
4419 			ralmax = size;
4420 			break;
4421 		case 1:
4422 			/* Only RAR[0] */
4423 			ralmax = 1;
4424 			break;
4425 		default:
4426 			/* Available SHRA + RAR[0] */
4427 			ralmax = i + 1;
4428 		}
4429 	} else
4430 		ralmax = size;
4431 	for (i = 1; i < size; i++) {
4432 		if (i < ralmax)
4433 			wm_set_ral(sc, NULL, i);
4434 	}
4435 
4436 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4437 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4438 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4439 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4440 		size = WM_ICH8_MC_TABSIZE;
4441 	else
4442 		size = WM_MC_TABSIZE;
4443 	/* Clear out the multicast table. */
4444 	for (i = 0; i < size; i++) {
4445 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4446 		CSR_WRITE_FLUSH(sc);
4447 	}
4448 
4449 	ETHER_LOCK(ec);
4450 	ETHER_FIRST_MULTI(step, ec, enm);
4451 	while (enm != NULL) {
4452 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4453 			ec->ec_flags |= ETHER_F_ALLMULTI;
4454 			ETHER_UNLOCK(ec);
4455 			/*
4456 			 * We must listen to a range of multicast addresses.
4457 			 * For now, just accept all multicasts, rather than
4458 			 * trying to set only those filter bits needed to match
4459 			 * the range.  (At this time, the only use of address
4460 			 * ranges is for IP multicast routing, for which the
4461 			 * range is big enough to require all bits set.)
4462 			 */
4463 			goto allmulti;
4464 		}
4465 
4466 		hash = wm_mchash(sc, enm->enm_addrlo);
4467 
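		/*
		 * The upper hash bits select one of the 32-bit MTA
		 * registers (32 on ICH/PCH, 128 elsewhere); the low five
		 * bits select the bit within that register.
		 */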
4468 		reg = (hash >> 5);
4469 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4470 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4471 		    || (sc->sc_type == WM_T_PCH2)
4472 		    || (sc->sc_type == WM_T_PCH_LPT)
4473 		    || (sc->sc_type == WM_T_PCH_SPT)
4474 		    || (sc->sc_type == WM_T_PCH_CNP))
4475 			reg &= 0x1f;
4476 		else
4477 			reg &= 0x7f;
4478 		bit = hash & 0x1f;
4479 
4480 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4481 		hash |= 1U << bit;
4482 
4483 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4484 			/*
4485 			 * 82544 Errata 9: Certain registers cannot be written
4486 			 * with particular alignments in PCI-X bus operation
4487 			 * (FCAH, MTA and VFTA).
4488 			 */
4489 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4490 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4491 			CSR_WRITE_FLUSH(sc);
4492 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4493 			CSR_WRITE_FLUSH(sc);
4494 		} else {
4495 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4496 			CSR_WRITE_FLUSH(sc);
4497 		}
4498 
4499 		ETHER_NEXT_MULTI(step, enm);
4500 	}
4501 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
4502 	ETHER_UNLOCK(ec);
4503 
4504 	goto setit;
4505 
4506 allmulti:
4507 	sc->sc_rctl |= RCTL_MPE;
4508 
4509 setit:
4510 	if (sc->sc_type >= WM_T_PCH2) {
4511 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4512 		    && (ifp->if_mtu > ETHERMTU))
4513 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4514 		else
4515 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4516 		if (rv != 0)
4517 			device_printf(sc->sc_dev,
4518 			    "Failed to do workaround for jumbo frame.\n");
4519 	}
4520 
4521 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4522 }
4523 
4524 /* Reset and init related */
4525 
4526 static void
4527 wm_set_vlan(struct wm_softc *sc)
4528 {
4529 
4530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4531 		device_xname(sc->sc_dev), __func__));
4532 
4533 	/* Deal with VLAN enables. */
4534 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4535 		sc->sc_ctrl |= CTRL_VME;
4536 	else
4537 		sc->sc_ctrl &= ~CTRL_VME;
4538 
4539 	/* Write the control registers. */
4540 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4541 }
4542 
4543 static void
4544 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4545 {
4546 	uint32_t gcr;
4547 	pcireg_t ctrl2;
4548 
4549 	gcr = CSR_READ(sc, WMREG_GCR);
4550 
4551 	/* Only take action if timeout value is defaulted to 0 */
4552 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4553 		goto out;
4554 
4555 	if ((gcr & GCR_CAP_VER2) == 0) {
4556 		gcr |= GCR_CMPL_TMOUT_10MS;
4557 		goto out;
4558 	}
4559 
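	/*
	 * Completion timeout capability version 2: set the timeout range
	 * through the PCIe Device Control 2 register instead.
	 */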
4560 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4561 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
4562 	ctrl2 |= WM_PCIE_DCSR2_16MS;
4563 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4564 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4565 
4566 out:
4567 	/* Disable completion timeout resend */
4568 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
4569 
4570 	CSR_WRITE(sc, WMREG_GCR, gcr);
4571 }
4572 
4573 void
4574 wm_get_auto_rd_done(struct wm_softc *sc)
4575 {
4576 	int i;
4577 
4578 	/* Wait for eeprom to reload */
4579 	switch (sc->sc_type) {
4580 	case WM_T_82571:
4581 	case WM_T_82572:
4582 	case WM_T_82573:
4583 	case WM_T_82574:
4584 	case WM_T_82583:
4585 	case WM_T_82575:
4586 	case WM_T_82576:
4587 	case WM_T_82580:
4588 	case WM_T_I350:
4589 	case WM_T_I354:
4590 	case WM_T_I210:
4591 	case WM_T_I211:
4592 	case WM_T_80003:
4593 	case WM_T_ICH8:
4594 	case WM_T_ICH9:
4595 		for (i = 0; i < 10; i++) {
4596 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4597 				break;
4598 			delay(1000);
4599 		}
4600 		if (i == 10) {
4601 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4602 			    "complete\n", device_xname(sc->sc_dev));
4603 		}
4604 		break;
4605 	default:
4606 		break;
4607 	}
4608 }
4609 
4610 void
4611 wm_lan_init_done(struct wm_softc *sc)
4612 {
4613 	uint32_t reg = 0;
4614 	int i;
4615 
4616 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4617 		device_xname(sc->sc_dev), __func__));
4618 
4619 	/* Wait for eeprom to reload */
4620 	switch (sc->sc_type) {
4621 	case WM_T_ICH10:
4622 	case WM_T_PCH:
4623 	case WM_T_PCH2:
4624 	case WM_T_PCH_LPT:
4625 	case WM_T_PCH_SPT:
4626 	case WM_T_PCH_CNP:
4627 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4628 			reg = CSR_READ(sc, WMREG_STATUS);
4629 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4630 				break;
4631 			delay(100);
4632 		}
4633 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4634 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4635 			    "complete\n", device_xname(sc->sc_dev), __func__);
4636 		}
4637 		break;
4638 	default:
4639 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4640 		    __func__);
4641 		break;
4642 	}
4643 
4644 	reg &= ~STATUS_LAN_INIT_DONE;
4645 	CSR_WRITE(sc, WMREG_STATUS, reg);
4646 }
4647 
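/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware has finished loading its configuration
 *	after reset. Depending on the family this is a fixed delay, a poll
 *	of the per-port EEMNGCTL_CFGDONE bit, or (for ICH10 and newer) a
 *	wait for LAN_INIT_DONE followed by clearing STATUS_PHYRA.
 */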
4648 void
4649 wm_get_cfg_done(struct wm_softc *sc)
4650 {
4651 	int mask;
4652 	uint32_t reg;
4653 	int i;
4654 
4655 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4656 		device_xname(sc->sc_dev), __func__));
4657 
4658 	/* Wait for eeprom to reload */
4659 	switch (sc->sc_type) {
4660 	case WM_T_82542_2_0:
4661 	case WM_T_82542_2_1:
4662 		/* null */
4663 		break;
4664 	case WM_T_82543:
4665 	case WM_T_82544:
4666 	case WM_T_82540:
4667 	case WM_T_82545:
4668 	case WM_T_82545_3:
4669 	case WM_T_82546:
4670 	case WM_T_82546_3:
4671 	case WM_T_82541:
4672 	case WM_T_82541_2:
4673 	case WM_T_82547:
4674 	case WM_T_82547_2:
4675 	case WM_T_82573:
4676 	case WM_T_82574:
4677 	case WM_T_82583:
4678 		/* generic */
4679 		delay(10*1000);
4680 		break;
4681 	case WM_T_80003:
4682 	case WM_T_82571:
4683 	case WM_T_82572:
4684 	case WM_T_82575:
4685 	case WM_T_82576:
4686 	case WM_T_82580:
4687 	case WM_T_I350:
4688 	case WM_T_I354:
4689 	case WM_T_I210:
4690 	case WM_T_I211:
4691 		if (sc->sc_type == WM_T_82571) {
4692 			/* Only 82571 shares port 0 */
4693 			mask = EEMNGCTL_CFGDONE_0;
4694 		} else
4695 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4696 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4697 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4698 				break;
4699 			delay(1000);
4700 		}
4701 		if (i >= WM_PHY_CFG_TIMEOUT)
4702 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4703 				device_xname(sc->sc_dev), __func__));
4704 		break;
4705 	case WM_T_ICH8:
4706 	case WM_T_ICH9:
4707 	case WM_T_ICH10:
4708 	case WM_T_PCH:
4709 	case WM_T_PCH2:
4710 	case WM_T_PCH_LPT:
4711 	case WM_T_PCH_SPT:
4712 	case WM_T_PCH_CNP:
4713 		delay(10*1000);
4714 		if (sc->sc_type >= WM_T_ICH10)
4715 			wm_lan_init_done(sc);
4716 		else
4717 			wm_get_auto_rd_done(sc);
4718 
4719 		/* Clear PHY Reset Asserted bit */
4720 		reg = CSR_READ(sc, WMREG_STATUS);
4721 		if ((reg & STATUS_PHYRA) != 0)
4722 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4723 		break;
4724 	default:
4725 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4726 		    __func__);
4727 		break;
4728 	}
4729 }
4730 
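/*
 * wm_phy_post_reset:
 *
 *	Things to do after a PHY reset on ICH8 and newer: apply the
 *	PCH/PCH2 PHY workarounds, clear the host wakeup bit, configure the
 *	LCD from the NVM extended configuration region and OEM bits, and
 *	(on 82579) ungate automatic PHY configuration and set the EEE LPI
 *	update timer.
 */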
4731 int
4732 wm_phy_post_reset(struct wm_softc *sc)
4733 {
4734 	device_t dev = sc->sc_dev;
4735 	uint16_t reg;
4736 	int rv = 0;
4737 
4738 	/* This function is only for ICH8 and newer. */
4739 	if (sc->sc_type < WM_T_ICH8)
4740 		return 0;
4741 
4742 	if (wm_phy_resetisblocked(sc)) {
4743 		/* XXX */
4744 		device_printf(dev, "PHY is blocked\n");
4745 		return -1;
4746 	}
4747 
4748 	/* Allow time for h/w to get to quiescent state after reset */
4749 	delay(10*1000);
4750 
4751 	/* Perform any necessary post-reset workarounds */
4752 	if (sc->sc_type == WM_T_PCH)
4753 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4754 	else if (sc->sc_type == WM_T_PCH2)
4755 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4756 	if (rv != 0)
4757 		return rv;
4758 
4759 	/* Clear the host wakeup bit after lcd reset */
4760 	if (sc->sc_type >= WM_T_PCH) {
4761 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4762 		reg &= ~BM_WUC_HOST_WU_BIT;
4763 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4764 	}
4765 
4766 	/* Configure the LCD with the extended configuration region in NVM */
4767 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4768 		return rv;
4769 
4770 	/* Configure the LCD with the OEM bits in NVM */
4771 	rv = wm_oem_bits_config_ich8lan(sc, true);
4772 
4773 	if (sc->sc_type == WM_T_PCH2) {
4774 		/* Ungate automatic PHY configuration on non-managed 82579 */
4775 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4776 			delay(10 * 1000);
4777 			wm_gate_hw_phy_config_ich8lan(sc, false);
4778 		}
4779 		/* Set EEE LPI Update Timer to 200usec */
4780 		rv = sc->phy.acquire(sc);
4781 		if (rv)
4782 			return rv;
4783 		rv = wm_write_emi_reg_locked(dev,
4784 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4785 		sc->phy.release(sc);
4786 	}
4787 
4788 	return rv;
4789 }
4790 
4791 /* Only for PCH and newer */
4792 static int
4793 wm_write_smbus_addr(struct wm_softc *sc)
4794 {
4795 	uint32_t strap, freq;
4796 	uint16_t phy_data;
4797 	int rv;
4798 
4799 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4800 		device_xname(sc->sc_dev), __func__));
4801 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4802 
4803 	strap = CSR_READ(sc, WMREG_STRAP);
4804 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4805 
4806 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4807 	if (rv != 0)
4808 		return rv;
4809 
4810 	phy_data &= ~HV_SMB_ADDR_ADDR;
4811 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4812 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4813 
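	/*
	 * The STRAP_FREQ field encodes the SMBus frequency: 0 means the
	 * frequency is unsupported; otherwise bit 0 and bit 1 of
	 * (freq - 1) select HV_SMB_ADDR_FREQ_LOW and HV_SMB_ADDR_FREQ_HIGH
	 * respectively.
	 */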
4814 	if (sc->sc_phytype == WMPHY_I217) {
4815 		/* Restore SMBus frequency */
4816 		if (freq--) {
4817 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4818 			    | HV_SMB_ADDR_FREQ_HIGH);
4819 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4820 			    HV_SMB_ADDR_FREQ_LOW);
4821 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4822 			    HV_SMB_ADDR_FREQ_HIGH);
4823 		} else
4824 			DPRINTF(sc, WM_DEBUG_INIT,
4825 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4826 				device_xname(sc->sc_dev), __func__));
4827 	}
4828 
4829 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4830 	    phy_data);
4831 }
4832 
4833 static int
4834 wm_init_lcd_from_nvm(struct wm_softc *sc)
4835 {
4836 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4837 	uint16_t phy_page = 0;
4838 	int rv = 0;
4839 
4840 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4841 		device_xname(sc->sc_dev), __func__));
4842 
4843 	switch (sc->sc_type) {
4844 	case WM_T_ICH8:
4845 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4846 		    || (sc->sc_phytype != WMPHY_IGP_3))
4847 			return 0;
4848 
4849 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4850 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4851 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4852 			break;
4853 		}
4854 		/* FALLTHROUGH */
4855 	case WM_T_PCH:
4856 	case WM_T_PCH2:
4857 	case WM_T_PCH_LPT:
4858 	case WM_T_PCH_SPT:
4859 	case WM_T_PCH_CNP:
4860 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4861 		break;
4862 	default:
4863 		return 0;
4864 	}
4865 
4866 	if ((rv = sc->phy.acquire(sc)) != 0)
4867 		return rv;
4868 
4869 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4870 	if ((reg & sw_cfg_mask) == 0)
4871 		goto release;
4872 
4873 	/*
4874 	 * Make sure HW does not configure LCD from PHY extended configuration
4875 	 * before SW configuration
4876 	 */
4877 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4878 	if ((sc->sc_type < WM_T_PCH2)
4879 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4880 		goto release;
4881 
4882 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4883 		device_xname(sc->sc_dev), __func__));
4884 	/* word_addr is in DWORD */
4885 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4886 
4887 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4888 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4889 	if (cnf_size == 0)
4890 		goto release;
4891 
4892 	if (((sc->sc_type == WM_T_PCH)
4893 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4894 	    || (sc->sc_type > WM_T_PCH)) {
4895 		/*
4896 		 * HW configures the SMBus address and LEDs when the OEM and
4897 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4898 		 * are cleared, SW will configure them instead.
4899 		 */
4900 		DPRINTF(sc, WM_DEBUG_INIT,
4901 		    ("%s: %s: Configure SMBus and LED\n",
4902 			device_xname(sc->sc_dev), __func__));
4903 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4904 			goto release;
4905 
4906 		reg = CSR_READ(sc, WMREG_LEDCTL);
4907 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4908 		    (uint16_t)reg);
4909 		if (rv != 0)
4910 			goto release;
4911 	}
4912 
4913 	/* Configure LCD from extended configuration region. */
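	/*
	 * The region holds pairs of NVM words: register data followed by
	 * the register address. A write to IGPHY_PAGE_SELECT is remembered
	 * in phy_page and OR'd into subsequent register addresses.
	 */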
4914 	for (i = 0; i < cnf_size; i++) {
4915 		uint16_t reg_data, reg_addr;
4916 
4917 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4918 			goto release;
4919 
4920 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4921 			goto release;
4922 
4923 		if (reg_addr == IGPHY_PAGE_SELECT)
4924 			phy_page = reg_data;
4925 
4926 		reg_addr &= IGPHY_MAXREGADDR;
4927 		reg_addr |= phy_page;
4928 
4929 		KASSERT(sc->phy.writereg_locked != NULL);
4930 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4931 		    reg_data);
4932 	}
4933 
4934 release:
4935 	sc->phy.release(sc);
4936 	return rv;
4937 }
4938 
4939 /*
4940  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4941  *  @sc:       pointer to the HW structure
4942  *  @d0_state: true when entering D0, false when entering D3
4943  *
4944  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4945  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
4946  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
4947  */
4948 int
4949 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4950 {
4951 	uint32_t mac_reg;
4952 	uint16_t oem_reg;
4953 	int rv;
4954 
4955 	if (sc->sc_type < WM_T_PCH)
4956 		return 0;
4957 
4958 	rv = sc->phy.acquire(sc);
4959 	if (rv != 0)
4960 		return rv;
4961 
4962 	if (sc->sc_type == WM_T_PCH) {
4963 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4964 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4965 			goto release;
4966 	}
4967 
4968 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4969 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4970 		goto release;
4971 
4972 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4973 
4974 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4975 	if (rv != 0)
4976 		goto release;
4977 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4978 
4979 	if (d0_state) {
4980 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4981 			oem_reg |= HV_OEM_BITS_A1KDIS;
4982 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4983 			oem_reg |= HV_OEM_BITS_LPLU;
4984 	} else {
4985 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4986 		    != 0)
4987 			oem_reg |= HV_OEM_BITS_A1KDIS;
4988 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4989 		    != 0)
4990 			oem_reg |= HV_OEM_BITS_LPLU;
4991 	}
4992 
4993 	/* Set Restart auto-neg to activate the bits */
4994 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4995 	    && (wm_phy_resetisblocked(sc) == false))
4996 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4997 
4998 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4999 
5000 release:
5001 	sc->phy.release(sc);
5002 
5003 	return rv;
5004 }
5005 
5006 /* Init hardware bits */
5007 void
5008 wm_initialize_hardware_bits(struct wm_softc *sc)
5009 {
5010 	uint32_t tarc0, tarc1, reg;
5011 
5012 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5013 		device_xname(sc->sc_dev), __func__));
5014 
5015 	/* For 82571 variant, 80003 and ICHs */
5016 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
5017 	    || WM_IS_ICHPCH(sc)) {
5018 
5019 		/* Transmit Descriptor Control 0 */
5020 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
5021 		reg |= TXDCTL_COUNT_DESC;
5022 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
5023 
5024 		/* Transmit Descriptor Control 1 */
5025 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
5026 		reg |= TXDCTL_COUNT_DESC;
5027 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
5028 
5029 		/* TARC0 */
5030 		tarc0 = CSR_READ(sc, WMREG_TARC0);
5031 		switch (sc->sc_type) {
5032 		case WM_T_82571:
5033 		case WM_T_82572:
5034 		case WM_T_82573:
5035 		case WM_T_82574:
5036 		case WM_T_82583:
5037 		case WM_T_80003:
5038 			/* Clear bits 30..27 */
5039 			tarc0 &= ~__BITS(30, 27);
5040 			break;
5041 		default:
5042 			break;
5043 		}
5044 
5045 		switch (sc->sc_type) {
5046 		case WM_T_82571:
5047 		case WM_T_82572:
5048 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5049 
5050 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5051 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5052 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5053 			/* 8257[12] Errata No.7 */
5054 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
5055 
5056 			/* TARC1 bit 28 */
5057 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5058 				tarc1 &= ~__BIT(28);
5059 			else
5060 				tarc1 |= __BIT(28);
5061 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5062 
5063 			/*
5064 			 * 8257[12] Errata No.13
5065 			 * Disable Dynamic Clock Gating.
5066 			 */
5067 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5068 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
5069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5070 			break;
5071 		case WM_T_82573:
5072 		case WM_T_82574:
5073 		case WM_T_82583:
5074 			if ((sc->sc_type == WM_T_82574)
5075 			    || (sc->sc_type == WM_T_82583))
5076 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
5077 
5078 			/* Extended Device Control */
5079 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5080 			reg &= ~__BIT(23);	/* Clear bit 23 */
5081 			reg |= __BIT(22);	/* Set bit 22 */
5082 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5083 
5084 			/* Device Control */
5085 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
5086 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5087 
5088 			/* PCIe Control Register */
5089 			/*
5090 			 * 82573 Errata (unknown).
5091 			 *
5092 			 * 82574 Errata 25 and 82583 Errata 12
5093 			 * "Dropped Rx Packets":
5094 			 *   Fixed in NVM image version 2.1.4 and newer.
5095 			 */
5096 			reg = CSR_READ(sc, WMREG_GCR);
5097 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5098 			CSR_WRITE(sc, WMREG_GCR, reg);
5099 
5100 			if ((sc->sc_type == WM_T_82574)
5101 			    || (sc->sc_type == WM_T_82583)) {
5102 				/*
5103 				 * Document says this bit must be set for
5104 				 * proper operation.
5105 				 */
5106 				reg = CSR_READ(sc, WMREG_GCR);
5107 				reg |= __BIT(22);
5108 				CSR_WRITE(sc, WMREG_GCR, reg);
5109 
5110 				/*
5111 				 * Apply a workaround for a hardware erratum
5112 				 * documented in the errata docs. It fixes an
5113 				 * issue where some error-prone or unreliable
5114 				 * PCIe completions occur, particularly with
5115 				 * ASPM enabled. Without the fix, the issue
5116 				 * can cause Tx timeouts.
5117 				 */
5118 				reg = CSR_READ(sc, WMREG_GCR2);
5119 				reg |= __BIT(0);
5120 				CSR_WRITE(sc, WMREG_GCR2, reg);
5121 			}
5122 			break;
5123 		case WM_T_80003:
5124 			/* TARC0 */
5125 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5126 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5127 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5128 
5129 			/* TARC1 bit 28 */
5130 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5131 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5132 				tarc1 &= ~__BIT(28);
5133 			else
5134 				tarc1 |= __BIT(28);
5135 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5136 			break;
5137 		case WM_T_ICH8:
5138 		case WM_T_ICH9:
5139 		case WM_T_ICH10:
5140 		case WM_T_PCH:
5141 		case WM_T_PCH2:
5142 		case WM_T_PCH_LPT:
5143 		case WM_T_PCH_SPT:
5144 		case WM_T_PCH_CNP:
5145 			/* TARC0 */
5146 			if (sc->sc_type == WM_T_ICH8) {
5147 				/* Set TARC0 bits 29 and 28 */
5148 				tarc0 |= __BITS(29, 28);
5149 			} else if (sc->sc_type == WM_T_PCH_SPT) {
5150 				tarc0 |= __BIT(29);
5151 				/*
5152 				 * Drop bit 28, following Linux.
5153 				 * See I218/I219 spec update
5154 				 * "5. Buffer Overrun While the I219 is
5155 				 * Processing DMA Transactions"
5156 				 */
5157 				tarc0 &= ~__BIT(28);
5158 			}
5159 			/* Set TARC0 bits 23,24,26,27 */
5160 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5161 
5162 			/* CTRL_EXT */
5163 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5164 			reg |= __BIT(22);	/* Set bit 22 */
5165 			/*
5166 			 * Enable PHY low-power state when MAC is at D3
5167 			 * w/o WoL
5168 			 */
5169 			if (sc->sc_type >= WM_T_PCH)
5170 				reg |= CTRL_EXT_PHYPDEN;
5171 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5172 
5173 			/* TARC1 */
5174 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5175 			/* bit 28 */
5176 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5177 				tarc1 &= ~__BIT(28);
5178 			else
5179 				tarc1 |= __BIT(28);
5180 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5181 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5182 
5183 			/* Device Status */
5184 			if (sc->sc_type == WM_T_ICH8) {
5185 				reg = CSR_READ(sc, WMREG_STATUS);
5186 				reg &= ~__BIT(31);
5187 				CSR_WRITE(sc, WMREG_STATUS, reg);
5188 
5189 			}
5190 
5191 			/* IOSFPC */
5192 			if (sc->sc_type == WM_T_PCH_SPT) {
5193 				reg = CSR_READ(sc, WMREG_IOSFPC);
5194 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
5195 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
5196 			}
5197 			/*
5198 			 * To work around a descriptor data corruption issue
5199 			 * during NFS v2 UDP traffic, just disable the NFS
5200 			 * filtering capability.
5201 			 */
5202 			reg = CSR_READ(sc, WMREG_RFCTL);
5203 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5204 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5205 			break;
5206 		default:
5207 			break;
5208 		}
5209 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
5210 
5211 		switch (sc->sc_type) {
5212 		case WM_T_82571:
5213 		case WM_T_82572:
5214 		case WM_T_82573:
5215 		case WM_T_80003:
5216 		case WM_T_ICH8:
5217 			/*
5218 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5219 			 * others, to avoid the RSS Hash Value bug.
5220 			 */
5221 			reg = CSR_READ(sc, WMREG_RFCTL);
5222 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
5223 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5224 			break;
5225 		case WM_T_82574:
5226 			/* Use extended Rx descriptors. */
5227 			reg = CSR_READ(sc, WMREG_RFCTL);
5228 			reg |= WMREG_RFCTL_EXSTEN;
5229 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5230 			break;
5231 		default:
5232 			break;
5233 		}
5234 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5235 		/*
5236 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5237 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5238 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
5239 		 * Correctly by the Device"
5240 		 *
5241 		 * I354(C2000) Errata AVR53:
5242 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
5243 		 * Hang"
5244 		 */
5245 		reg = CSR_READ(sc, WMREG_RFCTL);
5246 		reg |= WMREG_RFCTL_IPV6EXDIS;
5247 		CSR_WRITE(sc, WMREG_RFCTL, reg);
5248 	}
5249 }
5250 
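/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the 82580 RXPBS size field into a packet buffer
 *	allocation via table lookup; out-of-range values map to 0.
 */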
5251 static uint32_t
5252 wm_rxpbs_adjust_82580(uint32_t val)
5253 {
5254 	uint32_t rv = 0;
5255 
5256 	if (val < __arraycount(wm_82580_rxpbs_table))
5257 		rv = wm_82580_rxpbs_table[val];
5258 
5259 	return rv;
5260 }
5261 
5262 /*
5263  * wm_reset_phy:
5264  *
5265  *	generic PHY reset function.
5266  *	Same as e1000_phy_hw_reset_generic()
5267  */
5268 static int
5269 wm_reset_phy(struct wm_softc *sc)
5270 {
5271 	uint32_t reg;
5272 	int rv;
5273 
5274 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5275 		device_xname(sc->sc_dev), __func__));
5276 	if (wm_phy_resetisblocked(sc))
5277 		return -1;
5278 
5279 	rv = sc->phy.acquire(sc);
5280 	if (rv) {
5281 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5282 		    __func__, rv);
5283 		return rv;
5284 	}
5285 
5286 	reg = CSR_READ(sc, WMREG_CTRL);
5287 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5288 	CSR_WRITE_FLUSH(sc);
5289 
5290 	delay(sc->phy.reset_delay_us);
5291 
5292 	CSR_WRITE(sc, WMREG_CTRL, reg);
5293 	CSR_WRITE_FLUSH(sc);
5294 
5295 	delay(150);
5296 
5297 	sc->phy.release(sc);
5298 
5299 	wm_get_cfg_done(sc);
5300 	wm_phy_post_reset(sc);
5301 
5302 	return 0;
5303 }
5304 
5305 /*
5306  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5307  *
5308  * In I219, the descriptor rings must be emptied before resetting the HW
5309  * or before changing the device state to D3 during runtime (runtime PM).
5310  *
5311  * Failure to do this will cause the HW to enter a unit hang state which can
5312  * only be released by PCI reset on the device.
5313  *
5314  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5315  */
5316 static void
5317 wm_flush_desc_rings(struct wm_softc *sc)
5318 {
5319 	pcireg_t preg;
5320 	uint32_t reg;
5321 	struct wm_txqueue *txq;
5322 	wiseman_txdesc_t *txd;
5323 	int nexttx;
5324 	uint32_t rctl;
5325 
5326 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5327 
5328 	/* First, disable MULR fix in FEXTNVM11 */
5329 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
5330 	reg |= FEXTNVM11_DIS_MULRFIX;
5331 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5332 
5333 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5334 	reg = CSR_READ(sc, WMREG_TDLEN(0));
5335 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5336 		return;
5337 
5338 	/*
5339 	 * Remove all descriptors from the tx_ring.
5340 	 *
5341 	 * We want to clear all pending descriptors from the TX ring. Zeroing
5342 	 * happens when the HW reads the regs. We assign the ring itself as
5343 	 * the data of the next descriptor. We don't care about the data since
5344 	 * we are about to reset the HW.
5345 	 */
5346 #ifdef WM_DEBUG
5347 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5348 #endif
5349 	reg = CSR_READ(sc, WMREG_TCTL);
5350 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5351 
5352 	txq = &sc->sc_queue[0].wmq_txq;
5353 	nexttx = txq->txq_next;
5354 	txd = &txq->txq_descs[nexttx];
5355 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5356 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5357 	txd->wtx_fields.wtxu_status = 0;
5358 	txd->wtx_fields.wtxu_options = 0;
5359 	txd->wtx_fields.wtxu_vlan = 0;
5360 
5361 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5362 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5363 
5364 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5365 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5366 	CSR_WRITE_FLUSH(sc);
5367 	delay(250);
5368 
5369 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5370 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5371 		return;
5372 
5373 	/*
5374 	 * Mark all descriptors in the RX ring as consumed and disable the
5375 	 * rx ring.
5376 	 */
5377 #ifdef WM_DEBUG
5378 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5379 #endif
5380 	rctl = CSR_READ(sc, WMREG_RCTL);
5381 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5382 	CSR_WRITE_FLUSH(sc);
5383 	delay(150);
5384 
5385 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
5386 	/* Zero the lower 14 bits (prefetch and host thresholds) */
5387 	reg &= 0xffffc000;
5388 	/*
5389 	 * Update thresholds: prefetch threshold to 31, host threshold
5390 	 * to 1 and make sure the granularity is "descriptors" and not
5391 	 * "cache lines"
5392 	 */
5393 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5394 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5395 
5396 	/* Momentarily enable the RX ring for the changes to take effect */
5397 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5398 	CSR_WRITE_FLUSH(sc);
5399 	delay(150);
5400 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5401 }
5402 
5403 /*
5404  * wm_reset:
5405  *
5406  *	Reset the i82542 chip.
5407  */
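/*
 * Roughly: size the packet buffer for the MTU, quiesce bus mastering,
 * mask interrupts and stop Tx/Rx, issue the family-specific MAC (and
 * possibly PHY) reset, wait for the NVM reload to finish, and clear any
 * latched interrupt and wake state.
 */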
5408 static void
5409 wm_reset(struct wm_softc *sc)
5410 {
5411 	int phy_reset = 0;
5412 	int i, error = 0;
5413 	uint32_t reg;
5414 	uint16_t kmreg;
5415 	int rv;
5416 
5417 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5418 		device_xname(sc->sc_dev), __func__));
5419 	KASSERT(sc->sc_type != 0);
5420 
5421 	/*
5422 	 * Allocate on-chip memory according to the MTU size.
5423 	 * The Packet Buffer Allocation register must be written
5424 	 * before the chip is reset.
5425 	 */
5426 	switch (sc->sc_type) {
5427 	case WM_T_82547:
5428 	case WM_T_82547_2:
5429 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5430 		    PBA_22K : PBA_30K;
5431 		for (i = 0; i < sc->sc_nqueues; i++) {
5432 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5433 			txq->txq_fifo_head = 0;
5434 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5435 			txq->txq_fifo_size =
5436 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5437 			txq->txq_fifo_stall = 0;
5438 		}
5439 		break;
5440 	case WM_T_82571:
5441 	case WM_T_82572:
5442 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
5443 	case WM_T_80003:
5444 		sc->sc_pba = PBA_32K;
5445 		break;
5446 	case WM_T_82573:
5447 		sc->sc_pba = PBA_12K;
5448 		break;
5449 	case WM_T_82574:
5450 	case WM_T_82583:
5451 		sc->sc_pba = PBA_20K;
5452 		break;
5453 	case WM_T_82576:
5454 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5455 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5456 		break;
5457 	case WM_T_82580:
5458 	case WM_T_I350:
5459 	case WM_T_I354:
5460 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5461 		break;
5462 	case WM_T_I210:
5463 	case WM_T_I211:
5464 		sc->sc_pba = PBA_34K;
5465 		break;
5466 	case WM_T_ICH8:
5467 		/* Workaround for a bit corruption issue in FIFO memory */
5468 		sc->sc_pba = PBA_8K;
5469 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5470 		break;
5471 	case WM_T_ICH9:
5472 	case WM_T_ICH10:
5473 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5474 		    PBA_14K : PBA_10K;
5475 		break;
5476 	case WM_T_PCH:
5477 	case WM_T_PCH2:	/* XXX 14K? */
5478 	case WM_T_PCH_LPT:
5479 	case WM_T_PCH_SPT:
5480 	case WM_T_PCH_CNP:
5481 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5482 		    PBA_12K : PBA_26K;
5483 		break;
5484 	default:
5485 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5486 		    PBA_40K : PBA_48K;
5487 		break;
5488 	}
5489 	/*
5490 	 * Only old or non-multiqueue devices have the PBA register
5491 	 * XXX Need special handling for 82575.
5492 	 */
5493 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5494 	    || (sc->sc_type == WM_T_82575))
5495 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5496 
5497 	/* Prevent the PCI-E bus from sticking */
5498 	if (sc->sc_flags & WM_F_PCIE) {
5499 		int timeout = 800;
5500 
5501 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
5502 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5503 
5504 		while (timeout--) {
5505 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5506 			    == 0)
5507 				break;
5508 			delay(100);
5509 		}
5510 		if (timeout == 0)
5511 			device_printf(sc->sc_dev,
5512 			    "failed to disable bus mastering\n");
5513 	}
5514 
5515 	/* Set the completion timeout for the interface */
5516 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5517 	    || (sc->sc_type == WM_T_82580)
5518 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5519 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5520 		wm_set_pcie_completion_timeout(sc);
5521 
5522 	/* Clear interrupt */
5523 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5524 	if (wm_is_using_msix(sc)) {
5525 		if (sc->sc_type != WM_T_82574) {
5526 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5527 			CSR_WRITE(sc, WMREG_EIAC, 0);
5528 		} else
5529 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5530 	}
5531 
5532 	/* Stop the transmit and receive processes. */
5533 	CSR_WRITE(sc, WMREG_RCTL, 0);
5534 	sc->sc_rctl &= ~RCTL_EN;
5535 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5536 	CSR_WRITE_FLUSH(sc);
5537 
5538 	/* XXX set_tbi_sbp_82543() */
5539 
5540 	delay(10*1000);
5541 
5542 	/* Must acquire the MDIO ownership before MAC reset */
5543 	switch (sc->sc_type) {
5544 	case WM_T_82573:
5545 	case WM_T_82574:
5546 	case WM_T_82583:
5547 		error = wm_get_hw_semaphore_82573(sc);
5548 		break;
5549 	default:
5550 		break;
5551 	}
5552 
5553 	/*
5554 	 * 82541 Errata 29? & 82547 Errata 28?
5555 	 * See also the description about PHY_RST bit in CTRL register
5556 	 * in 8254x_GBe_SDM.pdf.
5557 	 */
5558 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5559 		CSR_WRITE(sc, WMREG_CTRL,
5560 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5561 		CSR_WRITE_FLUSH(sc);
5562 		delay(5000);
5563 	}
5564 
5565 	switch (sc->sc_type) {
5566 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5567 	case WM_T_82541:
5568 	case WM_T_82541_2:
5569 	case WM_T_82547:
5570 	case WM_T_82547_2:
5571 		/*
5572 		 * On some chipsets, a reset through a memory-mapped write
5573 		 * cycle can cause the chip to reset before completing the
5574 		 * write cycle. This causes major headache that can be avoided
5575 		 * by issuing the reset via indirect register writes through
5576 		 * I/O space.
5577 		 *
5578 		 * So, if we successfully mapped the I/O BAR at attach time,
5579 		 * use that. Otherwise, try our luck with a memory-mapped
5580 		 * reset.
5581 		 */
5582 		if (sc->sc_flags & WM_F_IOH_VALID)
5583 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5584 		else
5585 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5586 		break;
5587 	case WM_T_82545_3:
5588 	case WM_T_82546_3:
5589 		/* Use the shadow control register on these chips. */
5590 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5591 		break;
5592 	case WM_T_80003:
5593 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5594 		if (sc->phy.acquire(sc) != 0)
5595 			break;
5596 		CSR_WRITE(sc, WMREG_CTRL, reg);
5597 		sc->phy.release(sc);
5598 		break;
5599 	case WM_T_ICH8:
5600 	case WM_T_ICH9:
5601 	case WM_T_ICH10:
5602 	case WM_T_PCH:
5603 	case WM_T_PCH2:
5604 	case WM_T_PCH_LPT:
5605 	case WM_T_PCH_SPT:
5606 	case WM_T_PCH_CNP:
5607 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5608 		if (wm_phy_resetisblocked(sc) == false) {
5609 			/*
5610 			 * Gate automatic PHY configuration by hardware on
5611 			 * non-managed 82579
5612 			 */
5613 			if ((sc->sc_type == WM_T_PCH2)
5614 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5615 				== 0))
5616 				wm_gate_hw_phy_config_ich8lan(sc, true);
5617 
5618 			reg |= CTRL_PHY_RESET;
5619 			phy_reset = 1;
5620 		} else
5621 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5622 		if (sc->phy.acquire(sc) != 0)
5623 			break;
5624 		CSR_WRITE(sc, WMREG_CTRL, reg);
5625 		/* Don't insert a completion barrier during reset */
5626 		delay(20*1000);
5627 		/*
5628 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5629 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5630 		 * only. See also wm_get_swflag_ich8lan().
5631 		 */
5632 		mutex_exit(sc->sc_ich_phymtx);
5633 		break;
5634 	case WM_T_82580:
5635 	case WM_T_I350:
5636 	case WM_T_I354:
5637 	case WM_T_I210:
5638 	case WM_T_I211:
5639 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5640 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5641 			CSR_WRITE_FLUSH(sc);
5642 		delay(5000);
5643 		break;
5644 	case WM_T_82542_2_0:
5645 	case WM_T_82542_2_1:
5646 	case WM_T_82543:
5647 	case WM_T_82540:
5648 	case WM_T_82545:
5649 	case WM_T_82546:
5650 	case WM_T_82571:
5651 	case WM_T_82572:
5652 	case WM_T_82573:
5653 	case WM_T_82574:
5654 	case WM_T_82575:
5655 	case WM_T_82576:
5656 	case WM_T_82583:
5657 	default:
5658 		/* Everything else can safely use the documented method. */
5659 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5660 		break;
5661 	}
5662 
5663 	/* Must release the MDIO ownership after MAC reset */
5664 	switch (sc->sc_type) {
5665 	case WM_T_82573:
5666 	case WM_T_82574:
5667 	case WM_T_82583:
5668 		if (error == 0)
5669 			wm_put_hw_semaphore_82573(sc);
5670 		break;
5671 	default:
5672 		break;
5673 	}
5674 
5675 	/* Set Phy Config Counter to 50msec */
5676 	if (sc->sc_type == WM_T_PCH2) {
5677 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5678 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5679 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5680 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5681 	}
5682 
5683 	if (phy_reset != 0)
5684 		wm_get_cfg_done(sc);
5685 
5686 	/* Reload EEPROM */
5687 	switch (sc->sc_type) {
5688 	case WM_T_82542_2_0:
5689 	case WM_T_82542_2_1:
5690 	case WM_T_82543:
5691 	case WM_T_82544:
5692 		delay(10);
5693 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5694 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5695 		CSR_WRITE_FLUSH(sc);
5696 		delay(2000);
5697 		break;
5698 	case WM_T_82540:
5699 	case WM_T_82545:
5700 	case WM_T_82545_3:
5701 	case WM_T_82546:
5702 	case WM_T_82546_3:
5703 		delay(5*1000);
5704 		/* XXX Disable HW ARPs on ASF enabled adapters */
5705 		break;
5706 	case WM_T_82541:
5707 	case WM_T_82541_2:
5708 	case WM_T_82547:
5709 	case WM_T_82547_2:
5710 		delay(20000);
5711 		/* XXX Disable HW ARPs on ASF enabled adapters */
5712 		break;
5713 	case WM_T_82571:
5714 	case WM_T_82572:
5715 	case WM_T_82573:
5716 	case WM_T_82574:
5717 	case WM_T_82583:
5718 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5719 			delay(10);
5720 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5721 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5722 			CSR_WRITE_FLUSH(sc);
5723 		}
5724 		/* check EECD_EE_AUTORD */
5725 		wm_get_auto_rd_done(sc);
5726 		/*
5727 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
5728 		 * is set.
5729 		 */
5730 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5731 		    || (sc->sc_type == WM_T_82583))
5732 			delay(25*1000);
5733 		break;
5734 	case WM_T_82575:
5735 	case WM_T_82576:
5736 	case WM_T_82580:
5737 	case WM_T_I350:
5738 	case WM_T_I354:
5739 	case WM_T_I210:
5740 	case WM_T_I211:
5741 	case WM_T_80003:
5742 		/* check EECD_EE_AUTORD */
5743 		wm_get_auto_rd_done(sc);
5744 		break;
5745 	case WM_T_ICH8:
5746 	case WM_T_ICH9:
5747 	case WM_T_ICH10:
5748 	case WM_T_PCH:
5749 	case WM_T_PCH2:
5750 	case WM_T_PCH_LPT:
5751 	case WM_T_PCH_SPT:
5752 	case WM_T_PCH_CNP:
5753 		break;
5754 	default:
5755 		panic("%s: unknown type\n", __func__);
5756 	}
5757 
5758 	/* Check whether EEPROM is present or not */
5759 	switch (sc->sc_type) {
5760 	case WM_T_82575:
5761 	case WM_T_82576:
5762 	case WM_T_82580:
5763 	case WM_T_I350:
5764 	case WM_T_I354:
5765 	case WM_T_ICH8:
5766 	case WM_T_ICH9:
5767 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5768 			/* Not found */
5769 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5770 			if (sc->sc_type == WM_T_82575)
5771 				wm_reset_init_script_82575(sc);
5772 		}
5773 		break;
5774 	default:
5775 		break;
5776 	}
5777 
5778 	if (phy_reset != 0)
5779 		wm_phy_post_reset(sc);
5780 
5781 	if ((sc->sc_type == WM_T_82580)
5782 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5783 		/* Clear global device reset status bit */
5784 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5785 	}
5786 
5787 	/* Clear any pending interrupt events. */
5788 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5789 	reg = CSR_READ(sc, WMREG_ICR);
5790 	if (wm_is_using_msix(sc)) {
5791 		if (sc->sc_type != WM_T_82574) {
5792 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5793 			CSR_WRITE(sc, WMREG_EIAC, 0);
5794 		} else
5795 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5796 	}
5797 
5798 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5799 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5800 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5801 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5802 		reg = CSR_READ(sc, WMREG_KABGTXD);
5803 		reg |= KABGTXD_BGSQLBIAS;
5804 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5805 	}
5806 
5807 	/* Reload sc_ctrl */
5808 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5809 
5810 	wm_set_eee(sc);
5811 
5812 	/*
5813 	 * For PCH, this write will make sure that any noise will be detected
5814 	 * as a CRC error and be dropped rather than show up as a bad packet
5815 	 * to the DMA engine
5816 	 */
5817 	if (sc->sc_type == WM_T_PCH)
5818 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5819 
5820 	if (sc->sc_type >= WM_T_82544)
5821 		CSR_WRITE(sc, WMREG_WUC, 0);
5822 
5823 	if (sc->sc_type < WM_T_82575)
5824 		wm_disable_aspm(sc); /* Workaround for some chips */
5825 
5826 	wm_reset_mdicnfg_82580(sc);
5827 
5828 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5829 		wm_pll_workaround_i210(sc);
5830 
5831 	if (sc->sc_type == WM_T_80003) {
5832 		/* Default to TRUE to enable the MDIC W/A */
5833 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5834 
5835 		rv = wm_kmrn_readreg(sc,
5836 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5837 		if (rv == 0) {
5838 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5839 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5840 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5841 			else
5842 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5843 		}
5844 	}
5845 }
5846 
5847 /*
5848  * wm_add_rxbuf:
5849  *
5850  *	Add a receive buffer to the indicated descriptor.
5851  */
5852 static int
5853 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5854 {
5855 	struct wm_softc *sc = rxq->rxq_sc;
5856 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5857 	struct mbuf *m;
5858 	int error;
5859 
5860 	KASSERT(mutex_owned(rxq->rxq_lock));
5861 
5862 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5863 	if (m == NULL)
5864 		return ENOBUFS;
5865 
5866 	MCLGET(m, M_DONTWAIT);
5867 	if ((m->m_flags & M_EXT) == 0) {
5868 		m_freem(m);
5869 		return ENOBUFS;
5870 	}
5871 
5872 	if (rxs->rxs_mbuf != NULL)
5873 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5874 
5875 	rxs->rxs_mbuf = m;
5876 
5877 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5878 	/*
5879 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5880 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5881 	 */
5882 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5883 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5884 	if (error) {
5885 		/* XXX XXX XXX */
5886 		aprint_error_dev(sc->sc_dev,
5887 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5888 		panic("wm_add_rxbuf");
5889 	}
5890 
5891 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5892 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5893 
5894 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5895 		if ((sc->sc_rctl & RCTL_EN) != 0)
5896 			wm_init_rxdesc(rxq, idx);
5897 	} else
5898 		wm_init_rxdesc(rxq, idx);
5899 
5900 	return 0;
5901 }
5902 
5903 /*
5904  * wm_rxdrain:
5905  *
5906  *	Drain the receive queue.
5907  */
5908 static void
5909 wm_rxdrain(struct wm_rxqueue *rxq)
5910 {
5911 	struct wm_softc *sc = rxq->rxq_sc;
5912 	struct wm_rxsoft *rxs;
5913 	int i;
5914 
5915 	KASSERT(mutex_owned(rxq->rxq_lock));
5916 
5917 	for (i = 0; i < WM_NRXDESC; i++) {
5918 		rxs = &rxq->rxq_soft[i];
5919 		if (rxs->rxs_mbuf != NULL) {
5920 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5921 			m_freem(rxs->rxs_mbuf);
5922 			rxs->rxs_mbuf = NULL;
5923 		}
5924 	}
5925 }
5926 
5927 /*
5928  * Set up registers for RSS.
5929  *
5930  * XXX no VMDq support yet
5931  */
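/*
 * The redirection table is filled round-robin (qid = i % sc_nqueues), so
 * incoming flows are spread evenly across the enabled queues; the hash
 * key comes from the network stack via rss_getkey().
 */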
5932 static void
5933 wm_init_rss(struct wm_softc *sc)
5934 {
5935 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5936 	int i;
5937 
5938 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5939 
5940 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5941 		unsigned int qid, reta_ent;
5942 
5943 		qid  = i % sc->sc_nqueues;
5944 		switch (sc->sc_type) {
5945 		case WM_T_82574:
5946 			reta_ent = __SHIFTIN(qid,
5947 			    RETA_ENT_QINDEX_MASK_82574);
5948 			break;
5949 		case WM_T_82575:
5950 			reta_ent = __SHIFTIN(qid,
5951 			    RETA_ENT_QINDEX1_MASK_82575);
5952 			break;
5953 		default:
5954 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5955 			break;
5956 		}
5957 
5958 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5959 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5960 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5961 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5962 	}
5963 
5964 	rss_getkey((uint8_t *)rss_key);
5965 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5966 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5967 
5968 	if (sc->sc_type == WM_T_82574)
5969 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5970 	else
5971 		mrqc = MRQC_ENABLE_RSS_MQ;
5972 
5973 	/*
5974 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
5975 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5976 	 */
5977 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5978 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5979 #if 0
5980 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5981 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5982 #endif
5983 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5984 
5985 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5986 }
5987 
5988 /*
5989  * Adjust the TX and RX queue numbers which the system actually uses.
5990  *
5991  * The numbers are affected by the parameters below:
5992  *     - The number of hardware queues
5993  *     - The number of MSI-X vectors (= "nvectors" argument)
5994  *     - ncpu
5995  */
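/*
 * For example (illustrative numbers): the 82576 has 16 hardware queue
 * pairs, so with nvectors == 5 (one vector is reserved for the link
 * interrupt) on an 8-CPU system, sc_nqueues = min(16, 5 - 1, 8) = 4.
 */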
5996 static void
5997 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5998 {
5999 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
6000 
6001 	if (nvectors < 2) {
6002 		sc->sc_nqueues = 1;
6003 		return;
6004 	}
6005 
6006 	switch (sc->sc_type) {
6007 	case WM_T_82572:
6008 		hw_ntxqueues = 2;
6009 		hw_nrxqueues = 2;
6010 		break;
6011 	case WM_T_82574:
6012 		hw_ntxqueues = 2;
6013 		hw_nrxqueues = 2;
6014 		break;
6015 	case WM_T_82575:
6016 		hw_ntxqueues = 4;
6017 		hw_nrxqueues = 4;
6018 		break;
6019 	case WM_T_82576:
6020 		hw_ntxqueues = 16;
6021 		hw_nrxqueues = 16;
6022 		break;
6023 	case WM_T_82580:
6024 	case WM_T_I350:
6025 	case WM_T_I354:
6026 		hw_ntxqueues = 8;
6027 		hw_nrxqueues = 8;
6028 		break;
6029 	case WM_T_I210:
6030 		hw_ntxqueues = 4;
6031 		hw_nrxqueues = 4;
6032 		break;
6033 	case WM_T_I211:
6034 		hw_ntxqueues = 2;
6035 		hw_nrxqueues = 2;
6036 		break;
6037 		/*
6038 		 * The Ethernet controllers below do not support MSI-X;
6039 		 * this driver doesn't let them use multiqueue.
6040 		 *     - WM_T_80003
6041 		 *     - WM_T_ICH8
6042 		 *     - WM_T_ICH9
6043 		 *     - WM_T_ICH10
6044 		 *     - WM_T_PCH
6045 		 *     - WM_T_PCH2
6046 		 *     - WM_T_PCH_LPT
6047 		 */
6048 	default:
6049 		hw_ntxqueues = 1;
6050 		hw_nrxqueues = 1;
6051 		break;
6052 	}
6053 
6054 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6055 
6056 	/*
6057 	 * Since more queues than MSI-X vectors cannot improve scaling, we
6058 	 * limit the number of queues actually used.
6059 	 */
6060 	if (nvectors < hw_nqueues + 1)
6061 		sc->sc_nqueues = nvectors - 1;
6062 	else
6063 		sc->sc_nqueues = hw_nqueues;
6064 
6065 	/*
6066 	 * Since more queues than CPUs cannot improve scaling, we limit
6067 	 * the number of queues actually used.
6068 	 */
6069 	if (ncpu < sc->sc_nqueues)
6070 		sc->sc_nqueues = ncpu;
6071 }
6072 
6073 static inline bool
6074 wm_is_using_msix(struct wm_softc *sc)
6075 {
6076 
6077 	return (sc->sc_nintrs > 1);
6078 }
6079 
6080 static inline bool
6081 wm_is_using_multiqueue(struct wm_softc *sc)
6082 {
6083 
6084 	return (sc->sc_nqueues > 1);
6085 }
6086 
6087 static int
6088 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6089 {
6090 	struct wm_queue *wmq = &sc->sc_queue[qidx];
6091 
6092 	wmq->wmq_id = qidx;
6093 	wmq->wmq_intr_idx = intr_idx;
6094 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6095 	    wm_handle_queue, wmq);
6096 	if (wmq->wmq_si != NULL)
6097 		return 0;
6098 
6099 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6100 	    wmq->wmq_id);
6101 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6102 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6103 	return ENOMEM;
6104 }
6105 
6106 /*
6107  * Both single interrupt MSI and INTx can use this function.
6108  */
6109 static int
6110 wm_setup_legacy(struct wm_softc *sc)
6111 {
6112 	pci_chipset_tag_t pc = sc->sc_pc;
6113 	const char *intrstr = NULL;
6114 	char intrbuf[PCI_INTRSTR_LEN];
6115 	int error;
6116 
6117 	error = wm_alloc_txrx_queues(sc);
6118 	if (error) {
6119 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6120 		    error);
6121 		return ENOMEM;
6122 	}
6123 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6124 	    sizeof(intrbuf));
6125 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6126 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6127 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6128 	if (sc->sc_ihs[0] == NULL) {
6129 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6130 		    (pci_intr_type(pc, sc->sc_intrs[0])
6131 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6132 		return ENOMEM;
6133 	}
6134 
6135 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6136 	sc->sc_nintrs = 1;
6137 
6138 	return wm_softint_establish_queue(sc, 0, 0);
6139 }
6140 
6141 static int
6142 wm_setup_msix(struct wm_softc *sc)
6143 {
6144 	void *vih;
6145 	kcpuset_t *affinity;
6146 	int qidx, error, intr_idx, txrx_established;
6147 	pci_chipset_tag_t pc = sc->sc_pc;
6148 	const char *intrstr = NULL;
6149 	char intrbuf[PCI_INTRSTR_LEN];
6150 	char intr_xname[INTRDEVNAMEBUF];
6151 
6152 	if (sc->sc_nqueues < ncpu) {
6153 		/*
6154 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
6155 		 * interrupts starts from CPU#1.
6156 		 */
6157 		sc->sc_affinity_offset = 1;
6158 	} else {
6159 		 * In this case, this device uses all CPUs, so we unify the
6160 		 * affinity cpu_index with the MSI-X vector number for readability.
6161 		 * affinitied cpu_index to msix vector number for readability.
6162 		 */
6163 		sc->sc_affinity_offset = 0;
6164 	}
6165 
6166 	error = wm_alloc_txrx_queues(sc);
6167 	if (error) {
6168 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6169 		    error);
6170 		return ENOMEM;
6171 	}
6172 
6173 	kcpuset_create(&affinity, false);
6174 	intr_idx = 0;
6175 
6176 	/*
6177 	 * TX and RX
6178 	 */
6179 	txrx_established = 0;
6180 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6181 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6182 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6183 
6184 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6185 		    sizeof(intrbuf));
6186 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6187 		    PCI_INTR_MPSAFE, true);
6188 		memset(intr_xname, 0, sizeof(intr_xname));
6189 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6190 		    device_xname(sc->sc_dev), qidx);
6191 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6192 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6193 		if (vih == NULL) {
6194 			aprint_error_dev(sc->sc_dev,
6195 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
6196 			    intrstr ? " at " : "",
6197 			    intrstr ? intrstr : "");
6198 
6199 			goto fail;
6200 		}
6201 		kcpuset_zero(affinity);
6202 		/* Round-robin affinity */
6203 		kcpuset_set(affinity, affinity_to);
6204 		error = interrupt_distribute(vih, affinity, NULL);
6205 		if (error == 0) {
6206 			aprint_normal_dev(sc->sc_dev,
6207 			    "for TX and RX interrupting at %s affinity to %u\n",
6208 			    intrstr, affinity_to);
6209 		} else {
6210 			aprint_normal_dev(sc->sc_dev,
6211 			    "for TX and RX interrupting at %s\n", intrstr);
6212 		}
6213 		sc->sc_ihs[intr_idx] = vih;
6214 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6215 			goto fail;
6216 		txrx_established++;
6217 		intr_idx++;
6218 	}
6219 
6220 	/* LINK */
6221 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6222 	    sizeof(intrbuf));
6223 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6224 	memset(intr_xname, 0, sizeof(intr_xname));
6225 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6226 	    device_xname(sc->sc_dev));
6227 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6228 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
6229 	if (vih == NULL) {
6230 		aprint_error_dev(sc->sc_dev,
6231 		    "unable to establish MSI-X(for LINK)%s%s\n",
6232 		    intrstr ? " at " : "",
6233 		    intrstr ? intrstr : "");
6234 
6235 		goto fail;
6236 	}
6237 	/* Keep default affinity to LINK interrupt */
6238 	aprint_normal_dev(sc->sc_dev,
6239 	    "for LINK interrupting at %s\n", intrstr);
6240 	sc->sc_ihs[intr_idx] = vih;
6241 	sc->sc_link_intr_idx = intr_idx;
6242 
6243 	sc->sc_nintrs = sc->sc_nqueues + 1;
6244 	kcpuset_destroy(affinity);
6245 	return 0;
6246 
6247 fail:
6248 	for (qidx = 0; qidx < txrx_established; qidx++) {
6249 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6250 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6251 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6252 	}
6253 
6254 	kcpuset_destroy(affinity);
6255 	return ENOMEM;
6256 }
6257 
6258 static void
6259 wm_unset_stopping_flags(struct wm_softc *sc)
6260 {
6261 	int i;
6262 
6263 	KASSERT(mutex_owned(sc->sc_core_lock));
6264 
6265 	/* Must unset stopping flags in ascending order. */
6266 	for (i = 0; i < sc->sc_nqueues; i++) {
6267 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6268 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6269 
6270 		mutex_enter(txq->txq_lock);
6271 		txq->txq_stopping = false;
6272 		mutex_exit(txq->txq_lock);
6273 
6274 		mutex_enter(rxq->rxq_lock);
6275 		rxq->rxq_stopping = false;
6276 		mutex_exit(rxq->rxq_lock);
6277 	}
6278 
6279 	sc->sc_core_stopping = false;
6280 }
6281 
6282 static void
6283 wm_set_stopping_flags(struct wm_softc *sc)
6284 {
6285 	int i;
6286 
6287 	KASSERT(mutex_owned(sc->sc_core_lock));
6288 
6289 	sc->sc_core_stopping = true;
6290 
6291 	/* Must set stopping flags in ascending order. */
6292 	for (i = 0; i < sc->sc_nqueues; i++) {
6293 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6294 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6295 
6296 		mutex_enter(rxq->rxq_lock);
6297 		rxq->rxq_stopping = true;
6298 		mutex_exit(rxq->rxq_lock);
6299 
6300 		mutex_enter(txq->txq_lock);
6301 		txq->txq_stopping = true;
6302 		mutex_exit(txq->txq_lock);
6303 	}
6304 }
6305 
6306 /*
6307  * Write interrupt interval value to ITR or EITR
6308  */
6309 static void
6310 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6311 {
6312 
6313 	if (!wmq->wmq_set_itr)
6314 		return;
6315 
6316 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6317 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6318 
6319 		/*
6320 		 * The 82575 doesn't have the CNT_INGR field, so overwrite the
6321 		 * counter field in software.
6322 		 */
6323 		if (sc->sc_type == WM_T_82575)
6324 			eitr |= __SHIFTIN(wmq->wmq_itr,
6325 			    EITR_COUNTER_MASK_82575);
6326 		else
6327 			eitr |= EITR_CNT_INGR;
6328 
6329 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6330 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6331 		/*
6332 		 * The 82574 has both ITR and EITR. Set EITR when we use the
6333 		 * multiqueue function with MSI-X.
6334 		 */
6335 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6336 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6337 	} else {
6338 		KASSERT(wmq->wmq_id == 0);
6339 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6340 	}
6341 
6342 	wmq->wmq_set_itr = false;
6343 }
6344 
6345 /*
6346  * TODO
6347  * The dynamic calculation of itr below is almost the same as in Linux
6348  * igb, but it does not fit wm(4) well, so AIM is disabled until we find
6349  * an appropriate way to calculate itr.
6350  */
6351 /*
6352  * Calculate the interrupt interval value to be written to the register
6353  * by wm_itrs_writereg(). This function does not write ITR/EITR itself.
6354  */
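/*
 * Illustrative numbers for the (currently disabled) calculation below:
 * an average packet of 1000 bytes becomes 1024 after the 24-byte
 * overhead is added, which falls in the mid-size range (300, 1200), so
 * new_itr = 1024 / 3 = 341; the result is then multiplied by 4 on
 * everything except the 82575.
 */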
6355 static void
6356 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6357 {
6358 #ifdef NOTYET
6359 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6360 	struct wm_txqueue *txq = &wmq->wmq_txq;
6361 	uint32_t avg_size = 0;
6362 	uint32_t new_itr;
6363 
6364 	if (rxq->rxq_packets)
6365 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6366 	if (txq->txq_packets)
6367 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6368 
6369 	if (avg_size == 0) {
6370 		new_itr = 450; /* restore default value */
6371 		goto out;
6372 	}
6373 
6374 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
6375 	avg_size += 24;
6376 
6377 	/* Don't starve jumbo frames */
6378 	avg_size = uimin(avg_size, 3000);
6379 
6380 	/* Give a little boost to mid-size frames */
6381 	if ((avg_size > 300) && (avg_size < 1200))
6382 		new_itr = avg_size / 3;
6383 	else
6384 		new_itr = avg_size / 2;
6385 
6386 out:
6387 	/*
6388 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6389 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6390 	 */
6391 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6392 		new_itr *= 4;
6393 
6394 	if (new_itr != wmq->wmq_itr) {
6395 		wmq->wmq_itr = new_itr;
6396 		wmq->wmq_set_itr = true;
6397 	} else
6398 		wmq->wmq_set_itr = false;
6399 
6400 	rxq->rxq_packets = 0;
6401 	rxq->rxq_bytes = 0;
6402 	txq->txq_packets = 0;
6403 	txq->txq_bytes = 0;
6404 #endif
6405 }
6406 
6407 static void
6408 wm_init_sysctls(struct wm_softc *sc)
6409 {
6410 	struct sysctllog **log;
6411 	const struct sysctlnode *rnode, *qnode, *cnode;
6412 	int i, rv;
6413 	const char *dvname;
6414 
6415 	log = &sc->sc_sysctllog;
6416 	dvname = device_xname(sc->sc_dev);
6417 
6418 	rv = sysctl_createv(log, 0, NULL, &rnode,
6419 	    0, CTLTYPE_NODE, dvname,
6420 	    SYSCTL_DESCR("wm information and settings"),
6421 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6422 	if (rv != 0)
6423 		goto err;
6424 
6425 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6426 	    CTLTYPE_BOOL, "txrx_workqueue",
6427 	    SYSCTL_DESCR("Use workqueue for packet processing"),
6428 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6429 	if (rv != 0)
6430 		goto teardown;
6431 
6432 	for (i = 0; i < sc->sc_nqueues; i++) {
6433 		struct wm_queue *wmq = &sc->sc_queue[i];
6434 		struct wm_txqueue *txq = &wmq->wmq_txq;
6435 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6436 
6437 		snprintf(sc->sc_queue[i].sysctlname,
6438 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6439 
6440 		if (sysctl_createv(log, 0, &rnode, &qnode,
6441 		    0, CTLTYPE_NODE,
6442 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6443 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6444 			break;
6445 
6446 		if (sysctl_createv(log, 0, &qnode, &cnode,
6447 		    CTLFLAG_READONLY, CTLTYPE_INT,
6448 		    "txq_free", SYSCTL_DESCR("TX queue free"),
6449 		    NULL, 0, &txq->txq_free,
6450 		    0, CTL_CREATE, CTL_EOL) != 0)
6451 			break;
6452 		if (sysctl_createv(log, 0, &qnode, &cnode,
6453 		    CTLFLAG_READONLY, CTLTYPE_INT,
6454 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
6455 		    wm_sysctl_tdh_handler, 0, (void *)txq,
6456 		    0, CTL_CREATE, CTL_EOL) != 0)
6457 			break;
6458 		if (sysctl_createv(log, 0, &qnode, &cnode,
6459 		    CTLFLAG_READONLY, CTLTYPE_INT,
6460 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6461 		    wm_sysctl_tdt_handler, 0, (void *)txq,
6462 		    0, CTL_CREATE, CTL_EOL) != 0)
6463 			break;
6464 		if (sysctl_createv(log, 0, &qnode, &cnode,
6465 		    CTLFLAG_READONLY, CTLTYPE_INT,
6466 		    "txq_next", SYSCTL_DESCR("TX queue next"),
6467 		    NULL, 0, &txq->txq_next,
6468 		    0, CTL_CREATE, CTL_EOL) != 0)
6469 			break;
6470 		if (sysctl_createv(log, 0, &qnode, &cnode,
6471 		    CTLFLAG_READONLY, CTLTYPE_INT,
6472 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6473 		    NULL, 0, &txq->txq_sfree,
6474 		    0, CTL_CREATE, CTL_EOL) != 0)
6475 			break;
6476 		if (sysctl_createv(log, 0, &qnode, &cnode,
6477 		    CTLFLAG_READONLY, CTLTYPE_INT,
6478 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
6479 		    NULL, 0, &txq->txq_snext,
6480 		    0, CTL_CREATE, CTL_EOL) != 0)
6481 			break;
6482 		if (sysctl_createv(log, 0, &qnode, &cnode,
6483 		    CTLFLAG_READONLY, CTLTYPE_INT,
6484 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6485 		    NULL, 0, &txq->txq_sdirty,
6486 		    0, CTL_CREATE, CTL_EOL) != 0)
6487 			break;
6488 		if (sysctl_createv(log, 0, &qnode, &cnode,
6489 		    CTLFLAG_READONLY, CTLTYPE_INT,
6490 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
6491 		    NULL, 0, &txq->txq_flags,
6492 		    0, CTL_CREATE, CTL_EOL) != 0)
6493 			break;
6494 		if (sysctl_createv(log, 0, &qnode, &cnode,
6495 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6496 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6497 		    NULL, 0, &txq->txq_stopping,
6498 		    0, CTL_CREATE, CTL_EOL) != 0)
6499 			break;
6500 		if (sysctl_createv(log, 0, &qnode, &cnode,
6501 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6502 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
6503 		    NULL, 0, &txq->txq_sending,
6504 		    0, CTL_CREATE, CTL_EOL) != 0)
6505 			break;
6506 
6507 		if (sysctl_createv(log, 0, &qnode, &cnode,
6508 		    CTLFLAG_READONLY, CTLTYPE_INT,
6509 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6510 		    NULL, 0, &rxq->rxq_ptr,
6511 		    0, CTL_CREATE, CTL_EOL) != 0)
6512 			break;
6513 	}
6514 
6515 #ifdef WM_DEBUG
6516 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6517 	    CTLTYPE_INT, "debug_flags",
6518 	    SYSCTL_DESCR(
6519 		    "Debug flags:\n"	\
6520 		    "\t0x01 LINK\n"	\
6521 		    "\t0x02 TX\n"	\
6522 		    "\t0x04 RX\n"	\
6523 		    "\t0x08 GMII\n"	\
6524 		    "\t0x10 MANAGE\n"	\
6525 		    "\t0x20 NVM\n"	\
6526 		    "\t0x40 INIT\n"	\
6527 		    "\t0x80 LOCK"),
6528 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6529 	if (rv != 0)
6530 		goto teardown;
6531 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6532 	    CTLTYPE_BOOL, "trigger_reset",
6533 	    SYSCTL_DESCR("Trigger an interface reset"),
6534 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6535 	if (rv != 0)
6536 		goto teardown;
6537 #endif
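	/*
	 * Illustrative usage of the debug sysctls above from userland
	 * (assuming the device attached as "wm0"; these nodes exist only
	 * in kernels built with WM_DEBUG):
	 *
	 *	sysctl -w hw.wm0.debug_flags=0x06	# enable TX and RX debug
	 *	sysctl -w hw.wm0.trigger_reset=1	# force an interface reset
	 */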
6538 
6539 	return;
6540 
6541 teardown:
6542 	sysctl_teardown(log);
6543 err:
6544 	sc->sc_sysctllog = NULL;
6545 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6546 	    __func__, rv);
6547 }
6548 
6549 static void
6550 wm_update_stats(struct wm_softc *sc)
6551 {
6552 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6553 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
6554 	    cexterr;
6555 
6556 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
6557 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
6558 	mpc = CSR_READ(sc, WMREG_MPC);
6559 	colc = CSR_READ(sc, WMREG_COLC);
6560 	sec = CSR_READ(sc, WMREG_SEC);
6561 	rlec = CSR_READ(sc, WMREG_RLEC);
6562 
6563 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
6564 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
6565 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
6566 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
6567 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
6568 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
6569 
6570 	if (sc->sc_type >= WM_T_82543) {
6571 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
6572 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
6573 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
6574 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
6575 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
6576 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
6577 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
6578 		} else {
6579 			cexterr = 0;
6580 			/* Excessive collision + Link down */
6581 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
6582 			    CSR_READ(sc, WMREG_HTDPMC));
6583 		}
6584 
6585 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
6586 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
6587 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6588 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
6589 			    CSR_READ(sc, WMREG_TSCTFC));
6590 		else {
6591 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
6592 			    CSR_READ(sc, WMREG_CBRDPC));
6593 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
6594 			    CSR_READ(sc, WMREG_CBRMPC));
6595 		}
6596 	} else
6597 		algnerrc = rxerrc = cexterr = 0;
6598 
6599 	if (sc->sc_type >= WM_T_82542_2_1) {
6600 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
6601 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
6602 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
6603 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
6604 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
6605 	}
6606 
6607 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
6608 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
6609 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
6610 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
6611 
6612 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6613 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
6614 	}
6615 
6616 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
6617 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
6618 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
6619 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
6620 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
6621 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
6622 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
6623 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
6624 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
6625 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
6626 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
6627 
6628 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
6629 	    CSR_READ(sc, WMREG_GORCL) +
6630 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
6631 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
6632 	    CSR_READ(sc, WMREG_GOTCL) +
6633 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
6634 
6635 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
6636 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
6637 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
6638 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
6639 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
6640 
6641 	if (sc->sc_type >= WM_T_82540) {
6642 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
6643 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
6644 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
6645 	}
6646 
6647 	/*
6648 	 * The TOR(L) register includes:
6649 	 *  - Error
6650 	 *  - Flow control
6651 	 *  - Broadcast rejected (this note appears in the 82574 and newer
6652 	 *    datasheets; what does "broadcast rejected" mean?)
6653 	 */
6654 	WM_EVCNT_ADD(&sc->sc_ev_tor,
6655 	    CSR_READ(sc, WMREG_TORL) +
6656 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
6657 	WM_EVCNT_ADD(&sc->sc_ev_tot,
6658 	    CSR_READ(sc, WMREG_TOTL) +
6659 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
6660 
6661 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
6662 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
6663 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
6664 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
6665 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
6666 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
6667 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
6668 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
6669 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
6670 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
6671 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
6672 	if (sc->sc_type < WM_T_82575) {
6673 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
6674 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
6675 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
6676 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
6677 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
6678 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
6679 		    CSR_READ(sc, WMREG_ICTXQMTC));
6680 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
6681 		    CSR_READ(sc, WMREG_ICRXDMTC));
6682 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
6683 	} else if (!WM_IS_ICHPCH(sc)) {
6684 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
6685 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
6686 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
6687 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
6688 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
6689 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
6690 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
6691 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
6692 
6693 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
6694 		    CSR_READ(sc, WMREG_HGORCL) +
6695 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
6696 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
6697 		    CSR_READ(sc, WMREG_HGOTCL) +
6698 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
6699 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
6700 	}
6701 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6702 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
6703 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
6704 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
6705 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
6706 			    CSR_READ(sc, WMREG_B2OGPRC));
6707 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
6708 			    CSR_READ(sc, WMREG_O2BSPC));
6709 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
6710 			    CSR_READ(sc, WMREG_B2OSPC));
6711 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
6712 			    CSR_READ(sc, WMREG_O2BGPTC));
6713 		}
6714 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
6715 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
6716 	}
6717 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
6718 	if_statadd_ref(nsr, if_collisions, colc);
6719 	if_statadd_ref(nsr, if_ierrors,
6720 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
6721 	/*
6722 	 * WMREG_RNBC is incremented when there are no available buffers in
6723 	 * host memory. It is not a count of dropped packets, because the
6724 	 * controller can still receive packets in that case as long as
6725 	 * there is space in the PHY's FIFO.
6726 	 *
6727 	 * If you want to know the WMREG_RNBC count, use a dedicated EVCNT
6728 	 * instead of if_iqdrops.
6729 	 */
6730 	if_statadd_ref(nsr, if_iqdrops, mpc);
6731 	IF_STAT_PUTREF(ifp);
6732 }
6733 
6734 void
6735 wm_clear_evcnt(struct wm_softc *sc)
6736 {
6737 #ifdef WM_EVENT_COUNTERS
6738 	int i;
6739 
6740 	/* RX queues */
6741 	for (i = 0; i < sc->sc_nqueues; i++) {
6742 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6743 
6744 		WM_Q_EVCNT_STORE(rxq, intr, 0);
6745 		WM_Q_EVCNT_STORE(rxq, defer, 0);
6746 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
6747 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
6748 	}
6749 
6750 	/* TX queues */
6751 	for (i = 0; i < sc->sc_nqueues; i++) {
6752 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6753 		int j;
6754 
6755 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
6756 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
6757 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
6758 		WM_Q_EVCNT_STORE(txq, txdw, 0);
6759 		WM_Q_EVCNT_STORE(txq, txqe, 0);
6760 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
6761 		WM_Q_EVCNT_STORE(txq, tusum, 0);
6762 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
6763 		WM_Q_EVCNT_STORE(txq, tso, 0);
6764 		WM_Q_EVCNT_STORE(txq, tso6, 0);
6765 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
6766 
6767 		for (j = 0; j < WM_NTXSEGS; j++)
6768 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
6769 
6770 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
6771 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
6772 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
6773 		WM_Q_EVCNT_STORE(txq, defrag, 0);
6774 		if (sc->sc_type <= WM_T_82544)
6775 			WM_Q_EVCNT_STORE(txq, underrun, 0);
6776 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
6777 	}
6778 
6779 	/* Miscs */
6780 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
6781 
6782 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
6783 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
6784 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
6785 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
6786 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
6787 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
6788 
6789 	if (sc->sc_type >= WM_T_82543) {
6790 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
6791 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
6792 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6793 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
6794 		else
6795 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
6796 
6797 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
6798 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
6799 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6800 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
6801 		else {
6802 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
6803 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
6804 		}
6805 	}
6806 
6807 	if (sc->sc_type >= WM_T_82542_2_1) {
6808 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
6809 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
6810 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
6811 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
6812 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
6813 	}
6814 
6815 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
6816 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
6817 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
6818 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
6819 
6820 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
6821 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
6822 
6823 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
6824 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
6825 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
6826 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
6827 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
6828 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
6829 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
6830 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
6831 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
6832 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
6833 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
6834 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
6835 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
6836 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
6837 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
6838 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
6839 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
6840 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
6841 	if (sc->sc_type >= WM_T_82540) {
6842 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
6843 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
6844 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
6845 	}
6846 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
6847 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
6848 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
6849 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
6850 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
6851 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
6852 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
6853 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
6854 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
6855 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
6856 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
6857 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
6858 	WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
6859 	if (sc->sc_type < WM_T_82575) {
6860 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
6861 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
6862 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
6863 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
6864 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
6865 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
6866 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6867 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
6868 	} else if (!WM_IS_ICHPCH(sc)) {
6869 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
6870 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
6871 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
6872 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
6873 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
6874 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
6875 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6876 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
6877 
6878 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
6879 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
6880 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
6881 	}
6882 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6883 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
6884 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
6885 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
6886 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
6887 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
6888 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
6889 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
6890 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
6891 	}
6892 #endif
6893 }
6894 
6895 /*
6896  * wm_init:		[ifnet interface function]
6897  *
6898  *	Initialize the interface.
6899  */
6900 static int
6901 wm_init(struct ifnet *ifp)
6902 {
6903 	struct wm_softc *sc = ifp->if_softc;
6904 	int ret;
6905 
6906 	KASSERT(IFNET_LOCKED(ifp));
6907 
6908 	if (sc->sc_dying)
6909 		return ENXIO;
6910 
6911 	mutex_enter(sc->sc_core_lock);
6912 	ret = wm_init_locked(ifp);
6913 	mutex_exit(sc->sc_core_lock);
6914 
6915 	return ret;
6916 }
6917 
6918 static int
6919 wm_init_locked(struct ifnet *ifp)
6920 {
6921 	struct wm_softc *sc = ifp->if_softc;
6922 	struct ethercom *ec = &sc->sc_ethercom;
6923 	int i, j, trynum, error = 0;
6924 	uint32_t reg, sfp_mask = 0;
6925 
6926 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6927 		device_xname(sc->sc_dev), __func__));
6928 	KASSERT(IFNET_LOCKED(ifp));
6929 	KASSERT(mutex_owned(sc->sc_core_lock));
6930 
6931 	/*
6932 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6933 	 * There is a small but measurable benefit to avoiding the adjustment
6934 	 * of the descriptor so that the headers are aligned, for normal mtu,
6935 	 * on such platforms.  One possibility is that the DMA itself is
6936 	 * slightly more efficient if the front of the entire packet (instead
6937 	 * of the front of the headers) is aligned.
6938 	 *
6939 	 * Note we must always set align_tweak to 0 if we are using
6940 	 * jumbo frames.
6941 	 */
6942 #ifdef __NO_STRICT_ALIGNMENT
6943 	sc->sc_align_tweak = 0;
6944 #else
6945 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6946 		sc->sc_align_tweak = 0;
6947 	else
6948 		sc->sc_align_tweak = 2;
6949 #endif /* __NO_STRICT_ALIGNMENT */
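	/*
	 * Why a 2 byte tweak: a 14-byte Ethernet header in a 4-byte aligned
	 * buffer leaves the IP header misaligned.  Receiving into buf + 2
	 * (the usual ETHER_ALIGN trick) realigns it:
	 *
	 *	buf + 0:  2 bytes of padding
	 *	buf + 2:  14-byte Ethernet header
	 *	buf + 16: IP header, back on a 4-byte boundary
	 */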
6950 
6951 	/* Cancel any pending I/O. */
6952 	wm_stop_locked(ifp, false, false);
6953 
6954 	/* Update statistics before reset */
6955 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6956 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6957 
6958 	/* >= PCH_SPT hardware workaround before reset. */
6959 	if (sc->sc_type >= WM_T_PCH_SPT)
6960 		wm_flush_desc_rings(sc);
6961 
6962 	/* Reset the chip to a known state. */
6963 	wm_reset(sc);
6964 
6965 	/*
6966 	 * AMT-based hardware can now take control from the firmware.
6967 	 * Do this after reset.
6968 	 */
6969 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6970 		wm_get_hw_control(sc);
6971 
6972 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
6973 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6974 		wm_legacy_irq_quirk_spt(sc);
6975 
6976 	/* Init hardware bits */
6977 	wm_initialize_hardware_bits(sc);
6978 
6979 	/* Reset the PHY. */
6980 	if (sc->sc_flags & WM_F_HAS_MII)
6981 		wm_gmii_reset(sc);
6982 
6983 	if (sc->sc_type >= WM_T_ICH8) {
6984 		reg = CSR_READ(sc, WMREG_GCR);
6985 		/*
6986 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6987 		 * default after reset.
6988 		 */
6989 		if (sc->sc_type == WM_T_ICH8)
6990 			reg |= GCR_NO_SNOOP_ALL;
6991 		else
6992 			reg &= ~GCR_NO_SNOOP_ALL;
6993 		CSR_WRITE(sc, WMREG_GCR, reg);
6994 	}
6995 
6996 	if ((sc->sc_type >= WM_T_ICH8)
6997 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6998 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6999 
7000 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7001 		reg |= CTRL_EXT_RO_DIS;
7002 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7003 	}
7004 
7005 	/* Calculate (E)ITR value */
7006 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
7007 		/*
7008 		 * For NEWQUEUE's EITR (except for the 82575).
7009 		 * The 82575's EITR should be set to the same throttling value
7010 		 * as the other old controllers' ITR because the interrupt/sec
7011 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
7012 		 *
7013 		 * The 82574's EITR should be set to the same throttling value
7014 		 * as ITR.
7015 		 *
7016 		 * For N interrupts/sec, set this value to 1,000,000 / N.
7017 		 */
7018 		sc->sc_itr_init = 450;
7019 	} else if (sc->sc_type >= WM_T_82543) {
7020 		/*
7021 		 * Set up the interrupt throttling register (units of 256ns)
7022 		 * Note that a footnote in Intel's documentation says this
7023 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
7024 		 * or 10Mbit mode.  Empirically, it appears to be the case
7025 		 * that that is also true for the 1024ns units of the other
7026 		 * interrupt-related timer registers -- so, really, we ought
7027 		 * to divide this value by 4 when the link speed is low.
7028 		 *
7029 		 * XXX implement this division at link speed change!
7030 		 */
7031 
7032 		/*
7033 		 * For N interrupts/sec, set this value to:
7034 		 * 1,000,000,000 / (N * 256).  Note that we set the
7035 		 * absolute and packet timer values to this value
7036 		 * divided by 4 to get "simple timer" behavior.
7037 		 */
7038 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
7039 	}
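	/*
	 * Worked example of the two encodings above:
	 *	NEWQUEUE EITR (except 82575): sc_itr_init = 450
	 *	    => 1,000,000 / 450 ~= 2222 interrupts/sec
	 *	Legacy ITR (256ns units):     sc_itr_init = 1500
	 *	    => 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec
	 */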
7040 
7041 	error = wm_init_txrx_queues(sc);
7042 	if (error)
7043 		goto out;
7044 
7045 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
7046 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
7047 	    (sc->sc_type >= WM_T_82575))
7048 		wm_serdes_power_up_link_82575(sc);
7049 
7050 	/* Clear out the VLAN table -- we don't use it (yet). */
7051 	CSR_WRITE(sc, WMREG_VET, 0);
7052 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
7053 		trynum = 10; /* Due to hw errata */
7054 	else
7055 		trynum = 1;
7056 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
7057 		for (j = 0; j < trynum; j++)
7058 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
7059 
7060 	/*
7061 	 * Set up flow-control parameters.
7062 	 *
7063 	 * XXX Values could probably stand some tuning.
7064 	 */
7065 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
7066 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
7067 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
7068 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)) {
7069 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
7070 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
7071 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
7072 	}
7073 
7074 	sc->sc_fcrtl = FCRTL_DFLT;
7075 	if (sc->sc_type < WM_T_82543) {
7076 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
7077 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
7078 	} else {
7079 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
7080 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
7081 	}
7082 
7083 	if (sc->sc_type == WM_T_80003)
7084 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
7085 	else
7086 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
7087 
7088 	/* Writes the control register. */
7089 	wm_set_vlan(sc);
7090 
7091 	if (sc->sc_flags & WM_F_HAS_MII) {
7092 		uint16_t kmreg;
7093 
7094 		switch (sc->sc_type) {
7095 		case WM_T_80003:
7096 		case WM_T_ICH8:
7097 		case WM_T_ICH9:
7098 		case WM_T_ICH10:
7099 		case WM_T_PCH:
7100 		case WM_T_PCH2:
7101 		case WM_T_PCH_LPT:
7102 		case WM_T_PCH_SPT:
7103 		case WM_T_PCH_CNP:
7104 			/*
7105 			 * Set the MAC to wait the maximum time between each
7106 			 * iteration and increase the max iterations when
7107 			 * polling the PHY; this fixes erroneous timeouts at
7108 			 * 10Mbps.
7109 			 */
7110 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
7111 			    0xFFFF);
7112 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7113 			    &kmreg);
7114 			kmreg |= 0x3F;
7115 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7116 			    kmreg);
7117 			break;
7118 		default:
7119 			break;
7120 		}
7121 
7122 		if (sc->sc_type == WM_T_80003) {
7123 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7124 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
7125 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7126 
7127 			/* Bypass RX and TX FIFOs */
7128 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
7129 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
7130 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
7131 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
7132 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
7133 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
7134 		}
7135 	}
7136 #if 0
7137 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
7138 #endif
7139 
7140 	/* Set up checksum offload parameters. */
7141 	reg = CSR_READ(sc, WMREG_RXCSUM);
7142 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
7143 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
7144 		reg |= RXCSUM_IPOFL;
7145 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
7146 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
7147 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
7148 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
7149 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
7150 
7151 	/* Set registers about MSI-X */
7152 	if (wm_is_using_msix(sc)) {
7153 		uint32_t ivar, qintr_idx;
7154 		struct wm_queue *wmq;
7155 		unsigned int qid;
7156 
7157 		if (sc->sc_type == WM_T_82575) {
7158 			/* Interrupt control */
7159 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7160 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
7161 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7162 
7163 			/* TX and RX */
7164 			for (i = 0; i < sc->sc_nqueues; i++) {
7165 				wmq = &sc->sc_queue[i];
7166 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
7167 				    EITR_TX_QUEUE(wmq->wmq_id)
7168 				    | EITR_RX_QUEUE(wmq->wmq_id));
7169 			}
7170 			/* Link status */
7171 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
7172 			    EITR_OTHER);
7173 		} else if (sc->sc_type == WM_T_82574) {
7174 			/* Interrupt control */
7175 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7176 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
7177 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7178 
7179 			/*
7180 			 * Work around issue with spurious interrupts
7181 			 * in MSI-X mode.
7182 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
7183 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
7184 			 */
7185 			reg = CSR_READ(sc, WMREG_RFCTL);
7186 			reg |= WMREG_RFCTL_ACKDIS;
7187 			CSR_WRITE(sc, WMREG_RFCTL, reg);
7188 
7189 			ivar = 0;
7190 			/* TX and RX */
7191 			for (i = 0; i < sc->sc_nqueues; i++) {
7192 				wmq = &sc->sc_queue[i];
7193 				qid = wmq->wmq_id;
7194 				qintr_idx = wmq->wmq_intr_idx;
7195 
7196 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7197 				    IVAR_TX_MASK_Q_82574(qid));
7198 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7199 				    IVAR_RX_MASK_Q_82574(qid));
7200 			}
7201 			/* Link status */
7202 			ivar |= __SHIFTIN((IVAR_VALID_82574
7203 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
7204 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
7205 		} else {
7206 			/* Interrupt control */
7207 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
7208 			    | GPIE_EIAME | GPIE_PBA);
7209 
7210 			switch (sc->sc_type) {
7211 			case WM_T_82580:
7212 			case WM_T_I350:
7213 			case WM_T_I354:
7214 			case WM_T_I210:
7215 			case WM_T_I211:
7216 				/* TX and RX */
7217 				for (i = 0; i < sc->sc_nqueues; i++) {
7218 					wmq = &sc->sc_queue[i];
7219 					qid = wmq->wmq_id;
7220 					qintr_idx = wmq->wmq_intr_idx;
7221 
7222 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
7223 					ivar &= ~IVAR_TX_MASK_Q(qid);
7224 					ivar |= __SHIFTIN((qintr_idx
7225 						| IVAR_VALID),
7226 					    IVAR_TX_MASK_Q(qid));
7227 					ivar &= ~IVAR_RX_MASK_Q(qid);
7228 					ivar |= __SHIFTIN((qintr_idx
7229 						| IVAR_VALID),
7230 					    IVAR_RX_MASK_Q(qid));
7231 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
7232 				}
7233 				break;
7234 			case WM_T_82576:
7235 				/* TX and RX */
7236 				for (i = 0; i < sc->sc_nqueues; i++) {
7237 					wmq = &sc->sc_queue[i];
7238 					qid = wmq->wmq_id;
7239 					qintr_idx = wmq->wmq_intr_idx;
7240 
7241 					ivar = CSR_READ(sc,
7242 					    WMREG_IVAR_Q_82576(qid));
7243 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
7244 					ivar |= __SHIFTIN((qintr_idx
7245 						| IVAR_VALID),
7246 					    IVAR_TX_MASK_Q_82576(qid));
7247 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
7248 					ivar |= __SHIFTIN((qintr_idx
7249 						| IVAR_VALID),
7250 					    IVAR_RX_MASK_Q_82576(qid));
7251 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
7252 					    ivar);
7253 				}
7254 				break;
7255 			default:
7256 				break;
7257 			}
7258 
7259 			/* Link status */
7260 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
7261 			    IVAR_MISC_OTHER);
7262 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
7263 		}
7264 
7265 		if (wm_is_using_multiqueue(sc)) {
7266 			wm_init_rss(sc);
7267 
7268 			/*
7269 			** NOTE: Receive Full-Packet Checksum Offload
7270 			** is mutually exclusive with Multiqueue. However
7271 			** this is not the same as TCP/IP checksums which
7272 			** still work.
7273 			*/
7274 			reg = CSR_READ(sc, WMREG_RXCSUM);
7275 			reg |= RXCSUM_PCSD;
7276 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
7277 		}
7278 	}
7279 
7280 	/* Set up the interrupt registers. */
7281 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7282 
7283 	/* Enable SFP module insertion interrupt if it's required */
7284 	if ((sc->sc_flags & WM_F_SFP) != 0) {
7285 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
7286 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7287 		sfp_mask = ICR_GPI(0);
7288 	}
7289 
7290 	if (wm_is_using_msix(sc)) {
7291 		uint32_t mask;
7292 		struct wm_queue *wmq;
7293 
7294 		switch (sc->sc_type) {
7295 		case WM_T_82574:
7296 			mask = 0;
7297 			for (i = 0; i < sc->sc_nqueues; i++) {
7298 				wmq = &sc->sc_queue[i];
7299 				mask |= ICR_TXQ(wmq->wmq_id);
7300 				mask |= ICR_RXQ(wmq->wmq_id);
7301 			}
7302 			mask |= ICR_OTHER;
7303 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
7304 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
7305 			break;
7306 		default:
7307 			if (sc->sc_type == WM_T_82575) {
7308 				mask = 0;
7309 				for (i = 0; i < sc->sc_nqueues; i++) {
7310 					wmq = &sc->sc_queue[i];
7311 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
7312 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
7313 				}
7314 				mask |= EITR_OTHER;
7315 			} else {
7316 				mask = 0;
7317 				for (i = 0; i < sc->sc_nqueues; i++) {
7318 					wmq = &sc->sc_queue[i];
7319 					mask |= 1 << wmq->wmq_intr_idx;
7320 				}
7321 				mask |= 1 << sc->sc_link_intr_idx;
7322 			}
7323 			CSR_WRITE(sc, WMREG_EIAC, mask);
7324 			CSR_WRITE(sc, WMREG_EIAM, mask);
7325 			CSR_WRITE(sc, WMREG_EIMS, mask);
7326 
7327 			/* For other interrupts */
7328 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
7329 			break;
7330 		}
7331 	} else {
7332 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
7333 		    ICR_RXO | ICR_RXT0 | sfp_mask;
7334 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
7335 	}
7336 
7337 	/* Set up the inter-packet gap. */
7338 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7339 
7340 	if (sc->sc_type >= WM_T_82543) {
7341 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7342 			struct wm_queue *wmq = &sc->sc_queue[qidx];
7343 			wm_itrs_writereg(sc, wmq);
7344 		}
7345 		/*
7346 		 * Link interrupts occur much less frequently than TX
7347 		 * and RX interrupts, so we don't tune the
7348 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
7349 		 * if_igb does.
7350 		 */
7351 	}
7352 
7353 	/* Set the VLAN EtherType. */
7354 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
7355 
7356 	/*
7357 	 * Set up the transmit control register; we start out with
7358 	 * a collision distance suitable for FDX, but update it when
7359 	 * we resolve the media type.
7360 	 */
7361 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
7362 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
7363 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7364 	if (sc->sc_type >= WM_T_82571)
7365 		sc->sc_tctl |= TCTL_MULR;
7366 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7367 
7368 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7369 		/* Write TDT after TCTL.EN is set. See the datasheet. */
7370 		CSR_WRITE(sc, WMREG_TDT(0), 0);
7371 	}
7372 
7373 	if (sc->sc_type == WM_T_80003) {
7374 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
7375 		reg &= ~TCTL_EXT_GCEX_MASK;
7376 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7377 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7378 	}
7379 
7380 	/* Set the media. */
7381 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7382 		goto out;
7383 
7384 	/* Configure for OS presence */
7385 	wm_init_manageability(sc);
7386 
7387 	/*
7388 	 * Set up the receive control register; we actually program the
7389 	 * register when we set the receive filter. Use multicast address
7390 	 * offset type 0.
7391 	 *
7392 	 * Only the i82544 has the ability to strip the incoming CRC, so we
7393 	 * don't enable that feature.
7394 	 */
7395 	sc->sc_mchash_type = 0;
7396 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7397 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7398 
7399 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
7400 	if (sc->sc_type == WM_T_82574)
7401 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7402 
7403 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7404 		sc->sc_rctl |= RCTL_SECRC;
7405 
7406 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7407 	    && (ifp->if_mtu > ETHERMTU)) {
7408 		sc->sc_rctl |= RCTL_LPE;
7409 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7410 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7411 	}
7412 
7413 	if (MCLBYTES == 2048)
7414 		sc->sc_rctl |= RCTL_2k;
7415 	else {
7416 		if (sc->sc_type >= WM_T_82543) {
7417 			switch (MCLBYTES) {
7418 			case 4096:
7419 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7420 				break;
7421 			case 8192:
7422 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7423 				break;
7424 			case 16384:
7425 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7426 				break;
7427 			default:
7428 				panic("wm_init: MCLBYTES %d unsupported",
7429 				    MCLBYTES);
7430 				break;
7431 			}
7432 		} else
7433 			panic("wm_init: i82542 requires MCLBYTES = 2048");
7434 	}
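	/*
	 * Summary of the cluster-size to RCTL mapping above:
	 *
	 *	MCLBYTES	RCTL buffer-size bits
	 *	2048		RCTL_2k
	 *	4096		RCTL_BSEX | RCTL_BSEX_4k
	 *	8192		RCTL_BSEX | RCTL_BSEX_8k
	 *	16384		RCTL_BSEX | RCTL_BSEX_16k
	 */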
7435 
7436 	/* Enable ECC */
7437 	switch (sc->sc_type) {
7438 	case WM_T_82571:
7439 		reg = CSR_READ(sc, WMREG_PBA_ECC);
7440 		reg |= PBA_ECC_CORR_EN;
7441 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7442 		break;
7443 	case WM_T_PCH_LPT:
7444 	case WM_T_PCH_SPT:
7445 	case WM_T_PCH_CNP:
7446 		reg = CSR_READ(sc, WMREG_PBECCSTS);
7447 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7448 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7449 
7450 		sc->sc_ctrl |= CTRL_MEHE;
7451 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7452 		break;
7453 	default:
7454 		break;
7455 	}
7456 
7457 	/*
7458 	 * Set the receive filter.
7459 	 *
7460 	 * For 82575 and 82576, the RX descriptors must be initialized after
7461 	 * the setting of RCTL.EN in wm_set_filter()
7462 	 */
7463 	wm_set_filter(sc);
7464 
7465 	/* On 82575 and later, set RDT only if RX is enabled */
7466 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7467 		int qidx;
7468 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7469 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7470 			for (i = 0; i < WM_NRXDESC; i++) {
7471 				mutex_enter(rxq->rxq_lock);
7472 				wm_init_rxdesc(rxq, i);
7473 				mutex_exit(rxq->rxq_lock);
7474 
7475 			}
7476 		}
7477 	}
7478 
7479 	wm_unset_stopping_flags(sc);
7480 
7481 	/* Start the one second link check clock. */
7482 	callout_schedule(&sc->sc_tick_ch, hz);
7483 
7484 	/*
7485 	 * ...all done! (IFNET_LOCKED asserted above.)
7486 	 */
7487 	ifp->if_flags |= IFF_RUNNING;
7488 
7489 out:
7490 	/* Save last flags for the callback */
7491 	sc->sc_if_flags = ifp->if_flags;
7492 	sc->sc_ec_capenable = ec->ec_capenable;
7493 	if (error)
7494 		log(LOG_ERR, "%s: interface not running\n",
7495 		    device_xname(sc->sc_dev));
7496 	return error;
7497 }
7498 
7499 /*
7500  * wm_stop:		[ifnet interface function]
7501  *
7502  *	Stop transmission on the interface.
7503  */
7504 static void
7505 wm_stop(struct ifnet *ifp, int disable)
7506 {
7507 	struct wm_softc *sc = ifp->if_softc;
7508 
7509 	ASSERT_SLEEPABLE();
7510 	KASSERT(IFNET_LOCKED(ifp));
7511 
7512 	mutex_enter(sc->sc_core_lock);
7513 	wm_stop_locked(ifp, disable ? true : false, true);
7514 	mutex_exit(sc->sc_core_lock);
7515 
7516 	/*
7517 	 * After wm_set_stopping_flags(), it is guaranteed that
7518 	 * wm_handle_queue_work() does not call workqueue_enqueue().
7519 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
7520 	 * because it can sleep, so workqueue_wait() is called here
7521 	 * instead.
7522 	 */
7523 	for (int i = 0; i < sc->sc_nqueues; i++)
7524 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7525 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7526 }
7527 
7528 static void
7529 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7530 {
7531 	struct wm_softc *sc = ifp->if_softc;
7532 	struct wm_txsoft *txs;
7533 	int i, qidx;
7534 
7535 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7536 		device_xname(sc->sc_dev), __func__));
7537 	KASSERT(IFNET_LOCKED(ifp));
7538 	KASSERT(mutex_owned(sc->sc_core_lock));
7539 
7540 	wm_set_stopping_flags(sc);
7541 
7542 	if (sc->sc_flags & WM_F_HAS_MII) {
7543 		/* Down the MII. */
7544 		mii_down(&sc->sc_mii);
7545 	} else {
7546 #if 0
7547 		/* Should we clear PHY's status properly? */
7548 		wm_reset(sc);
7549 #endif
7550 	}
7551 
7552 	/* Stop the transmit and receive processes. */
7553 	CSR_WRITE(sc, WMREG_TCTL, 0);
7554 	CSR_WRITE(sc, WMREG_RCTL, 0);
7555 	sc->sc_rctl &= ~RCTL_EN;
7556 
7557 	/*
7558 	 * Clear the interrupt mask to ensure the device cannot assert its
7559 	 * interrupt line.
7560 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7561 	 * service any currently pending or shared interrupt.
7562 	 */
7563 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7564 	sc->sc_icr = 0;
7565 	if (wm_is_using_msix(sc)) {
7566 		if (sc->sc_type != WM_T_82574) {
7567 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7568 			CSR_WRITE(sc, WMREG_EIAC, 0);
7569 		} else
7570 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7571 	}
7572 
7573 	/*
7574 	 * Stop callouts after interrupts are disabled; if we have
7575 	 * to wait for them, we will be releasing the CORE_LOCK
7576 	 * briefly, which will unblock interrupts on the current CPU.
7577 	 */
7578 
7579 	/* Stop the one second clock. */
7580 	if (wait)
7581 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7582 	else
7583 		callout_stop(&sc->sc_tick_ch);
7584 
7585 	/* Stop the 82547 Tx FIFO stall check timer. */
7586 	if (sc->sc_type == WM_T_82547) {
7587 		if (wait)
7588 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7589 		else
7590 			callout_stop(&sc->sc_txfifo_ch);
7591 	}
7592 
7593 	/* Release any queued transmit buffers. */
7594 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7595 		struct wm_queue *wmq = &sc->sc_queue[qidx];
7596 		struct wm_txqueue *txq = &wmq->wmq_txq;
7597 		struct mbuf *m;
7598 
7599 		mutex_enter(txq->txq_lock);
7600 		txq->txq_sending = false; /* Ensure watchdog disabled */
7601 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7602 			txs = &txq->txq_soft[i];
7603 			if (txs->txs_mbuf != NULL) {
7604 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7605 				m_freem(txs->txs_mbuf);
7606 				txs->txs_mbuf = NULL;
7607 			}
7608 		}
7609 		/* Drain txq_interq */
7610 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7611 			m_freem(m);
7612 		mutex_exit(txq->txq_lock);
7613 	}
7614 
7615 	/* Mark the interface as down and cancel the watchdog timer. */
7616 	ifp->if_flags &= ~IFF_RUNNING;
7617 	sc->sc_if_flags = ifp->if_flags;
7618 
7619 	if (disable) {
7620 		for (i = 0; i < sc->sc_nqueues; i++) {
7621 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7622 			mutex_enter(rxq->rxq_lock);
7623 			wm_rxdrain(rxq);
7624 			mutex_exit(rxq->rxq_lock);
7625 		}
7626 	}
7627 
7628 #if 0 /* notyet */
7629 	if (sc->sc_type >= WM_T_82544)
7630 		CSR_WRITE(sc, WMREG_WUC, 0);
7631 #endif
7632 }
7633 
7634 static void
7635 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7636 {
7637 	struct mbuf *m;
7638 	int i;
7639 
7640 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7641 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7642 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7643 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7644 		    m->m_data, m->m_len, m->m_flags);
7645 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7646 	    i, i == 1 ? "" : "s");
7647 }
7648 
7649 /*
7650  * wm_82547_txfifo_stall:
7651  *
7652  *	Callout used to wait for the 82547 Tx FIFO to drain,
7653  *	reset the FIFO pointers, and restart packet transmission.
7654  */
7655 static void
7656 wm_82547_txfifo_stall(void *arg)
7657 {
7658 	struct wm_softc *sc = arg;
7659 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7660 
7661 	mutex_enter(txq->txq_lock);
7662 
7663 	if (txq->txq_stopping)
7664 		goto out;
7665 
7666 	if (txq->txq_fifo_stall) {
7667 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7668 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7669 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7670 			/*
7671 			 * Packets have drained.  Stop transmitter, reset
7672 			 * FIFO pointers, restart transmitter, and kick
7673 			 * the packet queue.
7674 			 */
7675 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7676 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7677 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7678 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7679 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7680 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7681 			CSR_WRITE(sc, WMREG_TCTL, tctl);
7682 			CSR_WRITE_FLUSH(sc);
7683 
7684 			txq->txq_fifo_head = 0;
7685 			txq->txq_fifo_stall = 0;
7686 			wm_start_locked(&sc->sc_ethercom.ec_if);
7687 		} else {
7688 			/*
7689 			 * Still waiting for packets to drain; try again in
7690 			 * another tick.
7691 			 */
7692 			callout_schedule(&sc->sc_txfifo_ch, 1);
7693 		}
7694 	}
7695 
7696 out:
7697 	mutex_exit(txq->txq_lock);
7698 }
7699 
7700 /*
7701  * wm_82547_txfifo_bugchk:
7702  *
7703  *	Check for bug condition in the 82547 Tx FIFO.  We need to
7704  *	prevent enqueueing a packet that would wrap around the end
7705 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
7706  *
7707  *	We do this by checking the amount of space before the end
7708  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
7709  *	the Tx FIFO, wait for all remaining packets to drain, reset
7710  *	the internal FIFO pointers to the beginning, and restart
7711  *	transmission on the interface.
7712  */
7713 #define	WM_FIFO_HDR		0x10
7714 #define	WM_82547_PAD_LEN	0x3e0
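/*
 * Worked example with illustrative numbers: assume txq_fifo_size = 0x4000
 * (16KB) and txq_fifo_head = 0x3e00, so only space = 0x200 bytes remain
 * before the wrap.  A 1514-byte frame gives
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600, and since
 * 0x600 >= WM_82547_PAD_LEN + 0x200 (= 0x5e0), the check below stalls the
 * FIFO and waits for it to drain rather than letting the packet wrap.
 */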
7715 static int
7716 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7717 {
7718 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7719 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
7720 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7721 
7722 	/* Just return if already stalled. */
7723 	if (txq->txq_fifo_stall)
7724 		return 1;
7725 
7726 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7727 		/* Stall only occurs in half-duplex mode. */
7728 		goto send_packet;
7729 	}
7730 
7731 	if (len >= WM_82547_PAD_LEN + space) {
7732 		txq->txq_fifo_stall = 1;
7733 		callout_schedule(&sc->sc_txfifo_ch, 1);
7734 		return 1;
7735 	}
7736 
7737 send_packet:
7738 	txq->txq_fifo_head += len;
7739 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
7740 		txq->txq_fifo_head -= txq->txq_fifo_size;
7741 
7742 	return 0;
7743 }
7744 
7745 static int
7746 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7747 {
7748 	int error;
7749 
7750 	/*
7751 	 * Allocate the control data structures, and create and load the
7752 	 * DMA map for it.
7753 	 *
7754 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7755 	 * memory.  So must Rx descriptors.  We simplify by allocating
7756 	 * both sets within the same 4G segment.
7757 	 */
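	/*
	 * The 4G constraint is enforced through the "boundary" argument of
	 * bus_dmamem_alloc() below: a boundary of 0x100000000 guarantees
	 * the allocated segment does not cross a 4GB address boundary.
	 */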
7758 	if (sc->sc_type < WM_T_82544)
7759 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
7760 	else
7761 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
7762 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7763 		txq->txq_descsize = sizeof(nq_txdesc_t);
7764 	else
7765 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
7766 
7767 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7768 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7769 		    1, &txq->txq_desc_rseg, 0)) != 0) {
7770 		aprint_error_dev(sc->sc_dev,
7771 		    "unable to allocate TX control data, error = %d\n",
7772 		    error);
7773 		goto fail_0;
7774 	}
7775 
7776 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7777 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7778 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7779 		aprint_error_dev(sc->sc_dev,
7780 		    "unable to map TX control data, error = %d\n", error);
7781 		goto fail_1;
7782 	}
7783 
7784 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7785 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7786 		aprint_error_dev(sc->sc_dev,
7787 		    "unable to create TX control data DMA map, error = %d\n",
7788 		    error);
7789 		goto fail_2;
7790 	}
7791 
7792 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7793 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7794 		aprint_error_dev(sc->sc_dev,
7795 		    "unable to load TX control data DMA map, error = %d\n",
7796 		    error);
7797 		goto fail_3;
7798 	}
7799 
7800 	return 0;
7801 
7802 fail_3:
7803 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7804 fail_2:
7805 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7806 	    WM_TXDESCS_SIZE(txq));
7807 fail_1:
7808 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7809 fail_0:
7810 	return error;
7811 }
7812 
7813 static void
7814 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7815 {
7816 
7817 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7818 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7819 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7820 	    WM_TXDESCS_SIZE(txq));
7821 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7822 }
7823 
7824 static int
7825 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7826 {
7827 	int error;
7828 	size_t rxq_descs_size;
7829 
7830 	/*
7831 	 * Allocate the control data structures, and create and load the
7832 	 * DMA map for it.
7833 	 *
7834 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7835 	 * memory.  So must Rx descriptors.  We simplify by allocating
7836 	 * both sets within the same 4G segment.
7837 	 */
7838 	rxq->rxq_ndesc = WM_NRXDESC;
7839 	if (sc->sc_type == WM_T_82574)
7840 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7841 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7842 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7843 	else
7844 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7845 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7846 
7847 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7848 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7849 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
7850 		aprint_error_dev(sc->sc_dev,
7851 		    "unable to allocate RX control data, error = %d\n",
7852 		    error);
7853 		goto fail_0;
7854 	}
7855 
7856 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7857 		    rxq->rxq_desc_rseg, rxq_descs_size,
7858 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7859 		aprint_error_dev(sc->sc_dev,
7860 		    "unable to map RX control data, error = %d\n", error);
7861 		goto fail_1;
7862 	}
7863 
7864 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7865 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7866 		aprint_error_dev(sc->sc_dev,
7867 		    "unable to create RX control data DMA map, error = %d\n",
7868 		    error);
7869 		goto fail_2;
7870 	}
7871 
7872 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7873 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7874 		aprint_error_dev(sc->sc_dev,
7875 		    "unable to load RX control data DMA map, error = %d\n",
7876 		    error);
7877 		goto fail_3;
7878 	}
7879 
7880 	return 0;
7881 
7882  fail_3:
7883 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7884  fail_2:
7885 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7886 	    rxq_descs_size);
7887  fail_1:
7888 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7889  fail_0:
7890 	return error;
7891 }
7892 
7893 static void
7894 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7895 {
7896 
7897 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7898 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7899 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7900 	    rxq->rxq_descsize * rxq->rxq_ndesc);
7901 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7902 }
7903 
7904 
7905 static int
7906 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7907 {
7908 	int i, error;
7909 
7910 	/* Create the transmit buffer DMA maps. */
7911 	WM_TXQUEUELEN(txq) =
7912 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7913 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7914 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7915 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7916 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7917 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
7918 			aprint_error_dev(sc->sc_dev,
7919 			    "unable to create Tx DMA map %d, error = %d\n",
7920 			    i, error);
7921 			goto fail;
7922 		}
7923 	}
7924 
7925 	return 0;
7926 
7927 fail:
7928 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7929 		if (txq->txq_soft[i].txs_dmamap != NULL)
7930 			bus_dmamap_destroy(sc->sc_dmat,
7931 			    txq->txq_soft[i].txs_dmamap);
7932 	}
7933 	return error;
7934 }
7935 
7936 static void
7937 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7938 {
7939 	int i;
7940 
7941 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7942 		if (txq->txq_soft[i].txs_dmamap != NULL)
7943 			bus_dmamap_destroy(sc->sc_dmat,
7944 			    txq->txq_soft[i].txs_dmamap);
7945 	}
7946 }
7947 
7948 static int
7949 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7950 {
7951 	int i, error;
7952 
7953 	/* Create the receive buffer DMA maps. */
7954 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7955 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7956 			    MCLBYTES, 0, 0,
7957 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7958 			aprint_error_dev(sc->sc_dev,
7959 			    "unable to create Rx DMA map %d error = %d\n",
7960 			    i, error);
7961 			goto fail;
7962 		}
7963 		rxq->rxq_soft[i].rxs_mbuf = NULL;
7964 	}
7965 
7966 	return 0;
7967 
7968  fail:
7969 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7970 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7971 			bus_dmamap_destroy(sc->sc_dmat,
7972 			    rxq->rxq_soft[i].rxs_dmamap);
7973 	}
7974 	return error;
7975 }
7976 
7977 static void
7978 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7979 {
7980 	int i;
7981 
7982 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7983 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7984 			bus_dmamap_destroy(sc->sc_dmat,
7985 			    rxq->rxq_soft[i].rxs_dmamap);
7986 	}
7987 }
7988 
7989 /*
7990  * wm_alloc_txrx_queues:
7991  *	Allocate {tx,rx}descs and {tx,rx} buffers
7992  */
7993 static int
7994 wm_alloc_txrx_queues(struct wm_softc *sc)
7995 {
7996 	int i, error, tx_done, rx_done;
7997 
7998 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7999 	    KM_SLEEP);
8000 	if (sc->sc_queue == NULL) {
8001 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
8002 		error = ENOMEM;
8003 		goto fail_0;
8004 	}
8005 
8006 	/* For transmission */
8007 	error = 0;
8008 	tx_done = 0;
8009 	for (i = 0; i < sc->sc_nqueues; i++) {
8010 #ifdef WM_EVENT_COUNTERS
8011 		int j;
8012 		const char *xname;
8013 #endif
8014 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8015 		txq->txq_sc = sc;
8016 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8017 
8018 		error = wm_alloc_tx_descs(sc, txq);
8019 		if (error)
8020 			break;
8021 		error = wm_alloc_tx_buffer(sc, txq);
8022 		if (error) {
8023 			wm_free_tx_descs(sc, txq);
8024 			break;
8025 		}
8026 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
8027 		if (txq->txq_interq == NULL) {
8028 			wm_free_tx_descs(sc, txq);
8029 			wm_free_tx_buffer(sc, txq);
8030 			error = ENOMEM;
8031 			break;
8032 		}
8033 
8034 #ifdef WM_EVENT_COUNTERS
8035 		xname = device_xname(sc->sc_dev);
8036 
8037 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
8038 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
8039 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
8040 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
8041 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
8042 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
8043 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
8044 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
8045 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
8046 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
8047 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
8048 
8049 		for (j = 0; j < WM_NTXSEGS; j++) {
8050 			snprintf(txq->txq_txseg_evcnt_names[j],
8051 			    sizeof(txq->txq_txseg_evcnt_names[j]),
8052 			    "txq%02dtxseg%d", i, j);
8053 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
8054 			    EVCNT_TYPE_MISC,
8055 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
8056 		}
8057 
8058 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
8059 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
8060 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
8061 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
8062 		/* Only for 82544 (and earlier?) */
8063 		if (sc->sc_type <= WM_T_82544)
8064 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
8065 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
8066 #endif /* WM_EVENT_COUNTERS */
8067 
8068 		tx_done++;
8069 	}
8070 	if (error)
8071 		goto fail_1;
8072 
8073 	/* For receive */
8074 	error = 0;
8075 	rx_done = 0;
8076 	for (i = 0; i < sc->sc_nqueues; i++) {
8077 #ifdef WM_EVENT_COUNTERS
8078 		const char *xname;
8079 #endif
8080 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8081 		rxq->rxq_sc = sc;
8082 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8083 
8084 		error = wm_alloc_rx_descs(sc, rxq);
8085 		if (error)
8086 			break;
8087 
8088 		error = wm_alloc_rx_buffer(sc, rxq);
8089 		if (error) {
8090 			wm_free_rx_descs(sc, rxq);
8091 			break;
8092 		}
8093 
8094 #ifdef WM_EVENT_COUNTERS
8095 		xname = device_xname(sc->sc_dev);
8096 
8097 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
8098 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
8099 
8100 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
8101 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
8102 #endif /* WM_EVENT_COUNTERS */
8103 
8104 		rx_done++;
8105 	}
8106 	if (error)
8107 		goto fail_2;
8108 
8109 	return 0;
8110 
8111 fail_2:
8112 	for (i = 0; i < rx_done; i++) {
8113 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8114 		wm_free_rx_buffer(sc, rxq);
8115 		wm_free_rx_descs(sc, rxq);
8116 		if (rxq->rxq_lock)
8117 			mutex_obj_free(rxq->rxq_lock);
8118 	}
8119 fail_1:
8120 	for (i = 0; i < tx_done; i++) {
8121 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8122 		pcq_destroy(txq->txq_interq);
8123 		wm_free_tx_buffer(sc, txq);
8124 		wm_free_tx_descs(sc, txq);
8125 		if (txq->txq_lock)
8126 			mutex_obj_free(txq->txq_lock);
8127 	}
8128 
8129 	kmem_free(sc->sc_queue,
8130 	    sizeof(struct wm_queue) * sc->sc_nqueues);
8131 fail_0:
8132 	return error;
8133 }
8134 
8135 /*
8136  * wm_free_txrx_queues:
8137  *	Free {tx,rx}descs and {tx,rx} buffers
8138  */
8139 static void
8140 wm_free_txrx_queues(struct wm_softc *sc)
8141 {
8142 	int i;
8143 
8144 	for (i = 0; i < sc->sc_nqueues; i++) {
8145 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8146 
8147 #ifdef WM_EVENT_COUNTERS
8148 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
8149 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
8150 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
8151 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
8152 #endif /* WM_EVENT_COUNTERS */
8153 
8154 		wm_free_rx_buffer(sc, rxq);
8155 		wm_free_rx_descs(sc, rxq);
8156 		if (rxq->rxq_lock)
8157 			mutex_obj_free(rxq->rxq_lock);
8158 	}
8159 
8160 	for (i = 0; i < sc->sc_nqueues; i++) {
8161 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8162 		struct mbuf *m;
8163 #ifdef WM_EVENT_COUNTERS
8164 		int j;
8165 
8166 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
8167 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
8168 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
8169 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
8170 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
8171 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
8172 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
8173 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
8174 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
8175 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
8176 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
8177 
8178 		for (j = 0; j < WM_NTXSEGS; j++)
8179 			evcnt_detach(&txq->txq_ev_txseg[j]);
8180 
8181 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
8182 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
8183 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8184 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8185 		if (sc->sc_type <= WM_T_82544)
8186 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8187 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8188 #endif /* WM_EVENT_COUNTERS */
8189 
8190 		/* Drain txq_interq */
8191 		while ((m = pcq_get(txq->txq_interq)) != NULL)
8192 			m_freem(m);
8193 		pcq_destroy(txq->txq_interq);
8194 
8195 		wm_free_tx_buffer(sc, txq);
8196 		wm_free_tx_descs(sc, txq);
8197 		if (txq->txq_lock)
8198 			mutex_obj_free(txq->txq_lock);
8199 	}
8200 
8201 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
8202 }
8203 
8204 static void
8205 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8206 {
8207 
8208 	KASSERT(mutex_owned(txq->txq_lock));
8209 
8210 	/* Initialize the transmit descriptor ring. */
8211 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8212 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8213 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8214 	txq->txq_free = WM_NTXDESC(txq);
8215 	txq->txq_next = 0;
8216 }
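/*
 * After this runs the ring is empty from both sides: every descriptor is
 * zeroed and owned by software (txq_free == WM_NTXDESC(txq)), and
 * txq_next == 0 matches the zero TDH/TDT that wm_init_tx_regs() below
 * programs, so hardware and software agree on an empty ring at start-up.
 */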
8217 
8218 static void
8219 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8220     struct wm_txqueue *txq)
8221 {
8222 
8223 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8224 		device_xname(sc->sc_dev), __func__));
8225 	KASSERT(mutex_owned(txq->txq_lock));
8226 
8227 	if (sc->sc_type < WM_T_82543) {
8228 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8229 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8230 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8231 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
8232 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
8233 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
8234 	} else {
8235 		int qid = wmq->wmq_id;
8236 
8237 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8238 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8239 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8240 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
8241 
8242 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8243 			/*
8244 			 * Don't write TDT before TCTL.EN is set.
8245 			 * See the documentation.
8246 			 */
8247 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
8248 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
8249 			    | TXDCTL_WTHRESH(0));
8250 		else {
8251 			/* XXX should update with AIM? */
8252 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8253 			if (sc->sc_type >= WM_T_82540) {
8254 				/* Should be the same */
8255 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8256 			}
8257 
8258 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
8259 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
8260 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
8261 		}
8262 	}
8263 }
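/*
 * A note on the writes above: WM_CDTXADDR_HI/_LO split the 64-bit DMA
 * address of descriptor 0 into the high and low halves for TDBAH/TDBAL,
 * TDLEN is the ring size in bytes, and TDH/TDT start at 0 so the hardware
 * sees an empty ring (an observation from the code, not a datasheet
 * quote).
 */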
8264 
8265 static void
8266 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8267 {
8268 	int i;
8269 
8270 	KASSERT(mutex_owned(txq->txq_lock));
8271 
8272 	/* Initialize the transmit job descriptors. */
8273 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8274 		txq->txq_soft[i].txs_mbuf = NULL;
8275 	txq->txq_sfree = WM_TXQUEUELEN(txq);
8276 	txq->txq_snext = 0;
8277 	txq->txq_sdirty = 0;
8278 }
8279 
8280 static void
8281 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8282     struct wm_txqueue *txq)
8283 {
8284 
8285 	KASSERT(mutex_owned(txq->txq_lock));
8286 
8287 	/*
8288 	 * Set up some register offsets that are different between
8289 	 * the i82542 and the i82543 and later chips.
8290 	 */
8291 	if (sc->sc_type < WM_T_82543)
8292 		txq->txq_tdt_reg = WMREG_OLD_TDT;
8293 	else
8294 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8295 
8296 	wm_init_tx_descs(sc, txq);
8297 	wm_init_tx_regs(sc, wmq, txq);
8298 	wm_init_tx_buffer(sc, txq);
8299 
8300 	/* Clear all flags other than WM_TXQ_LINKDOWN_DISCARD */
8301 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
8302 
8303 	txq->txq_sending = false;
8304 }
8305 
8306 static void
8307 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8308     struct wm_rxqueue *rxq)
8309 {
8310 
8311 	KASSERT(mutex_owned(rxq->rxq_lock));
8312 
8313 	/*
8314 	 * Initialize the receive descriptor and receive job
8315 	 * descriptor rings.
8316 	 */
8317 	if (sc->sc_type < WM_T_82543) {
8318 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8319 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8320 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
8321 		    rxq->rxq_descsize * rxq->rxq_ndesc);
8322 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
8323 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
8324 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
8325 
8326 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
8327 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
8328 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
8329 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
8330 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
8331 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
8332 	} else {
8333 		int qid = wmq->wmq_id;
8334 
8335 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8336 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8337 		CSR_WRITE(sc, WMREG_RDLEN(qid),
8338 		    rxq->rxq_descsize * rxq->rxq_ndesc);
8339 
8340 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8341 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
8342 				panic("%s: MCLBYTES %d unsupported for 82575 "
8343 				    "or higher\n", __func__, MCLBYTES);
8344 
8345 			/*
8346 			 * Currently, we support only
8347 			 * SRRCTL_DESCTYPE_ADV_ONEBUF.
8348 			 */
8349 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
8350 			    SRRCTL_DESCTYPE_ADV_ONEBUF
8351 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
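			/*
			 * Assuming MCLBYTES is 2048 and BSIZEPKT is in 1KB
			 * units (i.e. SRRCTL_BSIZEPKT_SHIFT == 10, an
			 * assumption consistent with the alignment check
			 * above), this programs a 2KB packet buffer per
			 * receive descriptor.
			 */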
8352 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
8353 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
8354 			    | RXDCTL_WTHRESH(1));
8355 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
8356 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
8357 		} else {
8358 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
8359 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
8360 			/* XXX should update with AIM? */
8361 			CSR_WRITE(sc, WMREG_RDTR,
8362 			    (wmq->wmq_itr / 4) | RDTR_FPD);
8363 			/* MUST be same */
8364 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8365 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
8366 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
8367 		}
8368 	}
8369 }
8370 
8371 static int
8372 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8373 {
8374 	struct wm_rxsoft *rxs;
8375 	int error, i;
8376 
8377 	KASSERT(mutex_owned(rxq->rxq_lock));
8378 
8379 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8380 		rxs = &rxq->rxq_soft[i];
8381 		if (rxs->rxs_mbuf == NULL) {
8382 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8383 				log(LOG_ERR, "%s: unable to allocate or map "
8384 				    "rx buffer %d, error = %d\n",
8385 				    device_xname(sc->sc_dev), i, error);
8386 				/*
8387 				 * XXX Should attempt to run with fewer receive
8388 				 * XXX buffers instead of just failing.
8389 				 */
8390 				wm_rxdrain(rxq);
8391 				return ENOMEM;
8392 			}
8393 		} else {
8394 			/*
8395 			 * For 82575 and 82576, the RX descriptors must be
8396 			 * initialized after the setting of RCTL.EN in
8397 			 * wm_set_filter()
8398 			 */
8399 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8400 				wm_init_rxdesc(rxq, i);
8401 		}
8402 	}
8403 	rxq->rxq_ptr = 0;
8404 	rxq->rxq_discard = 0;
8405 	WM_RXCHAIN_RESET(rxq);
8406 
8407 	return 0;
8408 }
8409 
8410 static int
8411 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8412     struct wm_rxqueue *rxq)
8413 {
8414 
8415 	KASSERT(mutex_owned(rxq->rxq_lock));
8416 
8417 	/*
8418 	 * Set up some register offsets that are different between
8419 	 * the i82542 and the i82543 and later chips.
8420 	 */
8421 	if (sc->sc_type < WM_T_82543)
8422 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8423 	else
8424 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8425 
8426 	wm_init_rx_regs(sc, wmq, rxq);
8427 	return wm_init_rx_buffer(sc, rxq);
8428 }
8429 
8430 /*
8431  * wm_init_txrx_queues:
8432  *	Initialize {tx,rx}descs and {tx,rx} buffers
8433  */
8434 static int
8435 wm_init_txrx_queues(struct wm_softc *sc)
8436 {
8437 	int i, error = 0;
8438 
8439 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8440 		device_xname(sc->sc_dev), __func__));
8441 
8442 	for (i = 0; i < sc->sc_nqueues; i++) {
8443 		struct wm_queue *wmq = &sc->sc_queue[i];
8444 		struct wm_txqueue *txq = &wmq->wmq_txq;
8445 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8446 
8447 		/*
8448 		 * TODO
8449 		 * Currently, we use a constant value instead of AIM.
8450 		 * Furthermore, the interrupt interval of a multiqueue setup,
8451 		 * which uses polling mode, is less than the default value.
8452 		 * More tuning and AIM support are required.
8453 		 */
8454 		if (wm_is_using_multiqueue(sc))
8455 			wmq->wmq_itr = 50;
8456 		else
8457 			wmq->wmq_itr = sc->sc_itr_init;
8458 		wmq->wmq_set_itr = true;
8459 
8460 		mutex_enter(txq->txq_lock);
8461 		wm_init_tx_queue(sc, wmq, txq);
8462 		mutex_exit(txq->txq_lock);
8463 
8464 		mutex_enter(rxq->rxq_lock);
8465 		error = wm_init_rx_queue(sc, wmq, rxq);
8466 		mutex_exit(rxq->rxq_lock);
8467 		if (error)
8468 			break;
8469 	}
8470 
8471 	return error;
8472 }
8473 
8474 /*
8475  * wm_tx_offload:
8476  *
8477  *	Set up TCP/IP checksumming parameters for the
8478  *	specified packet.
8479  */
8480 static void
8481 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8482     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8483 {
8484 	struct mbuf *m0 = txs->txs_mbuf;
8485 	struct livengood_tcpip_ctxdesc *t;
8486 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
8487 	uint32_t ipcse;
8488 	struct ether_header *eh;
8489 	int offset, iphl;
8490 	uint8_t fields;
8491 
8492 	/*
8493 	 * XXX It would be nice if the mbuf pkthdr had offset
8494 	 * fields for the protocol headers.
8495 	 */
8496 
8497 	eh = mtod(m0, struct ether_header *);
8498 	switch (htons(eh->ether_type)) {
8499 	case ETHERTYPE_IP:
8500 	case ETHERTYPE_IPV6:
8501 		offset = ETHER_HDR_LEN;
8502 		break;
8503 
8504 	case ETHERTYPE_VLAN:
8505 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8506 		break;
8507 
8508 	default:
8509 		/* Don't support this protocol or encapsulation. */
8510 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8511 		txq->txq_last_hw_ipcs = 0;
8512 		txq->txq_last_hw_tucs = 0;
8513 		*fieldsp = 0;
8514 		*cmdp = 0;
8515 		return;
8516 	}
8517 
8518 	if ((m0->m_pkthdr.csum_flags &
8519 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8520 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8521 	} else
8522 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8523 
8524 	ipcse = offset + iphl - 1;
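	/*
	 * For example (typical values, not from a captured packet): an
	 * untagged IPv4 frame has offset = ETHER_HDR_LEN = 14 and, with no
	 * IP options, iphl = 20, so ipcse = 14 + 20 - 1 = 33, the offset of
	 * the last byte covered by the IP header checksum.
	 */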
8525 
8526 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8527 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8528 	seg = 0;
8529 	fields = 0;
8530 
8531 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8532 		int hlen = offset + iphl;
8533 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8534 
8535 		if (__predict_false(m0->m_len <
8536 				    (hlen + sizeof(struct tcphdr)))) {
8537 			/*
8538 			 * TCP/IP headers are not in the first mbuf; we need
8539 			 * to do this the slow and painful way. Let's just
8540 			 * hope this doesn't happen very often.
8541 			 */
8542 			struct tcphdr th;
8543 
8544 			WM_Q_EVCNT_INCR(txq, tsopain);
8545 
8546 			m_copydata(m0, hlen, sizeof(th), &th);
8547 			if (v4) {
8548 				struct ip ip;
8549 
8550 				m_copydata(m0, offset, sizeof(ip), &ip);
8551 				ip.ip_len = 0;
8552 				m_copyback(m0,
8553 				    offset + offsetof(struct ip, ip_len),
8554 				    sizeof(ip.ip_len), &ip.ip_len);
8555 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8556 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8557 			} else {
8558 				struct ip6_hdr ip6;
8559 
8560 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8561 				ip6.ip6_plen = 0;
8562 				m_copyback(m0,
8563 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8564 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8565 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8566 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8567 			}
8568 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8569 			    sizeof(th.th_sum), &th.th_sum);
8570 
8571 			hlen += th.th_off << 2;
8572 		} else {
8573 			/*
8574 			 * TCP/IP headers are in the first mbuf; we can do
8575 			 * this the easy way.
8576 			 */
8577 			struct tcphdr *th;
8578 
8579 			if (v4) {
8580 				struct ip *ip =
8581 				    (void *)(mtod(m0, char *) + offset);
8582 				th = (void *)(mtod(m0, char *) + hlen);
8583 
8584 				ip->ip_len = 0;
8585 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8586 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8587 			} else {
8588 				struct ip6_hdr *ip6 =
8589 				    (void *)(mtod(m0, char *) + offset);
8590 				th = (void *)(mtod(m0, char *) + hlen);
8591 
8592 				ip6->ip6_plen = 0;
8593 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8594 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8595 			}
8596 			hlen += th->th_off << 2;
8597 		}
8598 
8599 		if (v4) {
8600 			WM_Q_EVCNT_INCR(txq, tso);
8601 			cmdlen |= WTX_TCPIP_CMD_IP;
8602 		} else {
8603 			WM_Q_EVCNT_INCR(txq, tso6);
8604 			ipcse = 0;
8605 		}
8606 		cmd |= WTX_TCPIP_CMD_TSE;
8607 		cmdlen |= WTX_TCPIP_CMD_TSE |
8608 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8609 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8610 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8611 	}
8612 
8613 	/*
8614 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8615 	 * offload feature, if we load the context descriptor, we
8616 	 * MUST provide valid values for IPCSS and TUCSS fields.
8617 	 */
8618 
8619 	ipcs = WTX_TCPIP_IPCSS(offset) |
8620 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8621 	    WTX_TCPIP_IPCSE(ipcse);
8622 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8623 		WM_Q_EVCNT_INCR(txq, ipsum);
8624 		fields |= WTX_IXSM;
8625 	}
8626 
8627 	offset += iphl;
8628 
8629 	if (m0->m_pkthdr.csum_flags &
8630 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8631 		WM_Q_EVCNT_INCR(txq, tusum);
8632 		fields |= WTX_TXSM;
8633 		tucs = WTX_TCPIP_TUCSS(offset) |
8634 		    WTX_TCPIP_TUCSO(offset +
8635 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8636 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8637 	} else if ((m0->m_pkthdr.csum_flags &
8638 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8639 		WM_Q_EVCNT_INCR(txq, tusum6);
8640 		fields |= WTX_TXSM;
8641 		tucs = WTX_TCPIP_TUCSS(offset) |
8642 		    WTX_TCPIP_TUCSO(offset +
8643 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8644 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8645 	} else {
8646 		/* Just initialize it to a valid TCP context. */
8647 		tucs = WTX_TCPIP_TUCSS(offset) |
8648 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8649 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8650 	}
8651 
8652 	*cmdp = cmd;
8653 	*fieldsp = fields;
8654 
8655 	/*
8656 	 * We don't have to write a context descriptor for every packet,
8657 	 * except on the 82574. On the 82574, we must write a context
8658 	 * descriptor for every packet when we use two descriptor queues.
8659 	 *
8660 	 * The 82574L can only remember the *last* context used
8661 	 * regardless of the queue it was used for.  We cannot reuse
8662 	 * contexts on this hardware platform and must generate a new
8663 	 * context every time.  82574L hardware spec, section 7.2.6,
8664 	 * second note.
8665 	 */
8666 	if (sc->sc_nqueues < 2) {
8667 		/*
8668 		 * Setting up a new checksum offload context for every
8669 		 * frame takes a lot of processing time for hardware.
8670 		 * This also reduces performance a lot for small sized
8671 		 * frames, so avoid it if the driver can use a previously
8672 		 * configured checksum offload context.
8673 		 * For TSO, in theory we can reuse the same TSO context only
8674 		 * if the frame is the same type (IP/TCP) and has the same
8675 		 * MSS. However, checking whether a frame has the same IP/TCP
8676 		 * structure is hard, so just ignore that and always
8677 		 * establish a new TSO context.
8678 		 */
8679 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8680 		    == 0) {
8681 			if (txq->txq_last_hw_cmd == cmd &&
8682 			    txq->txq_last_hw_fields == fields &&
8683 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8684 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8685 				WM_Q_EVCNT_INCR(txq, skipcontext);
8686 				return;
8687 			}
8688 		}
8689 
8690 		txq->txq_last_hw_cmd = cmd;
8691 		txq->txq_last_hw_fields = fields;
8692 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8693 		txq->txq_last_hw_tucs = (tucs & 0xffff);
8694 	}
8695 
8696 	/* Fill in the context descriptor. */
8697 	t = (struct livengood_tcpip_ctxdesc *)
8698 	    &txq->txq_descs[txq->txq_next];
8699 	t->tcpip_ipcs = htole32(ipcs);
8700 	t->tcpip_tucs = htole32(tucs);
8701 	t->tcpip_cmdlen = htole32(cmdlen);
8702 	t->tcpip_seg = htole32(seg);
8703 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8704 
8705 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8706 	txs->txs_ndesc++;
8707 }
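/*
 * A sketch of the context that a plain (non-TSO) IPv4/TCP checksum
 * request produces above, assuming an untagged frame with a 20-byte IP
 * header (illustrative, not from the source): IPCSS = 14, IPCSO = 14 +
 * offsetof(struct ip, ip_sum) = 24, IPCSE = 33; TUCSS = 34, TUCSO = 34 +
 * offsetof(struct tcphdr, th_sum) = 50, TUCSE = 0 (the checksum runs to
 * the end of the packet).
 */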
8708 
8709 static inline int
8710 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8711 {
8712 	struct wm_softc *sc = ifp->if_softc;
8713 	u_int cpuid = cpu_index(curcpu());
8714 
8715 	/*
8716 	 * Currently, a simple distribution strategy.
8717 	 * TODO:
8718 	 * Distribute by flowid (RSS hash value).
8719 	 */
8720 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8721 }
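/*
 * A worked example of the mapping above (illustrative values, not from
 * the source): with ncpu = 8, sc_affinity_offset = 0 and sc_nqueues = 4,
 * a caller on CPU 5 gets ((5 + 8 - 0) % 8) % 4 = 1, so its packets go to
 * Tx queue 1.  Adding ncpu before the subtraction keeps the intermediate
 * value non-negative for any affinity offset in [0, ncpu).
 */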
8722 
8723 static inline bool
8724 wm_linkdown_discard(struct wm_txqueue *txq)
8725 {
8726 
8727 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8728 		return true;
8729 
8730 	return false;
8731 }
8732 
8733 /*
8734  * wm_start:		[ifnet interface function]
8735  *
8736  *	Start packet transmission on the interface.
8737  */
8738 static void
8739 wm_start(struct ifnet *ifp)
8740 {
8741 	struct wm_softc *sc = ifp->if_softc;
8742 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8743 
8744 	KASSERT(if_is_mpsafe(ifp));
8745 	/*
8746 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8747 	 */
8748 
8749 	mutex_enter(txq->txq_lock);
8750 	if (!txq->txq_stopping)
8751 		wm_start_locked(ifp);
8752 	mutex_exit(txq->txq_lock);
8753 }
8754 
8755 static void
8756 wm_start_locked(struct ifnet *ifp)
8757 {
8758 	struct wm_softc *sc = ifp->if_softc;
8759 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8760 
8761 	wm_send_common_locked(ifp, txq, false);
8762 }
8763 
8764 static int
8765 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8766 {
8767 	int qid;
8768 	struct wm_softc *sc = ifp->if_softc;
8769 	struct wm_txqueue *txq;
8770 
8771 	qid = wm_select_txqueue(ifp, m);
8772 	txq = &sc->sc_queue[qid].wmq_txq;
8773 
8774 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8775 		m_freem(m);
8776 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8777 		return ENOBUFS;
8778 	}
8779 
8780 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8781 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8782 	if (m->m_flags & M_MCAST)
8783 		if_statinc_ref(nsr, if_omcasts);
8784 	IF_STAT_PUTREF(ifp);
8785 
8786 	if (mutex_tryenter(txq->txq_lock)) {
8787 		if (!txq->txq_stopping)
8788 			wm_transmit_locked(ifp, txq);
8789 		mutex_exit(txq->txq_lock);
8790 	}
8791 
8792 	return 0;
8793 }
8794 
8795 static void
8796 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8797 {
8798 
8799 	wm_send_common_locked(ifp, txq, true);
8800 }
8801 
8802 static void
8803 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8804     bool is_transmit)
8805 {
8806 	struct wm_softc *sc = ifp->if_softc;
8807 	struct mbuf *m0;
8808 	struct wm_txsoft *txs;
8809 	bus_dmamap_t dmamap;
8810 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8811 	bus_addr_t curaddr;
8812 	bus_size_t seglen, curlen;
8813 	uint32_t cksumcmd;
8814 	uint8_t cksumfields;
8815 	bool remap = true;
8816 
8817 	KASSERT(mutex_owned(txq->txq_lock));
8818 	KASSERT(!txq->txq_stopping);
8819 
8820 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8821 		return;
8822 
8823 	if (__predict_false(wm_linkdown_discard(txq))) {
8824 		do {
8825 			if (is_transmit)
8826 				m0 = pcq_get(txq->txq_interq);
8827 			else
8828 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8829 			/*
8830 			 * Increment the successful packet counter even though
8831 			 * the packet is discarded because the PHY link is down.
8832 			 */
8833 			if (m0 != NULL) {
8834 				if_statinc(ifp, if_opackets);
8835 				m_freem(m0);
8836 			}
8837 		} while (m0 != NULL);
8838 		return;
8839 	}
8840 
8841 	/* Remember the previous number of free descriptors. */
8842 	ofree = txq->txq_free;
8843 
8844 	/*
8845 	 * Loop through the send queue, setting up transmit descriptors
8846 	 * until we drain the queue, or use up all available transmit
8847 	 * descriptors.
8848 	 */
8849 	for (;;) {
8850 		m0 = NULL;
8851 
8852 		/* Get a work queue entry. */
8853 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8854 			wm_txeof(txq, UINT_MAX);
8855 			if (txq->txq_sfree == 0) {
8856 				DPRINTF(sc, WM_DEBUG_TX,
8857 				    ("%s: TX: no free job descriptors\n",
8858 					device_xname(sc->sc_dev)));
8859 				WM_Q_EVCNT_INCR(txq, txsstall);
8860 				break;
8861 			}
8862 		}
8863 
8864 		/* Grab a packet off the queue. */
8865 		if (is_transmit)
8866 			m0 = pcq_get(txq->txq_interq);
8867 		else
8868 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8869 		if (m0 == NULL)
8870 			break;
8871 
8872 		DPRINTF(sc, WM_DEBUG_TX,
8873 		    ("%s: TX: have packet to transmit: %p\n",
8874 			device_xname(sc->sc_dev), m0));
8875 
8876 		txs = &txq->txq_soft[txq->txq_snext];
8877 		dmamap = txs->txs_dmamap;
8878 
8879 		use_tso = (m0->m_pkthdr.csum_flags &
8880 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8881 
8882 		/*
8883 		 * So says the Linux driver:
8884 		 * The controller does a simple calculation to make sure
8885 		 * there is enough room in the FIFO before initiating the
8886 		 * DMA for each buffer. The calc is:
8887 		 *	4 = ceil(buffer len / MSS)
8888 		 * To make sure we don't overrun the FIFO, adjust the max
8889 		 * buffer len if the MSS drops.
8890 		 */
8891 		dmamap->dm_maxsegsz =
8892 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8893 		    ? m0->m_pkthdr.segsz << 2
8894 		    : WTX_MAX_LEN;
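		/*
		 * For example (an illustrative MSS, not from the source):
		 * with a TSO MSS of 1460 the candidate cap is 1460 << 2 =
		 * 5840 bytes; if that is below WTX_MAX_LEN it becomes the
		 * per-segment limit, keeping each DMA segment within the
		 * FIFO headroom estimate described above.
		 */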
8895 
8896 		/*
8897 		 * Load the DMA map.  If this fails, the packet either
8898 		 * didn't fit in the allotted number of segments, or we
8899 		 * were short on resources.  For the too-many-segments
8900 		 * case, we simply report an error and drop the packet,
8901 		 * since we can't sanely copy a jumbo packet to a single
8902 		 * buffer.
8903 		 */
8904 retry:
8905 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8906 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8907 		if (__predict_false(error)) {
8908 			if (error == EFBIG) {
8909 				if (remap == true) {
8910 					struct mbuf *m;
8911 
8912 					remap = false;
8913 					m = m_defrag(m0, M_NOWAIT);
8914 					if (m != NULL) {
8915 						WM_Q_EVCNT_INCR(txq, defrag);
8916 						m0 = m;
8917 						goto retry;
8918 					}
8919 				}
8920 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8921 				log(LOG_ERR, "%s: Tx packet consumes too many "
8922 				    "DMA segments, dropping...\n",
8923 				    device_xname(sc->sc_dev));
8924 				wm_dump_mbuf_chain(sc, m0);
8925 				m_freem(m0);
8926 				continue;
8927 			}
8928 			/* Short on resources, just stop for now. */
8929 			DPRINTF(sc, WM_DEBUG_TX,
8930 			    ("%s: TX: dmamap load failed: %d\n",
8931 				device_xname(sc->sc_dev), error));
8932 			break;
8933 		}
8934 
8935 		segs_needed = dmamap->dm_nsegs;
8936 		if (use_tso) {
8937 			/* For sentinel descriptor; see below. */
8938 			segs_needed++;
8939 		}
8940 
8941 		/*
8942 		 * Ensure we have enough descriptors free to describe
8943 		 * the packet. Note, we always reserve one descriptor
8944 		 * at the end of the ring due to the semantics of the
8945 		 * TDT register, plus one more in the event we need
8946 		 * to load offload context.
8947 		 */
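		/*
		 * For example (illustrative numbers): with txq_free = 10, a
		 * packet needing 9 descriptors fails the check below
		 * (9 > 10 - 2) and stalls: one slot must stay unused so the
		 * tail never catches up with the head, and one more may be
		 * consumed by a checksum context descriptor.
		 */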
8948 		if (segs_needed > txq->txq_free - 2) {
8949 			/*
8950 			 * Not enough free descriptors to transmit this
8951 			 * packet.  We haven't committed anything yet,
8952 			 * so just unload the DMA map, put the packet
8953 			 * back on the queue, and punt. Notify the upper
8954 			 * layer that there are no more slots left.
8955 			 */
8956 			DPRINTF(sc, WM_DEBUG_TX,
8957 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8958 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8959 				segs_needed, txq->txq_free - 1));
8960 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8961 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8962 			WM_Q_EVCNT_INCR(txq, txdstall);
8963 			break;
8964 		}
8965 
8966 		/*
8967 		 * Check for 82547 Tx FIFO bug. We need to do this
8968 		 * once we know we can transmit the packet, since we
8969 		 * do some internal FIFO space accounting here.
8970 		 */
8971 		if (sc->sc_type == WM_T_82547 &&
8972 		    wm_82547_txfifo_bugchk(sc, m0)) {
8973 			DPRINTF(sc, WM_DEBUG_TX,
8974 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
8975 				device_xname(sc->sc_dev)));
8976 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8977 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8978 			WM_Q_EVCNT_INCR(txq, fifo_stall);
8979 			break;
8980 		}
8981 
8982 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8983 
8984 		DPRINTF(sc, WM_DEBUG_TX,
8985 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8986 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8987 
8988 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8989 
8990 		/*
8991 		 * Store a pointer to the packet so that we can free it
8992 		 * later.
8993 		 *
8994 		 * Initially, we consider the number of descriptors the
8995 		 * packet uses to be the number of DMA segments.  This may be
8996 		 * incremented by 1 if we do checksum offload (a descriptor
8997 		 * is used to set the checksum context).
8998 		 */
8999 		txs->txs_mbuf = m0;
9000 		txs->txs_firstdesc = txq->txq_next;
9001 		txs->txs_ndesc = segs_needed;
9002 
9003 		/* Set up offload parameters for this packet. */
9004 		if (m0->m_pkthdr.csum_flags &
9005 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9006 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9007 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9008 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
9009 		} else {
9010 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
9011 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
9012 			cksumcmd = 0;
9013 			cksumfields = 0;
9014 		}
9015 
9016 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
9017 
9018 		/* Sync the DMA map. */
9019 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9020 		    BUS_DMASYNC_PREWRITE);
9021 
9022 		/* Initialize the transmit descriptor. */
9023 		for (nexttx = txq->txq_next, seg = 0;
9024 		     seg < dmamap->dm_nsegs; seg++) {
9025 			for (seglen = dmamap->dm_segs[seg].ds_len,
9026 			     curaddr = dmamap->dm_segs[seg].ds_addr;
9027 			     seglen != 0;
9028 			     curaddr += curlen, seglen -= curlen,
9029 			     nexttx = WM_NEXTTX(txq, nexttx)) {
9030 				curlen = seglen;
9031 
9032 				/*
9033 				 * So says the Linux driver:
9034 				 * Work around for premature descriptor
9035 				 * write-backs in TSO mode.  Append a
9036 				 * 4-byte sentinel descriptor.
9037 				 */
9038 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
9039 				    curlen > 8)
9040 					curlen -= 4;
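				/*
				 * Shaving 4 bytes leaves seglen != 0, so the
				 * loop emits one more 4-byte descriptor for
				 * this segment; that trailing descriptor is
				 * the sentinel.
				 */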
9041 
9042 				wm_set_dma_addr(
9043 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
9044 				txq->txq_descs[nexttx].wtx_cmdlen
9045 				    = htole32(cksumcmd | curlen);
9046 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
9047 				    = 0;
9048 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
9049 				    = cksumfields;
9050 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
9051 				lasttx = nexttx;
9052 
9053 				DPRINTF(sc, WM_DEBUG_TX,
9054 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
9055 					"len %#04zx\n",
9056 					device_xname(sc->sc_dev), nexttx,
9057 					(uint64_t)curaddr, curlen));
9058 			}
9059 		}
9060 
9061 		KASSERT(lasttx != -1);
9062 
9063 		/*
9064 		 * Set up the command byte on the last descriptor of
9065 		 * the packet. If we're in the interrupt delay window,
9066 		 * delay the interrupt.
9067 		 */
9068 		txq->txq_descs[lasttx].wtx_cmdlen |=
9069 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9070 
9071 		/*
9072 		 * If VLANs are enabled and the packet has a VLAN tag, set
9073 		 * up the descriptor to encapsulate the packet for us.
9074 		 *
9075 		 * This is only valid on the last descriptor of the packet.
9076 		 */
9077 		if (vlan_has_tag(m0)) {
9078 			txq->txq_descs[lasttx].wtx_cmdlen |=
9079 			    htole32(WTX_CMD_VLE);
9080 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
9081 			    = htole16(vlan_get_tag(m0));
9082 		}
9083 
9084 		txs->txs_lastdesc = lasttx;
9085 
9086 		DPRINTF(sc, WM_DEBUG_TX,
9087 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
9088 			device_xname(sc->sc_dev),
9089 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9090 
9091 		/* Sync the descriptors we're using. */
9092 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9093 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9094 
9095 		/* Give the packet to the chip. */
9096 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9097 
9098 		DPRINTF(sc, WM_DEBUG_TX,
9099 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9100 
9101 		DPRINTF(sc, WM_DEBUG_TX,
9102 		    ("%s: TX: finished transmitting packet, job %d\n",
9103 			device_xname(sc->sc_dev), txq->txq_snext));
9104 
9105 		/* Advance the tx pointer. */
9106 		txq->txq_free -= txs->txs_ndesc;
9107 		txq->txq_next = nexttx;
9108 
9109 		txq->txq_sfree--;
9110 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9111 
9112 		/* Pass the packet to any BPF listeners. */
9113 		bpf_mtap(ifp, m0, BPF_D_OUT);
9114 	}
9115 
9116 	if (m0 != NULL) {
9117 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9118 		WM_Q_EVCNT_INCR(txq, descdrop);
9119 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9120 			__func__));
9121 		m_freem(m0);
9122 	}
9123 
9124 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9125 		/* No more slots; notify upper layer. */
9126 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9127 	}
9128 
9129 	if (txq->txq_free != ofree) {
9130 		/* Set a watchdog timer in case the chip flakes out. */
9131 		txq->txq_lastsent = time_uptime;
9132 		txq->txq_sending = true;
9133 	}
9134 }
9135 
9136 /*
9137  * wm_nq_tx_offload:
9138  *
9139  *	Set up TCP/IP checksumming parameters for the
9140  *	specified packet, for NEWQUEUE devices
9141  */
9142 static void
9143 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
9144     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
9145 {
9146 	struct mbuf *m0 = txs->txs_mbuf;
9147 	uint32_t vl_len, mssidx, cmdc;
9148 	struct ether_header *eh;
9149 	int offset, iphl;
9150 
9151 	/*
9152 	 * XXX It would be nice if the mbuf pkthdr had offset
9153 	 * fields for the protocol headers.
9154 	 */
9155 	*cmdlenp = 0;
9156 	*fieldsp = 0;
9157 
9158 	eh = mtod(m0, struct ether_header *);
9159 	switch (htons(eh->ether_type)) {
9160 	case ETHERTYPE_IP:
9161 	case ETHERTYPE_IPV6:
9162 		offset = ETHER_HDR_LEN;
9163 		break;
9164 
9165 	case ETHERTYPE_VLAN:
9166 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9167 		break;
9168 
9169 	default:
9170 		/* Don't support this protocol or encapsulation. */
9171 		*do_csum = false;
9172 		return;
9173 	}
9174 	*do_csum = true;
9175 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
9176 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
9177 
9178 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
9179 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
9180 
9181 	if ((m0->m_pkthdr.csum_flags &
9182 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
9183 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
9184 	} else {
9185 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
9186 	}
9187 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
9188 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
9189 
9190 	if (vlan_has_tag(m0)) {
9191 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
9192 		    << NQTXC_VLLEN_VLAN_SHIFT);
9193 		*cmdlenp |= NQTX_CMD_VLE;
9194 	}
9195 
9196 	mssidx = 0;
9197 
9198 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
9199 		int hlen = offset + iphl;
9200 		int tcp_hlen;
9201 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
9202 
9203 		if (__predict_false(m0->m_len <
9204 				    (hlen + sizeof(struct tcphdr)))) {
9205 			/*
9206 			 * TCP/IP headers are not in the first mbuf; we need
9207 			 * to do this the slow and painful way. Let's just
9208 			 * hope this doesn't happen very often.
9209 			 */
9210 			struct tcphdr th;
9211 
9212 			WM_Q_EVCNT_INCR(txq, tsopain);
9213 
9214 			m_copydata(m0, hlen, sizeof(th), &th);
9215 			if (v4) {
9216 				struct ip ip;
9217 
9218 				m_copydata(m0, offset, sizeof(ip), &ip);
9219 				ip.ip_len = 0;
9220 				m_copyback(m0,
9221 				    offset + offsetof(struct ip, ip_len),
9222 				    sizeof(ip.ip_len), &ip.ip_len);
9223 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
9224 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
9225 			} else {
9226 				struct ip6_hdr ip6;
9227 
9228 				m_copydata(m0, offset, sizeof(ip6), &ip6);
9229 				ip6.ip6_plen = 0;
9230 				m_copyback(m0,
9231 				    offset + offsetof(struct ip6_hdr, ip6_plen),
9232 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
9233 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
9234 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
9235 			}
9236 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
9237 			    sizeof(th.th_sum), &th.th_sum);
9238 
9239 			tcp_hlen = th.th_off << 2;
9240 		} else {
9241 			/*
9242 			 * TCP/IP headers are in the first mbuf; we can do
9243 			 * this the easy way.
9244 			 */
9245 			struct tcphdr *th;
9246 
9247 			if (v4) {
9248 				struct ip *ip =
9249 				    (void *)(mtod(m0, char *) + offset);
9250 				th = (void *)(mtod(m0, char *) + hlen);
9251 
9252 				ip->ip_len = 0;
9253 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
9254 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
9255 			} else {
9256 				struct ip6_hdr *ip6 =
9257 				    (void *)(mtod(m0, char *) + offset);
9258 				th = (void *)(mtod(m0, char *) + hlen);
9259 
9260 				ip6->ip6_plen = 0;
9261 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
9262 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
9263 			}
9264 			tcp_hlen = th->th_off << 2;
9265 		}
9266 		hlen += tcp_hlen;
9267 		*cmdlenp |= NQTX_CMD_TSE;
9268 
9269 		if (v4) {
9270 			WM_Q_EVCNT_INCR(txq, tso);
9271 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
9272 		} else {
9273 			WM_Q_EVCNT_INCR(txq, tso6);
9274 			*fieldsp |= NQTXD_FIELDS_TUXSM;
9275 		}
9276 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
9277 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9278 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
9279 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
9280 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
9281 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
9282 	} else {
9283 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
9284 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9285 	}
9286 
9287 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
9288 		*fieldsp |= NQTXD_FIELDS_IXSM;
9289 		cmdc |= NQTXC_CMD_IP4;
9290 	}
9291 
9292 	if (m0->m_pkthdr.csum_flags &
9293 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
9294 		WM_Q_EVCNT_INCR(txq, tusum);
9295 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
9296 			cmdc |= NQTXC_CMD_TCP;
9297 		else
9298 			cmdc |= NQTXC_CMD_UDP;
9299 
9300 		cmdc |= NQTXC_CMD_IP4;
9301 		*fieldsp |= NQTXD_FIELDS_TUXSM;
9302 	}
9303 	if (m0->m_pkthdr.csum_flags &
9304 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
9305 		WM_Q_EVCNT_INCR(txq, tusum6);
9306 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
9307 			cmdc |= NQTXC_CMD_TCP;
9308 		else
9309 			cmdc |= NQTXC_CMD_UDP;
9310 
9311 		cmdc |= NQTXC_CMD_IP6;
9312 		*fieldsp |= NQTXD_FIELDS_TUXSM;
9313 	}
9314 
9315 	/*
9316 	 * We don't have to write a context descriptor for every packet on
9317 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350, I354,
9318 	 * I210 and I211. It is enough to write one per Tx queue on these
9319 	 * controllers.
9320 	 * Writing a context descriptor for every packet would be overhead,
9321 	 * but it does not cause problems.
9322 	 */
9323 	/* Fill in the context descriptor. */
9324 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9325 	    htole32(vl_len);
9326 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9327 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9328 	    htole32(cmdc);
9329 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9330 	    htole32(mssidx);
9331 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9332 	DPRINTF(sc, WM_DEBUG_TX,
9333 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
9334 		txq->txq_next, 0, vl_len));
9335 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
9336 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
9337 	txs->txs_ndesc++;
9338 }
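/*
 * A sketch of the advanced context built above for an untagged IPv4/TCP
 * TSO frame (illustrative values, not from the source): vl_len packs
 * MACLEN = 14 and IPLEN = 20 at their shifts, and with an MSS of 1448
 * and a 20-byte TCP header mssidx packs MSS = 1448 and L4LEN = 20, which
 * the hardware uses to rebuild the headers of each emitted segment.
 */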
9339 
9340 /*
9341  * wm_nq_start:		[ifnet interface function]
9342  *
9343  *	Start packet transmission on the interface for NEWQUEUE devices
9344  */
9345 static void
9346 wm_nq_start(struct ifnet *ifp)
9347 {
9348 	struct wm_softc *sc = ifp->if_softc;
9349 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9350 
9351 	KASSERT(if_is_mpsafe(ifp));
9352 	/*
9353 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
9354 	 */
9355 
9356 	mutex_enter(txq->txq_lock);
9357 	if (!txq->txq_stopping)
9358 		wm_nq_start_locked(ifp);
9359 	mutex_exit(txq->txq_lock);
9360 }
9361 
9362 static void
9363 wm_nq_start_locked(struct ifnet *ifp)
9364 {
9365 	struct wm_softc *sc = ifp->if_softc;
9366 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9367 
9368 	wm_nq_send_common_locked(ifp, txq, false);
9369 }
9370 
9371 static int
9372 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
9373 {
9374 	int qid;
9375 	struct wm_softc *sc = ifp->if_softc;
9376 	struct wm_txqueue *txq;
9377 
9378 	qid = wm_select_txqueue(ifp, m);
9379 	txq = &sc->sc_queue[qid].wmq_txq;
9380 
9381 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9382 		m_freem(m);
9383 		WM_Q_EVCNT_INCR(txq, pcqdrop);
9384 		return ENOBUFS;
9385 	}
9386 
9387 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9388 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
9389 	if (m->m_flags & M_MCAST)
9390 		if_statinc_ref(nsr, if_omcasts);
9391 	IF_STAT_PUTREF(ifp);
9392 
9393 	/*
9394 	 * There are two situations in which this mutex_tryenter() fails
9395 	 * at run time:
9396 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
9397 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
9398 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
9399 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
9400 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
9401 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
9402 	 * stuck, either.
9403 	 */
9404 	if (mutex_tryenter(txq->txq_lock)) {
9405 		if (!txq->txq_stopping)
9406 			wm_nq_transmit_locked(ifp, txq);
9407 		mutex_exit(txq->txq_lock);
9408 	}
9409 
9410 	return 0;
9411 }
9412 
9413 static void
9414 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9415 {
9416 
9417 	wm_nq_send_common_locked(ifp, txq, true);
9418 }
9419 
9420 static void
9421 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9422     bool is_transmit)
9423 {
9424 	struct wm_softc *sc = ifp->if_softc;
9425 	struct mbuf *m0;
9426 	struct wm_txsoft *txs;
9427 	bus_dmamap_t dmamap;
9428 	int error, nexttx, lasttx = -1, seg, segs_needed;
9429 	bool do_csum, sent;
9430 	bool remap = true;
9431 
9432 	KASSERT(mutex_owned(txq->txq_lock));
9433 	KASSERT(!txq->txq_stopping);
9434 
9435 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9436 		return;
9437 
9438 	if (__predict_false(wm_linkdown_discard(txq))) {
9439 		do {
9440 			if (is_transmit)
9441 				m0 = pcq_get(txq->txq_interq);
9442 			else
9443 				IFQ_DEQUEUE(&ifp->if_snd, m0);
9444 			/*
9445 			 * Increment the successful packet counter even though
9446 			 * the packet is discarded because the PHY link is down.
9447 			 */
9448 			if (m0 != NULL) {
9449 				if_statinc(ifp, if_opackets);
9450 				m_freem(m0);
9451 			}
9452 		} while (m0 != NULL);
9453 		return;
9454 	}
9455 
9456 	sent = false;
9457 
9458 	/*
9459 	 * Loop through the send queue, setting up transmit descriptors
9460 	 * until we drain the queue, or use up all available transmit
9461 	 * descriptors.
9462 	 */
9463 	for (;;) {
9464 		m0 = NULL;
9465 
9466 		/* Get a work queue entry. */
9467 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9468 			wm_txeof(txq, UINT_MAX);
9469 			if (txq->txq_sfree == 0) {
9470 				DPRINTF(sc, WM_DEBUG_TX,
9471 				    ("%s: TX: no free job descriptors\n",
9472 					device_xname(sc->sc_dev)));
9473 				WM_Q_EVCNT_INCR(txq, txsstall);
9474 				break;
9475 			}
9476 		}
9477 
9478 		/* Grab a packet off the queue. */
9479 		if (is_transmit)
9480 			m0 = pcq_get(txq->txq_interq);
9481 		else
9482 			IFQ_DEQUEUE(&ifp->if_snd, m0);
9483 		if (m0 == NULL)
9484 			break;
9485 
9486 		DPRINTF(sc, WM_DEBUG_TX,
9487 		    ("%s: TX: have packet to transmit: %p\n",
9488 			device_xname(sc->sc_dev), m0));
9489 
9490 		txs = &txq->txq_soft[txq->txq_snext];
9491 		dmamap = txs->txs_dmamap;
9492 
9493 		/*
9494 		 * Load the DMA map.  If this fails, the packet either
9495 		 * didn't fit in the allotted number of segments, or we
9496 		 * were short on resources.  For the too-many-segments
9497 		 * case, we simply report an error and drop the packet,
9498 		 * since we can't sanely copy a jumbo packet to a single
9499 		 * buffer.
9500 		 */
9501 retry:
9502 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9503 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9504 		if (__predict_false(error)) {
9505 			if (error == EFBIG) {
9506 				if (remap == true) {
9507 					struct mbuf *m;
9508 
9509 					remap = false;
9510 					m = m_defrag(m0, M_NOWAIT);
9511 					if (m != NULL) {
9512 						WM_Q_EVCNT_INCR(txq, defrag);
9513 						m0 = m;
9514 						goto retry;
9515 					}
9516 				}
9517 				WM_Q_EVCNT_INCR(txq, toomanyseg);
9518 				log(LOG_ERR, "%s: Tx packet consumes too many "
9519 				    "DMA segments, dropping...\n",
9520 				    device_xname(sc->sc_dev));
9521 				wm_dump_mbuf_chain(sc, m0);
9522 				m_freem(m0);
9523 				continue;
9524 			}
9525 			/* Short on resources, just stop for now. */
9526 			DPRINTF(sc, WM_DEBUG_TX,
9527 			    ("%s: TX: dmamap load failed: %d\n",
9528 				device_xname(sc->sc_dev), error));
9529 			break;
9530 		}
9531 
9532 		segs_needed = dmamap->dm_nsegs;
9533 
9534 		/*
9535 		 * Ensure we have enough descriptors free to describe
9536 		 * the packet. Note, we always reserve one descriptor
9537 		 * at the end of the ring due to the semantics of the
9538 		 * TDT register, plus one more in the event we need
9539 		 * to load offload context.
9540 		 */
9541 		if (segs_needed > txq->txq_free - 2) {
9542 			/*
9543 			 * Not enough free descriptors to transmit this
9544 			 * packet.  We haven't committed anything yet,
9545 			 * so just unload the DMA map, put the packet
9546 			 * back on the queue, and punt. Notify the upper
9547 			 * layer that there are no more slots left.
9548 			 */
9549 			DPRINTF(sc, WM_DEBUG_TX,
9550 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
9551 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
9552 				segs_needed, txq->txq_free - 1));
9553 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9554 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9555 			WM_Q_EVCNT_INCR(txq, txdstall);
9556 			break;
9557 		}
9558 
9559 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9560 
9561 		DPRINTF(sc, WM_DEBUG_TX,
9562 		    ("%s: TX: packet has %d (%d) DMA segments\n",
9563 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9564 
9565 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9566 
9567 		/*
9568 		 * Store a pointer to the packet so that we can free it
9569 		 * later.
9570 		 *
9571 		 * Initially, we consider the number of descriptors the
9572 		 * packet uses to be the number of DMA segments.  This may be
9573 		 * incremented by 1 if we do checksum offload (a descriptor
9574 		 * is used to set the checksum context).
9575 		 */
9576 		txs->txs_mbuf = m0;
9577 		txs->txs_firstdesc = txq->txq_next;
9578 		txs->txs_ndesc = segs_needed;
9579 
9580 		/* Set up offload parameters for this packet. */
9581 		uint32_t cmdlen, fields, dcmdlen;
9582 		if (m0->m_pkthdr.csum_flags &
9583 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9584 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9585 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9586 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9587 			    &do_csum);
9588 		} else {
9589 			do_csum = false;
9590 			cmdlen = 0;
9591 			fields = 0;
9592 		}
9593 
9594 		/* Sync the DMA map. */
9595 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9596 		    BUS_DMASYNC_PREWRITE);
9597 
9598 		/* Initialize the first transmit descriptor. */
9599 		nexttx = txq->txq_next;
9600 		if (!do_csum) {
9601 			/* Set up a legacy descriptor */
9602 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9603 			    dmamap->dm_segs[0].ds_addr);
9604 			txq->txq_descs[nexttx].wtx_cmdlen =
9605 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9606 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9607 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9608 			if (vlan_has_tag(m0)) {
9609 				txq->txq_descs[nexttx].wtx_cmdlen |=
9610 				    htole32(WTX_CMD_VLE);
9611 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9612 				    htole16(vlan_get_tag(m0));
9613 			} else
9614 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
9615 
9616 			dcmdlen = 0;
9617 		} else {
9618 			/* Set up an advanced data descriptor */
9619 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9620 			    htole64(dmamap->dm_segs[0].ds_addr);
9621 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9622 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9623 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9624 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9625 			    htole32(fields);
9626 			DPRINTF(sc, WM_DEBUG_TX,
9627 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9628 				device_xname(sc->sc_dev), nexttx,
9629 				(uint64_t)dmamap->dm_segs[0].ds_addr));
9630 			DPRINTF(sc, WM_DEBUG_TX,
9631 			    ("\t 0x%08x%08x\n", fields,
9632 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9633 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9634 		}
9635 
9636 		lasttx = nexttx;
9637 		nexttx = WM_NEXTTX(txq, nexttx);
9638 		/*
9639 		 * Fill in the next descriptors. Legacy or advanced format
9640 		 * is the same here.
9641 		 */
9642 		for (seg = 1; seg < dmamap->dm_nsegs;
9643 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9644 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9645 			    htole64(dmamap->dm_segs[seg].ds_addr);
9646 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9647 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9648 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9649 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9650 			lasttx = nexttx;
9651 
9652 			DPRINTF(sc, WM_DEBUG_TX,
9653 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9654 				device_xname(sc->sc_dev), nexttx,
9655 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
9656 				dmamap->dm_segs[seg].ds_len));
9657 		}
9658 
9659 		KASSERT(lasttx != -1);
9660 
9661 		/*
9662 		 * Set up the command byte on the last descriptor of
9663 		 * the packet. If we're in the interrupt delay window,
9664 		 * delay the interrupt.
9665 		 */
9666 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9667 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
9668 		txq->txq_descs[lasttx].wtx_cmdlen |=
9669 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9670 
9671 		txs->txs_lastdesc = lasttx;
9672 
9673 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9674 		    device_xname(sc->sc_dev),
9675 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9676 
9677 		/* Sync the descriptors we're using. */
9678 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9679 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9680 
9681 		/* Give the packet to the chip. */
9682 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9683 		sent = true;
9684 
9685 		DPRINTF(sc, WM_DEBUG_TX,
9686 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9687 
9688 		DPRINTF(sc, WM_DEBUG_TX,
9689 		    ("%s: TX: finished transmitting packet, job %d\n",
9690 			device_xname(sc->sc_dev), txq->txq_snext));
9691 
9692 		/* Advance the tx pointer. */
9693 		txq->txq_free -= txs->txs_ndesc;
9694 		txq->txq_next = nexttx;
9695 
9696 		txq->txq_sfree--;
9697 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9698 
9699 		/* Pass the packet to any BPF listeners. */
9700 		bpf_mtap(ifp, m0, BPF_D_OUT);
9701 	}
9702 
9703 	if (m0 != NULL) {
9704 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9705 		WM_Q_EVCNT_INCR(txq, descdrop);
9706 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9707 			__func__));
9708 		m_freem(m0);
9709 	}
9710 
9711 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9712 		/* No more slots; notify upper layer. */
9713 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9714 	}
9715 
9716 	if (sent) {
9717 		/* Set a watchdog timer in case the chip flakes out. */
9718 		txq->txq_lastsent = time_uptime;
9719 		txq->txq_sending = true;
9720 	}
9721 }
9722 
9723 static void
9724 wm_deferred_start_locked(struct wm_txqueue *txq)
9725 {
9726 	struct wm_softc *sc = txq->txq_sc;
9727 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9728 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9729 	int qid = wmq->wmq_id;
9730 
9731 	KASSERT(mutex_owned(txq->txq_lock));
9732 	KASSERT(!txq->txq_stopping);
9733 
9734 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9735 		/* XXX needed for ALTQ or a single-CPU system */
9736 		if (qid == 0)
9737 			wm_nq_start_locked(ifp);
9738 		wm_nq_transmit_locked(ifp, txq);
9739 	} else {
9740 		/* XXX needed for ALTQ or a single-CPU system */
9741 		if (qid == 0)
9742 			wm_start_locked(ifp);
9743 		wm_transmit_locked(ifp, txq);
9744 	}
9745 }
9746 
9747 /* Interrupt */
9748 
9749 /*
9750  * wm_txeof:
9751  *
9752  *	Helper; handle transmit interrupts.
9753  */
9754 static bool
9755 wm_txeof(struct wm_txqueue *txq, u_int limit)
9756 {
9757 	struct wm_softc *sc = txq->txq_sc;
9758 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9759 	struct wm_txsoft *txs;
9760 	int count = 0;
9761 	int i;
9762 	uint8_t status;
9763 	bool more = false;
9764 
9765 	KASSERT(mutex_owned(txq->txq_lock));
9766 
9767 	if (txq->txq_stopping)
9768 		return false;
9769 
9770 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9771 
9772 	/*
9773 	 * Go through the Tx list and free mbufs for those
9774 	 * frames which have been transmitted.
9775 	 */
9776 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9777 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9778 		txs = &txq->txq_soft[i];
9779 
9780 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9781 			device_xname(sc->sc_dev), i));
9782 
9783 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9784 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9785 
9786 		status =
9787 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9788 		if ((status & WTX_ST_DD) == 0) {
9789 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9790 			    BUS_DMASYNC_PREREAD);
9791 			break;
9792 		}
9793 
9794 		if (limit-- == 0) {
9795 			more = true;
9796 			DPRINTF(sc, WM_DEBUG_TX,
9797 			    ("%s: TX: loop limited, job %d is not processed\n",
9798 				device_xname(sc->sc_dev), i));
9799 			break;
9800 		}
9801 
9802 		count++;
9803 		DPRINTF(sc, WM_DEBUG_TX,
9804 		    ("%s: TX: job %d done: descs %d..%d\n",
9805 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9806 		    txs->txs_lastdesc));
9807 
9808 #ifdef WM_EVENT_COUNTERS
9809 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
9810 			WM_Q_EVCNT_INCR(txq, underrun);
9811 #endif /* WM_EVENT_COUNTERS */
9812 
9813 		/*
9814 		 * The documents for the 82574 and newer say that the status
9815 		 * field has neither an EC (Excessive Collision) bit nor an
9816 		 * LC (Late Collision) bit (they are reserved). Refer to the
9817 		 * "PCIe GbE Controller Open Source Software Developer's
9818 		 * Manual", the 82574 datasheet and newer.
9819 		 *
9820 		 * XXX I saw the LC bit set on an I218 even though the media
9821 		 * was full duplex, so the bit might have some other
9822 		 * meaning ... (I have no documentation for it).
9823 
9824 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9825 		    && ((sc->sc_type < WM_T_82574)
9826 			|| (sc->sc_type == WM_T_80003))) {
9827 			if_statinc(ifp, if_oerrors);
9828 			if (status & WTX_ST_LC)
9829 				log(LOG_WARNING, "%s: late collision\n",
9830 				    device_xname(sc->sc_dev));
9831 			else if (status & WTX_ST_EC) {
9832 				if_statadd(ifp, if_collisions,
9833 				    TX_COLLISION_THRESHOLD + 1);
9834 				log(LOG_WARNING, "%s: excessive collisions\n",
9835 				    device_xname(sc->sc_dev));
9836 			}
9837 		} else
9838 			if_statinc(ifp, if_opackets);
9839 
9840 		txq->txq_packets++;
9841 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9842 
9843 		txq->txq_free += txs->txs_ndesc;
9844 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9845 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9846 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9847 		m_freem(txs->txs_mbuf);
9848 		txs->txs_mbuf = NULL;
9849 	}
9850 
9851 	/* Update the dirty transmit buffer pointer. */
9852 	txq->txq_sdirty = i;
9853 	DPRINTF(sc, WM_DEBUG_TX,
9854 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9855 
9856 	if (count != 0)
9857 		rnd_add_uint32(&sc->rnd_source, count);
9858 
9859 	/*
9860 	 * If there are no more pending transmissions, cancel the watchdog
9861 	 * timer.
9862 	 */
9863 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9864 		txq->txq_sending = false;
9865 
9866 	return more;
9867 }
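
/*
 * Illustrative sketch (not compiled): wm_txeof() above reclaims a
 * transmit job only once the hardware has set the DD (Descriptor
 * Done) bit in the status byte of the job's *last* descriptor; all
 * earlier descriptors of that job are then done as well. A
 * hypothetical restatement of the test:
 */
#if 0
static inline bool
wm_txjob_done(struct wm_txqueue *txq, struct wm_txsoft *txs)
{
	uint8_t st =
	    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;

	return (st & WTX_ST_DD) != 0;
}
#endif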
9868 
9869 static inline uint32_t
9870 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9871 {
9872 	struct wm_softc *sc = rxq->rxq_sc;
9873 
9874 	if (sc->sc_type == WM_T_82574)
9875 		return EXTRXC_STATUS(
9876 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9877 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9878 		return NQRXC_STATUS(
9879 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9880 	else
9881 		return rxq->rxq_descs[idx].wrx_status;
9882 }
9883 
9884 static inline uint32_t
9885 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9886 {
9887 	struct wm_softc *sc = rxq->rxq_sc;
9888 
9889 	if (sc->sc_type == WM_T_82574)
9890 		return EXTRXC_ERROR(
9891 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9892 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9893 		return NQRXC_ERROR(
9894 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9895 	else
9896 		return rxq->rxq_descs[idx].wrx_errors;
9897 }
9898 
9899 static inline uint16_t
9900 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9901 {
9902 	struct wm_softc *sc = rxq->rxq_sc;
9903 
9904 	if (sc->sc_type == WM_T_82574)
9905 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9906 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9907 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9908 	else
9909 		return rxq->rxq_descs[idx].wrx_special;
9910 }
9911 
9912 static inline int
9913 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9914 {
9915 	struct wm_softc *sc = rxq->rxq_sc;
9916 
9917 	if (sc->sc_type == WM_T_82574)
9918 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9919 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9920 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9921 	else
9922 		return rxq->rxq_descs[idx].wrx_len;
9923 }
9924 
9925 #ifdef WM_DEBUG
9926 static inline uint32_t
9927 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9928 {
9929 	struct wm_softc *sc = rxq->rxq_sc;
9930 
9931 	if (sc->sc_type == WM_T_82574)
9932 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9933 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9934 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9935 	else
9936 		return 0;
9937 }
9938 
9939 static inline uint8_t
9940 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9941 {
9942 	struct wm_softc *sc = rxq->rxq_sc;
9943 
9944 	if (sc->sc_type == WM_T_82574)
9945 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9946 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9947 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9948 	else
9949 		return 0;
9950 }
9951 #endif /* WM_DEBUG */
9952 
9953 static inline bool
9954 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9955     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9956 {
9957 
9958 	if (sc->sc_type == WM_T_82574)
9959 		return (status & ext_bit) != 0;
9960 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9961 		return (status & nq_bit) != 0;
9962 	else
9963 		return (status & legacy_bit) != 0;
9964 }
9965 
9966 static inline bool
9967 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9968     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9969 {
9970 
9971 	if (sc->sc_type == WM_T_82574)
9972 		return (error & ext_bit) != 0;
9973 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9974 		return (error & nq_bit) != 0;
9975 	else
9976 		return (error & legacy_bit) != 0;
9977 }
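
/*
 * Illustrative note (not compiled): the helpers above all dispatch on
 * the same three-way rule -- 82574 uses the extended descriptor bit,
 * WM_F_NEWQUEUE devices use the "new queue" bit, and everything else
 * uses the legacy bit. Passing 0 for a format's bit makes the test
 * constant-false on that format; e.g. the IXSM check in
 * wm_rxdesc_ensure_checksum() below can only be true on legacy
 * descriptors:
 */
#if 0
	/* True only on legacy-format devices with WRX_ST_IXSM set. */
	bool ixsm = wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0);
#endif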
9978 
9979 static inline bool
9980 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9981 {
9982 
9983 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9984 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9985 		return true;
9986 	else
9987 		return false;
9988 }
9989 
9990 static inline bool
9991 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9992 {
9993 	struct wm_softc *sc = rxq->rxq_sc;
9994 
9995 	/* XXX missing error bit for newqueue? */
9996 	if (wm_rxdesc_is_set_error(sc, errors,
9997 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9998 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9999 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
10000 		NQRXC_ERROR_RXE)) {
10001 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
10002 		    EXTRXC_ERROR_SE, 0))
10003 			log(LOG_WARNING, "%s: symbol error\n",
10004 			    device_xname(sc->sc_dev));
10005 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
10006 		    EXTRXC_ERROR_SEQ, 0))
10007 			log(LOG_WARNING, "%s: receive sequence error\n",
10008 			    device_xname(sc->sc_dev));
10009 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
10010 		    EXTRXC_ERROR_CE, 0))
10011 			log(LOG_WARNING, "%s: CRC error\n",
10012 			    device_xname(sc->sc_dev));
10013 		return true;
10014 	}
10015 
10016 	return false;
10017 }
10018 
10019 static inline bool
10020 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
10021 {
10022 	struct wm_softc *sc = rxq->rxq_sc;
10023 
10024 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
10025 		NQRXC_STATUS_DD)) {
10026 		/* We have processed all of the receive descriptors. */
10027 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
10028 		return false;
10029 	}
10030 
10031 	return true;
10032 }
10033 
10034 static inline bool
10035 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
10036     uint16_t vlantag, struct mbuf *m)
10037 {
10038 
10039 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10040 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
10041 		vlan_set_tag(m, le16toh(vlantag));
10042 	}
10043 
10044 	return true;
10045 }
10046 
10047 static inline void
10048 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
10049     uint32_t errors, struct mbuf *m)
10050 {
10051 	struct wm_softc *sc = rxq->rxq_sc;
10052 
10053 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
10054 		if (wm_rxdesc_is_set_status(sc, status,
10055 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
10056 			WM_Q_EVCNT_INCR(rxq, ipsum);
10057 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
10058 			if (wm_rxdesc_is_set_error(sc, errors,
10059 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
10060 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
10061 		}
10062 		if (wm_rxdesc_is_set_status(sc, status,
10063 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
10064 			/*
10065 			 * Note: we don't know if this was TCP or UDP,
10066 			 * so we just set both bits, and expect the
10067 			 * upper layers to deal.
10068 			 */
10069 			WM_Q_EVCNT_INCR(rxq, tusum);
10070 			m->m_pkthdr.csum_flags |=
10071 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
10072 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
10073 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
10074 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
10075 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
10076 		}
10077 	}
10078 }
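
/*
 * Illustrative sketch (not compiled, hypothetical consumer): upper
 * layers act on the M_CSUM_* flags set above instead of recomputing
 * checksums in software, roughly like this:
 */
#if 0
	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD) {
			/* Hardware flagged a bad IPv4 header checksum. */
			m_freem(m);
			return;
		}
		/* Otherwise the header checksum is already verified. */
	}
#endif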
10079 
10080 /*
10081  * wm_rxeof:
10082  *
10083  *	Helper; handle receive interrupts.
10084  */
10085 static bool
10086 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
10087 {
10088 	struct wm_softc *sc = rxq->rxq_sc;
10089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10090 	struct wm_rxsoft *rxs;
10091 	struct mbuf *m;
10092 	int i, len;
10093 	int count = 0;
10094 	uint32_t status, errors;
10095 	uint16_t vlantag;
10096 	bool more = false;
10097 
10098 	KASSERT(mutex_owned(rxq->rxq_lock));
10099 
10100 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
10101 		rxs = &rxq->rxq_soft[i];
10102 
10103 		DPRINTF(sc, WM_DEBUG_RX,
10104 		    ("%s: RX: checking descriptor %d\n",
10105 			device_xname(sc->sc_dev), i));
10106 		wm_cdrxsync(rxq, i,
10107 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10108 
10109 		status = wm_rxdesc_get_status(rxq, i);
10110 		errors = wm_rxdesc_get_errors(rxq, i);
10111 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
10112 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
10113 #ifdef WM_DEBUG
10114 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
10115 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
10116 #endif
10117 
10118 		if (!wm_rxdesc_dd(rxq, i, status))
10119 			break;
10120 
10121 		if (limit-- == 0) {
10122 			more = true;
10123 			DPRINTF(sc, WM_DEBUG_RX,
10124 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
10125 				device_xname(sc->sc_dev), i));
10126 			break;
10127 		}
10128 
10129 		count++;
10130 		if (__predict_false(rxq->rxq_discard)) {
10131 			DPRINTF(sc, WM_DEBUG_RX,
10132 			    ("%s: RX: discarding contents of descriptor %d\n",
10133 				device_xname(sc->sc_dev), i));
10134 			wm_init_rxdesc(rxq, i);
10135 			if (wm_rxdesc_is_eop(rxq, status)) {
10136 				/* Reset our state. */
10137 				DPRINTF(sc, WM_DEBUG_RX,
10138 				    ("%s: RX: resetting rxdiscard -> 0\n",
10139 					device_xname(sc->sc_dev)));
10140 				rxq->rxq_discard = 0;
10141 			}
10142 			continue;
10143 		}
10144 
10145 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10146 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
10147 
10148 		m = rxs->rxs_mbuf;
10149 
10150 		/*
10151 		 * Add a new receive buffer to the ring, unless of
10152 		 * course the length is zero. Treat the latter as a
10153 		 * failed mapping.
10154 		 */
10155 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
10156 			/*
10157 			 * Failed, throw away what we've done so
10158 			 * far, and discard the rest of the packet.
10159 			 */
10160 			if_statinc(ifp, if_ierrors);
10161 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10162 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
10163 			wm_init_rxdesc(rxq, i);
10164 			if (!wm_rxdesc_is_eop(rxq, status))
10165 				rxq->rxq_discard = 1;
10166 			if (rxq->rxq_head != NULL)
10167 				m_freem(rxq->rxq_head);
10168 			WM_RXCHAIN_RESET(rxq);
10169 			DPRINTF(sc, WM_DEBUG_RX,
10170 			    ("%s: RX: Rx buffer allocation failed, "
10171 			    "dropping packet%s\n", device_xname(sc->sc_dev),
10172 				rxq->rxq_discard ? " (discard)" : ""));
10173 			continue;
10174 		}
10175 
10176 		m->m_len = len;
10177 		rxq->rxq_len += len;
10178 		DPRINTF(sc, WM_DEBUG_RX,
10179 		    ("%s: RX: buffer at %p len %d\n",
10180 			device_xname(sc->sc_dev), m->m_data, len));
10181 
10182 		/* If this is not the end of the packet, keep looking. */
10183 		if (!wm_rxdesc_is_eop(rxq, status)) {
10184 			WM_RXCHAIN_LINK(rxq, m);
10185 			DPRINTF(sc, WM_DEBUG_RX,
10186 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
10187 				device_xname(sc->sc_dev), rxq->rxq_len));
10188 			continue;
10189 		}
10190 
10191 		/*
10192 		 * Okay, we have the entire packet now. The chip is
10193 		 * configured to include the FCS (not all chips can be
10194 		 * configured to strip it), so we normally need to trim it.
10195 		 * The exceptions are I35[04] and I21[01]: due to an erratum
10196 		 * the RCTL_SECRC bit in RCTL is always set there, so we
10197 		 * don't trim on those chips. PCH2 and newer chips likewise
10198 		 * exclude the FCS when jumbo frames are used, to work around
10199 		 * another erratum. We may need to adjust the length of the
10200 		 * previous mbuf in the chain if the current mbuf is too short.
10201 		 */
10202 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
10203 			if (m->m_len < ETHER_CRC_LEN) {
10204 				rxq->rxq_tail->m_len
10205 				    -= (ETHER_CRC_LEN - m->m_len);
10206 				m->m_len = 0;
10207 			} else
10208 				m->m_len -= ETHER_CRC_LEN;
10209 			len = rxq->rxq_len - ETHER_CRC_LEN;
10210 		} else
10211 			len = rxq->rxq_len;
10212 
10213 		WM_RXCHAIN_LINK(rxq, m);
10214 
10215 		*rxq->rxq_tailp = NULL;
10216 		m = rxq->rxq_head;
10217 
10218 		WM_RXCHAIN_RESET(rxq);
10219 
10220 		DPRINTF(sc, WM_DEBUG_RX,
10221 		    ("%s: RX: have entire packet, len -> %d\n",
10222 			device_xname(sc->sc_dev), len));
10223 
10224 		/* If an error occurred, update stats and drop the packet. */
10225 		if (wm_rxdesc_has_errors(rxq, errors)) {
10226 			m_freem(m);
10227 			continue;
10228 		}
10229 
10230 		/* No errors.  Receive the packet. */
10231 		m_set_rcvif(m, ifp);
10232 		m->m_pkthdr.len = len;
10233 		/*
10234 		 * TODO
10235 		 * should save rsshash and rsstype in this mbuf.
10236 		 */
10237 		DPRINTF(sc, WM_DEBUG_RX,
10238 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
10239 			device_xname(sc->sc_dev), rsstype, rsshash));
10240 
10241 		/*
10242 		 * If VLANs are enabled, VLAN packets have been unwrapped
10243 		 * for us.  Associate the tag with the packet.
10244 		 */
10245 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10246 			continue;
10247 
10248 		/* Set up checksum info for this packet. */
10249 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10250 
10251 		rxq->rxq_packets++;
10252 		rxq->rxq_bytes += len;
10253 		/* Pass it on. */
10254 		if_percpuq_enqueue(sc->sc_ipq, m);
10255 
10256 		if (rxq->rxq_stopping)
10257 			break;
10258 	}
10259 	rxq->rxq_ptr = i;
10260 
10261 	if (count != 0)
10262 		rnd_add_uint32(&sc->rnd_source, count);
10263 
10264 	DPRINTF(sc, WM_DEBUG_RX,
10265 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
10266 
10267 	return more;
10268 }
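
/*
 * Illustrative example (not compiled): the FCS trim in wm_rxeof()
 * above, with hypothetical numbers. Suppose a 1514-byte frame plus
 * its 4-byte FCS arrives so that the final descriptor's buffer holds
 * only 2 bytes:
 *
 *	m->m_len (2) < ETHER_CRC_LEN (4), so:
 *	rxq->rxq_tail->m_len -= 4 - 2;	drop the 2 FCS bytes that
 *					spilled into the previous mbuf
 *	m->m_len = 0;			the last buffer was all FCS
 *	len = rxq->rxq_len - 4;		1518 - 4 = 1514 payload bytes
 */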
10269 
10270 /*
10271  * wm_linkintr_gmii:
10272  *
10273  *	Helper; handle link interrupts for GMII.
10274  */
10275 static void
10276 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
10277 {
10278 	device_t dev = sc->sc_dev;
10279 	uint32_t status, reg;
10280 	bool link;
10281 	int rv;
10282 
10283 	KASSERT(mutex_owned(sc->sc_core_lock));
10284 
10285 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
10286 		__func__));
10287 
10288 	if ((icr & ICR_LSC) == 0) {
10289 		if (icr & ICR_RXSEQ)
10290 			DPRINTF(sc, WM_DEBUG_LINK,
10291 			    ("%s: LINK: Receive sequence error\n",
10292 				device_xname(dev)));
10293 		return;
10294 	}
10295 
10296 	/* Link status changed */
10297 	status = CSR_READ(sc, WMREG_STATUS);
10298 	link = status & STATUS_LU;
10299 	if (link) {
10300 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10301 			device_xname(dev),
10302 			(status & STATUS_FD) ? "FDX" : "HDX"));
10303 		if (wm_phy_need_linkdown_discard(sc)) {
10304 			DPRINTF(sc, WM_DEBUG_LINK,
10305 			    ("%s: linkintr: Clear linkdown discard flag\n",
10306 				device_xname(dev)));
10307 			wm_clear_linkdown_discard(sc);
10308 		}
10309 	} else {
10310 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10311 			device_xname(dev)));
10312 		if (wm_phy_need_linkdown_discard(sc)) {
10313 			DPRINTF(sc, WM_DEBUG_LINK,
10314 			    ("%s: linkintr: Set linkdown discard flag\n",
10315 				device_xname(dev)));
10316 			wm_set_linkdown_discard(sc);
10317 		}
10318 	}
10319 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
10320 		wm_gig_downshift_workaround_ich8lan(sc);
10321 
10322 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
10323 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
10324 
10325 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
10326 		device_xname(dev)));
10327 	mii_pollstat(&sc->sc_mii);
10328 	if (sc->sc_type == WM_T_82543) {
10329 		int miistatus, active;
10330 
10331 		/*
10332 		 * With 82543, we need to force speed and
10333 		 * duplex on the MAC equal to what the PHY
10334 		 * speed and duplex configuration is.
10335 		 */
10336 		miistatus = sc->sc_mii.mii_media_status;
10337 
10338 		if (miistatus & IFM_ACTIVE) {
10339 			active = sc->sc_mii.mii_media_active;
10340 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10341 			switch (IFM_SUBTYPE(active)) {
10342 			case IFM_10_T:
10343 				sc->sc_ctrl |= CTRL_SPEED_10;
10344 				break;
10345 			case IFM_100_TX:
10346 				sc->sc_ctrl |= CTRL_SPEED_100;
10347 				break;
10348 			case IFM_1000_T:
10349 				sc->sc_ctrl |= CTRL_SPEED_1000;
10350 				break;
10351 			default:
10352 				/*
10353 				 * Fiber?
10354 				 * Shoud not enter here.
10355 				 * Should not enter here.
10356 				device_printf(dev, "unknown media (%x)\n",
10357 				    active);
10358 				break;
10359 			}
10360 			if (active & IFM_FDX)
10361 				sc->sc_ctrl |= CTRL_FD;
10362 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10363 		}
10364 	} else if (sc->sc_type == WM_T_PCH) {
10365 		wm_k1_gig_workaround_hv(sc,
10366 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10367 	}
10368 
10369 	/*
10370 	 * When connected at 10Mbps half-duplex, some parts are excessively
10371 	 * aggressive resulting in many collisions. To avoid this, increase
10372 	 * the IPG and reduce Rx latency in the PHY.
10373 	 */
10374 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
10375 	    && link) {
10376 		uint32_t tipg_reg;
10377 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10378 		bool fdx;
10379 		uint16_t emi_addr, emi_val;
10380 
10381 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
10382 		tipg_reg &= ~TIPG_IPGT_MASK;
10383 		fdx = status & STATUS_FD;
10384 
10385 		if (!fdx && (speed == STATUS_SPEED_10)) {
10386 			tipg_reg |= 0xff;
10387 			/* Reduce Rx latency in analog PHY */
10388 			emi_val = 0;
10389 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10390 		    fdx && speed != STATUS_SPEED_1000) {
10391 			tipg_reg |= 0xc;
10392 			emi_val = 1;
10393 		} else {
10394 			/* Roll back the default values */
10395 			tipg_reg |= 0x08;
10396 			emi_val = 1;
10397 		}
10398 
10399 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10400 
10401 		rv = sc->phy.acquire(sc);
10402 		if (rv)
10403 			return;
10404 
10405 		if (sc->sc_type == WM_T_PCH2)
10406 			emi_addr = I82579_RX_CONFIG;
10407 		else
10408 			emi_addr = I217_RX_CONFIG;
10409 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10410 
10411 		if (sc->sc_type >= WM_T_PCH_LPT) {
10412 			uint16_t phy_reg;
10413 
10414 			sc->phy.readreg_locked(dev, 2,
10415 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
10416 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10417 			if (speed == STATUS_SPEED_100
10418 			    || speed == STATUS_SPEED_10)
10419 				phy_reg |= 0x3e8;
10420 			else
10421 				phy_reg |= 0xfa;
10422 			sc->phy.writereg_locked(dev, 2,
10423 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
10424 
10425 			if (speed == STATUS_SPEED_1000) {
10426 				sc->phy.readreg_locked(dev, 2,
10427 				    HV_PM_CTRL, &phy_reg);
10428 
10429 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10430 
10431 				sc->phy.writereg_locked(dev, 2,
10432 				    HV_PM_CTRL, phy_reg);
10433 			}
10434 		}
10435 		sc->phy.release(sc);
10436 
10437 		if (rv)
10438 			return;
10439 
10440 		if (sc->sc_type >= WM_T_PCH_SPT) {
10441 			uint16_t data, ptr_gap;
10442 
10443 			if (speed == STATUS_SPEED_1000) {
10444 				rv = sc->phy.acquire(sc);
10445 				if (rv)
10446 					return;
10447 
10448 				rv = sc->phy.readreg_locked(dev, 2,
10449 				    I82579_UNKNOWN1, &data);
10450 				if (rv) {
10451 					sc->phy.release(sc);
10452 					return;
10453 				}
10454 
10455 				ptr_gap = (data & (0x3ff << 2)) >> 2;
10456 				if (ptr_gap < 0x18) {
10457 					data &= ~(0x3ff << 2);
10458 					data |= (0x18 << 2);
10459 					rv = sc->phy.writereg_locked(dev,
10460 					    2, I82579_UNKNOWN1, data);
10461 				}
10462 				sc->phy.release(sc);
10463 				if (rv)
10464 					return;
10465 			} else {
10466 				rv = sc->phy.acquire(sc);
10467 				if (rv)
10468 					return;
10469 
10470 				rv = sc->phy.writereg_locked(dev, 2,
10471 				    I82579_UNKNOWN1, 0xc023);
10472 				sc->phy.release(sc);
10473 				if (rv)
10474 					return;
10475 
10476 			}
10477 		}
10478 	}
10479 
10480 	/*
10481 	 * I217 Packet Loss issue:
10482 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
10483 	 * on power up.
10484 	 * Set the Beacon Duration for I217 to 8 usec
10485 	 */
10486 	if (sc->sc_type >= WM_T_PCH_LPT) {
10487 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
10488 		reg &= ~FEXTNVM4_BEACON_DURATION;
10489 		reg |= FEXTNVM4_BEACON_DURATION_8US;
10490 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10491 	}
10492 
10493 	/* Work-around I218 hang issue */
10494 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10495 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10496 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10497 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10498 		wm_k1_workaround_lpt_lp(sc, link);
10499 
10500 	if (sc->sc_type >= WM_T_PCH_LPT) {
10501 		/*
10502 		 * Set platform power management values for Latency
10503 		 * Tolerance Reporting (LTR)
10504 		 */
10505 		wm_platform_pm_pch_lpt(sc,
10506 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10507 	}
10508 
10509 	/* Clear link partner's EEE ability */
10510 	sc->eee_lp_ability = 0;
10511 
10512 	/* FEXTNVM6 K1-off workaround */
10513 	if (sc->sc_type == WM_T_PCH_SPT) {
10514 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
10515 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10516 			reg |= FEXTNVM6_K1_OFF_ENABLE;
10517 		else
10518 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10519 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10520 	}
10521 
10522 	if (!link)
10523 		return;
10524 
10525 	switch (sc->sc_type) {
10526 	case WM_T_PCH2:
10527 		wm_k1_workaround_lv(sc);
10528 		/* FALLTHROUGH */
10529 	case WM_T_PCH:
10530 		if (sc->sc_phytype == WMPHY_82578)
10531 			wm_link_stall_workaround_hv(sc);
10532 		break;
10533 	default:
10534 		break;
10535 	}
10536 
10537 	/* Enable/Disable EEE after link up */
10538 	if (sc->sc_phytype > WMPHY_82579)
10539 		wm_set_eee_pchlan(sc);
10540 }
10541 
10542 /*
10543  * wm_linkintr_tbi:
10544  *
10545  *	Helper; handle link interrupts for TBI mode.
10546  */
10547 static void
10548 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10549 {
10550 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10551 	uint32_t status;
10552 
10553 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10554 		__func__));
10555 
10556 	status = CSR_READ(sc, WMREG_STATUS);
10557 	if (icr & ICR_LSC) {
10558 		wm_check_for_link(sc);
10559 		if (status & STATUS_LU) {
10560 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10561 				device_xname(sc->sc_dev),
10562 				(status & STATUS_FD) ? "FDX" : "HDX"));
10563 			/*
10564 			 * NOTE: CTRL will update TFCE and RFCE automatically,
10565 			 * so we should update sc->sc_ctrl
10566 			 */
10567 
10568 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10569 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10570 			sc->sc_fcrtl &= ~FCRTL_XONE;
10571 			if (status & STATUS_FD)
10572 				sc->sc_tctl |=
10573 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10574 			else
10575 				sc->sc_tctl |=
10576 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10577 			if (sc->sc_ctrl & CTRL_TFCE)
10578 				sc->sc_fcrtl |= FCRTL_XONE;
10579 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10580 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10581 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10582 			sc->sc_tbi_linkup = 1;
10583 			if_link_state_change(ifp, LINK_STATE_UP);
10584 		} else {
10585 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10586 				device_xname(sc->sc_dev)));
10587 			sc->sc_tbi_linkup = 0;
10588 			if_link_state_change(ifp, LINK_STATE_DOWN);
10589 		}
10590 		/* Update LED */
10591 		wm_tbi_serdes_set_linkled(sc);
10592 	} else if (icr & ICR_RXSEQ)
10593 		DPRINTF(sc, WM_DEBUG_LINK,
10594 		    ("%s: LINK: Receive sequence error\n",
10595 			device_xname(sc->sc_dev)));
10596 }
10597 
10598 /*
10599  * wm_linkintr_serdes:
10600  *
10601  *	Helper; handle link interrupts for internal SERDES mode.
10602  */
10603 static void
10604 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10605 {
10606 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10607 	struct mii_data *mii = &sc->sc_mii;
10608 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10609 	uint32_t pcs_adv, pcs_lpab, reg;
10610 
10611 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10612 		__func__));
10613 
10614 	if (icr & ICR_LSC) {
10615 		/* Check PCS */
10616 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10617 		if ((reg & PCS_LSTS_LINKOK) != 0) {
10618 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10619 				device_xname(sc->sc_dev)));
10620 			mii->mii_media_status |= IFM_ACTIVE;
10621 			sc->sc_tbi_linkup = 1;
10622 			if_link_state_change(ifp, LINK_STATE_UP);
10623 		} else {
10624 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10625 				device_xname(sc->sc_dev)));
10626 			mii->mii_media_status |= IFM_NONE;
10627 			sc->sc_tbi_linkup = 0;
10628 			if_link_state_change(ifp, LINK_STATE_DOWN);
10629 			wm_tbi_serdes_set_linkled(sc);
10630 			return;
10631 		}
10632 		mii->mii_media_active |= IFM_1000_SX;
10633 		if ((reg & PCS_LSTS_FDX) != 0)
10634 			mii->mii_media_active |= IFM_FDX;
10635 		else
10636 			mii->mii_media_active |= IFM_HDX;
10637 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10638 			/* Check flow */
10639 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
10640 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
10641 				DPRINTF(sc, WM_DEBUG_LINK,
10642 				    ("XXX LINKOK but not ACOMP\n"));
10643 				return;
10644 			}
10645 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10646 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10647 			DPRINTF(sc, WM_DEBUG_LINK,
10648 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10649 			if ((pcs_adv & TXCW_SYM_PAUSE)
10650 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10651 				mii->mii_media_active |= IFM_FLOW
10652 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10653 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10654 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10655 			    && (pcs_lpab & TXCW_SYM_PAUSE)
10656 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10657 				mii->mii_media_active |= IFM_FLOW
10658 				    | IFM_ETH_TXPAUSE;
10659 			else if ((pcs_adv & TXCW_SYM_PAUSE)
10660 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10661 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10662 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10663 				mii->mii_media_active |= IFM_FLOW
10664 				    | IFM_ETH_RXPAUSE;
10665 		}
10666 		/* Update LED */
10667 		wm_tbi_serdes_set_linkled(sc);
10668 	} else
10669 		DPRINTF(sc, WM_DEBUG_LINK,
10670 		    ("%s: LINK: Receive sequence error\n",
10671 		    device_xname(sc->sc_dev)));
10672 }
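
/*
 * Illustrative summary (not compiled): the pause resolution above is
 * the standard IEEE 802.3 Annex 28B table, applied to the local
 * advertisement (pcs_adv) and the link partner ability (pcs_lpab):
 *
 *	adv Sym	 adv Asym  lp Sym  lp Asym	result
 *	   1	    x	      1	      x		Tx+Rx pause (IFM_FLOW)
 *	   0	    1	      1	      1		Tx pause only
 *	   1	    1	      0	      1		Rx pause only
 */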
10673 
10674 /*
10675  * wm_linkintr:
10676  *
10677  *	Helper; handle link interrupts.
10678  */
10679 static void
10680 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10681 {
10682 
10683 	KASSERT(mutex_owned(sc->sc_core_lock));
10684 
10685 	if (sc->sc_flags & WM_F_HAS_MII)
10686 		wm_linkintr_gmii(sc, icr);
10687 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10688 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10689 		wm_linkintr_serdes(sc, icr);
10690 	else
10691 		wm_linkintr_tbi(sc, icr);
10692 }
10693 
10694 
10695 static inline void
10696 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10697 {
10698 
10699 	if (wmq->wmq_txrx_use_workqueue) {
10700 		if (!wmq->wmq_wq_enqueued) {
10701 			wmq->wmq_wq_enqueued = true;
10702 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10703 			    curcpu());
10704 		}
10705 	} else
10706 		softint_schedule(wmq->wmq_si);
10707 }
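
/*
 * Illustrative trace (not compiled): wmq_wq_enqueued guards against
 * enqueueing the same work item twice, which workqueue(9) does not
 * allow. The flag is cleared in wm_handle_queue_work() before the
 * handler runs, so an interrupt arriving during processing can
 * re-enqueue:
 *
 *	intr #1: wmq_wq_enqueued false -> true, workqueue_enqueue()
 *	intr #2: wmq_wq_enqueued still true -> skipped
 *	worker:	 wmq_wq_enqueued = false; wm_handle_queue(wmq)
 *	intr #3: wmq_wq_enqueued false -> true, workqueue_enqueue()
 */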
10708 
10709 static inline void
10710 wm_legacy_intr_disable(struct wm_softc *sc)
10711 {
10712 
10713 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10714 }
10715 
10716 static inline void
10717 wm_legacy_intr_enable(struct wm_softc *sc)
10718 {
10719 
10720 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10721 }
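
/*
 * Illustrative sketch (not compiled): the two helpers above implement
 * the usual e1000-style interrupt gate -- write all-ones to IMC
 * (Interrupt Mask Clear) to disable every cause, and write the cached
 * cause set back to IMS (Interrupt Mask Set) to re-enable. A
 * hypothetical helper running a callback with interrupts masked:
 */
#if 0
static void
wm_with_legacy_intr_masked(struct wm_softc *sc,
    void (*fn)(struct wm_softc *))
{

	wm_legacy_intr_disable(sc);	/* WMREG_IMC <- 0xffffffffU */
	(*fn)(sc);
	wm_legacy_intr_enable(sc);	/* WMREG_IMS <- sc->sc_icr */
}
#endif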
10722 
10723 /*
10724  * wm_intr_legacy:
10725  *
10726  *	Interrupt service routine for INTx and MSI.
10727  */
10728 static int
10729 wm_intr_legacy(void *arg)
10730 {
10731 	struct wm_softc *sc = arg;
10732 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10733 	struct wm_queue *wmq = &sc->sc_queue[0];
10734 	struct wm_txqueue *txq = &wmq->wmq_txq;
10735 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10736 	u_int txlimit = sc->sc_tx_intr_process_limit;
10737 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10738 	uint32_t icr, rndval = 0;
10739 	bool more = false;
10740 
10741 	icr = CSR_READ(sc, WMREG_ICR);
10742 	if ((icr & sc->sc_icr) == 0)
10743 		return 0;
10744 
10745 	DPRINTF(sc, WM_DEBUG_TX,
10746 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
10747 	if (rndval == 0)
10748 		rndval = icr;
10749 
10750 	mutex_enter(txq->txq_lock);
10751 
10752 	if (txq->txq_stopping) {
10753 		mutex_exit(txq->txq_lock);
10754 		return 1;
10755 	}
10756 
10757 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10758 	if (icr & ICR_TXDW) {
10759 		DPRINTF(sc, WM_DEBUG_TX,
10760 		    ("%s: TX: got TXDW interrupt\n",
10761 			device_xname(sc->sc_dev)));
10762 		WM_Q_EVCNT_INCR(txq, txdw);
10763 	}
10764 #endif
10765 	if (txlimit > 0) {
10766 		more |= wm_txeof(txq, txlimit);
10767 		if (!IF_IS_EMPTY(&ifp->if_snd))
10768 			more = true;
10769 	} else
10770 		more = true;
10771 	mutex_exit(txq->txq_lock);
10772 
10773 	mutex_enter(rxq->rxq_lock);
10774 
10775 	if (rxq->rxq_stopping) {
10776 		mutex_exit(rxq->rxq_lock);
10777 		return 1;
10778 	}
10779 
10780 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10781 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10782 		DPRINTF(sc, WM_DEBUG_RX,
10783 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10784 			device_xname(sc->sc_dev),
10785 			icr & (ICR_RXDMT0 | ICR_RXT0)));
10786 		WM_Q_EVCNT_INCR(rxq, intr);
10787 	}
10788 #endif
10789 	if (rxlimit > 0) {
10790 		/*
10791 		 * wm_rxeof() does *not* call upper layer functions directly,
10792 		 * as if_percpuq_enqueue() just calls softint_schedule().
10793 		 * So, we can call wm_rxeof() in interrupt context.
10794 		 */
10795 		more = wm_rxeof(rxq, rxlimit);
10796 	} else
10797 		more = true;
10798 
10799 	mutex_exit(rxq->rxq_lock);
10800 
10801 	mutex_enter(sc->sc_core_lock);
10802 
10803 	if (sc->sc_core_stopping) {
10804 		mutex_exit(sc->sc_core_lock);
10805 		return 1;
10806 	}
10807 
10808 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
10809 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10810 		wm_linkintr(sc, icr);
10811 	}
10812 	if ((icr & ICR_GPI(0)) != 0)
10813 		device_printf(sc->sc_dev, "got module interrupt\n");
10814 
10815 	mutex_exit(sc->sc_core_lock);
10816 
10817 	if (icr & ICR_RXO) {
10818 #if defined(WM_DEBUG)
10819 		log(LOG_WARNING, "%s: Receive overrun\n",
10820 		    device_xname(sc->sc_dev));
10821 #endif /* defined(WM_DEBUG) */
10822 	}
10823 
10824 	rnd_add_uint32(&sc->rnd_source, rndval);
10825 
10826 	if (more) {
10827 		/* Try to get more packets going. */
10828 		wm_legacy_intr_disable(sc);
10829 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10830 		wm_sched_handle_queue(sc, wmq);
10831 	}
10832 
10833 	return 1;
10834 }
10835 
10836 static inline void
10837 wm_txrxintr_disable(struct wm_queue *wmq)
10838 {
10839 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10840 
10841 	if (__predict_false(!wm_is_using_msix(sc))) {
10842 		wm_legacy_intr_disable(sc);
10843 		return;
10844 	}
10845 
10846 	if (sc->sc_type == WM_T_82574)
10847 		CSR_WRITE(sc, WMREG_IMC,
10848 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10849 	else if (sc->sc_type == WM_T_82575)
10850 		CSR_WRITE(sc, WMREG_EIMC,
10851 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10852 	else
10853 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10854 }
10855 
10856 static inline void
10857 wm_txrxintr_enable(struct wm_queue *wmq)
10858 {
10859 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10860 
10861 	wm_itrs_calculate(sc, wmq);
10862 
10863 	if (__predict_false(!wm_is_using_msix(sc))) {
10864 		wm_legacy_intr_enable(sc);
10865 		return;
10866 	}
10867 
10868 	/*
10869 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
10870 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
10871 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
10872 	 * disabled while each wm_handle_queue(wmq) is running.
10873 	 */
10874 	if (sc->sc_type == WM_T_82574)
10875 		CSR_WRITE(sc, WMREG_IMS,
10876 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10877 	else if (sc->sc_type == WM_T_82575)
10878 		CSR_WRITE(sc, WMREG_EIMS,
10879 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10880 	else
10881 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10882 }
10883 
10884 static int
10885 wm_txrxintr_msix(void *arg)
10886 {
10887 	struct wm_queue *wmq = arg;
10888 	struct wm_txqueue *txq = &wmq->wmq_txq;
10889 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10890 	struct wm_softc *sc = txq->txq_sc;
10891 	u_int txlimit = sc->sc_tx_intr_process_limit;
10892 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10893 	bool txmore;
10894 	bool rxmore;
10895 
10896 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10897 
10898 	DPRINTF(sc, WM_DEBUG_TX,
10899 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10900 
10901 	wm_txrxintr_disable(wmq);
10902 
10903 	mutex_enter(txq->txq_lock);
10904 
10905 	if (txq->txq_stopping) {
10906 		mutex_exit(txq->txq_lock);
10907 		return 1;
10908 	}
10909 
10910 	WM_Q_EVCNT_INCR(txq, txdw);
10911 	if (txlimit > 0) {
10912 		txmore = wm_txeof(txq, txlimit);
10913 		/* wm_deferred start() is done in wm_handle_queue(). */
10914 	} else
10915 		txmore = true;
10916 	mutex_exit(txq->txq_lock);
10917 
10918 	DPRINTF(sc, WM_DEBUG_RX,
10919 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10920 	mutex_enter(rxq->rxq_lock);
10921 
10922 	if (rxq->rxq_stopping) {
10923 		mutex_exit(rxq->rxq_lock);
10924 		return 1;
10925 	}
10926 
10927 	WM_Q_EVCNT_INCR(rxq, intr);
10928 	if (rxlimit > 0) {
10929 		rxmore = wm_rxeof(rxq, rxlimit);
10930 	} else
10931 		rxmore = true;
10932 	mutex_exit(rxq->rxq_lock);
10933 
10934 	wm_itrs_writereg(sc, wmq);
10935 
10936 	if (txmore || rxmore) {
10937 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10938 		wm_sched_handle_queue(sc, wmq);
10939 	} else
10940 		wm_txrxintr_enable(wmq);
10941 
10942 	return 1;
10943 }
10944 
10945 static void
10946 wm_handle_queue(void *arg)
10947 {
10948 	struct wm_queue *wmq = arg;
10949 	struct wm_txqueue *txq = &wmq->wmq_txq;
10950 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10951 	struct wm_softc *sc = txq->txq_sc;
10952 	u_int txlimit = sc->sc_tx_process_limit;
10953 	u_int rxlimit = sc->sc_rx_process_limit;
10954 	bool txmore;
10955 	bool rxmore;
10956 
10957 	mutex_enter(txq->txq_lock);
10958 	if (txq->txq_stopping) {
10959 		mutex_exit(txq->txq_lock);
10960 		return;
10961 	}
10962 	txmore = wm_txeof(txq, txlimit);
10963 	wm_deferred_start_locked(txq);
10964 	mutex_exit(txq->txq_lock);
10965 
10966 	mutex_enter(rxq->rxq_lock);
10967 	if (rxq->rxq_stopping) {
10968 		mutex_exit(rxq->rxq_lock);
10969 		return;
10970 	}
10971 	WM_Q_EVCNT_INCR(rxq, defer);
10972 	rxmore = wm_rxeof(rxq, rxlimit);
10973 	mutex_exit(rxq->rxq_lock);
10974 
10975 	if (txmore || rxmore) {
10976 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10977 		wm_sched_handle_queue(sc, wmq);
10978 	} else
10979 		wm_txrxintr_enable(wmq);
10980 }
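
/*
 * Illustrative restatement (not compiled) of the interrupt/poll
 * contract shared by wm_intr_legacy(), wm_txrxintr_msix() and
 * wm_handle_queue():
 *
 *	interrupt: process up to the *_intr_process_limit; if work
 *		   remains, keep the source masked and schedule
 *		   wm_handle_queue() via softint or workqueue;
 *	handler:   process up to the *_process_limit, then either
 *		   reschedule itself (work remains) or re-enable the
 *		   interrupt source via wm_txrxintr_enable().
 */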
10981 
10982 static void
10983 wm_handle_queue_work(struct work *wk, void *context)
10984 {
10985 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10986 
10987 	/*
10988 	 * Workaround for some qemu environments: they don't stop the
10989 	 * interrupt immediately.
10990 	 */
10991 	wmq->wmq_wq_enqueued = false;
10992 	wm_handle_queue(wmq);
10993 }
10994 
10995 /*
10996  * wm_linkintr_msix:
10997  *
10998  *	Interrupt service routine for link status change for MSI-X.
10999  */
11000 static int
11001 wm_linkintr_msix(void *arg)
11002 {
11003 	struct wm_softc *sc = arg;
11004 	uint32_t reg;
11005 	bool has_rxo;
11006 
11007 	reg = CSR_READ(sc, WMREG_ICR);
11008 	mutex_enter(sc->sc_core_lock);
11009 	DPRINTF(sc, WM_DEBUG_LINK,
11010 	    ("%s: LINK: got link intr. ICR = %08x\n",
11011 		device_xname(sc->sc_dev), reg));
11012 
11013 	if (sc->sc_core_stopping)
11014 		goto out;
11015 
11016 	if ((reg & ICR_LSC) != 0) {
11017 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
11018 		wm_linkintr(sc, ICR_LSC);
11019 	}
11020 	if ((reg & ICR_GPI(0)) != 0)
11021 		device_printf(sc->sc_dev, "got module interrupt\n");
11022 
11023 	/*
11024 	 * XXX 82574 MSI-X mode workaround
11025 	 *
11026 	 * 82574 MSI-X mode delivers a receive overrun (RXO) on the ICR_OTHER
11027 	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
11028 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
11029 	 * interrupts by writing WMREG_ICS to process receive packets.
11030 	 */
11031 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
11032 #if defined(WM_DEBUG)
11033 		log(LOG_WARNING, "%s: Receive overrun\n",
11034 		    device_xname(sc->sc_dev));
11035 #endif /* defined(WM_DEBUG) */
11036 
11037 		has_rxo = true;
11038 		/*
11039 		 * The RXO interrupt fires at a very high rate when receive
11040 		 * traffic is heavy. We poll ICR_OTHER as we do for Tx/Rx
11041 		 * interrupts. ICR_OTHER will be enabled at the end of
11042 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
11043 		 * ICR_RXQ(1) interrupts.
11044 		 */
11045 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
11046 
11047 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
11048 	}
11049 
11050 
11051 
11052 out:
11053 	mutex_exit(sc->sc_core_lock);
11054 
11055 	if (sc->sc_type == WM_T_82574) {
11056 		if (!has_rxo)
11057 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
11058 		else
11059 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
11060 	} else if (sc->sc_type == WM_T_82575)
11061 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
11062 	else
11063 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
11064 
11065 	return 1;
11066 }
11067 
11068 /*
11069  * Media related.
11070  * GMII, SGMII, TBI (and SERDES)
11071  */
11072 
11073 /* Common */
11074 
11075 /*
11076  * wm_tbi_serdes_set_linkled:
11077  *
11078  *	Update the link LED on TBI and SERDES devices.
11079  */
11080 static void
11081 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
11082 {
11083 
11084 	if (sc->sc_tbi_linkup)
11085 		sc->sc_ctrl |= CTRL_SWDPIN(0);
11086 	else
11087 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
11088 
11089 	/* 82540 or newer devices are active low */
11090 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
11091 
11092 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11093 }
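
/*
 * Illustrative example (not compiled): on 82540 and newer the link
 * LED pin is active low, so the XOR above inverts the bit that was
 * just computed. With link up on such a chip:
 *
 *	sc_ctrl |= CTRL_SWDPIN(0);	pin bit = 1
 *	sc_ctrl ^= CTRL_SWDPIN(0);	pin bit = 0 -> LED lit
 */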
11094 
11095 /* GMII related */
11096 
11097 /*
11098  * wm_gmii_reset:
11099  *
11100  *	Reset the PHY.
11101  */
11102 static void
11103 wm_gmii_reset(struct wm_softc *sc)
11104 {
11105 	uint32_t reg;
11106 	int rv;
11107 
11108 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11109 		device_xname(sc->sc_dev), __func__));
11110 
11111 	rv = sc->phy.acquire(sc);
11112 	if (rv != 0) {
11113 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11114 		    __func__);
11115 		return;
11116 	}
11117 
11118 	switch (sc->sc_type) {
11119 	case WM_T_82542_2_0:
11120 	case WM_T_82542_2_1:
11121 		/* null */
11122 		break;
11123 	case WM_T_82543:
11124 		/*
11125 		 * With 82543, we need to force speed and duplex on the MAC
11126 		 * equal to what the PHY speed and duplex configuration is.
11127 		 * In addition, we need to perform a hardware reset on the PHY
11128 		 * to take it out of reset.
11129 		 */
11130 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11131 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11132 
11133 		/* The PHY reset pin is active-low. */
11134 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11135 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
11136 		    CTRL_EXT_SWDPIN(4));
11137 		reg |= CTRL_EXT_SWDPIO(4);
11138 
11139 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11140 		CSR_WRITE_FLUSH(sc);
11141 		delay(10*1000);
11142 
11143 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
11144 		CSR_WRITE_FLUSH(sc);
11145 		delay(150);
11146 #if 0
11147 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
11148 #endif
11149 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
11150 		break;
11151 	case WM_T_82544:	/* Reset 10000us */
11152 	case WM_T_82540:
11153 	case WM_T_82545:
11154 	case WM_T_82545_3:
11155 	case WM_T_82546:
11156 	case WM_T_82546_3:
11157 	case WM_T_82541:
11158 	case WM_T_82541_2:
11159 	case WM_T_82547:
11160 	case WM_T_82547_2:
11161 	case WM_T_82571:	/* Reset 100us */
11162 	case WM_T_82572:
11163 	case WM_T_82573:
11164 	case WM_T_82574:
11165 	case WM_T_82575:
11166 	case WM_T_82576:
11167 	case WM_T_82580:
11168 	case WM_T_I350:
11169 	case WM_T_I354:
11170 	case WM_T_I210:
11171 	case WM_T_I211:
11172 	case WM_T_82583:
11173 	case WM_T_80003:
11174 		/* Generic reset */
11175 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11176 		CSR_WRITE_FLUSH(sc);
11177 		delay(20000);
11178 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11179 		CSR_WRITE_FLUSH(sc);
11180 		delay(20000);
11181 
11182 		if ((sc->sc_type == WM_T_82541)
11183 		    || (sc->sc_type == WM_T_82541_2)
11184 		    || (sc->sc_type == WM_T_82547)
11185 		    || (sc->sc_type == WM_T_82547_2)) {
11186 			/* Workaround for igp are done in igp_reset() */
11187 			/* XXX add code to set LED after phy reset */
11188 		}
11189 		break;
11190 	case WM_T_ICH8:
11191 	case WM_T_ICH9:
11192 	case WM_T_ICH10:
11193 	case WM_T_PCH:
11194 	case WM_T_PCH2:
11195 	case WM_T_PCH_LPT:
11196 	case WM_T_PCH_SPT:
11197 	case WM_T_PCH_CNP:
11198 		/* Generic reset */
11199 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11200 		CSR_WRITE_FLUSH(sc);
11201 		delay(100);
11202 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11203 		CSR_WRITE_FLUSH(sc);
11204 		delay(150);
11205 		break;
11206 	default:
11207 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
11208 		    __func__);
11209 		break;
11210 	}
11211 
11212 	sc->phy.release(sc);
11213 
11214 	/* get_cfg_done */
11215 	wm_get_cfg_done(sc);
11216 
11217 	/* Extra setup */
11218 	switch (sc->sc_type) {
11219 	case WM_T_82542_2_0:
11220 	case WM_T_82542_2_1:
11221 	case WM_T_82543:
11222 	case WM_T_82544:
11223 	case WM_T_82540:
11224 	case WM_T_82545:
11225 	case WM_T_82545_3:
11226 	case WM_T_82546:
11227 	case WM_T_82546_3:
11228 	case WM_T_82541_2:
11229 	case WM_T_82547_2:
11230 	case WM_T_82571:
11231 	case WM_T_82572:
11232 	case WM_T_82573:
11233 	case WM_T_82574:
11234 	case WM_T_82583:
11235 	case WM_T_82575:
11236 	case WM_T_82576:
11237 	case WM_T_82580:
11238 	case WM_T_I350:
11239 	case WM_T_I354:
11240 	case WM_T_I210:
11241 	case WM_T_I211:
11242 	case WM_T_80003:
11243 		/* Null */
11244 		break;
11245 	case WM_T_82541:
11246 	case WM_T_82547:
11247 		/* XXX Configure actively LED after PHY reset */
11248 		break;
11249 	case WM_T_ICH8:
11250 	case WM_T_ICH9:
11251 	case WM_T_ICH10:
11252 	case WM_T_PCH:
11253 	case WM_T_PCH2:
11254 	case WM_T_PCH_LPT:
11255 	case WM_T_PCH_SPT:
11256 	case WM_T_PCH_CNP:
11257 		wm_phy_post_reset(sc);
11258 		break;
11259 	default:
11260 		panic("%s: unknown type\n", __func__);
11261 		break;
11262 	}
11263 }
11264 
11265 /*
11266  * Set up sc_phytype and mii_{read|write}reg.
11267  *
11268  *  To identify the PHY type, the correct read/write function must be
11269  * selected, and to select that function the PCI ID or MAC type is
11270  * needed without accessing the PHY registers.
11271  *
11272  *  On the first call of this function, the PHY ID is not known yet, so
11273  * the PCI ID or MAC type is checked. The list of PCI IDs may not be
11274  * perfect, so the result might be incorrect.
11275  *
11276  *  On the second call, the PHY OUI and model are used to identify the
11277  * PHY type. This may still be imperfect because the table of known
11278  * entries is incomplete, but it is better than the first call.
11279  *
11280  *  If the newly detected result differs from the previous assumption,
11281  * a diagnostic message is printed.
11282  */
11283 static void
11284 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
11285     uint16_t phy_model)
11286 {
11287 	device_t dev = sc->sc_dev;
11288 	struct mii_data *mii = &sc->sc_mii;
11289 	uint16_t new_phytype = WMPHY_UNKNOWN;
11290 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
11291 	mii_readreg_t new_readreg;
11292 	mii_writereg_t new_writereg;
11293 	bool dodiag = true;
11294 
11295 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11296 		device_xname(sc->sc_dev), __func__));
11297 
11298 	/*
11299 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is
11300 	 * always incorrect, so don't print diag output on the second call.
11301 	 */
11302 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
11303 		dodiag = false;
11304 
11305 	if (mii->mii_readreg == NULL) {
11306 		/*
11307 		 *  This is the first call of this function. For ICH and PCH
11308 		 * variants, it's difficult to determine the PHY access method
11309 		 * by sc_type, so use the PCI product ID for some devices.
11310 		 */
11311 
11312 		switch (sc->sc_pcidevid) {
11313 		case PCI_PRODUCT_INTEL_PCH_M_LM:
11314 		case PCI_PRODUCT_INTEL_PCH_M_LC:
11315 			/* 82577 */
11316 			new_phytype = WMPHY_82577;
11317 			break;
11318 		case PCI_PRODUCT_INTEL_PCH_D_DM:
11319 		case PCI_PRODUCT_INTEL_PCH_D_DC:
11320 			/* 82578 */
11321 			new_phytype = WMPHY_82578;
11322 			break;
11323 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
11324 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
11325 			/* 82579 */
11326 			new_phytype = WMPHY_82579;
11327 			break;
11328 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
11329 		case PCI_PRODUCT_INTEL_82801I_BM:
11330 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
11331 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
11332 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
11333 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
11334 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
11335 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
11336 			/* ICH8, 9, 10 with 82567 */
11337 			new_phytype = WMPHY_BM;
11338 			break;
11339 		default:
11340 			break;
11341 		}
11342 	} else {
11343 		/* It's not the first call. Use PHY OUI and model */
11344 		switch (phy_oui) {
11345 		case MII_OUI_ATTANSIC: /* atphy(4) */
11346 			switch (phy_model) {
11347 			case MII_MODEL_ATTANSIC_AR8021:
11348 				new_phytype = WMPHY_82578;
11349 				break;
11350 			default:
11351 				break;
11352 			}
11353 			break;
11354 		case MII_OUI_xxMARVELL:
11355 			switch (phy_model) {
11356 			case MII_MODEL_xxMARVELL_I210:
11357 				new_phytype = WMPHY_I210;
11358 				break;
11359 			case MII_MODEL_xxMARVELL_E1011:
11360 			case MII_MODEL_xxMARVELL_E1000_3:
11361 			case MII_MODEL_xxMARVELL_E1000_5:
11362 			case MII_MODEL_xxMARVELL_E1112:
11363 				new_phytype = WMPHY_M88;
11364 				break;
11365 			case MII_MODEL_xxMARVELL_E1149:
11366 				new_phytype = WMPHY_BM;
11367 				break;
11368 			case MII_MODEL_xxMARVELL_E1111:
11369 			case MII_MODEL_xxMARVELL_I347:
11370 			case MII_MODEL_xxMARVELL_E1512:
11371 			case MII_MODEL_xxMARVELL_E1340M:
11372 			case MII_MODEL_xxMARVELL_E1543:
11373 				new_phytype = WMPHY_M88;
11374 				break;
11375 			case MII_MODEL_xxMARVELL_I82563:
11376 				new_phytype = WMPHY_GG82563;
11377 				break;
11378 			default:
11379 				break;
11380 			}
11381 			break;
11382 		case MII_OUI_INTEL:
11383 			switch (phy_model) {
11384 			case MII_MODEL_INTEL_I82577:
11385 				new_phytype = WMPHY_82577;
11386 				break;
11387 			case MII_MODEL_INTEL_I82579:
11388 				new_phytype = WMPHY_82579;
11389 				break;
11390 			case MII_MODEL_INTEL_I217:
11391 				new_phytype = WMPHY_I217;
11392 				break;
11393 			case MII_MODEL_INTEL_I82580:
11394 				new_phytype = WMPHY_82580;
11395 				break;
11396 			case MII_MODEL_INTEL_I350:
11397 				new_phytype = WMPHY_I350;
11398 				break;
11399 			default:
11400 				break;
11401 			}
11402 			break;
11403 		case MII_OUI_yyINTEL:
11404 			switch (phy_model) {
11405 			case MII_MODEL_yyINTEL_I82562G:
11406 			case MII_MODEL_yyINTEL_I82562EM:
11407 			case MII_MODEL_yyINTEL_I82562ET:
11408 				new_phytype = WMPHY_IFE;
11409 				break;
11410 			case MII_MODEL_yyINTEL_IGP01E1000:
11411 				new_phytype = WMPHY_IGP;
11412 				break;
11413 			case MII_MODEL_yyINTEL_I82566:
11414 				new_phytype = WMPHY_IGP_3;
11415 				break;
11416 			default:
11417 				break;
11418 			}
11419 			break;
11420 		default:
11421 			break;
11422 		}
11423 
11424 		if (dodiag) {
11425 			if (new_phytype == WMPHY_UNKNOWN)
11426 				aprint_verbose_dev(dev,
11427 				    "%s: Unknown PHY model. OUI=%06x, "
11428 				    "model=%04x\n", __func__, phy_oui,
11429 				    phy_model);
11430 
11431 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
11432 			    && (sc->sc_phytype != new_phytype)) {
11433 				aprint_error_dev(dev, "Previously assumed PHY "
11434 				    "type(%u) was incorrect. PHY type from PHY "
11435 				    "ID = %u\n", sc->sc_phytype, new_phytype);
11436 			}
11437 		}
11438 	}
11439 
11440 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11441 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11442 		/* SGMII */
11443 		new_readreg = wm_sgmii_readreg;
11444 		new_writereg = wm_sgmii_writereg;
11445 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11446 		/* BM2 (phyaddr == 1) */
11447 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11448 		    && (new_phytype != WMPHY_BM)
11449 		    && (new_phytype != WMPHY_UNKNOWN))
11450 			doubt_phytype = new_phytype;
11451 		new_phytype = WMPHY_BM;
11452 		new_readreg = wm_gmii_bm_readreg;
11453 		new_writereg = wm_gmii_bm_writereg;
11454 	} else if (sc->sc_type >= WM_T_PCH) {
11455 		/* All PCH* use _hv_ */
11456 		new_readreg = wm_gmii_hv_readreg;
11457 		new_writereg = wm_gmii_hv_writereg;
11458 	} else if (sc->sc_type >= WM_T_ICH8) {
11459 		/* non-82567 ICH8, 9 and 10 */
11460 		new_readreg = wm_gmii_i82544_readreg;
11461 		new_writereg = wm_gmii_i82544_writereg;
11462 	} else if (sc->sc_type >= WM_T_80003) {
11463 		/* 80003 */
11464 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11465 		    && (new_phytype != WMPHY_GG82563)
11466 		    && (new_phytype != WMPHY_UNKNOWN))
11467 			doubt_phytype = new_phytype;
11468 		new_phytype = WMPHY_GG82563;
11469 		new_readreg = wm_gmii_i80003_readreg;
11470 		new_writereg = wm_gmii_i80003_writereg;
11471 	} else if (sc->sc_type >= WM_T_I210) {
11472 		/* I210 and I211 */
11473 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11474 		    && (new_phytype != WMPHY_I210)
11475 		    && (new_phytype != WMPHY_UNKNOWN))
11476 			doubt_phytype = new_phytype;
11477 		new_phytype = WMPHY_I210;
11478 		new_readreg = wm_gmii_gs40g_readreg;
11479 		new_writereg = wm_gmii_gs40g_writereg;
11480 	} else if (sc->sc_type >= WM_T_82580) {
11481 		/* 82580, I350 and I354 */
11482 		new_readreg = wm_gmii_82580_readreg;
11483 		new_writereg = wm_gmii_82580_writereg;
11484 	} else if (sc->sc_type >= WM_T_82544) {
11485 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
11486 		new_readreg = wm_gmii_i82544_readreg;
11487 		new_writereg = wm_gmii_i82544_writereg;
11488 	} else {
11489 		new_readreg = wm_gmii_i82543_readreg;
11490 		new_writereg = wm_gmii_i82543_writereg;
11491 	}
11492 
11493 	if (new_phytype == WMPHY_BM) {
11494 		/* All BM use _bm_ */
11495 		new_readreg = wm_gmii_bm_readreg;
11496 		new_writereg = wm_gmii_bm_writereg;
11497 	}
11498 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11499 		/* All PCH* use _hv_ */
11500 		new_readreg = wm_gmii_hv_readreg;
11501 		new_writereg = wm_gmii_hv_writereg;
11502 	}
11503 
11504 	/* Diag output */
11505 	if (dodiag) {
11506 		if (doubt_phytype != WMPHY_UNKNOWN)
11507 			aprint_error_dev(dev, "Assumed new PHY type was "
11508 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11509 			    new_phytype);
11510 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11511 		    && (sc->sc_phytype != new_phytype))
11512 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
11513 			    "was incorrect. New PHY type = %u\n",
11514 			    sc->sc_phytype, new_phytype);
11515 
11516 		if ((mii->mii_readreg != NULL) &&
11517 		    (new_phytype == WMPHY_UNKNOWN))
11518 			aprint_error_dev(dev, "PHY type is still unknown.\n");
11519 
11520 		if ((mii->mii_readreg != NULL) &&
11521 		    (mii->mii_readreg != new_readreg))
11522 			aprint_error_dev(dev, "Previously assumed PHY "
11523 			    "read/write function was incorrect.\n");
11524 	}
11525 
11526 	/* Update now */
11527 	sc->sc_phytype = new_phytype;
11528 	mii->mii_readreg = new_readreg;
11529 	mii->mii_writereg = new_writereg;
11530 	if (new_readreg == wm_gmii_hv_readreg) {
11531 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11532 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11533 	} else if (new_readreg == wm_sgmii_readreg) {
11534 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11535 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11536 	} else if (new_readreg == wm_gmii_i82544_readreg) {
11537 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11538 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11539 	}
11540 }
11541 
11542 /*
11543  * wm_get_phy_id_82575:
11544  *
11545  * Return PHY ID. Return -1 if it failed.
11546  */
11547 static int
11548 wm_get_phy_id_82575(struct wm_softc *sc)
11549 {
11550 	uint32_t reg;
11551 	int phyid = -1;
11552 
11553 	/* XXX */
11554 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11555 		return -1;
11556 
11557 	if (wm_sgmii_uses_mdio(sc)) {
11558 		switch (sc->sc_type) {
11559 		case WM_T_82575:
11560 		case WM_T_82576:
11561 			reg = CSR_READ(sc, WMREG_MDIC);
11562 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11563 			break;
11564 		case WM_T_82580:
11565 		case WM_T_I350:
11566 		case WM_T_I354:
11567 		case WM_T_I210:
11568 		case WM_T_I211:
11569 			reg = CSR_READ(sc, WMREG_MDICNFG);
11570 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11571 			break;
11572 		default:
11573 			return -1;
11574 		}
11575 	}
11576 
11577 	return phyid;
11578 }
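
/*
 * Illustrative example (not compiled): the PHY address extraction
 * above is a plain mask-and-shift. Assuming, for illustration only,
 * that the address field occupies bits 25:21 and that MDICNFG reads
 * back as 0x00600000:
 *
 *	(0x00600000 & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT == 3
 */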
11579 
11580 /*
11581  * wm_gmii_mediainit:
11582  *
11583  *	Initialize media for use on 1000BASE-T devices.
11584  */
11585 static void
11586 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11587 {
11588 	device_t dev = sc->sc_dev;
11589 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11590 	struct mii_data *mii = &sc->sc_mii;
11591 
11592 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11593 		device_xname(sc->sc_dev), __func__));
11594 
11595 	/* We have GMII. */
11596 	sc->sc_flags |= WM_F_HAS_MII;
11597 
11598 	if (sc->sc_type == WM_T_80003)
11599 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
11600 	else
11601 		sc->sc_tipg = TIPG_1000T_DFLT;
11602 
11603 	/*
11604 	 * Let the chip set speed/duplex on its own based on
11605 	 * signals from the PHY.
11606 	 * XXXbouyer - I'm not sure this is right for the 80003,
11607 	 * the em driver only sets CTRL_SLU here - but it seems to work.
11608 	 */
11609 	sc->sc_ctrl |= CTRL_SLU;
11610 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11611 
11612 	/* Initialize our media structures and probe the GMII. */
11613 	mii->mii_ifp = ifp;
11614 
11615 	mii->mii_statchg = wm_gmii_statchg;
11616 
11617 	/* get PHY control from SMBus to PCIe */
11618 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11619 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11620 	    || (sc->sc_type == WM_T_PCH_CNP))
11621 		wm_init_phy_workarounds_pchlan(sc);
11622 
11623 	wm_gmii_reset(sc);
11624 
11625 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
11626 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11627 	    wm_gmii_mediastatus, sc->sc_core_lock);
11628 
11629 	/* Setup internal SGMII PHY for SFP */
11630 	wm_sgmii_sfp_preconfig(sc);
11631 
11632 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11633 	    || (sc->sc_type == WM_T_82580)
11634 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11635 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11636 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
11637 			/* Attach only one port */
11638 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11639 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
11640 		} else {
11641 			int i, id;
11642 			uint32_t ctrl_ext;
11643 
11644 			id = wm_get_phy_id_82575(sc);
11645 			if (id != -1) {
11646 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11647 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11648 			}
11649 			if ((id == -1)
11650 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11651 				/* Power on sgmii phy if it is disabled */
11652 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11653 				CSR_WRITE(sc, WMREG_CTRL_EXT,
11654 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11655 				CSR_WRITE_FLUSH(sc);
11656 				delay(300*1000); /* XXX too long */
11657 
11658 				/*
11659 				 * Scan PHY addresses 1 through 7.
11660 				 *
11661 				 * I2C access can fail with the I2C register's
11662 				 * ERROR bit set, so suppress error messages
11663 				 * while scanning.
11664 				 */
11665 				sc->phy.no_errprint = true;
11666 				for (i = 1; i < 8; i++)
11667 					mii_attach(sc->sc_dev, &sc->sc_mii,
11668 					    0xffffffff, i, MII_OFFSET_ANY,
11669 					    MIIF_DOPAUSE);
11670 				sc->phy.no_errprint = false;
11671 
11672 				/* Restore previous sfp cage power state */
11673 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11674 			}
11675 		}
11676 	} else
11677 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11678 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11679 
11680 	/*
11681 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11682 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11683 	 */
11684 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11685 		|| (sc->sc_type == WM_T_PCH_SPT)
11686 		|| (sc->sc_type == WM_T_PCH_CNP))
11687 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11688 		wm_set_mdio_slow_mode_hv(sc);
11689 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11690 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11691 	}
11692 
11693 	/*
11694 	 * (For ICH8 variants)
11695 	 * If PHY detection failed, use BM's r/w function and retry.
11696 	 */
11697 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11698 		/* if failed, retry with *_bm_* */
11699 		aprint_verbose_dev(dev, "Assumed PHY access function "
11700 		    "(type = %d) might be incorrect. Use BM and retry.\n",
11701 		    sc->sc_phytype);
11702 		sc->sc_phytype = WMPHY_BM;
11703 		mii->mii_readreg = wm_gmii_bm_readreg;
11704 		mii->mii_writereg = wm_gmii_bm_writereg;
11705 
11706 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11707 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11708 	}
11709 
11710 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11711 		/* No PHY was found */
11712 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11713 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11714 		sc->sc_phytype = WMPHY_NONE;
11715 	} else {
11716 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11717 
11718 		/*
11719 		 * PHY found! Check PHY type again by the second call of
11720 		 * wm_gmii_setup_phytype.
11721 		 */
11722 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11723 		    child->mii_mpd_model);
11724 
11725 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11726 	}
11727 }
11728 
11729 /*
11730  * wm_gmii_mediachange:	[ifmedia interface function]
11731  *
11732  *	Set hardware to newly-selected media on a 1000BASE-T device.
11733  */
11734 static int
11735 wm_gmii_mediachange(struct ifnet *ifp)
11736 {
11737 	struct wm_softc *sc = ifp->if_softc;
11738 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11739 	uint32_t reg;
11740 	int rc;
11741 
11742 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11743 		device_xname(sc->sc_dev), __func__));
11744 
11745 	KASSERT(mutex_owned(sc->sc_core_lock));
11746 
11747 	if ((sc->sc_if_flags & IFF_UP) == 0)
11748 		return 0;
11749 
11750 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11751 	if ((sc->sc_type == WM_T_82580)
11752 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11753 	    || (sc->sc_type == WM_T_I211)) {
11754 		reg = CSR_READ(sc, WMREG_PHPM);
11755 		reg &= ~PHPM_GO_LINK_D;
11756 		CSR_WRITE(sc, WMREG_PHPM, reg);
11757 	}
11758 
11759 	/* Disable D0 LPLU. */
11760 	wm_lplu_d0_disable(sc);
11761 
11762 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11763 	sc->sc_ctrl |= CTRL_SLU;
11764 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11765 	    || (sc->sc_type > WM_T_82543)) {
11766 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11767 	} else {
11768 		sc->sc_ctrl &= ~CTRL_ASDE;
11769 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11770 		if (ife->ifm_media & IFM_FDX)
11771 			sc->sc_ctrl |= CTRL_FD;
11772 		switch (IFM_SUBTYPE(ife->ifm_media)) {
11773 		case IFM_10_T:
11774 			sc->sc_ctrl |= CTRL_SPEED_10;
11775 			break;
11776 		case IFM_100_TX:
11777 			sc->sc_ctrl |= CTRL_SPEED_100;
11778 			break;
11779 		case IFM_1000_T:
11780 			sc->sc_ctrl |= CTRL_SPEED_1000;
11781 			break;
11782 		case IFM_NONE:
11783 			/* There is no specific setting for IFM_NONE */
11784 			break;
11785 		default:
11786 			panic("wm_gmii_mediachange: bad media 0x%x",
11787 			    ife->ifm_media);
11788 		}
11789 	}
11790 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11791 	CSR_WRITE_FLUSH(sc);
11792 
11793 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11794 		wm_serdes_mediachange(ifp);
11795 
11796 	if (sc->sc_type <= WM_T_82543)
11797 		wm_gmii_reset(sc);
11798 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11799 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11800 		/* Allow time for the SFP cage to power up the PHY */
11801 		delay(300 * 1000);
11802 		wm_gmii_reset(sc);
11803 	}
11804 
11805 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11806 		return 0;
11807 	return rc;
11808 }
11809 
11810 /*
11811  * wm_gmii_mediastatus:	[ifmedia interface function]
11812  *
11813  *	Get the current interface media status on a 1000BASE-T device.
11814  */
11815 static void
11816 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11817 {
11818 	struct wm_softc *sc = ifp->if_softc;
11819 
11820 	KASSERT(mutex_owned(sc->sc_core_lock));
11821 
11822 	ether_mediastatus(ifp, ifmr);
11823 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11824 	    | sc->sc_flowflags;
11825 }
11826 
11827 #define	MDI_IO		CTRL_SWDPIN(2)
11828 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
11829 #define	MDI_CLK		CTRL_SWDPIN(3)
11830 
11831 static void
11832 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11833 {
11834 	uint32_t i, v;
11835 
11836 	v = CSR_READ(sc, WMREG_CTRL);
11837 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11838 	v |= MDI_DIR | CTRL_SWDPIO(3);
11839 
11840 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11841 		if (data & i)
11842 			v |= MDI_IO;
11843 		else
11844 			v &= ~MDI_IO;
11845 		CSR_WRITE(sc, WMREG_CTRL, v);
11846 		CSR_WRITE_FLUSH(sc);
11847 		delay(10);
11848 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11849 		CSR_WRITE_FLUSH(sc);
11850 		delay(10);
11851 		CSR_WRITE(sc, WMREG_CTRL, v);
11852 		CSR_WRITE_FLUSH(sc);
11853 		delay(10);
11854 	}
11855 }
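
/*
 * Illustration (not driver code): a minimal, standalone sketch of the
 * MSB-first walk used by wm_i82543_mii_sendbits() above. __BIT(n) is
 * assumed to expand to (1U << (n)); the snippet only prints the bits
 * it would clock out.
 */
#if 0
#include <stdio.h>

#define __BIT(n)	(1U << (n))

static void
print_sendbits(unsigned int data, int nbits)
{
	unsigned int i;

	/* Walk from the most significant requested bit down to bit 0. */
	for (i = __BIT(nbits - 1); i != 0; i >>= 1)
		putchar((data & i) ? '1' : '0');
	putchar('\n');
}

int
main(void)
{
	print_sendbits(0x2, 4);	/* prints "0010" */
	return 0;
}
#endif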
11856 
11857 static uint16_t
11858 wm_i82543_mii_recvbits(struct wm_softc *sc)
11859 {
11860 	uint32_t v, i;
11861 	uint16_t data = 0;
11862 
11863 	v = CSR_READ(sc, WMREG_CTRL);
11864 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11865 	v |= CTRL_SWDPIO(3);
11866 
11867 	CSR_WRITE(sc, WMREG_CTRL, v);
11868 	CSR_WRITE_FLUSH(sc);
11869 	delay(10);
11870 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11871 	CSR_WRITE_FLUSH(sc);
11872 	delay(10);
11873 	CSR_WRITE(sc, WMREG_CTRL, v);
11874 	CSR_WRITE_FLUSH(sc);
11875 	delay(10);
11876 
11877 	for (i = 0; i < 16; i++) {
11878 		data <<= 1;
11879 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11880 		CSR_WRITE_FLUSH(sc);
11881 		delay(10);
11882 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11883 			data |= 1;
11884 		CSR_WRITE(sc, WMREG_CTRL, v);
11885 		CSR_WRITE_FLUSH(sc);
11886 		delay(10);
11887 	}
11888 
11889 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11890 	CSR_WRITE_FLUSH(sc);
11891 	delay(10);
11892 	CSR_WRITE(sc, WMREG_CTRL, v);
11893 	CSR_WRITE_FLUSH(sc);
11894 	delay(10);
11895 
11896 	return data;
11897 }
11898 
11899 #undef MDI_IO
11900 #undef MDI_DIR
11901 #undef MDI_CLK
11902 
11903 /*
11904  * wm_gmii_i82543_readreg:	[mii interface function]
11905  *
11906  *	Read a PHY register on the GMII (i82543 version).
11907  */
11908 static int
11909 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11910 {
11911 	struct wm_softc *sc = device_private(dev);
11912 
11913 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11914 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11915 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11916 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
11917 
11918 	DPRINTF(sc, WM_DEBUG_GMII,
11919 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11920 		device_xname(dev), phy, reg, *val));
11921 
11922 	return 0;
11923 }
11924 
11925 /*
11926  * wm_gmii_i82543_writereg:	[mii interface function]
11927  *
11928  *	Write a PHY register on the GMII (i82543 version).
11929  */
11930 static int
11931 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11932 {
11933 	struct wm_softc *sc = device_private(dev);
11934 
11935 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11936 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11937 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11938 	    (MII_COMMAND_START << 30), 32);
11939 
11940 	return 0;
11941 }
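
/*
 * Illustration (not driver code): how the bit-banged read command above
 * is laid out. In IEEE 802.3 Clause 22 terms, the 14 bits sent after
 * the 32-bit preamble are: start (bits 13:12), opcode (11:10), PHY
 * address (9:5) and register address (4:0). The MII_COMMAND_* values
 * are assumed to match those used by the driver (start=0x1, read=0x2).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define MII_COMMAND_START	0x01
#define MII_COMMAND_READ	0x02

int
main(void)
{
	int phy = 1, reg = 2;
	uint32_t frame = reg | (phy << 5) | (MII_COMMAND_READ << 10) |
	    (MII_COMMAND_START << 12);

	printf("read frame = 0x%04x\n", (unsigned)frame); /* prints 0x1822 */
	return 0;
}
#endif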
11942 
11943 /*
11944  * wm_gmii_mdic_readreg:	[mii interface function]
11945  *
11946  *	Read a PHY register on the GMII.
11947  */
11948 static int
11949 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11950 {
11951 	struct wm_softc *sc = device_private(dev);
11952 	uint32_t mdic = 0;
11953 	int i;
11954 
11955 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11956 	    && (reg > MII_ADDRMASK)) {
11957 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11958 		    __func__, sc->sc_phytype, reg);
11959 		reg &= MII_ADDRMASK;
11960 	}
11961 
11962 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11963 	    MDIC_REGADD(reg));
11964 
11965 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11966 		delay(50);
11967 		mdic = CSR_READ(sc, WMREG_MDIC);
11968 		if (mdic & MDIC_READY)
11969 			break;
11970 	}
11971 
11972 	if ((mdic & MDIC_READY) == 0) {
11973 		DPRINTF(sc, WM_DEBUG_GMII,
11974 		    ("%s: MDIC read timed out: phy %d reg %d\n",
11975 			device_xname(dev), phy, reg));
11976 		return ETIMEDOUT;
11977 	} else if (mdic & MDIC_E) {
11978 		/* This is normal if no PHY is present. */
11979 		DPRINTF(sc, WM_DEBUG_GMII,
11980 		    ("%s: MDIC read error: phy %d reg %d\n",
11981 			device_xname(sc->sc_dev), phy, reg));
11982 		return -1;
11983 	} else
11984 		*val = MDIC_DATA(mdic);
11985 
11986 	/*
11987 	 * Allow some time after each MDIC transaction to avoid
11988 	 * reading duplicate data in the next MDIC transaction.
11989 	 */
11990 	if (sc->sc_type == WM_T_PCH2)
11991 		delay(100);
11992 
11993 	return 0;
11994 }
11995 
11996 /*
11997  * wm_gmii_mdic_writereg:	[mii interface function]
11998  *
11999  *	Write a PHY register on the GMII.
12000  */
12001 static int
12002 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
12003 {
12004 	struct wm_softc *sc = device_private(dev);
12005 	uint32_t mdic = 0;
12006 	int i;
12007 
12008 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12009 	    && (reg > MII_ADDRMASK)) {
12010 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12011 		    __func__, sc->sc_phytype, reg);
12012 		reg &= MII_ADDRMASK;
12013 	}
12014 
12015 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
12016 	    MDIC_REGADD(reg) | MDIC_DATA(val));
12017 
12018 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12019 		delay(50);
12020 		mdic = CSR_READ(sc, WMREG_MDIC);
12021 		if (mdic & MDIC_READY)
12022 			break;
12023 	}
12024 
12025 	if ((mdic & MDIC_READY) == 0) {
12026 		DPRINTF(sc, WM_DEBUG_GMII,
12027 		    ("%s: MDIC write timed out: phy %d reg %d\n",
12028 			device_xname(dev), phy, reg));
12029 		return ETIMEDOUT;
12030 	} else if (mdic & MDIC_E) {
12031 		DPRINTF(sc, WM_DEBUG_GMII,
12032 		    ("%s: MDIC write error: phy %d reg %d\n",
12033 			device_xname(dev), phy, reg));
12034 		return -1;
12035 	}
12036 
12037 	/*
12038 	 * Allow some time after each MDIC transaction to avoid
12039 	 * reading duplicate data in the next MDIC transaction.
12040 	 */
12041 	if (sc->sc_type == WM_T_PCH2)
12042 		delay(100);
12043 
12044 	return 0;
12045 }
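
/*
 * Illustration (not driver code): the poll-with-timeout idiom shared by
 * the two MDIC helpers above, reduced to a self-contained skeleton.
 * read_ready() is a hypothetical stand-in for checking MDIC_READY in
 * CSR_READ(sc, WMREG_MDIC); the delay is omitted.
 */
#if 0
#include <errno.h>
#include <stdio.h>

/* Hypothetical hardware model: ready on the third poll. */
static int fake_ready_after = 3;
static int
read_ready(void)
{
	return --fake_ready_after <= 0;
}

static int
poll_ready(int loops)
{
	int i;

	for (i = 0; i < loops; i++) {
		/* delay(50) in the driver; omitted here */
		if (read_ready())
			return 0;	/* transaction completed */
	}
	return ETIMEDOUT;		/* ready bit never came up */
}

int
main(void)
{
	printf("poll_ready -> %d\n", poll_ready(10));	/* prints 0 */
	return 0;
}
#endif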
12046 
12047 /*
12048  * wm_gmii_i82544_readreg:	[mii interface function]
12049  *
12050  *	Read a PHY register on the GMII.
12051  */
12052 static int
12053 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
12054 {
12055 	struct wm_softc *sc = device_private(dev);
12056 	int rv;
12057 
12058 	rv = sc->phy.acquire(sc);
12059 	if (rv != 0) {
12060 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12061 		return rv;
12062 	}
12063 
12064 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
12065 
12066 	sc->phy.release(sc);
12067 
12068 	return rv;
12069 }
12070 
12071 static int
12072 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12073 {
12074 	struct wm_softc *sc = device_private(dev);
12075 	int rv;
12076 
12077 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12078 		switch (sc->sc_phytype) {
12079 		case WMPHY_IGP:
12080 		case WMPHY_IGP_2:
12081 		case WMPHY_IGP_3:
12082 			rv = wm_gmii_mdic_writereg(dev, phy,
12083 			    IGPHY_PAGE_SELECT, reg);
12084 			if (rv != 0)
12085 				return rv;
12086 			break;
12087 		default:
12088 #ifdef WM_DEBUG
12089 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
12090 			    __func__, sc->sc_phytype, reg);
12091 #endif
12092 			break;
12093 		}
12094 	}
12095 
12096 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12097 }
12098 
12099 /*
12100  * wm_gmii_i82544_writereg:	[mii interface function]
12101  *
12102  *	Write a PHY register on the GMII.
12103  */
12104 static int
12105 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
12106 {
12107 	struct wm_softc *sc = device_private(dev);
12108 	int rv;
12109 
12110 	rv = sc->phy.acquire(sc);
12111 	if (rv != 0) {
12112 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12113 		return rv;
12114 	}
12115 
12116 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
12117 	sc->phy.release(sc);
12118 
12119 	return rv;
12120 }
12121 
12122 static int
12123 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12124 {
12125 	struct wm_softc *sc = device_private(dev);
12126 	int rv;
12127 
12128 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12129 		switch (sc->sc_phytype) {
12130 		case WMPHY_IGP:
12131 		case WMPHY_IGP_2:
12132 		case WMPHY_IGP_3:
12133 			rv = wm_gmii_mdic_writereg(dev, phy,
12134 			    IGPHY_PAGE_SELECT, reg);
12135 			if (rv != 0)
12136 				return rv;
12137 			break;
12138 		default:
12139 #ifdef WM_DEBUG
12140 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
12141 			    __func__, sc->sc_phytype, reg);
12142 #endif
12143 			break;
12144 		}
12145 	}
12146 
12147 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12148 }
12149 
12150 /*
12151  * wm_gmii_i80003_readreg:	[mii interface function]
12152  *
12153  *	Read a PHY register on the Kumeran bus.
12154  * This could be handled by the PHY layer if we didn't have to lock the
12155  * resource ...
12156  */
12157 static int
12158 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
12159 {
12160 	struct wm_softc *sc = device_private(dev);
12161 	int page_select;
12162 	uint16_t temp, temp2;
12163 	int rv;
12164 
12165 	if (phy != 1) /* Only one PHY on kumeran bus */
12166 		return -1;
12167 
12168 	rv = sc->phy.acquire(sc);
12169 	if (rv != 0) {
12170 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12171 		return rv;
12172 	}
12173 
12174 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12175 		page_select = GG82563_PHY_PAGE_SELECT;
12176 	else {
12177 		/*
12178 		 * Use Alternative Page Select register to access registers
12179 		 * 30 and 31.
12180 		 */
12181 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
12182 	}
12183 	temp = reg >> GG82563_PAGE_SHIFT;
12184 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12185 		goto out;
12186 
12187 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12188 		/*
12189 		 * Wait more 200us for a bug of the ready bit in the MDIC
12190 		 * Wait another 200us to work around a bug in the ready bit
12191 		 * of the MDIC register.
12192 		delay(200);
12193 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12194 		if ((rv != 0) || (temp2 != temp)) {
12195 			device_printf(dev, "%s failed\n", __func__);
12196 			rv = -1;
12197 			goto out;
12198 		}
12199 		delay(200);
12200 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12201 		delay(200);
12202 	} else
12203 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12204 
12205 out:
12206 	sc->phy.release(sc);
12207 	return rv;
12208 }
12209 
12210 /*
12211  * wm_gmii_i80003_writereg:	[mii interface function]
12212  *
12213  *	Write a PHY register on the Kumeran bus.
12214  * This could be handled by the PHY layer if we didn't have to lock the
12215  * resource ...
12216  */
12217 static int
12218 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
12219 {
12220 	struct wm_softc *sc = device_private(dev);
12221 	int page_select, rv;
12222 	uint16_t temp, temp2;
12223 
12224 	if (phy != 1) /* Only one PHY on kumeran bus */
12225 		return -1;
12226 
12227 	rv = sc->phy.acquire(sc);
12228 	if (rv != 0) {
12229 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12230 		return rv;
12231 	}
12232 
12233 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12234 		page_select = GG82563_PHY_PAGE_SELECT;
12235 	else {
12236 		/*
12237 		 * Use Alternative Page Select register to access registers
12238 		 * 30 and 31.
12239 		 */
12240 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
12241 	}
12242 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
12243 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12244 		goto out;
12245 
12246 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12247 		/*
12248 		 * Wait another 200us to work around a bug in the ready bit
12249 		 * of the MDIC register.
12250 		 */
12251 		delay(200);
12252 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12253 		if ((rv != 0) || (temp2 != temp)) {
12254 			device_printf(dev, "%s failed\n", __func__);
12255 			rv = -1;
12256 			goto out;
12257 		}
12258 		delay(200);
12259 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12260 		delay(200);
12261 	} else
12262 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12263 
12264 out:
12265 	sc->phy.release(sc);
12266 	return rv;
12267 }
12268 
12269 /*
12270  * wm_gmii_bm_readreg:	[mii interface function]
12271  *
12272  *	Read a PHY register on BM PHYs (ICH8 variants, 82574 and 82583).
12273  * This could be handled by the PHY layer if we didn't have to lock the
12274  * resource ...
12275  */
12276 static int
12277 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
12278 {
12279 	struct wm_softc *sc = device_private(dev);
12280 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
12281 	int rv;
12282 
12283 	rv = sc->phy.acquire(sc);
12284 	if (rv != 0) {
12285 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12286 		return rv;
12287 	}
12288 
12289 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12290 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
12291 		    || (reg == 31)) ? 1 : phy;
12292 	/* Page 800 works differently than the rest so it has its own func */
12293 	if (page == BM_WUC_PAGE) {
12294 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12295 		goto release;
12296 	}
12297 
12298 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12299 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
12300 		    && (sc->sc_type != WM_T_82583))
12301 			rv = wm_gmii_mdic_writereg(dev, phy,
12302 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12303 		else
12304 			rv = wm_gmii_mdic_writereg(dev, phy,
12305 			    BME1000_PHY_PAGE_SELECT, page);
12306 		if (rv != 0)
12307 			goto release;
12308 	}
12309 
12310 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12311 
12312 release:
12313 	sc->phy.release(sc);
12314 	return rv;
12315 }
12316 
12317 /*
12318  * wm_gmii_bm_writereg:	[mii interface function]
12319  *
12320  *	Write a PHY register on BM PHYs (ICH8 variants, 82574 and 82583).
12321  * This could be handled by the PHY layer if we didn't have to lock the
12322  * resource ...
12323  */
12324 static int
12325 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
12326 {
12327 	struct wm_softc *sc = device_private(dev);
12328 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
12329 	int rv;
12330 
12331 	rv = sc->phy.acquire(sc);
12332 	if (rv != 0) {
12333 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12334 		return rv;
12335 	}
12336 
12337 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12338 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
12339 		    || (reg == 31)) ? 1 : phy;
12340 	/* Page 800 works differently than the rest so it has its own func */
12341 	if (page == BM_WUC_PAGE) {
12342 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
12343 		goto release;
12344 	}
12345 
12346 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12347 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
12348 		    && (sc->sc_type != WM_T_82583))
12349 			rv = wm_gmii_mdic_writereg(dev, phy,
12350 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12351 		else
12352 			rv = wm_gmii_mdic_writereg(dev, phy,
12353 			    BME1000_PHY_PAGE_SELECT, page);
12354 		if (rv != 0)
12355 			goto release;
12356 	}
12357 
12358 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12359 
12360 release:
12361 	sc->phy.release(sc);
12362 	return rv;
12363 }
12364 
12365 /*
12366  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
12367  *  @dev: pointer to the HW structure
12368  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
12369  *
12370  *  Assumes semaphore already acquired and phy_reg points to a valid memory
12371  *  address to store contents of the BM_WUC_ENABLE_REG register.
12372  */
12373 static int
12374 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12375 {
12376 #ifdef WM_DEBUG
12377 	struct wm_softc *sc = device_private(dev);
12378 #endif
12379 	uint16_t temp;
12380 	int rv;
12381 
12382 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12383 		device_xname(dev), __func__));
12384 
12385 	if (!phy_regp)
12386 		return -1;
12387 
12388 	/* All page select, port ctrl and wakeup registers use phy address 1 */
12389 
12390 	/* Select Port Control Registers page */
12391 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12392 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12393 	if (rv != 0)
12394 		return rv;
12395 
12396 	/* Read WUCE and save it */
12397 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12398 	if (rv != 0)
12399 		return rv;
12400 
12401 	/* Enable both PHY wakeup mode and Wakeup register page writes.
12402 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
12403 	 */
12404 	temp = *phy_regp;
12405 	temp |= BM_WUC_ENABLE_BIT;
12406 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12407 
12408 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12409 		return rv;
12410 
12411 	/* Select Host Wakeup Registers page - caller now able to write
12412 	 * registers on the Wakeup registers page
12413 	 */
12414 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12415 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12416 }
12417 
12418 /*
12419  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12420  *  @dev: pointer to the HW structure
12421  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12422  *
12423  *  Restore BM_WUC_ENABLE_REG to its original value.
12424  *
12425  *  Assumes semaphore already acquired and *phy_reg is the contents of the
12426  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12427  *  caller.
12428  */
12429 static int
12430 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12431 {
12432 #ifdef WM_DEBUG
12433 	struct wm_softc *sc = device_private(dev);
12434 #endif
12435 
12436 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12437 		device_xname(dev), __func__));
12438 
12439 	if (!phy_regp)
12440 		return -1;
12441 
12442 	/* Select Port Control Registers page */
12443 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12444 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12445 
12446 	/* Restore 769.17 to its original value */
12447 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12448 
12449 	return 0;
12450 }
12451 
12452 /*
12453  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12454  *  @sc: pointer to the HW structure
12455  *  @offset: register offset to be read or written
12456  *  @val: pointer to the data to read or write
12457  *  @rd: determines if operation is read or write
12458  *  @page_set: BM_WUC_PAGE already set and access enabled
12459  *
12460  *  Read the PHY register at offset and store the retrieved information in
12461  *  data, or write data to PHY register at offset.  Note the procedure to
12462  *  access the PHY wakeup registers is different than reading the other PHY
12463  *  access the PHY wakeup registers is different from that for the other
12464  *  PHY registers. It works as follows:
12465  *  2) Set page to 800 for host (801 if we were manageability)
12466  *  3) Write the address using the address opcode (0x11)
12467  *  4) Read or write the data using the data opcode (0x12)
12468  *  5) Restore 769.17.2 to its original value
12469  *
12470  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12471  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12472  *
12473  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
12474  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12475  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
12476  */
12477 static int
12478 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
12479     bool page_set)
12480 {
12481 	struct wm_softc *sc = device_private(dev);
12482 	uint16_t regnum = BM_PHY_REG_NUM(offset);
12483 	uint16_t page = BM_PHY_REG_PAGE(offset);
12484 	uint16_t wuce;
12485 	int rv = 0;
12486 
12487 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12488 		device_xname(dev), __func__));
12489 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
12490 	if ((sc->sc_type == WM_T_PCH)
12491 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12492 		device_printf(dev,
12493 		    "Attempting to access page %d while gig enabled.\n", page);
12494 	}
12495 
12496 	if (!page_set) {
12497 		/* Enable access to PHY wakeup registers */
12498 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12499 		if (rv != 0) {
12500 			device_printf(dev,
12501 			    "%s: Could not enable PHY wakeup reg access\n",
12502 			    __func__);
12503 			return rv;
12504 		}
12505 	}
12506 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12507 		device_xname(sc->sc_dev), __func__, page, regnum));
12508 
12509 	/*
12510 	 * 2) Access PHY wakeup register.
12511 	 * See wm_access_phy_wakeup_reg_bm.
12512 	 */
12513 
12514 	/* Write the Wakeup register page offset value using opcode 0x11 */
12515 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12516 	if (rv != 0)
12517 		return rv;
12518 
12519 	if (rd) {
12520 		/* Read the Wakeup register page value using opcode 0x12 */
12521 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12522 	} else {
12523 		/* Write the Wakeup register page value using opcode 0x12 */
12524 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12525 	}
12526 	if (rv != 0)
12527 		return rv;
12528 
12529 	if (!page_set)
12530 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12531 
12532 	return rv;
12533 }
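
/*
 * Illustration (not driver code): a simplified sketch of the page/regnum
 * split performed by BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() above. The real
 * macros live in if_wmreg.h; the 5-bit register field and page-above-
 * bit-5 layout here are assumptions for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_REG_MASK	0x1f				/* hypothetical */
#define EX_PAGE(off)	((uint16_t)((off) >> 5))	/* hypothetical */
#define EX_REGNUM(off)	((uint16_t)((off) & EX_REG_MASK))

int
main(void)
{
	uint32_t off = (800 << 5) | 17;	/* page 800, register 17 */

	printf("page %u regnum %u\n", EX_PAGE(off), EX_REGNUM(off));
	return 0;
}
#endif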
12534 
12535 /*
12536  * wm_gmii_hv_readreg:	[mii interface function]
12537  *
12538  *	Read a PHY register on HV PHYs (PCH and newer).
12539  * This could be handled by the PHY layer if we didn't have to lock the
12540  * resource ...
12541  */
12542 static int
12543 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12544 {
12545 	struct wm_softc *sc = device_private(dev);
12546 	int rv;
12547 
12548 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12549 		device_xname(dev), __func__));
12550 
12551 	rv = sc->phy.acquire(sc);
12552 	if (rv != 0) {
12553 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12554 		return rv;
12555 	}
12556 
12557 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12558 	sc->phy.release(sc);
12559 	return rv;
12560 }
12561 
12562 static int
12563 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12564 {
12565 	uint16_t page = BM_PHY_REG_PAGE(reg);
12566 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12567 	int rv;
12568 
12569 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12570 
12571 	/* Page 800 works differently than the rest so it has its own func */
12572 	if (page == BM_WUC_PAGE)
12573 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12574 
12575 	/*
12576 	 * Pages lower than 768 work differently from the rest, so they have
12577 	 * their own function.
12578 	 */
12579 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12580 		device_printf(dev, "gmii_hv_readreg!!!\n");
12581 		return -1;
12582 	}
12583 
12584 	/*
12585 	 * XXX I21[789] documents say that the SMBus Address register is at
12586 	 * PHY address 01, Page 0 (not 768), Register 26.
12587 	 */
12588 	if (page == HV_INTC_FC_PAGE_START)
12589 		page = 0;
12590 
12591 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12592 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12593 		    page << BME1000_PAGE_SHIFT);
12594 		if (rv != 0)
12595 			return rv;
12596 	}
12597 
12598 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12599 }
12600 
12601 /*
12602  * wm_gmii_hv_writereg:	[mii interface function]
12603  *
12604  *	Write a PHY register on HV PHYs (PCH and newer).
12605  * This could be handled by the PHY layer if we didn't have to lock the
12606  * resource ...
12607  */
12608 static int
12609 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12610 {
12611 	struct wm_softc *sc = device_private(dev);
12612 	int rv;
12613 
12614 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12615 		device_xname(dev), __func__));
12616 
12617 	rv = sc->phy.acquire(sc);
12618 	if (rv != 0) {
12619 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12620 		return rv;
12621 	}
12622 
12623 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12624 	sc->phy.release(sc);
12625 
12626 	return rv;
12627 }
12628 
12629 static int
12630 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12631 {
12632 	struct wm_softc *sc = device_private(dev);
12633 	uint16_t page = BM_PHY_REG_PAGE(reg);
12634 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12635 	int rv;
12636 
12637 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12638 
12639 	/* Page 800 works differently than the rest so it has its own func */
12640 	if (page == BM_WUC_PAGE)
12641 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12642 		    false);
12643 
12644 	/*
12645 	 * Pages lower than 768 work differently from the rest, so they have
12646 	 * their own function.
12647 	 */
12648 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12649 		device_printf(dev, "gmii_hv_writereg!!!\n");
12650 		return -1;
12651 	}
12652 
12653 	{
12654 		/*
12655 		 * XXX I21[789] documents say that the SMBus Address register
12656 		 * is at PHY address 01, Page 0 (not 768), Register 26.
12657 		 */
12658 		if (page == HV_INTC_FC_PAGE_START)
12659 			page = 0;
12660 
12661 		/*
12662 		 * XXX Workaround MDIO accesses being disabled after entering
12663 		 * IEEE Power Down (whenever bit 11 of the PHY control
12664 		 * register is set)
12665 		 */
12666 		if (sc->sc_phytype == WMPHY_82578) {
12667 			struct mii_softc *child;
12668 
12669 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12670 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
12671 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12672 			    && ((val & (1 << 11)) != 0)) {
12673 				device_printf(dev, "XXX need workaround\n");
12674 			}
12675 		}
12676 
12677 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12678 			rv = wm_gmii_mdic_writereg(dev, 1,
12679 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12680 			if (rv != 0)
12681 				return rv;
12682 		}
12683 	}
12684 
12685 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12686 }
12687 
12688 /*
12689  * wm_gmii_82580_readreg:	[mii interface function]
12690  *
12691  *	Read a PHY register on the 82580 and I350.
12692  * This could be handled by the PHY layer if we didn't have to lock the
12693  * resource ...
12694  */
12695 static int
12696 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12697 {
12698 	struct wm_softc *sc = device_private(dev);
12699 	int rv;
12700 
12701 	rv = sc->phy.acquire(sc);
12702 	if (rv != 0) {
12703 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12704 		return rv;
12705 	}
12706 
12707 #ifdef DIAGNOSTIC
12708 	if (reg > MII_ADDRMASK) {
12709 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12710 		    __func__, sc->sc_phytype, reg);
12711 		reg &= MII_ADDRMASK;
12712 	}
12713 #endif
12714 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12715 
12716 	sc->phy.release(sc);
12717 	return rv;
12718 }
12719 
12720 /*
12721  * wm_gmii_82580_writereg:	[mii interface function]
12722  *
12723  *	Write a PHY register on the 82580 and I350.
12724  * This could be handled by the PHY layer if we didn't have to lock the
12725  * resource ...
12726  */
12727 static int
12728 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12729 {
12730 	struct wm_softc *sc = device_private(dev);
12731 	int rv;
12732 
12733 	rv = sc->phy.acquire(sc);
12734 	if (rv != 0) {
12735 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12736 		return rv;
12737 	}
12738 
12739 #ifdef DIAGNOSTIC
12740 	if (reg > MII_ADDRMASK) {
12741 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12742 		    __func__, sc->sc_phytype, reg);
12743 		reg &= MII_ADDRMASK;
12744 	}
12745 #endif
12746 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12747 
12748 	sc->phy.release(sc);
12749 	return rv;
12750 }
12751 
12752 /*
12753  * wm_gmii_gs40g_readreg:	[mii interface function]
12754  *
12755  *	Read a PHY register on the I210 and I211.
12756  * This could be handled by the PHY layer if we didn't have to lock the
12757  * resource ...
12758  */
12759 static int
12760 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12761 {
12762 	struct wm_softc *sc = device_private(dev);
12763 	int page, offset;
12764 	int rv;
12765 
12766 	/* Acquire semaphore */
12767 	rv = sc->phy.acquire(sc);
12768 	if (rv != 0) {
12769 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12770 		return rv;
12771 	}
12772 
12773 	/* Page select */
12774 	page = reg >> GS40G_PAGE_SHIFT;
12775 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12776 	if (rv != 0)
12777 		goto release;
12778 
12779 	/* Read reg */
12780 	offset = reg & GS40G_OFFSET_MASK;
12781 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12782 
12783 release:
12784 	sc->phy.release(sc);
12785 	return rv;
12786 }
12787 
12788 /*
12789  * wm_gmii_gs40g_writereg:	[mii interface function]
12790  *
12791  *	Write a PHY register on the I210 and I211.
12792  * This could be handled by the PHY layer if we didn't have to lock the
12793  * resource ...
12794  */
12795 static int
12796 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12797 {
12798 	struct wm_softc *sc = device_private(dev);
12799 	uint16_t page;
12800 	int offset, rv;
12801 
12802 	/* Acquire semaphore */
12803 	rv = sc->phy.acquire(sc);
12804 	if (rv != 0) {
12805 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12806 		return rv;
12807 	}
12808 
12809 	/* Page select */
12810 	page = reg >> GS40G_PAGE_SHIFT;
12811 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12812 	if (rv != 0)
12813 		goto release;
12814 
12815 	/* Write reg */
12816 	offset = reg & GS40G_OFFSET_MASK;
12817 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12818 
12819 release:
12820 	/* Release semaphore */
12821 	sc->phy.release(sc);
12822 	return rv;
12823 }
12824 
12825 /*
12826  * wm_gmii_statchg:	[mii interface function]
12827  *
12828  *	Callback from MII layer when media changes.
12829  */
12830 static void
12831 wm_gmii_statchg(struct ifnet *ifp)
12832 {
12833 	struct wm_softc *sc = ifp->if_softc;
12834 	struct mii_data *mii = &sc->sc_mii;
12835 
12836 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12837 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12838 	sc->sc_fcrtl &= ~FCRTL_XONE;
12839 
12840 	/* Get flow control negotiation result. */
12841 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12842 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12843 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12844 		mii->mii_media_active &= ~IFM_ETH_FMASK;
12845 	}
12846 
12847 	if (sc->sc_flowflags & IFM_FLOW) {
12848 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12849 			sc->sc_ctrl |= CTRL_TFCE;
12850 			sc->sc_fcrtl |= FCRTL_XONE;
12851 		}
12852 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12853 			sc->sc_ctrl |= CTRL_RFCE;
12854 	}
12855 
12856 	if (mii->mii_media_active & IFM_FDX) {
12857 		DPRINTF(sc, WM_DEBUG_LINK,
12858 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12859 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12860 	} else {
12861 		DPRINTF(sc, WM_DEBUG_LINK,
12862 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12863 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12864 	}
12865 
12866 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12867 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12868 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12869 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12870 	if (sc->sc_type == WM_T_80003) {
12871 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
12872 		case IFM_1000_T:
12873 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12874 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12875 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
12876 			break;
12877 		default:
12878 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12879 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12880 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
12881 			break;
12882 		}
12883 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12884 	}
12885 }
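
/*
 * Illustration (not driver code): the flow-control mapping performed in
 * wm_gmii_statchg() above, reduced to a standalone function. The flag
 * and bit values are hypothetical placeholders for IFM_ETH_TXPAUSE,
 * IFM_ETH_RXPAUSE, CTRL_TFCE and CTRL_RFCE.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_TXPAUSE	0x1	/* hypothetical negotiated-TX-pause flag */
#define EX_RXPAUSE	0x2	/* hypothetical negotiated-RX-pause flag */
#define EX_TFCE		0x10000	/* hypothetical CTRL TX flow-control bit */
#define EX_RFCE		0x20000	/* hypothetical CTRL RX flow-control bit */

static uint32_t
apply_flow(uint32_t ctrl, int flowflags)
{
	ctrl &= ~(EX_TFCE | EX_RFCE);	/* start from a clean slate */
	if (flowflags & EX_TXPAUSE)
		ctrl |= EX_TFCE;	/* we may send PAUSE frames */
	if (flowflags & EX_RXPAUSE)
		ctrl |= EX_RFCE;	/* we honor received PAUSE frames */
	return ctrl;
}

int
main(void)
{
	printf("ctrl = 0x%x\n",
	    (unsigned)apply_flow(0, EX_TXPAUSE | EX_RXPAUSE));
	return 0;
}
#endif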
12886 
12887 /* kumeran related (80003, ICH* and PCH*) */
12888 
12889 /*
12890  * wm_kmrn_readreg:
12891  *
12892  *	Read a Kumeran register.
12893  */
12894 static int
12895 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12896 {
12897 	int rv;
12898 
12899 	if (sc->sc_type == WM_T_80003)
12900 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12901 	else
12902 		rv = sc->phy.acquire(sc);
12903 	if (rv != 0) {
12904 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12905 		    __func__);
12906 		return rv;
12907 	}
12908 
12909 	rv = wm_kmrn_readreg_locked(sc, reg, val);
12910 
12911 	if (sc->sc_type == WM_T_80003)
12912 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12913 	else
12914 		sc->phy.release(sc);
12915 
12916 	return rv;
12917 }
12918 
12919 static int
12920 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12921 {
12922 
12923 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12924 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12925 	    KUMCTRLSTA_REN);
12926 	CSR_WRITE_FLUSH(sc);
12927 	delay(2);
12928 
12929 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12930 
12931 	return 0;
12932 }
12933 
12934 /*
12935  * wm_kmrn_writereg:
12936  *
12937  *	Write a Kumeran register.
12938  */
12939 static int
12940 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12941 {
12942 	int rv;
12943 
12944 	if (sc->sc_type == WM_T_80003)
12945 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12946 	else
12947 		rv = sc->phy.acquire(sc);
12948 	if (rv != 0) {
12949 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12950 		    __func__);
12951 		return rv;
12952 	}
12953 
12954 	rv = wm_kmrn_writereg_locked(sc, reg, val);
12955 
12956 	if (sc->sc_type == WM_T_80003)
12957 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12958 	else
12959 		sc->phy.release(sc);
12960 
12961 	return rv;
12962 }
12963 
12964 static int
12965 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12966 {
12967 
12968 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12969 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12970 
12971 	return 0;
12972 }
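
/*
 * Illustration (not driver code): how a Kumeran read command is composed
 * by wm_kmrn_readreg_locked() above. The field positions are assumptions
 * for illustration (offset field at bits 20:16, read-enable at bit 21,
 * data in the low 16 bits); the authoritative definitions are in
 * if_wmreg.h.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_OFFSET_SHIFT	16		/* hypothetical */
#define EX_OFFSET_MASK	0x001f0000	/* hypothetical */
#define EX_REN		0x00200000	/* hypothetical read-enable */

int
main(void)
{
	int reg = 0x2;
	uint32_t cmd = ((reg << EX_OFFSET_SHIFT) & EX_OFFSET_MASK) | EX_REN;

	printf("cmd = 0x%08x\n", (unsigned)cmd);	/* 0x00220000 */
	return 0;
}
#endif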
12973 
12974 /*
12975  * EMI register related (82579, WMPHY_I217 (PCH2 and newer)).
12976  * This access method is different from IEEE MMD.
12977  */
12978 static int
12979 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12980 {
12981 	struct wm_softc *sc = device_private(dev);
12982 	int rv;
12983 
12984 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12985 	if (rv != 0)
12986 		return rv;
12987 
12988 	if (rd)
12989 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12990 	else
12991 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12992 	return rv;
12993 }
12994 
12995 static int
12996 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12997 {
12998 
12999 	return wm_access_emi_reg_locked(dev, reg, val, true);
13000 }
13001 
13002 static int
13003 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
13004 {
13005 
13006 	return wm_access_emi_reg_locked(dev, reg, &val, false);
13007 }
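
/*
 * Illustration (not driver code): the address/data indirection used by
 * the EMI helpers above, reduced to a standalone sketch. A write to the
 * address register selects which EMI register the subsequent data access
 * hits; the two-step sequence must run under one PHY lock, which is why
 * only *_locked variants exist. The register file below is hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint16_t emi_addr;
static uint16_t emi_regs[64];

static void	addr_write(uint16_t v) { emi_addr = v; }
static void	data_write(uint16_t v) { emi_regs[emi_addr & 63] = v; }
static uint16_t	data_read(void) { return emi_regs[emi_addr & 63]; }

int
main(void)
{
	addr_write(5);		/* step 1: select EMI register 5 */
	data_write(0xbeef);	/* step 2: the data access hits register 5 */
	addr_write(5);
	printf("emi[5] = 0x%04x\n", data_read());	/* 0xbeef */
	return 0;
}
#endif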
13008 
13009 /* SGMII related */
13010 
13011 /*
13012  * wm_sgmii_uses_mdio
13013  *
13014  * Check whether the transaction is to the internal PHY or the external
13015  * MDIO interface. Return true if it's MDIO.
13016  */
13017 static bool
13018 wm_sgmii_uses_mdio(struct wm_softc *sc)
13019 {
13020 	uint32_t reg;
13021 	bool ismdio = false;
13022 
13023 	switch (sc->sc_type) {
13024 	case WM_T_82575:
13025 	case WM_T_82576:
13026 		reg = CSR_READ(sc, WMREG_MDIC);
13027 		ismdio = ((reg & MDIC_DEST) != 0);
13028 		break;
13029 	case WM_T_82580:
13030 	case WM_T_I350:
13031 	case WM_T_I354:
13032 	case WM_T_I210:
13033 	case WM_T_I211:
13034 		reg = CSR_READ(sc, WMREG_MDICNFG);
13035 		ismdio = ((reg & MDICNFG_DEST) != 0);
13036 		break;
13037 	default:
13038 		break;
13039 	}
13040 
13041 	return ismdio;
13042 }
13043 
13044 /* Setup internal SGMII PHY for SFP */
13045 static void
13046 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
13047 {
13048 	uint16_t id1, id2, phyreg;
13049 	int i, rv;
13050 
13051 	if (((sc->sc_flags & WM_F_SGMII) == 0)
13052 	    || ((sc->sc_flags & WM_F_SFP) == 0))
13053 		return;
13054 
13055 	for (i = 0; i < MII_NPHY; i++) {
13056 		sc->phy.no_errprint = true;
13057 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
13058 		if (rv != 0)
13059 			continue;
13060 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
13061 		if (rv != 0)
13062 			continue;
13063 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
13064 			continue;
13065 		sc->phy.no_errprint = false;
13066 
13067 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
13068 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
13069 		phyreg |= ESSR_SGMII_WOC_COPPER;
13070 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
13071 		break;
13072 	}
13073 
13074 }
13075 
13076 /*
13077  * wm_sgmii_readreg:	[mii interface function]
13078  *
13079  *	Read a PHY register on the SGMII
13080  * This could be handled by the PHY layer if we didn't have to lock the
13081  * resource ...
13082  */
13083 static int
13084 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
13085 {
13086 	struct wm_softc *sc = device_private(dev);
13087 	int rv;
13088 
13089 	rv = sc->phy.acquire(sc);
13090 	if (rv != 0) {
13091 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13092 		return rv;
13093 	}
13094 
13095 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
13096 
13097 	sc->phy.release(sc);
13098 	return rv;
13099 }
13100 
13101 static int
13102 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
13103 {
13104 	struct wm_softc *sc = device_private(dev);
13105 	uint32_t i2ccmd;
13106 	int i, rv = 0;
13107 
13108 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13109 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13110 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13111 
13112 	/* Poll the ready bit */
13113 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13114 		delay(50);
13115 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13116 		if (i2ccmd & I2CCMD_READY)
13117 			break;
13118 	}
13119 	if ((i2ccmd & I2CCMD_READY) == 0) {
13120 		device_printf(dev, "I2CCMD Read did not complete\n");
13121 		rv = ETIMEDOUT;
13122 	}
13123 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
13124 		if (!sc->phy.no_errprint)
13125 			device_printf(dev, "I2CCMD Error bit set\n");
13126 		rv = EIO;
13127 	}
13128 
13129 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
13130 
13131 	return rv;
13132 }
13133 
13134 /*
13135  * wm_sgmii_writereg:	[mii interface function]
13136  *
13137  *	Write a PHY register on the SGMII.
13138  * This could be handled by the PHY layer if we didn't have to lock the
13139  * resource ...
13140  */
13141 static int
13142 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
13143 {
13144 	struct wm_softc *sc = device_private(dev);
13145 	int rv;
13146 
13147 	rv = sc->phy.acquire(sc);
13148 	if (rv != 0) {
13149 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13150 		return rv;
13151 	}
13152 
13153 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
13154 
13155 	sc->phy.release(sc);
13156 
13157 	return rv;
13158 }
13159 
13160 static int
13161 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
13162 {
13163 	struct wm_softc *sc = device_private(dev);
13164 	uint32_t i2ccmd;
13165 	uint16_t swapdata;
13166 	int rv = 0;
13167 	int i;
13168 
13169 	/* Swap the data bytes for the I2C interface */
13170 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
13171 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13172 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
13173 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13174 
13175 	/* Poll the ready bit */
13176 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13177 		delay(50);
13178 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13179 		if (i2ccmd & I2CCMD_READY)
13180 			break;
13181 	}
13182 	if ((i2ccmd & I2CCMD_READY) == 0) {
13183 		device_printf(dev, "I2CCMD Write did not complete\n");
13184 		rv = ETIMEDOUT;
13185 	}
13186 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
13187 		device_printf(dev, "I2CCMD Error bit set\n");
13188 		rv = EIO;
13189 	}
13190 
13191 	return rv;
13192 }
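
/*
 * Illustration (not driver code): the 16-bit byte swap applied to I2CCMD
 * data above. The I2C interface moves the high byte first, so the value
 * is swapped on the way out and swapped back the same way on read.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t val = 0x1234;
	uint16_t swapped = ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);

	printf("0x%04x -> 0x%04x\n", val, swapped); /* 0x1234 -> 0x3412 */
	return 0;
}
#endif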
13193 
13194 /* TBI related */
13195 
13196 static bool
13197 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
13198 {
13199 	bool sig;
13200 
13201 	sig = ctrl & CTRL_SWDPIN(1);
13202 
13203 	/*
13204 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
13205 	 * detect a signal, 1 if they don't.
13206 	 */
13207 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
13208 		sig = !sig;
13209 
13210 	return sig;
13211 }
13212 
13213 /*
13214  * wm_tbi_mediainit:
13215  *
13216  *	Initialize media for use on 1000BASE-X devices.
13217  */
13218 static void
13219 wm_tbi_mediainit(struct wm_softc *sc)
13220 {
13221 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13222 	const char *sep = "";
13223 
13224 	if (sc->sc_type < WM_T_82543)
13225 		sc->sc_tipg = TIPG_WM_DFLT;
13226 	else
13227 		sc->sc_tipg = TIPG_LG_DFLT;
13228 
13229 	sc->sc_tbi_serdes_anegticks = 5;
13230 
13231 	/* Initialize our media structures */
13232 	sc->sc_mii.mii_ifp = ifp;
13233 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
13234 
13235 	ifp->if_baudrate = IF_Gbps(1);
13236 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
13237 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13238 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13239 		    wm_serdes_mediachange, wm_serdes_mediastatus,
13240 		    sc->sc_core_lock);
13241 	} else {
13242 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13243 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
13244 	}
13245 
13246 	/*
13247 	 * SWD Pins:
13248 	 *
13249 	 *	0 = Link LED (output)
13250 	 *	1 = Loss Of Signal (input)
13251 	 */
13252 	sc->sc_ctrl |= CTRL_SWDPIO(0);
13253 
13254 	/* XXX Perhaps this is only for TBI */
13255 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13256 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
13257 
13258 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
13259 		sc->sc_ctrl &= ~CTRL_LRST;
13260 
13261 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13262 
13263 #define	ADD(ss, mm, dd)							  \
13264 do {									  \
13265 	aprint_normal("%s%s", sep, ss);					  \
13266 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
13267 	sep = ", ";							  \
13268 } while (/*CONSTCOND*/0)
13269 
13270 	aprint_normal_dev(sc->sc_dev, "");
13271 
13272 	if (sc->sc_type == WM_T_I354) {
13273 		uint32_t status;
13274 
13275 		status = CSR_READ(sc, WMREG_STATUS);
13276 		if (((status & STATUS_2P5_SKU) != 0)
13277 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13278 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
13279 		} else
13280 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
13281 	} else if (sc->sc_type == WM_T_82545) {
13282 		/* Only 82545 is LX (XXX except SFP) */
13283 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13284 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13285 	} else if (sc->sc_sfptype != 0) {
13286 		/* XXX wm(4) fiber/serdes don't use ifm_data */
13287 		switch (sc->sc_sfptype) {
13288 		default:
13289 		case SFF_SFP_ETH_FLAGS_1000SX:
13290 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13291 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13292 			break;
13293 		case SFF_SFP_ETH_FLAGS_1000LX:
13294 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13295 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13296 			break;
13297 		case SFF_SFP_ETH_FLAGS_1000CX:
13298 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
13299 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
13300 			break;
13301 		case SFF_SFP_ETH_FLAGS_1000T:
13302 			ADD("1000baseT", IFM_1000_T, 0);
13303 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
13304 			break;
13305 		case SFF_SFP_ETH_FLAGS_100FX:
13306 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
13307 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
13308 			break;
13309 		}
13310 	} else {
13311 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13312 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13313 	}
13314 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
13315 	aprint_normal("\n");
13316 
13317 #undef ADD
13318 
13319 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
13320 }
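
/*
 * Illustration (not driver code): the do { ... } while (0) wrapper used
 * by the ADD() macro above makes a multi-statement macro behave like a
 * single statement, so it composes safely with if/else. REPORT() here is
 * a hypothetical stand-in for ADD().
 */
#if 0
#include <stdio.h>

#define REPORT(s)							\
do {									\
	printf("media: %s\n", (s));					\
	fflush(stdout);							\
} while (/*CONSTCOND*/0)

int
main(void)
{
	int have_sfp = 1;

	if (have_sfp)
		REPORT("1000baseSX");	/* expands to one statement */
	else
		REPORT("none");
	return 0;
}
#endif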
13321 
13322 /*
13323  * wm_tbi_mediachange:	[ifmedia interface function]
13324  *
13325  *	Set hardware to newly-selected media on a 1000BASE-X device.
13326  */
13327 static int
13328 wm_tbi_mediachange(struct ifnet *ifp)
13329 {
13330 	struct wm_softc *sc = ifp->if_softc;
13331 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13332 	uint32_t status, ctrl;
13333 	bool signal;
13334 	int i;
13335 
13336 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
13337 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13338 		/* XXX need some work for >= 82571 and < 82575 */
13339 		if (sc->sc_type < WM_T_82575)
13340 			return 0;
13341 	}
13342 
13343 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13344 	    || (sc->sc_type >= WM_T_82575))
13345 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13346 
13347 	sc->sc_ctrl &= ~CTRL_LRST;
13348 	sc->sc_txcw = TXCW_ANE;
13349 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13350 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
13351 	else if (ife->ifm_media & IFM_FDX)
13352 		sc->sc_txcw |= TXCW_FD;
13353 	else
13354 		sc->sc_txcw |= TXCW_HD;
13355 
13356 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
13357 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
13358 
13359 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
13360 		device_xname(sc->sc_dev), sc->sc_txcw));
13361 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13362 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13363 	CSR_WRITE_FLUSH(sc);
13364 	delay(1000);
13365 
13366 	ctrl = CSR_READ(sc, WMREG_CTRL);
13367 	signal = wm_tbi_havesignal(sc, ctrl);
13368 
13369 	DPRINTF(sc, WM_DEBUG_LINK,
13370 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
13371 
13372 	if (signal) {
13373 		/* Have signal; wait for the link to come up. */
13374 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
13375 			delay(10000);
13376 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13377 				break;
13378 		}
13379 
13380 		DPRINTF(sc, WM_DEBUG_LINK,
13381 		    ("%s: i = %d after waiting for link\n",
13382 			device_xname(sc->sc_dev), i));
13383 
13384 		status = CSR_READ(sc, WMREG_STATUS);
13385 		DPRINTF(sc, WM_DEBUG_LINK,
13386 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
13387 			__PRIxBIT "\n",
13388 			device_xname(sc->sc_dev), status, STATUS_LU));
13389 		if (status & STATUS_LU) {
13390 			/* Link is up. */
13391 			DPRINTF(sc, WM_DEBUG_LINK,
13392 			    ("%s: LINK: set media -> link up %s\n",
13393 				device_xname(sc->sc_dev),
13394 				(status & STATUS_FD) ? "FDX" : "HDX"));
13395 
13396 			/*
13397 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
13398 			 * automatically, so reread it into sc->sc_ctrl.
13399 			 */
13400 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13401 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13402 			sc->sc_fcrtl &= ~FCRTL_XONE;
13403 			if (status & STATUS_FD)
13404 				sc->sc_tctl |=
13405 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13406 			else
13407 				sc->sc_tctl |=
13408 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13409 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13410 				sc->sc_fcrtl |= FCRTL_XONE;
13411 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13412 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13413 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13414 			sc->sc_tbi_linkup = 1;
13415 		} else {
13416 			if (i == WM_LINKUP_TIMEOUT)
13417 				wm_check_for_link(sc);
13418 			/* Link is down. */
13419 			DPRINTF(sc, WM_DEBUG_LINK,
13420 			    ("%s: LINK: set media -> link down\n",
13421 				device_xname(sc->sc_dev)));
13422 			sc->sc_tbi_linkup = 0;
13423 		}
13424 	} else {
13425 		DPRINTF(sc, WM_DEBUG_LINK,
13426 		    ("%s: LINK: set media -> no signal\n",
13427 			device_xname(sc->sc_dev)));
13428 		sc->sc_tbi_linkup = 0;
13429 	}
13430 
13431 	wm_tbi_serdes_set_linkled(sc);
13432 
13433 	return 0;
13434 }
13435 
13436 /*
13437  * wm_tbi_mediastatus:	[ifmedia interface function]
13438  *
13439  *	Get the current interface media status on a 1000BASE-X device.
13440  */
13441 static void
13442 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13443 {
13444 	struct wm_softc *sc = ifp->if_softc;
13445 	uint32_t ctrl, status;
13446 
13447 	ifmr->ifm_status = IFM_AVALID;
13448 	ifmr->ifm_active = IFM_ETHER;
13449 
13450 	status = CSR_READ(sc, WMREG_STATUS);
13451 	if ((status & STATUS_LU) == 0) {
13452 		ifmr->ifm_active |= IFM_NONE;
13453 		return;
13454 	}
13455 
13456 	ifmr->ifm_status |= IFM_ACTIVE;
13457 	/* Only 82545 is LX */
13458 	if (sc->sc_type == WM_T_82545)
13459 		ifmr->ifm_active |= IFM_1000_LX;
13460 	else
13461 		ifmr->ifm_active |= IFM_1000_SX;
13462 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13463 		ifmr->ifm_active |= IFM_FDX;
13464 	else
13465 		ifmr->ifm_active |= IFM_HDX;
13466 	ctrl = CSR_READ(sc, WMREG_CTRL);
13467 	if (ctrl & CTRL_RFCE)
13468 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13469 	if (ctrl & CTRL_TFCE)
13470 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13471 }
13472 
13473 /* XXX TBI only */
13474 static int
13475 wm_check_for_link(struct wm_softc *sc)
13476 {
13477 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13478 	uint32_t rxcw;
13479 	uint32_t ctrl;
13480 	uint32_t status;
13481 	bool signal;
13482 
13483 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13484 		device_xname(sc->sc_dev), __func__));
13485 
13486 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13487 		/* XXX need some work for >= 82571 */
13488 		if (sc->sc_type >= WM_T_82571) {
13489 			sc->sc_tbi_linkup = 1;
13490 			return 0;
13491 		}
13492 	}
13493 
13494 	rxcw = CSR_READ(sc, WMREG_RXCW);
13495 	ctrl = CSR_READ(sc, WMREG_CTRL);
13496 	status = CSR_READ(sc, WMREG_STATUS);
13497 	signal = wm_tbi_havesignal(sc, ctrl);
13498 
13499 	DPRINTF(sc, WM_DEBUG_LINK,
13500 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13501 		device_xname(sc->sc_dev), __func__, signal,
13502 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13503 
13504 	/*
13505 	 * SWDPIN   LU RXCW
13506 	 *	0    0	  0
13507 	 *	0    0	  1	(should not happen)
13508 	 *	0    1	  0	(should not happen)
13509 	 *	0    1	  1	(should not happen)
13510 	 *	1    0	  0	Disable autonego and force linkup
13511 	 *	1    0	  1	got /C/ but not linkup yet
13512 	 *	1    1	  0	(linkup)
13513 	 *	1    1	  1	If IFM_AUTO, back to autonego
13514 	 *
13515 	 */
13516 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13517 		DPRINTF(sc, WM_DEBUG_LINK,
13518 		    ("%s: %s: force linkup and fullduplex\n",
13519 			device_xname(sc->sc_dev), __func__));
13520 		sc->sc_tbi_linkup = 0;
13521 		/* Disable auto-negotiation in the TXCW register */
13522 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13523 
13524 		/*
13525 		 * Force link-up and also force full-duplex.
13526 		 *
13527 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
13528 		 * automatically, so we should update sc->sc_ctrl.
13529 		 */
13530 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13531 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13532 	} else if (((status & STATUS_LU) != 0)
13533 	    && ((rxcw & RXCW_C) != 0)
13534 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13535 		sc->sc_tbi_linkup = 1;
13536 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13537 			device_xname(sc->sc_dev), __func__));
13538 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13539 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13540 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
13541 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
13542 			device_xname(sc->sc_dev), __func__));
13543 	} else {
13544 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13545 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13546 			status));
13547 	}
13548 
13549 	return 0;
13550 }
13551 
13552 /*
13553  * wm_tbi_tick:
13554  *
13555  *	Check the link on TBI devices.
13556  *	This function acts as mii_tick().
13557  */
13558 static void
13559 wm_tbi_tick(struct wm_softc *sc)
13560 {
13561 	struct mii_data *mii = &sc->sc_mii;
13562 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13563 	uint32_t status;
13564 
13565 	KASSERT(mutex_owned(sc->sc_core_lock));
13566 
13567 	status = CSR_READ(sc, WMREG_STATUS);
13568 
13569 	/* XXX is this needed? */
13570 	(void)CSR_READ(sc, WMREG_RXCW);
13571 	(void)CSR_READ(sc, WMREG_CTRL);
13572 
13573 	/* set link status */
13574 	if ((status & STATUS_LU) == 0) {
13575 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13576 			device_xname(sc->sc_dev)));
13577 		sc->sc_tbi_linkup = 0;
13578 	} else if (sc->sc_tbi_linkup == 0) {
13579 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13580 			device_xname(sc->sc_dev),
13581 			(status & STATUS_FD) ? "FDX" : "HDX"));
13582 		sc->sc_tbi_linkup = 1;
13583 		sc->sc_tbi_serdes_ticks = 0;
13584 	}
13585 
13586 	if ((sc->sc_if_flags & IFF_UP) == 0)
13587 		goto setled;
13588 
13589 	if ((status & STATUS_LU) == 0) {
13590 		sc->sc_tbi_linkup = 0;
13591 		/* If the timer expired, retry autonegotiation */
13592 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13593 		    && (++sc->sc_tbi_serdes_ticks
13594 			>= sc->sc_tbi_serdes_anegticks)) {
13595 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13596 				device_xname(sc->sc_dev), __func__));
13597 			sc->sc_tbi_serdes_ticks = 0;
13598 			/*
13599 			 * Reset the link, and let autonegotiation do
13600 			 * its thing
13601 			 */
13602 			sc->sc_ctrl |= CTRL_LRST;
13603 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13604 			CSR_WRITE_FLUSH(sc);
13605 			delay(1000);
13606 			sc->sc_ctrl &= ~CTRL_LRST;
13607 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13608 			CSR_WRITE_FLUSH(sc);
13609 			delay(1000);
13610 			CSR_WRITE(sc, WMREG_TXCW,
13611 			    sc->sc_txcw & ~TXCW_ANE);
13612 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13613 		}
13614 	}
13615 
13616 setled:
13617 	wm_tbi_serdes_set_linkled(sc);
13618 }
13619 
13620 /* SERDES related */
13621 static void
13622 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13623 {
13624 	uint32_t reg;
13625 
13626 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13627 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13628 		return;
13629 
13630 	/* Enable PCS to turn on link */
13631 	reg = CSR_READ(sc, WMREG_PCS_CFG);
13632 	reg |= PCS_CFG_PCS_EN;
13633 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13634 
13635 	/* Power up the laser */
13636 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13637 	reg &= ~CTRL_EXT_SWDPIN(3);
13638 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13639 
13640 	/* Flush the write to verify completion */
13641 	CSR_WRITE_FLUSH(sc);
13642 	delay(1000);
13643 }
13644 
13645 static int
13646 wm_serdes_mediachange(struct ifnet *ifp)
13647 {
13648 	struct wm_softc *sc = ifp->if_softc;
13649 	bool pcs_autoneg = true; /* XXX */
13650 	uint32_t ctrl_ext, pcs_lctl, reg;
13651 
13652 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13653 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13654 		return 0;
13655 
13656 	/* XXX Currently, this function is not called on 8257[12] */
13657 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13658 	    || (sc->sc_type >= WM_T_82575))
13659 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13660 
13661 	/* Power on the sfp cage if present */
13662 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13663 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13664 	ctrl_ext |= CTRL_EXT_I2C_ENA;
13665 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13666 
13667 	sc->sc_ctrl |= CTRL_SLU;
13668 
13669 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13670 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13671 
13672 		reg = CSR_READ(sc, WMREG_CONNSW);
13673 		reg |= CONNSW_ENRGSRC;
13674 		CSR_WRITE(sc, WMREG_CONNSW, reg);
13675 	}
13676 
13677 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13678 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13679 	case CTRL_EXT_LINK_MODE_SGMII:
13680 		/* SGMII mode lets the phy handle forcing speed/duplex */
13681 		pcs_autoneg = true;
13682 		/* Autoneg timeout should be disabled for SGMII mode */
13683 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13684 		break;
13685 	case CTRL_EXT_LINK_MODE_1000KX:
13686 		pcs_autoneg = false;
13687 		/* FALLTHROUGH */
13688 	default:
13689 		if ((sc->sc_type == WM_T_82575)
13690 		    || (sc->sc_type == WM_T_82576)) {
13691 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13692 				pcs_autoneg = false;
13693 		}
13694 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13695 		    | CTRL_FRCFDX;
13696 
13697 		/* Set speed of 1000/Full if speed/duplex is forced */
13698 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13699 	}
13700 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13701 
13702 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13703 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13704 
13705 	if (pcs_autoneg) {
13706 		/* Set PCS register for autoneg */
13707 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13708 
13709 		/* Disable force flow control for autoneg */
13710 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13711 
13712 		/* Configure flow control advertisement for autoneg */
13713 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
13714 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13715 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13716 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13717 	} else
13718 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13719 
13720 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13721 
13722 	return 0;
13723 }
13724 
13725 static void
13726 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13727 {
13728 	struct wm_softc *sc = ifp->if_softc;
13729 	struct mii_data *mii = &sc->sc_mii;
13730 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13731 	uint32_t pcs_adv, pcs_lpab, reg;
13732 
13733 	ifmr->ifm_status = IFM_AVALID;
13734 	ifmr->ifm_active = IFM_ETHER;
13735 
13736 	/* Check PCS */
13737 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
13738 	if ((reg & PCS_LSTS_LINKOK) == 0) {
13739 		ifmr->ifm_active |= IFM_NONE;
13740 		sc->sc_tbi_linkup = 0;
13741 		goto setled;
13742 	}
13743 
13744 	sc->sc_tbi_linkup = 1;
13745 	ifmr->ifm_status |= IFM_ACTIVE;
13746 	if (sc->sc_type == WM_T_I354) {
13747 		uint32_t status;
13748 
13749 		status = CSR_READ(sc, WMREG_STATUS);
13750 		if (((status & STATUS_2P5_SKU) != 0)
13751 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13752 			ifmr->ifm_active |= IFM_2500_KX;
13753 		} else
13754 			ifmr->ifm_active |= IFM_1000_KX;
13755 	} else {
13756 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13757 		case PCS_LSTS_SPEED_10:
13758 			ifmr->ifm_active |= IFM_10_T; /* XXX */
13759 			break;
13760 		case PCS_LSTS_SPEED_100:
13761 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
13762 			break;
13763 		case PCS_LSTS_SPEED_1000:
13764 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13765 			break;
13766 		default:
13767 			device_printf(sc->sc_dev, "Unknown speed\n");
13768 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13769 			break;
13770 		}
13771 	}
13772 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13773 	if ((reg & PCS_LSTS_FDX) != 0)
13774 		ifmr->ifm_active |= IFM_FDX;
13775 	else
13776 		ifmr->ifm_active |= IFM_HDX;
13777 	mii->mii_media_active &= ~IFM_ETH_FMASK;
13778 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13779 		/* Check flow */
13780 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
13781 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
13782 			DPRINTF(sc, WM_DEBUG_LINK,
13783 			    ("XXX LINKOK but not ACOMP\n"));
13784 			goto setled;
13785 		}
13786 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13787 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13788 		DPRINTF(sc, WM_DEBUG_LINK,
13789 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13790 		if ((pcs_adv & TXCW_SYM_PAUSE)
13791 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
13792 			mii->mii_media_active |= IFM_FLOW
13793 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13794 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13795 		    && (pcs_adv & TXCW_ASYM_PAUSE)
13796 		    && (pcs_lpab & TXCW_SYM_PAUSE)
13797 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13798 			mii->mii_media_active |= IFM_FLOW
13799 			    | IFM_ETH_TXPAUSE;
13800 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
13801 		    && (pcs_adv & TXCW_ASYM_PAUSE)
13802 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13803 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13804 			mii->mii_media_active |= IFM_FLOW
13805 			    | IFM_ETH_RXPAUSE;
13806 		}
13807 	}
13808 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13809 	    | (mii->mii_media_active & IFM_ETH_FMASK);
13810 setled:
13811 	wm_tbi_serdes_set_linkled(sc);
13812 }
13813 
13814 /*
13815  * wm_serdes_tick:
13816  *
13817  *	Check the link on serdes devices.
13818  */
13819 static void
13820 wm_serdes_tick(struct wm_softc *sc)
13821 {
13822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13823 	struct mii_data *mii = &sc->sc_mii;
13824 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13825 	uint32_t reg;
13826 
13827 	KASSERT(mutex_owned(sc->sc_core_lock));
13828 
13829 	mii->mii_media_status = IFM_AVALID;
13830 	mii->mii_media_active = IFM_ETHER;
13831 
13832 	/* Check PCS */
13833 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
13834 	if ((reg & PCS_LSTS_LINKOK) != 0) {
13835 		mii->mii_media_status |= IFM_ACTIVE;
13836 		sc->sc_tbi_linkup = 1;
13837 		sc->sc_tbi_serdes_ticks = 0;
13838 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
13839 		if ((reg & PCS_LSTS_FDX) != 0)
13840 			mii->mii_media_active |= IFM_FDX;
13841 		else
13842 			mii->mii_media_active |= IFM_HDX;
13843 	} else {
13844 		mii->mii_media_status |= IFM_NONE;
13845 		sc->sc_tbi_linkup = 0;
13846 		/* If the timer expired, retry autonegotiation */
13847 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13848 		    && (++sc->sc_tbi_serdes_ticks
13849 			>= sc->sc_tbi_serdes_anegticks)) {
13850 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13851 				device_xname(sc->sc_dev), __func__));
13852 			sc->sc_tbi_serdes_ticks = 0;
13853 			/* XXX */
13854 			wm_serdes_mediachange(ifp);
13855 		}
13856 	}
13857 
13858 	wm_tbi_serdes_set_linkled(sc);
13859 }
13860 
13861 /* SFP related */
13862 
13863 static int
13864 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13865 {
13866 	uint32_t i2ccmd;
13867 	int i;
13868 
13869 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13870 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13871 
13872 	/* Poll the ready bit */
13873 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13874 		delay(50);
13875 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13876 		if (i2ccmd & I2CCMD_READY)
13877 			break;
13878 	}
13879 	if ((i2ccmd & I2CCMD_READY) == 0)
13880 		return -1;
13881 	if ((i2ccmd & I2CCMD_ERROR) != 0)
13882 		return -1;
13883 
13884 	*data = i2ccmd & 0x00ff;
13885 
13886 	return 0;
13887 }
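
/*
 * Illustrative I2CCMD read transaction (as performed above): the SFP
 * byte offset goes into the register-address field, I2CCMD_OPCODE_READ
 * starts the transfer, and the hardware sets I2CCMD_READY (or
 * I2CCMD_ERROR) when done, with the byte in the low 8 bits of the
 * register.
 */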
13888 
13889 static uint32_t
13890 wm_sfp_get_media_type(struct wm_softc *sc)
13891 {
13892 	uint32_t ctrl_ext;
13893 	uint8_t val = 0;
13894 	int timeout = 3;
13895 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13896 	int rv = -1;
13897 
13898 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13899 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13900 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13901 	CSR_WRITE_FLUSH(sc);
13902 
13903 	/* Read SFP module data */
13904 	while (timeout) {
13905 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13906 		if (rv == 0)
13907 			break;
13908 		delay(100*1000); /* XXX too big */
13909 		timeout--;
13910 	}
13911 	if (rv != 0)
13912 		goto out;
13913 
13914 	switch (val) {
13915 	case SFF_SFP_ID_SFF:
13916 		aprint_normal_dev(sc->sc_dev,
13917 		    "Module/Connector soldered to board\n");
13918 		break;
13919 	case SFF_SFP_ID_SFP:
13920 		sc->sc_flags |= WM_F_SFP;
13921 		break;
13922 	case SFF_SFP_ID_UNKNOWN:
13923 		goto out;
13924 	default:
13925 		break;
13926 	}
13927 
13928 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13929 	if (rv != 0)
13930 		goto out;
13931 
13932 	sc->sc_sfptype = val;
13933 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13934 		mediatype = WM_MEDIATYPE_SERDES;
13935 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13936 		sc->sc_flags |= WM_F_SGMII;
13937 		mediatype = WM_MEDIATYPE_COPPER;
13938 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13939 		sc->sc_flags |= WM_F_SGMII;
13940 		mediatype = WM_MEDIATYPE_SERDES;
13941 	} else {
13942 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13943 		    __func__, sc->sc_sfptype);
13944 		sc->sc_sfptype = 0; /* XXX unknown */
13945 	}
13946 
13947 out:
13948 	/* Restore I2C interface setting */
13949 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13950 
13951 	return mediatype;
13952 }
13953 
13954 /*
13955  * NVM related.
13956  * Microwire, SPI (w/wo EERD) and Flash.
13957  */
13958 
13959 /* Both spi and uwire */
13960 
13961 /*
13962  * wm_eeprom_sendbits:
13963  *
13964  *	Send a series of bits to the EEPROM.
13965  */
13966 static void
13967 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13968 {
13969 	uint32_t reg;
13970 	int x;
13971 
13972 	reg = CSR_READ(sc, WMREG_EECD);
13973 
13974 	for (x = nbits; x > 0; x--) {
13975 		if (bits & (1U << (x - 1)))
13976 			reg |= EECD_DI;
13977 		else
13978 			reg &= ~EECD_DI;
13979 		CSR_WRITE(sc, WMREG_EECD, reg);
13980 		CSR_WRITE_FLUSH(sc);
13981 		delay(2);
13982 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13983 		CSR_WRITE_FLUSH(sc);
13984 		delay(2);
13985 		CSR_WRITE(sc, WMREG_EECD, reg);
13986 		CSR_WRITE_FLUSH(sc);
13987 		delay(2);
13988 	}
13989 }
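
/*
 * Example (assuming UWIRE_OPC_READ is the standard Microwire READ
 * opcode 0x6): wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) shifts out
 * the bits 1, 1, 0 MSB first, pulsing EECD_SK once per bit with
 * EECD_DI set to that bit's value.
 */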
13990 
13991 /*
13992  * wm_eeprom_recvbits:
13993  *
13994  *	Receive a series of bits from the EEPROM.
13995  */
13996 static void
13997 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13998 {
13999 	uint32_t reg, val;
14000 	int x;
14001 
14002 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
14003 
14004 	val = 0;
14005 	for (x = nbits; x > 0; x--) {
14006 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14007 		CSR_WRITE_FLUSH(sc);
14008 		delay(2);
14009 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
14010 			val |= (1U << (x - 1));
14011 		CSR_WRITE(sc, WMREG_EECD, reg);
14012 		CSR_WRITE_FLUSH(sc);
14013 		delay(2);
14014 	}
14015 	*valp = val;
14016 }
14017 
14018 /* Microwire */
14019 
14020 /*
14021  * wm_nvm_read_uwire:
14022  *
14023  *	Read a word from the EEPROM using the MicroWire protocol.
14024  */
14025 static int
14026 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14027 {
14028 	uint32_t reg, val;
14029 	int i, rv;
14030 
14031 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14032 		device_xname(sc->sc_dev), __func__));
14033 
14034 	rv = sc->nvm.acquire(sc);
14035 	if (rv != 0)
14036 		return rv;
14037 
14038 	for (i = 0; i < wordcnt; i++) {
14039 		/* Clear SK and DI. */
14040 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
14041 		CSR_WRITE(sc, WMREG_EECD, reg);
14042 
14043 		/*
14044 		 * XXX: workaround for a bug in qemu-0.12.x and prior
14045 		 * and Xen.
14046 		 *
14047 		 * We use this workaround only for 82540 because qemu's
14048 		 * e1000 acts as an 82540.
14049 		 */
14050 		if (sc->sc_type == WM_T_82540) {
14051 			reg |= EECD_SK;
14052 			CSR_WRITE(sc, WMREG_EECD, reg);
14053 			reg &= ~EECD_SK;
14054 			CSR_WRITE(sc, WMREG_EECD, reg);
14055 			CSR_WRITE_FLUSH(sc);
14056 			delay(2);
14057 		}
14058 		/* XXX: end of workaround */
14059 
14060 		/* Set CHIP SELECT. */
14061 		reg |= EECD_CS;
14062 		CSR_WRITE(sc, WMREG_EECD, reg);
14063 		CSR_WRITE_FLUSH(sc);
14064 		delay(2);
14065 
14066 		/* Shift in the READ command. */
14067 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
14068 
14069 		/* Shift in address. */
14070 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
14071 
14072 		/* Shift out the data. */
14073 		wm_eeprom_recvbits(sc, &val, 16);
14074 		data[i] = val & 0xffff;
14075 
14076 		/* Clear CHIP SELECT. */
14077 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
14078 		CSR_WRITE(sc, WMREG_EECD, reg);
14079 		CSR_WRITE_FLUSH(sc);
14080 		delay(2);
14081 	}
14082 
14083 	sc->nvm.release(sc);
14084 	return 0;
14085 }
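
#if 0
/*
 * Usage sketch (illustrative only, not part of the driver): read the
 * three words of the primary MAC address through the Microwire path.
 * Callers normally go through wm_nvm_read(), which dispatches to the
 * right backend via sc->nvm.read.
 */
static void
wm_nvm_uwire_example(struct wm_softc *sc)
{
	uint16_t myea[3];

	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0)
		aprint_verbose_dev(sc->sc_dev,
		    "MAC words: %04x %04x %04x\n",
		    myea[0], myea[1], myea[2]);
}
#endif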
14086 
14087 /* SPI */
14088 
14089 /*
14090  * Set SPI and FLASH related information from the EECD register.
14091  * For 82541 and 82547, the word size is taken from EEPROM.
14092  */
14093 static int
14094 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
14095 {
14096 	int size;
14097 	uint32_t reg;
14098 	uint16_t data;
14099 
14100 	reg = CSR_READ(sc, WMREG_EECD);
14101 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
14102 
14103 	/* Read the size of NVM from EECD by default */
14104 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14105 	switch (sc->sc_type) {
14106 	case WM_T_82541:
14107 	case WM_T_82541_2:
14108 	case WM_T_82547:
14109 	case WM_T_82547_2:
14110 		/* Set dummy value to access EEPROM */
14111 		sc->sc_nvm_wordsize = 64;
14112 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
14113 			aprint_error_dev(sc->sc_dev,
14114 			    "%s: failed to read EEPROM size\n", __func__);
14115 		}
14116 		reg = data;
14117 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14118 		if (size == 0)
14119 			size = 6; /* 64 word size */
14120 		else
14121 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
14122 		break;
14123 	case WM_T_80003:
14124 	case WM_T_82571:
14125 	case WM_T_82572:
14126 	case WM_T_82573: /* SPI case */
14127 	case WM_T_82574: /* SPI case */
14128 	case WM_T_82583: /* SPI case */
14129 		size += NVM_WORD_SIZE_BASE_SHIFT;
14130 		if (size > 14)
14131 			size = 14;
14132 		break;
14133 	case WM_T_82575:
14134 	case WM_T_82576:
14135 	case WM_T_82580:
14136 	case WM_T_I350:
14137 	case WM_T_I354:
14138 	case WM_T_I210:
14139 	case WM_T_I211:
14140 		size += NVM_WORD_SIZE_BASE_SHIFT;
14141 		if (size > 15)
14142 			size = 15;
14143 		break;
14144 	default:
14145 		aprint_error_dev(sc->sc_dev,
14146 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
14147 		return -1;
14148 		break;
14149 	}
14150 
14151 	sc->sc_nvm_wordsize = 1 << size;
14152 
14153 	return 0;
14154 }
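
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in other
 * e1000-derived drivers): an EECD size field of 2 on an 82571 gives
 * size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 */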
14155 
14156 /*
14157  * wm_nvm_ready_spi:
14158  *
14159  *	Wait for a SPI EEPROM to be ready for commands.
14160  */
14161 static int
14162 wm_nvm_ready_spi(struct wm_softc *sc)
14163 {
14164 	uint32_t val;
14165 	int usec;
14166 
14167 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14168 		device_xname(sc->sc_dev), __func__));
14169 
14170 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
14171 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
14172 		wm_eeprom_recvbits(sc, &val, 8);
14173 		if ((val & SPI_SR_RDY) == 0)
14174 			break;
14175 	}
14176 	if (usec >= SPI_MAX_RETRIES) {
14177 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
14178 		return -1;
14179 	}
14180 	return 0;
14181 }
14182 
14183 /*
14184  * wm_nvm_read_spi:
14185  *
14186  *	Read a word from the EEPROM using the SPI protocol.
14187  */
14188 static int
14189 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14190 {
14191 	uint32_t reg, val;
14192 	int i;
14193 	uint8_t opc;
14194 	int rv;
14195 
14196 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14197 		device_xname(sc->sc_dev), __func__));
14198 
14199 	rv = sc->nvm.acquire(sc);
14200 	if (rv != 0)
14201 		return rv;
14202 
14203 	/* Clear SK and CS. */
14204 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
14205 	CSR_WRITE(sc, WMREG_EECD, reg);
14206 	CSR_WRITE_FLUSH(sc);
14207 	delay(2);
14208 
14209 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
14210 		goto out;
14211 
14212 	/* Toggle CS to flush commands. */
14213 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
14214 	CSR_WRITE_FLUSH(sc);
14215 	delay(2);
14216 	CSR_WRITE(sc, WMREG_EECD, reg);
14217 	CSR_WRITE_FLUSH(sc);
14218 	delay(2);
14219 
14220 	opc = SPI_OPC_READ;
14221 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
14222 		opc |= SPI_OPC_A8;
14223 
14224 	wm_eeprom_sendbits(sc, opc, 8);
14225 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
14226 
14227 	for (i = 0; i < wordcnt; i++) {
14228 		wm_eeprom_recvbits(sc, &val, 16);
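		/*
		 * The EEPROM returns the low byte of each word first, but
		 * wm_eeprom_recvbits() packs the bits MSB first, so swap
		 * the two bytes into host order.
		 */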
14229 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
14230 	}
14231 
14232 	/* Raise CS and clear SK. */
14233 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
14234 	CSR_WRITE(sc, WMREG_EECD, reg);
14235 	CSR_WRITE_FLUSH(sc);
14236 	delay(2);
14237 
14238 out:
14239 	sc->nvm.release(sc);
14240 	return rv;
14241 }
14242 
14243 /* Reading with EERD */
14244 
14245 static int
14246 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
14247 {
14248 	uint32_t attempts = 100000;
14249 	uint32_t i, reg = 0;
14250 	int32_t done = -1;
14251 
14252 	for (i = 0; i < attempts; i++) {
14253 		reg = CSR_READ(sc, rw);
14254 
14255 		if (reg & EERD_DONE) {
14256 			done = 0;
14257 			break;
14258 		}
14259 		delay(5);
14260 	}
14261 
14262 	return done;
14263 }
14264 
14265 static int
14266 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
14267 {
14268 	int i, eerd = 0;
14269 	int rv;
14270 
14271 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14272 		device_xname(sc->sc_dev), __func__));
14273 
14274 	rv = sc->nvm.acquire(sc);
14275 	if (rv != 0)
14276 		return rv;
14277 
14278 	for (i = 0; i < wordcnt; i++) {
14279 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
14280 		CSR_WRITE(sc, WMREG_EERD, eerd);
14281 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
14282 		if (rv != 0) {
14283 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
14284 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
14285 			break;
14286 		}
14287 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
14288 	}
14289 
14290 	sc->nvm.release(sc);
14291 	return rv;
14292 }
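
/*
 * Illustrative EERD transaction (field layout per the EERD_* register
 * definitions): writing (word << EERD_ADDR_SHIFT) | EERD_START kicks
 * off the read, wm_poll_eerd_eewr_done() waits for EERD_DONE, and the
 * 16-bit result is then taken from the bits above EERD_DATA_SHIFT.
 */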
14293 
14294 /* Flash */
14295 
14296 static int
14297 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
14298 {
14299 	uint32_t eecd;
14300 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
14301 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
14302 	uint32_t nvm_dword = 0;
14303 	uint8_t sig_byte = 0;
14304 	int rv;
14305 
14306 	switch (sc->sc_type) {
14307 	case WM_T_PCH_SPT:
14308 	case WM_T_PCH_CNP:
14309 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
14310 		act_offset = ICH_NVM_SIG_WORD * 2;
14311 
14312 		/* Set bank to 0 in case flash read fails. */
14313 		*bank = 0;
14314 
14315 		/* Check bank 0 */
14316 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
14317 		if (rv != 0)
14318 			return rv;
14319 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14320 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14321 			*bank = 0;
14322 			return 0;
14323 		}
14324 
14325 		/* Check bank 1 */
14326 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
14327 		    &nvm_dword);
		if (rv != 0)
			return rv;
14328 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14329 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14330 			*bank = 1;
14331 			return 0;
14332 		}
14333 		aprint_error_dev(sc->sc_dev,
14334 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
14335 		return -1;
14336 	case WM_T_ICH8:
14337 	case WM_T_ICH9:
14338 		eecd = CSR_READ(sc, WMREG_EECD);
14339 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
14340 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
14341 			return 0;
14342 		}
14343 		/* FALLTHROUGH */
14344 	default:
14345 		/* Default to 0 */
14346 		*bank = 0;
14347 
14348 		/* Check bank 0 */
14349 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
14350 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14351 			*bank = 0;
14352 			return 0;
14353 		}
14354 
14355 		/* Check bank 1 */
14356 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
14357 		    &sig_byte);
14358 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14359 			*bank = 1;
14360 			return 0;
14361 		}
14362 	}
14363 
14364 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
14365 		device_xname(sc->sc_dev)));
14366 	return -1;
14367 }
14368 
14369 /******************************************************************************
14370  * This function does initial flash setup so that a new read/write/erase cycle
14371  * can be started.
14372  *
14373  * sc - The pointer to the hw structure
14374  ****************************************************************************/
14375 static int32_t
14376 wm_ich8_cycle_init(struct wm_softc *sc)
14377 {
14378 	uint16_t hsfsts;
14379 	int32_t error = 1;
14380 	int32_t i     = 0;
14381 
14382 	if (sc->sc_type >= WM_T_PCH_SPT)
14383 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14384 	else
14385 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14386 
14387 	/* Maybe check the Flash Descriptor Valid bit in HW status */
14388 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
14389 		return error;
14390 
14391 	/* Clear FCERR in HW status by writing a 1 */
14392 	/* Clear DAEL in HW status by writing a 1 */
14393 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14394 
14395 	if (sc->sc_type >= WM_T_PCH_SPT)
14396 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14397 	else
14398 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14399 
14400 	/*
14401 	 * Either we should have a hardware SPI cycle-in-progress bit to
14402 	 * check against in order to start a new cycle, or the FDONE bit
14403 	 * should be changed in the hardware so that it is 1 after a
14404 	 * hardware reset, which could then be used to tell whether a
14405 	 * cycle is in progress or has been completed.  We should also
14406 	 * have some software semaphore mechanism to guard FDONE or the
14407 	 * cycle-in-progress bit so that access by two threads is
14408 	 * serialized and two threads can't start a cycle at the same
14409 	 * time.
 	 */
14410 
14411 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14412 		/*
14413 		 * There is no cycle running at present, so we can start a
14414 		 * cycle
14415 		 */
14416 
14417 		/* Begin by setting Flash Cycle Done. */
14418 		hsfsts |= HSFSTS_DONE;
14419 		if (sc->sc_type >= WM_T_PCH_SPT)
14420 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14421 			    hsfsts & 0xffffUL);
14422 		else
14423 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14424 		error = 0;
14425 	} else {
14426 		/*
14427 		 * Otherwise poll for some time so the current cycle has a
14428 		 * chance to end before giving up.
14429 		 */
14430 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14431 			if (sc->sc_type >= WM_T_PCH_SPT)
14432 				hsfsts = ICH8_FLASH_READ32(sc,
14433 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14434 			else
14435 				hsfsts = ICH8_FLASH_READ16(sc,
14436 				    ICH_FLASH_HSFSTS);
14437 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14438 				error = 0;
14439 				break;
14440 			}
14441 			delay(1);
14442 		}
14443 		if (error == 0) {
14444 			/*
14445 			 * Successfully waited for the previous cycle to end;
14446 			 * now set the Flash Cycle Done bit.
14447 			 */
14448 			hsfsts |= HSFSTS_DONE;
14449 			if (sc->sc_type >= WM_T_PCH_SPT)
14450 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14451 				    hsfsts & 0xffffUL);
14452 			else
14453 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14454 				    hsfsts);
14455 		}
14456 	}
14457 	return error;
14458 }
14459 
14460 /******************************************************************************
14461  * This function starts a flash cycle and waits for its completion
14462  *
14463  * sc - The pointer to the hw structure
14464  ****************************************************************************/
14465 static int32_t
14466 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14467 {
14468 	uint16_t hsflctl;
14469 	uint16_t hsfsts;
14470 	int32_t error = 1;
14471 	uint32_t i = 0;
14472 
14473 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14474 	if (sc->sc_type >= WM_T_PCH_SPT)
14475 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14476 	else
14477 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14478 	hsflctl |= HSFCTL_GO;
14479 	if (sc->sc_type >= WM_T_PCH_SPT)
14480 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14481 		    (uint32_t)hsflctl << 16);
14482 	else
14483 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14484 
14485 	/* Wait till FDONE bit is set to 1 */
14486 	do {
14487 		if (sc->sc_type >= WM_T_PCH_SPT)
14488 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14489 			    & 0xffffUL;
14490 		else
14491 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14492 		if (hsfsts & HSFSTS_DONE)
14493 			break;
14494 		delay(1);
14495 		i++;
14496 	} while (i < timeout);
14497 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
14498 		error = 0;
14499 
14500 	return error;
14501 }
14502 
14503 /******************************************************************************
14504  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14505  *
14506  * sc - The pointer to the hw structure
14507  * index - The index of the byte or word to read.
14508  * size - Size of data to read, 1=byte 2=word, 4=dword
14509  * data - Pointer to the word to store the value read.
14510  *****************************************************************************/
14511 static int32_t
14512 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14513     uint32_t size, uint32_t *data)
14514 {
14515 	uint16_t hsfsts;
14516 	uint16_t hsflctl;
14517 	uint32_t flash_linear_address;
14518 	uint32_t flash_data = 0;
14519 	int32_t error = 1;
14520 	int32_t count = 0;
14521 
14522 	if (size < 1 || size > 4 || data == NULL ||
14523 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
14524 		return error;
14525 
14526 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14527 	    sc->sc_ich8_flash_base;
14528 
14529 	do {
14530 		delay(1);
14531 		/* Steps */
14532 		error = wm_ich8_cycle_init(sc);
14533 		if (error)
14534 			break;
14535 
14536 		if (sc->sc_type >= WM_T_PCH_SPT)
14537 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14538 			    >> 16;
14539 		else
14540 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14541 		/* Byte count field is size - 1 (0 = 1 byte, 3 = 4 bytes) */
14542 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14543 		    & HSFCTL_BCOUNT_MASK;
14544 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14545 		if (sc->sc_type >= WM_T_PCH_SPT) {
14546 			/*
14547 			 * In SPT, this register is in LAN memory space, not
14548 			 * flash. Therefore, only 32 bit access is supported.
14549 			 */
14550 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14551 			    (uint32_t)hsflctl << 16);
14552 		} else
14553 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14554 
14555 		/*
14556 		 * Write the last 24 bits of index into Flash Linear address
14557 		 * field in Flash Address
14558 		 */
14559 		/* TODO: TBD maybe check the index against the size of flash */
14560 
14561 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14562 
14563 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14564 
14565 		/*
14566 		 * If FCERR is set, clear it and retry the whole sequence
14567 		 * a few more times; otherwise read the result out of
14568 		 * Flash Data0, least significant byte first.
14569 		 */
14571 		if (error == 0) {
14572 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14573 			if (size == 1)
14574 				*data = (uint8_t)(flash_data & 0x000000FF);
14575 			else if (size == 2)
14576 				*data = (uint16_t)(flash_data & 0x0000FFFF);
14577 			else if (size == 4)
14578 				*data = (uint32_t)flash_data;
14579 			break;
14580 		} else {
14581 			/*
14582 			 * If we've gotten here, then things are probably
14583 			 * completely hosed, but if the error condition is
14584 			 * detected, it won't hurt to give it another try...
14585 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14586 			 */
14587 			if (sc->sc_type >= WM_T_PCH_SPT)
14588 				hsfsts = ICH8_FLASH_READ32(sc,
14589 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14590 			else
14591 				hsfsts = ICH8_FLASH_READ16(sc,
14592 				    ICH_FLASH_HSFSTS);
14593 
14594 			if (hsfsts & HSFSTS_ERR) {
14595 				/* Repeat for some time before giving up. */
14596 				continue;
14597 			} else if ((hsfsts & HSFSTS_DONE) == 0)
14598 				break;
14599 		}
14600 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14601 
14602 	return error;
14603 }
14604 
14605 /******************************************************************************
14606  * Reads a single byte from the NVM using the ICH8 flash access registers.
14607  *
14608  * sc - pointer to wm_hw structure
14609  * index - The index of the byte to read.
14610  * data - Pointer to a byte to store the value read.
14611  *****************************************************************************/
14612 static int32_t
14613 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
14614 {
14615 	int32_t status;
14616 	uint32_t word = 0;
14617 
14618 	status = wm_read_ich8_data(sc, index, 1, &word);
14619 	if (status == 0)
14620 		*data = (uint8_t)word;
14621 	else
14622 		*data = 0;
14623 
14624 	return status;
14625 }
14626 
14627 /******************************************************************************
14628  * Reads a word from the NVM using the ICH8 flash access registers.
14629  *
14630  * sc - pointer to wm_hw structure
14631  * index - The starting byte index of the word to read.
14632  * data - Pointer to a word to store the value read.
14633  *****************************************************************************/
14634 static int32_t
14635 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14636 {
14637 	int32_t status;
14638 	uint32_t word = 0;
14639 
14640 	status = wm_read_ich8_data(sc, index, 2, &word);
14641 	if (status == 0)
14642 		*data = (uint16_t)word;
14643 	else
14644 		*data = 0;
14645 
14646 	return status;
14647 }
14648 
14649 /******************************************************************************
14650  * Reads a dword from the NVM using the ICH8 flash access registers.
14651  *
14652  * sc - pointer to wm_hw structure
14653  * index - The starting byte index of the word to read.
14654  * data - Pointer to a word to store the value read.
14655  *****************************************************************************/
14656 static int32_t
14657 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14658 {
14659 	int32_t status;
14660 
14661 	status = wm_read_ich8_data(sc, index, 4, data);
14662 	return status;
14663 }
14664 
14665 /******************************************************************************
14666  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14667  * register.
14668  *
14669  * sc - Struct containing variables accessed by shared code
14670  * offset - offset of word in the EEPROM to read
14671  * data - word read from the EEPROM
14672  * words - number of words to read
14673  *****************************************************************************/
14674 static int
14675 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14676 {
14677 	int rv;
14678 	uint32_t flash_bank = 0;
14679 	uint32_t act_offset = 0;
14680 	uint32_t bank_offset = 0;
14681 	uint16_t word = 0;
14682 	uint16_t i = 0;
14683 
14684 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14685 		device_xname(sc->sc_dev), __func__));
14686 
14687 	rv = sc->nvm.acquire(sc);
14688 	if (rv != 0)
14689 		return rv;
14690 
14691 	/*
14692 	 * We need to know which is the valid flash bank.  In the event
14693 	 * that we didn't allocate eeprom_shadow_ram, we may not be
14694 	 * managing flash_bank. So it cannot be trusted and needs
14695 	 * to be updated with each read.
14696 	 */
14697 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14698 	if (rv) {
14699 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14700 			device_xname(sc->sc_dev)));
14701 		flash_bank = 0;
14702 	}
14703 
14704 	/*
14705 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
14706 	 * size
14707 	 */
14708 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14709 
14710 	for (i = 0; i < words; i++) {
14711 		/* The NVM part needs a byte offset, hence * 2 */
14712 		act_offset = bank_offset + ((offset + i) * 2);
14713 		rv = wm_read_ich8_word(sc, act_offset, &word);
14714 		if (rv) {
14715 			aprint_error_dev(sc->sc_dev,
14716 			    "%s: failed to read NVM\n", __func__);
14717 			break;
14718 		}
14719 		data[i] = word;
14720 	}
14721 
14722 	sc->nvm.release(sc);
14723 	return rv;
14724 }
14725 
14726 /******************************************************************************
14727  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14728  * register.
14729  *
14730  * sc - Struct containing variables accessed by shared code
14731  * offset - offset of word in the EEPROM to read
14732  * data - word read from the EEPROM
14733  * words - number of words to read
14734  *****************************************************************************/
14735 static int
14736 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14737 {
14738 	int	 rv;
14739 	uint32_t flash_bank = 0;
14740 	uint32_t act_offset = 0;
14741 	uint32_t bank_offset = 0;
14742 	uint32_t dword = 0;
14743 	uint16_t i = 0;
14744 
14745 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14746 		device_xname(sc->sc_dev), __func__));
14747 
14748 	rv = sc->nvm.acquire(sc);
14749 	if (rv != 0)
14750 		return rv;
14751 
14752 	/*
14753 	 * We need to know which is the valid flash bank.  In the event
14754 	 * that we didn't allocate eeprom_shadow_ram, we may not be
14755 	 * managing flash_bank. So it cannot be trusted and needs
14756 	 * to be updated with each read.
14757 	 */
14758 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14759 	if (rv) {
14760 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14761 			device_xname(sc->sc_dev)));
14762 		flash_bank = 0;
14763 	}
14764 
14765 	/*
14766 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
14767 	 * size
14768 	 */
14769 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14770 
14771 	for (i = 0; i < words; i++) {
14772 		/* The NVM part needs a byte offset, hence * 2 */
14773 		act_offset = bank_offset + ((offset + i) * 2);
14774 		/* but we must read dword aligned, so mask ... */
14775 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14776 		if (rv) {
14777 			aprint_error_dev(sc->sc_dev,
14778 			    "%s: failed to read NVM\n", __func__);
14779 			break;
14780 		}
14781 		/* ... and pick out low or high word */
14782 		if ((act_offset & 0x2) == 0)
14783 			data[i] = (uint16_t)(dword & 0xFFFF);
14784 		else
14785 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14786 	}
14787 
14788 	sc->nvm.release(sc);
14789 	return rv;
14790 }
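
/*
 * Example (illustrative): word offset 3 in bank 0 gives act_offset 6;
 * the dword at byte offset 4 is fetched, and since (act_offset & 0x2)
 * is set, the high 16 bits (bytes 6-7) are returned.
 */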
14791 
14792 /* iNVM */
14793 
14794 static int
14795 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14796 {
14797 	int32_t	 rv = 0;
14798 	uint32_t invm_dword;
14799 	uint16_t i;
14800 	uint8_t record_type, word_address;
14801 
14802 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14803 		device_xname(sc->sc_dev), __func__));
14804 
14805 	for (i = 0; i < INVM_SIZE; i++) {
14806 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14807 		/* Get record type */
14808 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14809 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14810 			break;
14811 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14812 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14813 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14814 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14815 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14816 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14817 			if (word_address == address) {
14818 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14819 				rv = 0;
14820 				break;
14821 			}
14822 		}
14823 	}
14824 
14825 	return rv;
14826 }
14827 
14828 static int
14829 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14830 {
14831 	int i, rv;
14832 
14833 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14834 		device_xname(sc->sc_dev), __func__));
14835 
14836 	rv = sc->nvm.acquire(sc);
14837 	if (rv != 0)
14838 		return rv;
14839 
14840 	for (i = 0; i < words; i++) {
14841 		switch (offset + i) {
14842 		case NVM_OFF_MACADDR:
14843 		case NVM_OFF_MACADDR1:
14844 		case NVM_OFF_MACADDR2:
14845 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14846 			if (rv != 0) {
14847 				data[i] = 0xffff;
14848 				rv = -1;
14849 			}
14850 			break;
14851 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14852 			rv = wm_nvm_read_word_invm(sc, offset, data);
14853 			if (rv != 0) {
14854 				*data = INVM_DEFAULT_AL;
14855 				rv = 0;
14856 			}
14857 			break;
14858 		case NVM_OFF_CFG2:
14859 			rv = wm_nvm_read_word_invm(sc, offset, data);
14860 			if (rv != 0) {
14861 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
14862 				rv = 0;
14863 			}
14864 			break;
14865 		case NVM_OFF_CFG4:
14866 			rv = wm_nvm_read_word_invm(sc, offset, data);
14867 			if (rv != 0) {
14868 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
14869 				rv = 0;
14870 			}
14871 			break;
14872 		case NVM_OFF_LED_1_CFG:
14873 			rv = wm_nvm_read_word_invm(sc, offset, data);
14874 			if (rv != 0) {
14875 				*data = NVM_LED_1_CFG_DEFAULT_I211;
14876 				rv = 0;
14877 			}
14878 			break;
14879 		case NVM_OFF_LED_0_2_CFG:
14880 			rv = wm_nvm_read_word_invm(sc, offset, data);
14881 			if (rv != 0) {
14882 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
14883 				rv = 0;
14884 			}
14885 			break;
14886 		case NVM_OFF_ID_LED_SETTINGS:
14887 			rv = wm_nvm_read_word_invm(sc, offset, data);
14888 			if (rv != 0) {
14889 				*data = ID_LED_RESERVED_FFFF;
14890 				rv = 0;
14891 			}
14892 			break;
14893 		default:
14894 			DPRINTF(sc, WM_DEBUG_NVM,
14895 			    ("NVM word 0x%02x is not mapped.\n", offset));
14896 			*data = NVM_RESERVED_WORD;
14897 			break;
14898 		}
14899 	}
14900 
14901 	sc->nvm.release(sc);
14902 	return rv;
14903 }
14904 
14905 /* Lock, detecting NVM type, validate checksum, version and read */
14906 
14907 static int
14908 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14909 {
14910 	uint32_t eecd = 0;
14911 
14912 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14913 	    || sc->sc_type == WM_T_82583) {
14914 		eecd = CSR_READ(sc, WMREG_EECD);
14915 
14916 		/* Isolate bits 15 & 16 */
14917 		eecd = ((eecd >> 15) & 0x03);
14918 
14919 		/* If both bits are set, device is Flash type */
14920 		if (eecd == 0x03)
14921 			return 0;
14922 	}
14923 	return 1;
14924 }
14925 
14926 static int
14927 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14928 {
14929 	uint32_t eec;
14930 
14931 	eec = CSR_READ(sc, WMREG_EEC);
14932 	if ((eec & EEC_FLASH_DETECTED) != 0)
14933 		return 1;
14934 
14935 	return 0;
14936 }
14937 
14938 /*
14939  * wm_nvm_validate_checksum
14940  *
14941  * The checksum is defined as the sum of the first 64 (16 bit) words.
14942  */
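/*
 * Example (NVM_CHECKSUM is assumed to be 0xBABA, as in other
 * e1000-derived drivers): a valid image satisfies
 *	(word[0] + word[1] + ... + word[63]) & 0xffff == 0xBABA,
 * where the final word is chosen so that the sum comes out right.
 */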
14943 static int
14944 wm_nvm_validate_checksum(struct wm_softc *sc)
14945 {
14946 	uint16_t checksum;
14947 	uint16_t eeprom_data;
14948 #ifdef WM_DEBUG
14949 	uint16_t csum_wordaddr, valid_checksum;
14950 #endif
14951 	int i;
14952 
14953 	checksum = 0;
14954 
14955 	/* Don't check for I211 */
14956 	if (sc->sc_type == WM_T_I211)
14957 		return 0;
14958 
14959 #ifdef WM_DEBUG
14960 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14961 	    || (sc->sc_type == WM_T_PCH_CNP)) {
14962 		csum_wordaddr = NVM_OFF_COMPAT;
14963 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14964 	} else {
14965 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14966 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14967 	}
14968 
14969 	/* Dump EEPROM image for debug */
14970 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14971 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14972 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14973 		/* XXX PCH_SPT? */
14974 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14975 		if ((eeprom_data & valid_checksum) == 0)
14976 			DPRINTF(sc, WM_DEBUG_NVM,
14977 			    ("%s: NVM need to be updated (%04x != %04x)\n",
14978 				device_xname(sc->sc_dev), eeprom_data,
14979 				valid_checksum));
14980 	}
14981 
14982 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14983 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14984 		for (i = 0; i < NVM_SIZE; i++) {
14985 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
14986 				printf("XXXX ");
14987 			else
14988 				printf("%04hx ", eeprom_data);
14989 			if (i % 8 == 7)
14990 				printf("\n");
14991 		}
14992 	}
14993 
14994 #endif /* WM_DEBUG */
14995 
14996 	for (i = 0; i < NVM_SIZE; i++) {
14997 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
14998 			return -1;
14999 		checksum += eeprom_data;
15000 	}
15001 
15002 	if (checksum != (uint16_t) NVM_CHECKSUM) {
15003 #ifdef WM_DEBUG
15004 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
15005 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
15006 #endif
15007 	}
15008 
15009 	return 0;
15010 }
15011 
15012 static void
15013 wm_nvm_version_invm(struct wm_softc *sc)
15014 {
15015 	uint32_t dword;
15016 
15017 	/*
15018 	 * Linux's code to decode the version is very strange, so we
15019 	 * don't follow that algorithm and just use word 61 as the
15020 	 * document describes.  Perhaps it's not perfect though...
15021 	 *
15022 	 * Example:
15023 	 *
15024 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
15025 	 */
15026 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
15027 	dword = __SHIFTOUT(dword, INVM_VER_1);
15028 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
15029 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
15030 }
15031 
15032 static void
15033 wm_nvm_version(struct wm_softc *sc)
15034 {
15035 	uint16_t major, minor, build, patch;
15036 	uint16_t uid0, uid1;
15037 	uint16_t nvm_data;
15038 	uint16_t off;
15039 	bool check_version = false;
15040 	bool check_optionrom = false;
15041 	bool have_build = false;
15042 	bool have_uid = true;
15043 
15044 	/*
15045 	 * Version format:
15046 	 *
15047 	 * XYYZ
15048 	 * X0YZ
15049 	 * X0YY
15050 	 *
15051 	 * Example:
15052 	 *
15053 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
15054 	 *	82571	0x50a6	5.10.6?
15055 	 *	82572	0x506a	5.6.10?
15056 	 *	82572EI	0x5069	5.6.9?
15057 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
15058 	 *		0x2013	2.1.3?
15059 	 *	82583	0x10a0	1.10.0? (document says it's default value)
15060 	 * ICH8+82567	0x0040	0.4.0?
15061 	 * ICH9+82566	0x1040	1.4.0?
15062 	 *ICH10+82567	0x0043	0.4.3?
15063 	 *  PCH+82577	0x00c1	0.12.1?
15064 	 * PCH2+82579	0x00d3	0.13.3?
15065 	 *		0x00d4	0.13.4?
15066 	 *  LPT+I218	0x0023	0.2.3?
15067 	 *  SPT+I219	0x0084	0.8.4?
15068 	 *  CNP+I219	0x0054	0.5.4?
15069 	 */
15070 
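
	/*
	 * Worked example for the XYYZ case (mask values assumed to match
	 * the table above): nvm_data 0x50a2 yields major = 0x5,
	 * minor = 0x0a (printed as 10 after the hex-to-decimal step
	 * below) and build = 0x2, i.e. "5.10.2" as in the 82571 row.
	 */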
15071 	/*
15072 	 * XXX
15073 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
15074 	 * I've never seen real 82574 hardware with such small SPI ROM.
15075 	 */
15076 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
15077 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
15078 		have_uid = false;
15079 
15080 	switch (sc->sc_type) {
15081 	case WM_T_82571:
15082 	case WM_T_82572:
15083 	case WM_T_82574:
15084 	case WM_T_82583:
15085 		check_version = true;
15086 		check_optionrom = true;
15087 		have_build = true;
15088 		break;
15089 	case WM_T_ICH8:
15090 	case WM_T_ICH9:
15091 	case WM_T_ICH10:
15092 	case WM_T_PCH:
15093 	case WM_T_PCH2:
15094 	case WM_T_PCH_LPT:
15095 	case WM_T_PCH_SPT:
15096 	case WM_T_PCH_CNP:
15097 		check_version = true;
15098 		have_build = true;
15099 		have_uid = false;
15100 		break;
15101 	case WM_T_82575:
15102 	case WM_T_82576:
15103 	case WM_T_82580:
15104 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
15105 			check_version = true;
15106 		break;
15107 	case WM_T_I211:
15108 		wm_nvm_version_invm(sc);
15109 		have_uid = false;
15110 		goto printver;
15111 	case WM_T_I210:
15112 		if (!wm_nvm_flash_presence_i210(sc)) {
15113 			wm_nvm_version_invm(sc);
15114 			have_uid = false;
15115 			goto printver;
15116 		}
15117 		/* FALLTHROUGH */
15118 	case WM_T_I350:
15119 	case WM_T_I354:
15120 		check_version = true;
15121 		check_optionrom = true;
15122 		break;
15123 	default:
15124 		return;
15125 	}
15126 	if (check_version
15127 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
15128 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
15129 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
15130 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
15131 			build = nvm_data & NVM_BUILD_MASK;
15132 			have_build = true;
15133 		} else
15134 			minor = nvm_data & 0x00ff;
15135 
15136 		/* Decimal */
15137 		minor = (minor / 16) * 10 + (minor % 16);
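		/*
		 * Worked example: with the XYYZ split described above,
		 * nvm_data 0x50a2 gives major 5, raw minor 0x0a and
		 * build 2.  The conversion turns raw 0x0a into decimal
		 * 10 (and would turn raw 0x10 into 10 rather than 16),
		 * so this prints as "5.10.2", matching the table above.
		 */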
15138 		sc->sc_nvm_ver_major = major;
15139 		sc->sc_nvm_ver_minor = minor;
15140 
15141 printver:
15142 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
15143 		    sc->sc_nvm_ver_minor);
15144 		if (have_build) {
15145 			sc->sc_nvm_ver_build = build;
15146 			aprint_verbose(".%d", build);
15147 		}
15148 	}
15149 
15150 	/* Assume the Option ROM area is above NVM_SIZE */
15151 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
15152 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
15153 		/* Option ROM Version */
15154 		if ((off != 0x0000) && (off != 0xffff)) {
15155 			int rv;
15156 
15157 			off += NVM_COMBO_VER_OFF;
15158 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
15159 			rv |= wm_nvm_read(sc, off, 1, &uid0);
15160 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
15161 			    && (uid1 != 0) && (uid1 != 0xffff)) {
15162 				/* 16bits */
15163 				major = uid0 >> 8;
15164 				build = (uid0 << 8) | (uid1 >> 8);
15165 				patch = uid1 & 0x00ff;
15166 				aprint_verbose(", option ROM Version %d.%d.%d",
15167 				    major, build, patch);
15168 			}
15169 		}
15170 	}
15171 
15172 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
15173 		aprint_verbose(", Image Unique ID %08x",
15174 		    ((uint32_t)uid1 << 16) | uid0);
15175 }
15176 
15177 /*
15178  * wm_nvm_read:
15179  *
15180  *	Read data from the serial EEPROM.
15181  */
15182 static int
15183 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
15184 {
15185 	int rv;
15186 
15187 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15188 		device_xname(sc->sc_dev), __func__));
15189 
15190 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
15191 		return -1;
15192 
15193 	rv = sc->nvm.read(sc, word, wordcnt, data);
15194 
15195 	return rv;
15196 }
15197 
15198 /*
15199  * Hardware semaphores.
15200  * Very complex...
15201  */
15202 
15203 static int
15204 wm_get_null(struct wm_softc *sc)
15205 {
15206 
15207 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15208 		device_xname(sc->sc_dev), __func__));
15209 	return 0;
15210 }
15211 
15212 static void
15213 wm_put_null(struct wm_softc *sc)
15214 {
15215 
15216 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15217 		device_xname(sc->sc_dev), __func__));
15218 	return;
15219 }
15220 
15221 static int
15222 wm_get_eecd(struct wm_softc *sc)
15223 {
15224 	uint32_t reg;
15225 	int x;
15226 
15227 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15228 		device_xname(sc->sc_dev), __func__));
15229 
15230 	reg = CSR_READ(sc, WMREG_EECD);
15231 
15232 	/* Request EEPROM access. */
15233 	reg |= EECD_EE_REQ;
15234 	CSR_WRITE(sc, WMREG_EECD, reg);
15235 
15236 	/* ..and wait for it to be granted. */
15237 	for (x = 0; x < 1000; x++) {
15238 		reg = CSR_READ(sc, WMREG_EECD);
15239 		if (reg & EECD_EE_GNT)
15240 			break;
15241 		delay(5);
15242 	}
15243 	if ((reg & EECD_EE_GNT) == 0) {
15244 		aprint_error_dev(sc->sc_dev,
15245 		    "could not acquire EEPROM GNT\n");
15246 		reg &= ~EECD_EE_REQ;
15247 		CSR_WRITE(sc, WMREG_EECD, reg);
15248 		return -1;
15249 	}
15250 
15251 	return 0;
15252 }
15253 
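/*
 * The grant loop in wm_get_eecd() budgets 1000 iterations * 5us = 5ms
 * for the hardware to assert EECD_EE_GNT; on timeout, EECD_EE_REQ is
 * backed out so firmware is not locked out of the EEPROM.
 */
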
15254 static void
15255 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
15256 {
15257 
15258 	*eecd |= EECD_SK;
15259 	CSR_WRITE(sc, WMREG_EECD, *eecd);
15260 	CSR_WRITE_FLUSH(sc);
15261 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15262 		delay(1);
15263 	else
15264 		delay(50);
15265 }
15266 
15267 static void
15268 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
15269 {
15270 
15271 	*eecd &= ~EECD_SK;
15272 	CSR_WRITE(sc, WMREG_EECD, *eecd);
15273 	CSR_WRITE_FLUSH(sc);
15274 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15275 		delay(1);
15276 	else
15277 		delay(50);
15278 }
15279 
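/*
 * Usage sketch (not a new interface): the bit-bang NVM paths shift one
 * bit per SK cycle by pairing the two helpers above:
 *
 *	wm_nvm_eec_clock_raise(sc, &reg);
 *	wm_nvm_eec_clock_lower(sc, &reg);
 *
 * The 1us (SPI) vs. 50us (Microwire) delays keep SK within each EEPROM
 * family's timing limits.
 */
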
15280 static void
15281 wm_put_eecd(struct wm_softc *sc)
15282 {
15283 	uint32_t reg;
15284 
15285 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15286 		device_xname(sc->sc_dev), __func__));
15287 
15288 	/* Stop nvm */
15289 	reg = CSR_READ(sc, WMREG_EECD);
15290 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
15291 		/* Pull CS high */
15292 		reg |= EECD_CS;
15293 		wm_nvm_eec_clock_lower(sc, &reg);
15294 	} else {
15295 		/* CS on Microwire is active-high */
15296 		reg &= ~(EECD_CS | EECD_DI);
15297 		CSR_WRITE(sc, WMREG_EECD, reg);
15298 		wm_nvm_eec_clock_raise(sc, &reg);
15299 		wm_nvm_eec_clock_lower(sc, &reg);
15300 	}
15301 
15302 	reg = CSR_READ(sc, WMREG_EECD);
15303 	reg &= ~EECD_EE_REQ;
15304 	CSR_WRITE(sc, WMREG_EECD, reg);
15305 
15306 	return;
15307 }
15308 
15309 /*
15310  * Get hardware semaphore.
15311  * Same as e1000_get_hw_semaphore_generic()
15312  */
15313 static int
15314 wm_get_swsm_semaphore(struct wm_softc *sc)
15315 {
15316 	int32_t timeout;
15317 	uint32_t swsm;
15318 
15319 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15320 		device_xname(sc->sc_dev), __func__));
15321 	KASSERT(sc->sc_nvm_wordsize > 0);
15322 
15323 retry:
15324 	/* Get the SW semaphore. */
15325 	timeout = sc->sc_nvm_wordsize + 1;
15326 	while (timeout) {
15327 		swsm = CSR_READ(sc, WMREG_SWSM);
15328 
15329 		if ((swsm & SWSM_SMBI) == 0)
15330 			break;
15331 
15332 		delay(50);
15333 		timeout--;
15334 	}
15335 
15336 	if (timeout == 0) {
15337 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
15338 			/*
15339 			 * In rare circumstances, the SW semaphore may already
15340 			 * be held unintentionally. Clear the semaphore once
15341 			 * before giving up.
15342 			 */
15343 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
15344 			wm_put_swsm_semaphore(sc);
15345 			goto retry;
15346 		}
15347 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
15348 		return -1;
15349 	}
15350 
15351 	/* Get the FW semaphore. */
15352 	timeout = sc->sc_nvm_wordsize + 1;
15353 	while (timeout) {
15354 		swsm = CSR_READ(sc, WMREG_SWSM);
15355 		swsm |= SWSM_SWESMBI;
15356 		CSR_WRITE(sc, WMREG_SWSM, swsm);
15357 		/* If we managed to set the bit we got the semaphore. */
15358 		swsm = CSR_READ(sc, WMREG_SWSM);
15359 		if (swsm & SWSM_SWESMBI)
15360 			break;
15361 
15362 		delay(50);
15363 		timeout--;
15364 	}
15365 
15366 	if (timeout == 0) {
15367 		aprint_error_dev(sc->sc_dev,
15368 		    "could not acquire SWSM SWESMBI\n");
15369 		/* Release semaphores */
15370 		wm_put_swsm_semaphore(sc);
15371 		return -1;
15372 	}
15373 	return 0;
15374 }
15375 
15376 /*
15377  * Put hardware semaphore.
15378  * Same as e1000_put_hw_semaphore_generic()
15379  */
15380 static void
15381 wm_put_swsm_semaphore(struct wm_softc *sc)
15382 {
15383 	uint32_t swsm;
15384 
15385 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15386 		device_xname(sc->sc_dev), __func__));
15387 
15388 	swsm = CSR_READ(sc, WMREG_SWSM);
15389 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15390 	CSR_WRITE(sc, WMREG_SWSM, swsm);
15391 }
15392 
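/*
 * Usage sketch: callers bracket access to the shared resource with
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return -1;
 *	... touch the NVM or other shared hardware ...
 *	wm_put_swsm_semaphore(sc);
 *
 * SMBI arbitrates among software agents and SWESMBI between software
 * and firmware, so acquisition takes both bits and release clears both.
 */
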
15393 /*
15394  * Get SW/FW semaphore.
15395  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15396  */
15397 static int
15398 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15399 {
15400 	uint32_t swfw_sync;
15401 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15402 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15403 	int timeout;
15404 
15405 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15406 		device_xname(sc->sc_dev), __func__));
15407 
15408 	if (sc->sc_type == WM_T_80003)
15409 		timeout = 50;
15410 	else
15411 		timeout = 200;
15412 
15413 	while (timeout) {
15414 		if (wm_get_swsm_semaphore(sc)) {
15415 			aprint_error_dev(sc->sc_dev,
15416 			    "%s: failed to get semaphore\n",
15417 			    __func__);
15418 			return -1;
15419 		}
15420 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15421 		if ((swfw_sync & (swmask | fwmask)) == 0) {
15422 			swfw_sync |= swmask;
15423 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15424 			wm_put_swsm_semaphore(sc);
15425 			return 0;
15426 		}
15427 		wm_put_swsm_semaphore(sc);
15428 		delay(5000);
15429 		timeout--;
15430 	}
15431 	device_printf(sc->sc_dev,
15432 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15433 	    mask, swfw_sync);
15434 	return -1;
15435 }
15436 
15437 static void
15438 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15439 {
15440 	uint32_t swfw_sync;
15441 
15442 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15443 		device_xname(sc->sc_dev), __func__));
15444 
15445 	while (wm_get_swsm_semaphore(sc) != 0)
15446 		continue;
15447 
15448 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15449 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15450 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15451 
15452 	wm_put_swsm_semaphore(sc);
15453 }
15454 
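/*
 * Example: with mask = SWFW_EEP_SM, the SW bit is
 * (SWFW_EEP_SM << SWFW_SOFT_SHIFT) and the FW bit is
 * (SWFW_EEP_SM << SWFW_FIRM_SHIFT) in SW_FW_SYNC.
 * wm_get_swfw_semaphore() claims the resource (by setting the SW bit)
 * only when both bits are clear; this is how wm_get_nvm_80003() below
 * serializes NVM access against firmware.
 */
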
15455 static int
15456 wm_get_nvm_80003(struct wm_softc *sc)
15457 {
15458 	int rv;
15459 
15460 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15461 		device_xname(sc->sc_dev), __func__));
15462 
15463 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15464 		aprint_error_dev(sc->sc_dev,
15465 		    "%s: failed to get semaphore(SWFW)\n", __func__);
15466 		return rv;
15467 	}
15468 
15469 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15470 	    && (rv = wm_get_eecd(sc)) != 0) {
15471 		aprint_error_dev(sc->sc_dev,
15472 		    "%s: failed to get semaphore(EECD)\n", __func__);
15473 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15474 		return rv;
15475 	}
15476 
15477 	return 0;
15478 }
15479 
15480 static void
15481 wm_put_nvm_80003(struct wm_softc *sc)
15482 {
15483 
15484 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15485 		device_xname(sc->sc_dev), __func__));
15486 
15487 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15488 		wm_put_eecd(sc);
15489 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15490 }
15491 
15492 static int
15493 wm_get_nvm_82571(struct wm_softc *sc)
15494 {
15495 	int rv;
15496 
15497 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15498 		device_xname(sc->sc_dev), __func__));
15499 
15500 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15501 		return rv;
15502 
15503 	switch (sc->sc_type) {
15504 	case WM_T_82573:
15505 		break;
15506 	default:
15507 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15508 			rv = wm_get_eecd(sc);
15509 		break;
15510 	}
15511 
15512 	if (rv != 0) {
15513 		aprint_error_dev(sc->sc_dev,
15514 		    "%s: failed to get semaphore\n",
15515 		    __func__);
15516 		wm_put_swsm_semaphore(sc);
15517 	}
15518 
15519 	return rv;
15520 }
15521 
15522 static void
15523 wm_put_nvm_82571(struct wm_softc *sc)
15524 {
15525 
15526 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15527 		device_xname(sc->sc_dev), __func__));
15528 
15529 	switch (sc->sc_type) {
15530 	case WM_T_82573:
15531 		break;
15532 	default:
15533 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15534 			wm_put_eecd(sc);
15535 		break;
15536 	}
15537 
15538 	wm_put_swsm_semaphore(sc);
15539 }
15540 
15541 static int
15542 wm_get_phy_82575(struct wm_softc *sc)
15543 {
15544 
15545 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15546 		device_xname(sc->sc_dev), __func__));
15547 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15548 }
15549 
15550 static void
15551 wm_put_phy_82575(struct wm_softc *sc)
15552 {
15553 
15554 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15555 		device_xname(sc->sc_dev), __func__));
15556 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15557 }
15558 
15559 static int
15560 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15561 {
15562 	uint32_t ext_ctrl;
15563 	int timeout = 200;
15564 
15565 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15566 		device_xname(sc->sc_dev), __func__));
15567 
15568 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15569 	for (timeout = 0; timeout < 200; timeout++) {
15570 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15571 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15572 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15573 
15574 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15575 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15576 			return 0;
15577 		delay(5000);
15578 	}
15579 	device_printf(sc->sc_dev,
15580 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15581 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15582 	return -1;
15583 }
15584 
15585 static void
15586 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15587 {
15588 	uint32_t ext_ctrl;
15589 
15590 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15591 		device_xname(sc->sc_dev), __func__));
15592 
15593 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15594 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15595 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15596 
15597 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15598 }
15599 
15600 static int
15601 wm_get_swflag_ich8lan(struct wm_softc *sc)
15602 {
15603 	uint32_t ext_ctrl;
15604 	int timeout;
15605 
15606 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15607 		device_xname(sc->sc_dev), __func__));
15608 	mutex_enter(sc->sc_ich_phymtx);
15609 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15610 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15611 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15612 			break;
15613 		delay(1000);
15614 	}
15615 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
15616 		device_printf(sc->sc_dev,
15617 		    "SW has already locked the resource\n");
15618 		goto out;
15619 	}
15620 
15621 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15622 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15623 	for (timeout = 0; timeout < 1000; timeout++) {
15624 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15625 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15626 			break;
15627 		delay(1000);
15628 	}
15629 	if (timeout >= 1000) {
15630 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15631 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15632 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15633 		goto out;
15634 	}
15635 	return 0;
15636 
15637 out:
15638 	mutex_exit(sc->sc_ich_phymtx);
15639 	return -1;
15640 }
15641 
15642 static void
15643 wm_put_swflag_ich8lan(struct wm_softc *sc)
15644 {
15645 	uint32_t ext_ctrl;
15646 
15647 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15648 		device_xname(sc->sc_dev), __func__));
15649 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15650 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15651 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15652 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15653 	} else
15654 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15655 
15656 	mutex_exit(sc->sc_ich_phymtx);
15657 }
15658 
15659 static int
15660 wm_get_nvm_ich8lan(struct wm_softc *sc)
15661 {
15662 
15663 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15664 		device_xname(sc->sc_dev), __func__));
15665 	mutex_enter(sc->sc_ich_nvmmtx);
15666 
15667 	return 0;
15668 }
15669 
15670 static void
15671 wm_put_nvm_ich8lan(struct wm_softc *sc)
15672 {
15673 
15674 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15675 		device_xname(sc->sc_dev), __func__));
15676 	mutex_exit(sc->sc_ich_nvmmtx);
15677 }
15678 
15679 static int
15680 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15681 {
15682 	int i = 0;
15683 	uint32_t reg;
15684 
15685 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15686 		device_xname(sc->sc_dev), __func__));
15687 
15688 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15689 	do {
15690 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
15691 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15692 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15693 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15694 			break;
15695 		delay(2*1000);
15696 		i++;
15697 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15698 
15699 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15700 		wm_put_hw_semaphore_82573(sc);
15701 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
15702 		    device_xname(sc->sc_dev));
15703 		return -1;
15704 	}
15705 
15706 	return 0;
15707 }
15708 
15709 static void
15710 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15711 {
15712 	uint32_t reg;
15713 
15714 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15715 		device_xname(sc->sc_dev), __func__));
15716 
15717 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15718 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15719 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15720 }
15721 
15722 /*
15723  * Management mode and power management related subroutines.
15724  * BMC, AMT, suspend/resume and EEE.
15725  */
15726 
15727 #ifdef WM_WOL
15728 static int
15729 wm_check_mng_mode(struct wm_softc *sc)
15730 {
15731 	int rv;
15732 
15733 	switch (sc->sc_type) {
15734 	case WM_T_ICH8:
15735 	case WM_T_ICH9:
15736 	case WM_T_ICH10:
15737 	case WM_T_PCH:
15738 	case WM_T_PCH2:
15739 	case WM_T_PCH_LPT:
15740 	case WM_T_PCH_SPT:
15741 	case WM_T_PCH_CNP:
15742 		rv = wm_check_mng_mode_ich8lan(sc);
15743 		break;
15744 	case WM_T_82574:
15745 	case WM_T_82583:
15746 		rv = wm_check_mng_mode_82574(sc);
15747 		break;
15748 	case WM_T_82571:
15749 	case WM_T_82572:
15750 	case WM_T_82573:
15751 	case WM_T_80003:
15752 		rv = wm_check_mng_mode_generic(sc);
15753 		break;
15754 	default:
15755 		/* Nothing to do */
15756 		rv = 0;
15757 		break;
15758 	}
15759 
15760 	return rv;
15761 }
15762 
15763 static int
15764 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15765 {
15766 	uint32_t fwsm;
15767 
15768 	fwsm = CSR_READ(sc, WMREG_FWSM);
15769 
15770 	if (((fwsm & FWSM_FW_VALID) != 0)
15771 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15772 		return 1;
15773 
15774 	return 0;
15775 }
15776 
15777 static int
15778 wm_check_mng_mode_82574(struct wm_softc *sc)
15779 {
15780 	uint16_t data;
15781 
15782 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15783 
15784 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
15785 		return 1;
15786 
15787 	return 0;
15788 }
15789 
15790 static int
15791 wm_check_mng_mode_generic(struct wm_softc *sc)
15792 {
15793 	uint32_t fwsm;
15794 
15795 	fwsm = CSR_READ(sc, WMREG_FWSM);
15796 
15797 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15798 		return 1;
15799 
15800 	return 0;
15801 }
15802 #endif /* WM_WOL */
15803 
15804 static int
15805 wm_enable_mng_pass_thru(struct wm_softc *sc)
15806 {
15807 	uint32_t manc, fwsm, factps;
15808 
15809 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15810 		return 0;
15811 
15812 	manc = CSR_READ(sc, WMREG_MANC);
15813 
15814 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15815 		device_xname(sc->sc_dev), manc));
15816 	if ((manc & MANC_RECV_TCO_EN) == 0)
15817 		return 0;
15818 
15819 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15820 		fwsm = CSR_READ(sc, WMREG_FWSM);
15821 		factps = CSR_READ(sc, WMREG_FACTPS);
15822 		if (((factps & FACTPS_MNGCG) == 0)
15823 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15824 			return 1;
15825 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15826 		uint16_t data;
15827 
15828 		factps = CSR_READ(sc, WMREG_FACTPS);
15829 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15830 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15831 			device_xname(sc->sc_dev), factps, data));
15832 		if (((factps & FACTPS_MNGCG) == 0)
15833 		    && ((data & NVM_CFG2_MNGM_MASK)
15834 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15835 			return 1;
15836 	} else if (((manc & MANC_SMBUS_EN) != 0)
15837 	    && ((manc & MANC_ASF_EN) == 0))
15838 		return 1;
15839 
15840 	return 0;
15841 }
15842 
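/*
 * Example of the decision above: on an 82574 whose FACTPS_MNGCG bit is
 * clear and whose NVM CFG2 manageability field selects pass-through,
 * wm_enable_mng_pass_thru() returns 1 and wm_get_wakeup() below sets
 * WM_F_HAS_MANAGE accordingly.
 */
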
15843 static bool
15844 wm_phy_resetisblocked(struct wm_softc *sc)
15845 {
15846 	bool blocked = false;
15847 	uint32_t reg;
15848 	int i = 0;
15849 
15850 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15851 		device_xname(sc->sc_dev), __func__));
15852 
15853 	switch (sc->sc_type) {
15854 	case WM_T_ICH8:
15855 	case WM_T_ICH9:
15856 	case WM_T_ICH10:
15857 	case WM_T_PCH:
15858 	case WM_T_PCH2:
15859 	case WM_T_PCH_LPT:
15860 	case WM_T_PCH_SPT:
15861 	case WM_T_PCH_CNP:
15862 		do {
15863 			reg = CSR_READ(sc, WMREG_FWSM);
15864 			if ((reg & FWSM_RSPCIPHY) == 0) {
15865 				blocked = true;
15866 				delay(10*1000);
15867 				continue;
15868 			}
15869 			blocked = false;
15870 		} while (blocked && (i++ < 30));
15871 		return blocked;
15872 		break;
15873 	case WM_T_82571:
15874 	case WM_T_82572:
15875 	case WM_T_82573:
15876 	case WM_T_82574:
15877 	case WM_T_82583:
15878 	case WM_T_80003:
15879 		reg = CSR_READ(sc, WMREG_MANC);
15880 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15881 			return true;
15882 		else
15883 			return false;
15884 		break;
15885 	default:
15886 		/* No problem */
15887 		break;
15888 	}
15889 
15890 	return false;
15891 }
15892 
15893 static void
15894 wm_get_hw_control(struct wm_softc *sc)
15895 {
15896 	uint32_t reg;
15897 
15898 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15899 		device_xname(sc->sc_dev), __func__));
15900 
15901 	if (sc->sc_type == WM_T_82573) {
15902 		reg = CSR_READ(sc, WMREG_SWSM);
15903 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15904 	} else if (sc->sc_type >= WM_T_82571) {
15905 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15906 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15907 	}
15908 }
15909 
15910 static void
15911 wm_release_hw_control(struct wm_softc *sc)
15912 {
15913 	uint32_t reg;
15914 
15915 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15916 		device_xname(sc->sc_dev), __func__));
15917 
15918 	if (sc->sc_type == WM_T_82573) {
15919 		reg = CSR_READ(sc, WMREG_SWSM);
15920 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15921 	} else if (sc->sc_type >= WM_T_82571) {
15922 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15923 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15924 	}
15925 }
15926 
15927 static void
15928 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15929 {
15930 	uint32_t reg;
15931 
15932 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15933 		device_xname(sc->sc_dev), __func__));
15934 
15935 	if (sc->sc_type < WM_T_PCH2)
15936 		return;
15937 
15938 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15939 
15940 	if (gate)
15941 		reg |= EXTCNFCTR_GATE_PHY_CFG;
15942 	else
15943 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15944 
15945 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15946 }
15947 
15948 static int
15949 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15950 {
15951 	uint32_t fwsm, reg;
15952 	int rv;
15953 
15954 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15955 		device_xname(sc->sc_dev), __func__));
15956 
15957 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
15958 	wm_gate_hw_phy_config_ich8lan(sc, true);
15959 
15960 	/* Disable ULP */
15961 	wm_ulp_disable(sc);
15962 
15963 	/* Acquire PHY semaphore */
15964 	rv = sc->phy.acquire(sc);
15965 	if (rv != 0) {
15966 		DPRINTF(sc, WM_DEBUG_INIT,
15967 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15968 		return rv;
15969 	}
15970 
15971 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
15972 	 * inaccessible and resetting the PHY is not blocked, toggle the
15973 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15974 	 */
15975 	fwsm = CSR_READ(sc, WMREG_FWSM);
15976 	switch (sc->sc_type) {
15977 	case WM_T_PCH_LPT:
15978 	case WM_T_PCH_SPT:
15979 	case WM_T_PCH_CNP:
15980 		if (wm_phy_is_accessible_pchlan(sc))
15981 			break;
15982 
15983 		/* Before toggling LANPHYPC, see if PHY is accessible by
15984 		 * forcing MAC to SMBus mode first.
15985 		 */
15986 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15987 		reg |= CTRL_EXT_FORCE_SMBUS;
15988 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15989 #if 0
15990 		/* XXX Isn't this required??? */
15991 		CSR_WRITE_FLUSH(sc);
15992 #endif
15993 		/* Wait 50 milliseconds for MAC to finish any retries
15994 		 * that it might be trying to perform from previous
15995 		 * attempts to acknowledge any phy read requests.
15996 		 */
15997 		delay(50 * 1000);
15998 		/* FALLTHROUGH */
15999 	case WM_T_PCH2:
16000 		if (wm_phy_is_accessible_pchlan(sc) == true)
16001 			break;
16002 		/* FALLTHROUGH */
16003 	case WM_T_PCH:
16004 		if (sc->sc_type == WM_T_PCH)
16005 			if ((fwsm & FWSM_FW_VALID) != 0)
16006 				break;
16007 
16008 		if (wm_phy_resetisblocked(sc) == true) {
16009 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
16010 			break;
16011 		}
16012 
16013 		/* Toggle LANPHYPC Value bit */
16014 		wm_toggle_lanphypc_pch_lpt(sc);
16015 
16016 		if (sc->sc_type >= WM_T_PCH_LPT) {
16017 			if (wm_phy_is_accessible_pchlan(sc) == true)
16018 				break;
16019 
16020 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
16021 			 * so ensure that the MAC is also out of SMBus mode
16022 			 */
16023 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16024 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16025 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16026 
16027 			if (wm_phy_is_accessible_pchlan(sc) == true)
16028 				break;
16029 			rv = -1;
16030 		}
16031 		break;
16032 	default:
16033 		break;
16034 	}
16035 
16036 	/* Release semaphore */
16037 	sc->phy.release(sc);
16038 
16039 	if (rv == 0) {
16040 		/* Check to see if able to reset PHY.  Print error if not */
16041 		if (wm_phy_resetisblocked(sc)) {
16042 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
16043 			goto out;
16044 		}
16045 
16046 		/* Reset the PHY before any access to it.  Doing so, ensures
16047 		 * that the PHY is in a known good state before we read/write
16048 		 * PHY registers.  The generic reset is sufficient here,
16049 		 * because we haven't determined the PHY type yet.
16050 		 */
16051 		if (wm_reset_phy(sc) != 0)
16052 			goto out;
16053 
16054 		/* On a successful reset, possibly need to wait for the PHY
16055 		 * to quiesce to an accessible state before returning control
16056 		 * to the calling function.  If the PHY does not quiesce, then
16057 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
16058 		 *  the PHY is in.
16059 		 */
16060 		if (wm_phy_resetisblocked(sc))
16061 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
16062 	}
16063 
16064 out:
16065 	/* Ungate automatic PHY configuration on non-managed 82579 */
16066 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
16067 		delay(10*1000);
16068 		wm_gate_hw_phy_config_ich8lan(sc, false);
16069 	}
16070 
16071 	return 0;
16072 }
16073 
16074 static void
16075 wm_init_manageability(struct wm_softc *sc)
16076 {
16077 
16078 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16079 		device_xname(sc->sc_dev), __func__));
16080 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
16081 
16082 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
16083 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
16084 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
16085 
16086 		/* Disable hardware interception of ARP */
16087 		manc &= ~MANC_ARP_EN;
16088 
16089 		/* Enable receiving management packets to the host */
16090 		if (sc->sc_type >= WM_T_82571) {
16091 			manc |= MANC_EN_MNG2HOST;
16092 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
16093 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
16094 		}
16095 
16096 		CSR_WRITE(sc, WMREG_MANC, manc);
16097 	}
16098 }
16099 
16100 static void
16101 wm_release_manageability(struct wm_softc *sc)
16102 {
16103 
16104 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
16105 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
16106 
16107 		manc |= MANC_ARP_EN;
16108 		if (sc->sc_type >= WM_T_82571)
16109 			manc &= ~MANC_EN_MNG2HOST;
16110 
16111 		CSR_WRITE(sc, WMREG_MANC, manc);
16112 	}
16113 }
16114 
16115 static void
16116 wm_get_wakeup(struct wm_softc *sc)
16117 {
16118 
16119 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
16120 	switch (sc->sc_type) {
16121 	case WM_T_82573:
16122 	case WM_T_82583:
16123 		sc->sc_flags |= WM_F_HAS_AMT;
16124 		/* FALLTHROUGH */
16125 	case WM_T_80003:
16126 	case WM_T_82575:
16127 	case WM_T_82576:
16128 	case WM_T_82580:
16129 	case WM_T_I350:
16130 	case WM_T_I354:
16131 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
16132 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
16133 		/* FALLTHROUGH */
16134 	case WM_T_82541:
16135 	case WM_T_82541_2:
16136 	case WM_T_82547:
16137 	case WM_T_82547_2:
16138 	case WM_T_82571:
16139 	case WM_T_82572:
16140 	case WM_T_82574:
16141 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16142 		break;
16143 	case WM_T_ICH8:
16144 	case WM_T_ICH9:
16145 	case WM_T_ICH10:
16146 	case WM_T_PCH:
16147 	case WM_T_PCH2:
16148 	case WM_T_PCH_LPT:
16149 	case WM_T_PCH_SPT:
16150 	case WM_T_PCH_CNP:
16151 		sc->sc_flags |= WM_F_HAS_AMT;
16152 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16153 		break;
16154 	default:
16155 		break;
16156 	}
16157 
16158 	/* 1: HAS_MANAGE */
16159 	if (wm_enable_mng_pass_thru(sc) != 0)
16160 		sc->sc_flags |= WM_F_HAS_MANAGE;
16161 
16162 	/*
16163 	 * Note that the WOL flag is set after the EEPROM reset code
16164 	 * runs.
16165 	 */
16166 }
16167 
16168 /*
16169  * Unconfigure Ultra Low Power mode.
16170  * Only for I217 and newer (see below).
16171  */
16172 static int
16173 wm_ulp_disable(struct wm_softc *sc)
16174 {
16175 	uint32_t reg;
16176 	uint16_t phyreg;
16177 	int i = 0, rv;
16178 
16179 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16180 		device_xname(sc->sc_dev), __func__));
16181 	/* Exclude old devices */
16182 	if ((sc->sc_type < WM_T_PCH_LPT)
16183 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
16184 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
16185 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
16186 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
16187 		return 0;
16188 
16189 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
16190 		/* Request ME un-configure ULP mode in the PHY */
16191 		reg = CSR_READ(sc, WMREG_H2ME);
16192 		reg &= ~H2ME_ULP;
16193 		reg |= H2ME_ENFORCE_SETTINGS;
16194 		CSR_WRITE(sc, WMREG_H2ME, reg);
16195 
16196 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
16197 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
16198 			if (i++ == 30) {
16199 				device_printf(sc->sc_dev, "%s timed out\n",
16200 				    __func__);
16201 				return -1;
16202 			}
16203 			delay(10 * 1000);
16204 		}
16205 		reg = CSR_READ(sc, WMREG_H2ME);
16206 		reg &= ~H2ME_ENFORCE_SETTINGS;
16207 		CSR_WRITE(sc, WMREG_H2ME, reg);
16208 
16209 		return 0;
16210 	}
16211 
16212 	/* Acquire semaphore */
16213 	rv = sc->phy.acquire(sc);
16214 	if (rv != 0) {
16215 		DPRINTF(sc, WM_DEBUG_INIT,
16216 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16217 		return rv;
16218 	}
16219 
16220 	/* Toggle LANPHYPC */
16221 	wm_toggle_lanphypc_pch_lpt(sc);
16222 
16223 	/* Unforce SMBus mode in PHY */
16224 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
16225 	if (rv != 0) {
16226 		uint32_t reg2;
16227 
16228 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
16229 		    __func__);
16230 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
16231 		reg2 |= CTRL_EXT_FORCE_SMBUS;
16232 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
16233 		delay(50 * 1000);
16234 
16235 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
16236 		    &phyreg);
16237 		if (rv != 0)
16238 			goto release;
16239 	}
16240 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16241 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
16242 
16243 	/* Unforce SMBus mode in MAC */
16244 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
16245 	reg &= ~CTRL_EXT_FORCE_SMBUS;
16246 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16247 
16248 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
16249 	if (rv != 0)
16250 		goto release;
16251 	phyreg |= HV_PM_CTRL_K1_ENA;
16252 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
16253 
16254 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
16255 	    &phyreg);
16256 	if (rv != 0)
16257 		goto release;
16258 	phyreg &= ~(I218_ULP_CONFIG1_IND
16259 	    | I218_ULP_CONFIG1_STICKY_ULP
16260 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
16261 	    | I218_ULP_CONFIG1_WOL_HOST
16262 	    | I218_ULP_CONFIG1_INBAND_EXIT
16263 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
16264 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
16265 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
16266 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16267 	phyreg |= I218_ULP_CONFIG1_START;
16268 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16269 
16270 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16271 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
16272 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16273 
16274 release:
16275 	/* Release semaphore */
16276 	sc->phy.release(sc);
16277 	wm_gmii_reset(sc);
16278 	delay(50 * 1000);
16279 
16280 	return rv;
16281 }
16282 
16283 /* WOL in the newer chipset interfaces (pchlan) */
16284 static int
16285 wm_enable_phy_wakeup(struct wm_softc *sc)
16286 {
16287 	device_t dev = sc->sc_dev;
16288 	uint32_t mreg, moff;
16289 	uint16_t wuce, wuc, wufc, preg;
16290 	int i, rv;
16291 
16292 	KASSERT(sc->sc_type >= WM_T_PCH);
16293 
16294 	/* Copy MAC RARs to PHY RARs */
16295 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
16296 
16297 	/* Activate PHY wakeup */
16298 	rv = sc->phy.acquire(sc);
16299 	if (rv != 0) {
16300 		device_printf(dev, "%s: failed to acquire semaphore\n",
16301 		    __func__);
16302 		return rv;
16303 	}
16304 
16305 	/*
16306 	 * Enable access to PHY wakeup registers.
16307 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
16308 	 */
16309 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
16310 	if (rv != 0) {
16311 		device_printf(dev,
16312 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
16313 		goto release;
16314 	}
16315 
16316 	/* Copy MAC MTA to PHY MTA */
16317 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
16318 		uint16_t lo, hi;
16319 
16320 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
16321 		lo = (uint16_t)(mreg & 0xffff);
16322 		hi = (uint16_t)((mreg >> 16) & 0xffff);
16323 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
16324 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
16325 	}
16326 
16327 	/* Configure PHY Rx Control register */
16328 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
16329 	mreg = CSR_READ(sc, WMREG_RCTL);
16330 	if (mreg & RCTL_UPE)
16331 		preg |= BM_RCTL_UPE;
16332 	if (mreg & RCTL_MPE)
16333 		preg |= BM_RCTL_MPE;
16334 	preg &= ~(BM_RCTL_MO_MASK);
16335 	moff = __SHIFTOUT(mreg, RCTL_MO);
16336 	if (moff != 0)
16337 		preg |= moff << BM_RCTL_MO_SHIFT;
16338 	if (mreg & RCTL_BAM)
16339 		preg |= BM_RCTL_BAM;
16340 	if (mreg & RCTL_PMCF)
16341 		preg |= BM_RCTL_PMCF;
16342 	mreg = CSR_READ(sc, WMREG_CTRL);
16343 	if (mreg & CTRL_RFCE)
16344 		preg |= BM_RCTL_RFCE;
16345 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
16346 
16347 	wuc = WUC_APME | WUC_PME_EN;
16348 	wufc = WUFC_MAG;
16349 	/* Enable PHY wakeup in MAC register */
16350 	CSR_WRITE(sc, WMREG_WUC,
16351 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
16352 	CSR_WRITE(sc, WMREG_WUFC, wufc);
16353 
16354 	/* Configure and enable PHY wakeup in PHY registers */
16355 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
16356 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
16357 
16358 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
16359 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16360 
16361 release:
16362 	sc->phy.release(sc);
16363 
16364 	return rv;
16365 }
16366 
16367 /* Power down workaround on D3 */
16368 static void
16369 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
16370 {
16371 	uint32_t reg;
16372 	uint16_t phyreg;
16373 	int i;
16374 
16375 	for (i = 0; i < 2; i++) {
16376 		/* Disable link */
16377 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16378 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16379 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16380 
16381 		/*
16382 		 * Call gig speed drop workaround on Gig disable before
16383 		 * accessing any PHY registers
16384 		 */
16385 		if (sc->sc_type == WM_T_ICH8)
16386 			wm_gig_downshift_workaround_ich8lan(sc);
16387 
16388 		/* Write VR power-down enable */
16389 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16390 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16391 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16392 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16393 
16394 		/* Read it back and test */
16395 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16396 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16397 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16398 			break;
16399 
16400 		/* Issue PHY reset and repeat at most one more time */
16401 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16402 	}
16403 }
16404 
16405 /*
16406  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16407  *  @sc: pointer to the HW structure
16408  *
16409  *  During S0 to Sx transition, it is possible the link remains at gig
16410  *  instead of negotiating to a lower speed.  Before going to Sx, set
16411  *  'Gig Disable' to force link speed negotiation to a lower speed based on
16412  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
16413  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16414  *  needs to be written.
16415  *  Parts that support (and are linked to a partner which support) EEE in
16416  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16417  *  than 10Mbps w/o EEE.
16418  */
16419 static void
16420 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16421 {
16422 	device_t dev = sc->sc_dev;
16423 	struct ethercom *ec = &sc->sc_ethercom;
16424 	uint32_t phy_ctrl;
16425 	int rv;
16426 
16427 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16428 	phy_ctrl |= PHY_CTRL_GBE_DIS;
16429 
16430 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16431 
16432 	if (sc->sc_phytype == WMPHY_I217) {
16433 		uint16_t devid = sc->sc_pcidevid;
16434 
16435 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16436 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
16437 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16438 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16439 		    (sc->sc_type >= WM_T_PCH_SPT))
16440 			CSR_WRITE(sc, WMREG_FEXTNVM6,
16441 			    CSR_READ(sc, WMREG_FEXTNVM6)
16442 			    & ~FEXTNVM6_REQ_PLL_CLK);
16443 
16444 		if (sc->phy.acquire(sc) != 0)
16445 			goto out;
16446 
16447 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16448 			uint16_t eee_advert;
16449 
16450 			rv = wm_read_emi_reg_locked(dev,
16451 			    I217_EEE_ADVERTISEMENT, &eee_advert);
16452 			if (rv)
16453 				goto release;
16454 
16455 			/*
16456 			 * Disable LPLU if both link partners support 100BaseT
16457 			 * EEE and 100Full is advertised on both ends of the
16458 			 * link, and enable Auto Enable LPI since there will
16459 			 * be no driver to enable LPI while in Sx.
16460 			 */
16461 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
16462 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16463 				uint16_t anar, phy_reg;
16464 
16465 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
16466 				    &anar);
16467 				if (anar & ANAR_TX_FD) {
16468 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16469 					    PHY_CTRL_NOND0A_LPLU);
16470 
16471 					/* Set Auto Enable LPI after link up */
16472 					sc->phy.readreg_locked(dev, 2,
16473 					    I217_LPI_GPIO_CTRL, &phy_reg);
16474 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16475 					sc->phy.writereg_locked(dev, 2,
16476 					    I217_LPI_GPIO_CTRL, phy_reg);
16477 				}
16478 			}
16479 		}
16480 
16481 		/*
16482 		 * For i217 Intel Rapid Start Technology support,
16483 		 * when the system is going into Sx and no manageability engine
16484 		 * is present, the driver must configure proxy to reset only on
16485 		 * power good.	LPI (Low Power Idle) state must also reset only
16486 		 * on power good, as well as the MTA (Multicast table array).
16487 		 * The SMBus release must also be disabled on LCD reset.
16488 		 */
16489 
16490 		/*
16491 		 * Enable MTA to reset for Intel Rapid Start Technology
16492 		 * Support
16493 		 */
16494 
16495 release:
16496 		sc->phy.release(sc);
16497 	}
16498 out:
16499 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16500 
16501 	if (sc->sc_type == WM_T_ICH8)
16502 		wm_gig_downshift_workaround_ich8lan(sc);
16503 
16504 	if (sc->sc_type >= WM_T_PCH) {
16505 		wm_oem_bits_config_ich8lan(sc, false);
16506 
16507 		/* Reset PHY to activate OEM bits on 82577/8 */
16508 		if (sc->sc_type == WM_T_PCH)
16509 			wm_reset_phy(sc);
16510 
16511 		if (sc->phy.acquire(sc) != 0)
16512 			return;
16513 		wm_write_smbus_addr(sc);
16514 		sc->phy.release(sc);
16515 	}
16516 }
16517 
16518 /*
16519  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16520  *  @sc: pointer to the HW structure
16521  *
16522  *  During Sx to S0 transitions on non-managed devices or managed devices
16523  *  on which PHY resets are not blocked, if the PHY registers cannot be
16524  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
16525  *  the PHY.
16526  *  On i217, setup Intel Rapid Start Technology.
16527  */
16528 static int
16529 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16530 {
16531 	device_t dev = sc->sc_dev;
16532 	int rv;
16533 
16534 	if (sc->sc_type < WM_T_PCH2)
16535 		return 0;
16536 
16537 	rv = wm_init_phy_workarounds_pchlan(sc);
16538 	if (rv != 0)
16539 		return rv;
16540 
16541 	/* For i217 Intel Rapid Start Technology support when the system
16542 	 * is transitioning from Sx and no manageability engine is present
16543 	 * configure SMBus to restore on reset, disable proxy, and enable
16544 	 * the reset on MTA (Multicast table array).
16545 	 */
16546 	if (sc->sc_phytype == WMPHY_I217) {
16547 		uint16_t phy_reg;
16548 
16549 		rv = sc->phy.acquire(sc);
16550 		if (rv != 0)
16551 			return rv;
16552 
16553 		/* Clear Auto Enable LPI after link up */
16554 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16555 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16556 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16557 
16558 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16559 			/* Restore clear on SMB if no manageability engine
16560 			 * is present
16561 			 */
16562 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16563 			    &phy_reg);
16564 			if (rv != 0)
16565 				goto release;
16566 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16567 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16568 
16569 			/* Disable Proxy */
16570 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16571 		}
16572 		/* Enable reset on MTA */
16573 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16574 		if (rv != 0)
16575 			goto release;
16576 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16577 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16578 
16579 release:
16580 		sc->phy.release(sc);
16581 		return rv;
16582 	}
16583 
16584 	return 0;
16585 }
16586 
16587 static void
16588 wm_enable_wakeup(struct wm_softc *sc)
16589 {
16590 	uint32_t reg, pmreg;
16591 	pcireg_t pmode;
16592 	int rv = 0;
16593 
16594 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16595 		device_xname(sc->sc_dev), __func__));
16596 
16597 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16598 	    &pmreg, NULL) == 0)
16599 		return;
16600 
16601 	if ((sc->sc_flags & WM_F_WOL) == 0)
16602 		goto pme;
16603 
16604 	/* Advertise the wakeup capability */
16605 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16606 	    | CTRL_SWDPIN(3));
16607 
16608 	/* Keep the laser running on fiber adapters */
16609 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16610 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16611 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16612 		reg |= CTRL_EXT_SWDPIN(3);
16613 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16614 	}
16615 
16616 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16617 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16618 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16619 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16620 		wm_suspend_workarounds_ich8lan(sc);
16621 
16622 #if 0	/* For the multicast packet */
16623 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16624 	reg |= WUFC_MC;
16625 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16626 #endif
16627 
16628 	if (sc->sc_type >= WM_T_PCH) {
16629 		rv = wm_enable_phy_wakeup(sc);
16630 		if (rv != 0)
16631 			goto pme;
16632 	} else {
16633 		/* Enable wakeup by the MAC */
16634 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16635 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16636 	}
16637 
16638 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16639 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16640 		|| (sc->sc_type == WM_T_PCH2))
16641 	    && (sc->sc_phytype == WMPHY_IGP_3))
16642 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16643 
16644 pme:
16645 	/* Request PME */
16646 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16647 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16648 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16649 		/* For WOL */
16650 		pmode |= PCI_PMCSR_PME_EN;
16651 	} else {
16652 		/* Disable WOL */
16653 		pmode &= ~PCI_PMCSR_PME_EN;
16654 	}
16655 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16656 }
16657 
16658 /* Disable ASPM L0s and/or L1 for workaround */
16659 static void
16660 wm_disable_aspm(struct wm_softc *sc)
16661 {
16662 	pcireg_t reg, mask = 0;
16663 	const char *str = "";
16664 
16665 	/*
16666 	 * Only for PCIe devices which have the PCIe capability in PCI
16667 	 * config space.
16668 	 */
16669 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16670 		return;
16671 
16672 	switch (sc->sc_type) {
16673 	case WM_T_82571:
16674 	case WM_T_82572:
16675 		/*
16676 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16677 		 * State Power management L1 State (ASPM L1).
16678 		 */
16679 		mask = PCIE_LCSR_ASPM_L1;
16680 		str = "L1 is";
16681 		break;
16682 	case WM_T_82573:
16683 	case WM_T_82574:
16684 	case WM_T_82583:
16685 		/*
16686 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
16687 		 *
16688 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
16689 		 * some chipsets.  The 82574 and 82583 documents say that
16690 		 * disabling L0s with some specific chipsets is sufficient,
16691 		 * but we follow what the Intel em driver does.
16692 		 *
16693 		 * References:
16694 		 * Errata 8 of the Specification Update of i82573.
16695 		 * Errata 20 of the Specification Update of i82574.
16696 		 * Errata 9 of the Specification Update of i82583.
16697 		 */
16698 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16699 		str = "L0s and L1 are";
16700 		break;
16701 	default:
16702 		return;
16703 	}
16704 
16705 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16706 	    sc->sc_pcixe_capoff + PCIE_LCSR);
16707 	reg &= ~mask;
16708 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16709 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16710 
16711 	/* Print only in wm_attach() */
16712 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16713 		aprint_verbose_dev(sc->sc_dev,
16714 		    "ASPM %s disabled to workaround the errata.\n", str);
16715 }
16716 
16717 /* LPLU */
16718 
16719 static void
16720 wm_lplu_d0_disable(struct wm_softc *sc)
16721 {
16722 	struct mii_data *mii = &sc->sc_mii;
16723 	uint32_t reg;
16724 	uint16_t phyval;
16725 
16726 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16727 		device_xname(sc->sc_dev), __func__));
16728 
16729 	if (sc->sc_phytype == WMPHY_IFE)
16730 		return;
16731 
16732 	switch (sc->sc_type) {
16733 	case WM_T_82571:
16734 	case WM_T_82572:
16735 	case WM_T_82573:
16736 	case WM_T_82575:
16737 	case WM_T_82576:
16738 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16739 		phyval &= ~PMR_D0_LPLU;
16740 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16741 		break;
16742 	case WM_T_82580:
16743 	case WM_T_I350:
16744 	case WM_T_I210:
16745 	case WM_T_I211:
16746 		reg = CSR_READ(sc, WMREG_PHPM);
16747 		reg &= ~PHPM_D0A_LPLU;
16748 		CSR_WRITE(sc, WMREG_PHPM, reg);
16749 		break;
16750 	case WM_T_82574:
16751 	case WM_T_82583:
16752 	case WM_T_ICH8:
16753 	case WM_T_ICH9:
16754 	case WM_T_ICH10:
16755 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16756 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16757 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16758 		CSR_WRITE_FLUSH(sc);
16759 		break;
16760 	case WM_T_PCH:
16761 	case WM_T_PCH2:
16762 	case WM_T_PCH_LPT:
16763 	case WM_T_PCH_SPT:
16764 	case WM_T_PCH_CNP:
16765 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16766 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16767 		if (wm_phy_resetisblocked(sc) == false)
16768 			phyval |= HV_OEM_BITS_ANEGNOW;
16769 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16770 		break;
16771 	default:
16772 		break;
16773 	}
16774 }
16775 
16776 /* EEE */
16777 
16778 static int
16779 wm_set_eee_i350(struct wm_softc *sc)
16780 {
16781 	struct ethercom *ec = &sc->sc_ethercom;
16782 	uint32_t ipcnfg, eeer;
16783 	uint32_t ipcnfg_mask
16784 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16785 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16786 
16787 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16788 
16789 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16790 	eeer = CSR_READ(sc, WMREG_EEER);
16791 
16792 	/* Enable or disable per user setting */
16793 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16794 		ipcnfg |= ipcnfg_mask;
16795 		eeer |= eeer_mask;
16796 	} else {
16797 		ipcnfg &= ~ipcnfg_mask;
16798 		eeer &= ~eeer_mask;
16799 	}
16800 
16801 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16802 	CSR_WRITE(sc, WMREG_EEER, eeer);
16803 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16804 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16805 
16806 	return 0;
16807 }
16808 
16809 static int
16810 wm_set_eee_pchlan(struct wm_softc *sc)
16811 {
16812 	device_t dev = sc->sc_dev;
16813 	struct ethercom *ec = &sc->sc_ethercom;
16814 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16815 	int rv;
16816 
16817 	switch (sc->sc_phytype) {
16818 	case WMPHY_82579:
16819 		lpa = I82579_EEE_LP_ABILITY;
16820 		pcs_status = I82579_EEE_PCS_STATUS;
16821 		adv_addr = I82579_EEE_ADVERTISEMENT;
16822 		break;
16823 	case WMPHY_I217:
16824 		lpa = I217_EEE_LP_ABILITY;
16825 		pcs_status = I217_EEE_PCS_STATUS;
16826 		adv_addr = I217_EEE_ADVERTISEMENT;
16827 		break;
16828 	default:
16829 		return 0;
16830 	}
16831 
16832 	rv = sc->phy.acquire(sc);
16833 	if (rv != 0) {
16834 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
16835 		return rv;
16836 	}
16837 
16838 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16839 	if (rv != 0)
16840 		goto release;
16841 
16842 	/* Clear bits that enable EEE in various speeds */
16843 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16844 
16845 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16846 		/* Save off link partner's EEE ability */
16847 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16848 		if (rv != 0)
16849 			goto release;
16850 
16851 		/* Read EEE advertisement */
16852 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16853 			goto release;
16854 
16855 		/*
16856 		 * Enable EEE only for speeds in which the link partner is
16857 		 * EEE capable and for which we advertise EEE.
16858 		 */
16859 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16860 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16861 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16862 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16863 			if ((data & ANLPAR_TX_FD) != 0)
16864 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16865 			else {
16866 				/*
16867 				 * EEE is not supported in 100Half, so ignore
16868 				 * partner's EEE in 100 ability if full-duplex
16869 				 * is not advertised.
16870 				 */
16871 				sc->eee_lp_ability
16872 				    &= ~AN_EEEADVERT_100_TX;
16873 			}
16874 		}
16875 	}
16876 
16877 	if (sc->sc_phytype == WMPHY_82579) {
16878 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16879 		if (rv != 0)
16880 			goto release;
16881 
16882 		data &= ~I82579_LPI_PLL_SHUT_100;
16883 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16884 	}
16885 
16886 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16887 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16888 		goto release;
16889 
16890 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16891 release:
16892 	sc->phy.release(sc);
16893 
16894 	return rv;
16895 }
16896 
16897 static int
16898 wm_set_eee(struct wm_softc *sc)
16899 {
16900 	struct ethercom *ec = &sc->sc_ethercom;
16901 
16902 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16903 		return 0;
16904 
16905 	if (sc->sc_type == WM_T_I354) {
16906 		/* I354 uses an external PHY */
16907 		return 0; /* not yet */
16908 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16909 		return wm_set_eee_i350(sc);
16910 	else if (sc->sc_type >= WM_T_PCH2)
16911 		return wm_set_eee_pchlan(sc);
16912 
16913 	return 0;
16914 }
16915 
16916 /*
16917  * Workarounds (mainly PHY related).
16918  * Basically, PHY's workarounds are in the PHY drivers.
16919  */
16920 
16921 /* Workaround for 82566 Kumeran PCS lock loss */
16922 static int
16923 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16924 {
16925 	struct mii_data *mii = &sc->sc_mii;
16926 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16927 	int i, reg, rv;
16928 	uint16_t phyreg;
16929 
16930 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16931 		device_xname(sc->sc_dev), __func__));
16932 
16933 	/* If the link is not up, do nothing */
16934 	if ((status & STATUS_LU) == 0)
16935 		return 0;
16936 
16937 	/* Nothing to do if the link is other than 1Gbps */
16938 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16939 		return 0;
16940 
16941 	for (i = 0; i < 10; i++) {
16942 		/* read twice */
16943 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16944 		if (rv != 0)
16945 			return rv;
16946 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16947 		if (rv != 0)
16948 			return rv;
16949 
16950 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16951 			goto out;	/* GOOD! */
16952 
16953 		/* Reset the PHY */
16954 		wm_reset_phy(sc);
16955 		delay(5*1000);
16956 	}
16957 
16958 	/* Disable GigE link negotiation */
16959 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
16960 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16961 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16962 
16963 	/*
16964 	 * Call gig speed drop workaround on Gig disable before accessing
16965 	 * any PHY registers.
16966 	 */
16967 	wm_gig_downshift_workaround_ich8lan(sc);
16968 
16969 out:
16970 	return 0;
16971 }
16972 
16973 /*
16974  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16975  *  @sc: pointer to the HW structure
16976  *
16977  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16978  *  LPLU, Gig disable, MDIC PHY reset):
16979  *    1) Set Kumeran Near-end loopback
16980  *    2) Clear Kumeran Near-end loopback
16981  *  Should only be called for ICH8[m] devices with any 1G Phy.
16982  */
16983 static void
16984 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16985 {
16986 	uint16_t kmreg;
16987 
16988 	/* Only for igp3 */
16989 	if (sc->sc_phytype == WMPHY_IGP_3) {
16990 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16991 			return;
16992 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16993 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16994 			return;
16995 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16996 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16997 	}
16998 }
16999 
17000 /*
17001  * Workaround for pch's PHYs
17002  * XXX should be moved to new PHY driver?
17003  */
17004 static int
17005 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
17006 {
17007 	device_t dev = sc->sc_dev;
17008 	struct mii_data *mii = &sc->sc_mii;
17009 	struct mii_softc *child;
17010 	uint16_t phy_data, phyrev = 0;
17011 	int phytype = sc->sc_phytype;
17012 	int rv;
17013 
17014 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17015 		device_xname(dev), __func__));
17016 	KASSERT(sc->sc_type == WM_T_PCH);
17017 
17018 	/* Set MDIO slow mode before any other MDIO access */
17019 	if (phytype == WMPHY_82577)
17020 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
17021 			return rv;
17022 
17023 	child = LIST_FIRST(&mii->mii_phys);
17024 	if (child != NULL)
17025 		phyrev = child->mii_mpd_rev;
17026 
17027 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
17028 	if ((child != NULL) &&
17029 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
17030 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
17031 		/* Disable generation of early preamble (0x4431) */
17032 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17033 		    &phy_data);
17034 		if (rv != 0)
17035 			return rv;
17036 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
17037 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
17038 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17039 		    phy_data);
17040 		if (rv != 0)
17041 			return rv;
17042 
17043 		/* Preamble tuning for SSC */
17044 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
17045 		if (rv != 0)
17046 			return rv;
17047 	}
17048 
17049 	/* 82578 */
17050 	if (phytype == WMPHY_82578) {
17051 		/*
17052 		 * Return registers to default by doing a soft reset then
17053 		 * writing 0x3140 to the control register
17054 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
17055 		 */
17056 		if ((child != NULL) && (phyrev < 2)) {
17057 			PHY_RESET(child);
17058 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
17059 			if (rv != 0)
17060 				return rv;
17061 		}
17062 	}
17063 
17064 	/* Select page 0 */
17065 	if ((rv = sc->phy.acquire(sc)) != 0)
17066 		return rv;
17067 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
17068 	sc->phy.release(sc);
17069 	if (rv != 0)
17070 		return rv;
17071 
17072 	/*
17073 	 * Configure the K1 Si workaround during phy reset assuming there is
17074 	 * link so that it disables K1 if link is in 1Gbps.
17075 	 */
17076 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
17077 		return rv;
17078 
17079 	/* Workaround for link disconnects on a busy hub in half duplex */
17080 	rv = sc->phy.acquire(sc);
17081 	if (rv)
17082 		return rv;
17083 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
17084 	if (rv)
17085 		goto release;
17086 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
17087 	    phy_data & 0x00ff);
17088 	if (rv)
17089 		goto release;
17090 
17091 	/* Set MSE higher to enable link to stay up when noise is high */
17092 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
17093 release:
17094 	sc->phy.release(sc);
17095 
17096 	return rv;
17097 }
17098 
17099 /*
17100  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
17101  *  @sc:   pointer to the HW structure
17102  */
17103 static void
17104 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
17105 {
17106 
17107 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17108 		device_xname(sc->sc_dev), __func__));
17109 
17110 	if (sc->phy.acquire(sc) != 0)
17111 		return;
17112 
17113 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17114 
17115 	sc->phy.release(sc);
17116 }
17117 
17118 static void
17119 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
17120 {
17121 	device_t dev = sc->sc_dev;
17122 	uint32_t mac_reg;
17123 	uint16_t i, wuce;
17124 	int count;
17125 
17126 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17127 		device_xname(dev), __func__));
17128 
17129 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
17130 		return;
17131 
17132 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
17133 	count = wm_rar_count(sc);
17134 	for (i = 0; i < count; i++) {
17135 		uint16_t lo, hi;
17136 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17137 		lo = (uint16_t)(mac_reg & 0xffff);
17138 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
17139 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
17140 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
17141 
17142 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17143 		lo = (uint16_t)(mac_reg & 0xffff);
17144 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
17145 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
17146 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
17147 	}
17148 
17149 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
17150 }
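
/*
 * Editorial worked example for the loop above (hypothetical address,
 * not from the original source): Intel MACs store the station address
 * LSB-first, so for 00:11:22:33:44:55, RAL(i) = 0x33221100 and the
 * low RAH(i) bits hold 0x5544.  The 16-bit words pushed to the PHY are
 * then BM_RAR_L = 0x1100, BM_RAR_M = 0x3322, BM_RAR_H = 0x5544, and
 * BM_RAR_CTRL receives the RAL_AV (address valid) bit from RAH.
 */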
17151 
17152 /*
17153  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
17154  *  with 82579 PHY
17155  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
17156  */
17157 static int
17158 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
17159 {
17160 	device_t dev = sc->sc_dev;
17161 	int rar_count;
17162 	int rv;
17163 	uint32_t mac_reg;
17164 	uint16_t dft_ctrl, data;
17165 	uint16_t i;
17166 
17167 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17168 		device_xname(dev), __func__));
17169 
17170 	if (sc->sc_type < WM_T_PCH2)
17171 		return 0;
17172 
17173 	/* Acquire PHY semaphore */
17174 	rv = sc->phy.acquire(sc);
17175 	if (rv != 0)
17176 		return rv;
17177 
17178 	/* Disable Rx path while enabling/disabling workaround */
17179 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
17180 	if (rv != 0)
17181 		goto out;
17182 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17183 	    dft_ctrl | (1 << 14));
17184 	if (rv != 0)
17185 		goto out;
17186 
17187 	if (enable) {
17188 		/* Write Rx addresses (rar_entry_count for RAL/H, and
17189 		 * SHRAL/H) and initial CRC values to the MAC
17190 		 */
17191 		rar_count = wm_rar_count(sc);
17192 		for (i = 0; i < rar_count; i++) {
17193 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
17194 			uint32_t addr_high, addr_low;
17195 
17196 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17197 			if (!(addr_high & RAL_AV))
17198 				continue;
17199 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17200 			mac_addr[0] = (addr_low & 0xFF);
17201 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
17202 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
17203 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
17204 			mac_addr[4] = (addr_high & 0xFF);
17205 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
17206 
17207 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
17208 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
17209 		}
17210 
17211 		/* Write Rx addresses to the PHY */
17212 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17213 	}
17214 
17215 	/*
17216 	 * If enable ==
17217 	 *	true: Enable jumbo frame workaround in the MAC.
17218 	 *	false: Write MAC register values back to h/w defaults.
17219 	 */
17220 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
17221 	if (enable) {
17222 		mac_reg &= ~(1 << 14);
17223 		mac_reg |= (7 << 15);
17224 	} else
17225 		mac_reg &= ~(0xf << 14);
17226 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
17227 
17228 	mac_reg = CSR_READ(sc, WMREG_RCTL);
17229 	if (enable) {
17230 		mac_reg |= RCTL_SECRC;
17231 		sc->sc_rctl |= RCTL_SECRC;
17232 		sc->sc_flags |= WM_F_CRC_STRIP;
17233 	} else {
17234 		mac_reg &= ~RCTL_SECRC;
17235 		sc->sc_rctl &= ~RCTL_SECRC;
17236 		sc->sc_flags &= ~WM_F_CRC_STRIP;
17237 	}
17238 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
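
	/*
	 * Editorial note: enabling the workaround forces CRC stripping in
	 * the MAC (RCTL_SECRC, mirrored in sc_rctl and WM_F_CRC_STRIP);
	 * that is why precomputed receive-address CRCs were written to
	 * PCH_RAICC above in the enable case.
	 */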
17239 
17240 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
17241 	if (rv != 0)
17242 		goto out;
17243 	if (enable)
17244 		data |= 1 << 0;
17245 	else
17246 		data &= ~(1 << 0);
17247 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
17248 	if (rv != 0)
17249 		goto out;
17250 
17251 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
17252 	if (rv != 0)
17253 		goto out;
17254 	/*
17255 	 * XXX FreeBSD and Linux do the same thing: they write the same value
17256 	 * in both the enable case and the disable case. Is that correct?
17257 	 */
17258 	data &= ~(0xf << 8);
17259 	data |= (0xb << 8);
17260 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
17261 	if (rv != 0)
17262 		goto out;
17263 
17264 	/*
17265 	 * If enable ==
17266 	 *	true: Enable jumbo frame workaround in the PHY.
17267 	 *	false: Write PHY register values back to h/w defaults.
17268 	 */
17269 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
17270 	if (rv != 0)
17271 		goto out;
17272 	data &= ~(0x7F << 5);
17273 	if (enable)
17274 		data |= (0x37 << 5);
17275 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
17276 	if (rv != 0)
17277 		goto out;
17278 
17279 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
17280 	if (rv != 0)
17281 		goto out;
17282 	if (enable)
17283 		data &= ~(1 << 13);
17284 	else
17285 		data |= (1 << 13);
17286 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
17287 	if (rv != 0)
17288 		goto out;
17289 
17290 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
17291 	if (rv != 0)
17292 		goto out;
17293 	data &= ~(0x3FF << 2);
17294 	if (enable)
17295 		data |= (I82579_TX_PTR_GAP << 2);
17296 	else
17297 		data |= (0x8 << 2);
17298 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
17299 	if (rv != 0)
17300 		goto out;
17301 
17302 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
17303 	    enable ? 0xf100 : 0x7e00);
17304 	if (rv != 0)
17305 		goto out;
17306 
17307 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
17308 	if (rv != 0)
17309 		goto out;
17310 	if (enable)
17311 		data |= 1 << 10;
17312 	else
17313 		data &= ~(1 << 10);
17314 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
17315 	if (rv != 0)
17316 		goto out;
17317 
17318 	/* Re-enable Rx path after enabling/disabling workaround */
17319 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17320 	    dft_ctrl & ~(1 << 14));
17321 
17322 out:
17323 	sc->phy.release(sc);
17324 
17325 	return rv;
17326 }
17327 
17328 /*
17329  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
17330  *  done after every PHY reset.
17331  */
17332 static int
17333 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
17334 {
17335 	device_t dev = sc->sc_dev;
17336 	int rv;
17337 
17338 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17339 		device_xname(dev), __func__));
17340 	KASSERT(sc->sc_type == WM_T_PCH2);
17341 
17342 	/* Set MDIO slow mode before any other MDIO access */
17343 	rv = wm_set_mdio_slow_mode_hv(sc);
17344 	if (rv != 0)
17345 		return rv;
17346 
17347 	rv = sc->phy.acquire(sc);
17348 	if (rv != 0)
17349 		return rv;
17350 	/* Set MSE higher to enable link to stay up when noise is high */
17351 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
17352 	if (rv != 0)
17353 		goto release;
17354 	/* Drop link after 5 times MSE threshold was reached */
17355 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
17356 release:
17357 	sc->phy.release(sc);
17358 
17359 	return rv;
17360 }
17361 
17362 /**
17363  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
17364  *  @link: link up bool flag
17365  *
17366  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
17367  *  preventing further DMA write requests.  Work around the issue by disabling
17368  *  the de-assertion of the clock request when in 1Gbps mode.
17369  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
17370  *  speeds in order to avoid Tx hangs.
17371  **/
17372 static int
17373 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
17374 {
17375 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
17376 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
17377 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
17378 	uint16_t phyreg;
17379 
17380 	if (link && (speed == STATUS_SPEED_1000)) {
17381 		int rv;
17382 
17383 		rv = sc->phy.acquire(sc);
17384 		if (rv != 0)
17385 			return rv;
17386 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17387 		    &phyreg);
17388 		if (rv != 0)
17389 			goto release;
17390 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17391 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
17392 		if (rv != 0)
17393 			goto release;
17394 		delay(20);
17395 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17396 
17397 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17398 		    &phyreg);
17399 release:
17400 		sc->phy.release(sc);
17401 		return rv;
17402 	}
17403 
17404 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17405 
17406 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17407 	if (((child != NULL) && (child->mii_mpd_rev > 5))
17408 	    || !link
17409 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17410 		goto update_fextnvm6;
17411 
17412 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17413 
17414 	/* Clear link status transmit timeout */
17415 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17416 	if (speed == STATUS_SPEED_100) {
17417 		/* Set inband Tx timeout to 5x10us for 100Half */
17418 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17419 
17420 		/* Do not extend the K1 entry latency for 100Half */
17421 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17422 	} else {
17423 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
17424 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17425 
17426 		/* Extend the K1 entry latency for 10 Mbps */
17427 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17428 	}
17429 
17430 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17431 
17432 update_fextnvm6:
17433 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17434 	return 0;
17435 }
17436 
17437 /*
17438  *  wm_k1_gig_workaround_hv - K1 Si workaround
17439  *  @sc:   pointer to the HW structure
17440  *  @link: link up bool flag
17441  *
17442  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17443  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
17444  *  If link is down, the function will restore the default K1 setting located
17445  *  in the NVM.
17446  */
17447 static int
17448 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17449 {
17450 	int k1_enable = sc->sc_nvm_k1_enabled;
17451 	int rv;
17452 
17453 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17454 		device_xname(sc->sc_dev), __func__));
17455 
17456 	rv = sc->phy.acquire(sc);
17457 	if (rv != 0)
17458 		return rv;
17459 
17460 	if (link) {
17461 		k1_enable = 0;
17462 
17463 		/* Link stall fix for link up */
17464 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17465 		    0x0100);
17466 	} else {
17467 		/* Link stall fix for link down */
17468 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17469 		    0x4100);
17470 	}
17471 
17472 	wm_configure_k1_ich8lan(sc, k1_enable);
17473 	sc->phy.release(sc);
17474 
17475 	return 0;
17476 }
17477 
17478 /*
17479  *  wm_k1_workaround_lv - K1 Si workaround
17480  *  @sc:   pointer to the HW structure
17481  *
17482  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
17483  *  Disable K1 for 1000 and 100 speeds
17484  */
17485 static int
17486 wm_k1_workaround_lv(struct wm_softc *sc)
17487 {
17488 	uint32_t reg;
17489 	uint16_t phyreg;
17490 	int rv;
17491 
17492 	if (sc->sc_type != WM_T_PCH2)
17493 		return 0;
17494 
17495 	/* Set K1 beacon duration based on 10Mbps speed */
17496 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17497 	if (rv != 0)
17498 		return rv;
17499 
17500 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17501 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17502 		if (phyreg &
17503 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17504 			/* LV 1G/100 packet drop issue workaround */
17505 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17506 			    &phyreg);
17507 			if (rv != 0)
17508 				return rv;
17509 			phyreg &= ~HV_PM_CTRL_K1_ENA;
17510 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17511 			    phyreg);
17512 			if (rv != 0)
17513 				return rv;
17514 		} else {
17515 			/* For 10Mbps */
17516 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
17517 			reg &= ~FEXTNVM4_BEACON_DURATION;
17518 			reg |= FEXTNVM4_BEACON_DURATION_16US;
17519 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17520 		}
17521 	}
17522 
17523 	return 0;
17524 }
17525 
17526 /*
17527  *  wm_link_stall_workaround_hv - Si workaround
17528  *  @sc: pointer to the HW structure
17529  *
17530  *  This function works around a Si bug where the link partner can get
17531  *  a link up indication before the PHY does. If small packets are sent
17532  *  by the link partner they can be placed in the packet buffer without
17533  *  being properly accounted for by the PHY and will stall, preventing
17534  *  further packets from being received.  The workaround is to clear the
17535  *  packet buffer after the PHY detects link up.
17536  */
17537 static int
17538 wm_link_stall_workaround_hv(struct wm_softc *sc)
17539 {
17540 	uint16_t phyreg;
17541 
17542 	if (sc->sc_phytype != WMPHY_82578)
17543 		return 0;
17544 
17545 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
17546 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17547 	if ((phyreg & BMCR_LOOP) != 0)
17548 		return 0;
17549 
17550 	/* Check if link is up and at 1Gbps */
17551 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17552 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17553 	    | BM_CS_STATUS_SPEED_MASK;
17554 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17555 		| BM_CS_STATUS_SPEED_1000))
17556 		return 0;
17557 
17558 	delay(200 * 1000);	/* XXX too big */
17559 
17560 	/* Flush the packets in the fifo buffer */
17561 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17562 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17563 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17564 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
17565 
17566 	return 0;
17567 }
17568 
17569 static int
17570 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17571 {
17572 	int rv;
17573 
17574 	rv = sc->phy.acquire(sc);
17575 	if (rv != 0) {
17576 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17577 		    __func__);
17578 		return rv;
17579 	}
17580 
17581 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
17582 
17583 	sc->phy.release(sc);
17584 
17585 	return rv;
17586 }
17587 
17588 static int
17589 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17590 {
17591 	int rv;
17592 	uint16_t reg;
17593 
17594 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17595 	if (rv != 0)
17596 		return rv;
17597 
17598 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17599 	    reg | HV_KMRN_MDIO_SLOW);
17600 }
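
/*
 * Editorial note: the _locked variant assumes the PHY semaphore
 * (sc->phy.acquire()) is already held by the caller, as in
 * wm_phy_is_accessible_pchlan() below; the unlocked wrapper above
 * takes and drops the semaphore itself.
 */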
17601 
17602 /*
17603  *  wm_configure_k1_ich8lan - Configure K1 power state
17604  *  @sc: pointer to the HW structure
17605  *  @enable: K1 state to configure
17606  *
17607  *  Configure the K1 power state based on the provided parameter.
17608  *  Assumes semaphore already acquired.
17609  */
17610 static void
17611 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17612 {
17613 	uint32_t ctrl, ctrl_ext, tmp;
17614 	uint16_t kmreg;
17615 	int rv;
17616 
17617 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17618 
17619 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17620 	if (rv != 0)
17621 		return;
17622 
17623 	if (k1_enable)
17624 		kmreg |= KUMCTRLSTA_K1_ENABLE;
17625 	else
17626 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17627 
17628 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17629 	if (rv != 0)
17630 		return;
17631 
17632 	delay(20);
17633 
17634 	ctrl = CSR_READ(sc, WMREG_CTRL);
17635 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17636 
17637 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17638 	tmp |= CTRL_FRCSPD;
17639 
17640 	CSR_WRITE(sc, WMREG_CTRL, tmp);
17641 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17642 	CSR_WRITE_FLUSH(sc);
17643 	delay(20);
17644 
17645 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
17646 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17647 	CSR_WRITE_FLUSH(sc);
17648 	delay(20);
17649 
17650 	return;
17651 }
17652 
17653 /* special case - for 82575 - need to do manual init ... */
17654 static void
17655 wm_reset_init_script_82575(struct wm_softc *sc)
17656 {
17657 	/*
17658 	 * Remark: this is untested code - we have no board without EEPROM.
17659 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
17660 	 */
17661 
17662 	/* SerDes configuration via SERDESCTRL */
17663 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17664 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17665 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17666 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17667 
17668 	/* CCM configuration via CCMCTL register */
17669 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17670 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17671 
17672 	/* PCIe lanes configuration */
17673 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17674 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17675 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17676 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17677 
17678 	/* PCIe PLL Configuration */
17679 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17680 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17681 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17682 }
17683 
17684 static void
17685 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17686 {
17687 	uint32_t reg;
17688 	uint16_t nvmword;
17689 	int rv;
17690 
17691 	if (sc->sc_type != WM_T_82580)
17692 		return;
17693 	if ((sc->sc_flags & WM_F_SGMII) == 0)
17694 		return;
17695 
17696 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17697 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17698 	if (rv != 0) {
17699 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17700 		    __func__);
17701 		return;
17702 	}
17703 
17704 	reg = CSR_READ(sc, WMREG_MDICNFG);
17705 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17706 		reg |= MDICNFG_DEST;
17707 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17708 		reg |= MDICNFG_COM_MDIO;
17709 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17710 }
17711 
17712 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
17713 
17714 static bool
17715 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17716 {
17717 	uint32_t reg;
17718 	uint16_t id1, id2;
17719 	int i, rv;
17720 
17721 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17722 		device_xname(sc->sc_dev), __func__));
17723 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17724 
17725 	id1 = id2 = 0xffff;
17726 	for (i = 0; i < 2; i++) {
17727 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17728 		    &id1);
17729 		if ((rv != 0) || MII_INVALIDID(id1))
17730 			continue;
17731 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17732 		    &id2);
17733 		if ((rv != 0) || MII_INVALIDID(id2))
17734 			continue;
17735 		break;
17736 	}
17737 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17738 		goto out;
17739 
17740 	/*
17741 	 * In case the PHY needs to be in mdio slow mode,
17742 	 * set slow mode and try to get the PHY id again.
17743 	 */
17744 	rv = 0;
17745 	if (sc->sc_type < WM_T_PCH_LPT) {
17746 		wm_set_mdio_slow_mode_hv_locked(sc);
17747 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17748 		    &id1);
17749 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17750 		    &id2);
17751 	}
17752 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17753 		device_printf(sc->sc_dev, "XXX return with false\n");
17754 		return false;
17755 	}
17756 out:
17757 	if (sc->sc_type >= WM_T_PCH_LPT) {
17758 		/* Only unforce SMBus if ME is not active */
17759 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17760 			uint16_t phyreg;
17761 
17762 			/* Unforce SMBus mode in PHY */
17763 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17764 			    CV_SMB_CTRL, &phyreg);
17765 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17766 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17767 			    CV_SMB_CTRL, phyreg);
17768 
17769 			/* Unforce SMBus mode in MAC */
17770 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
17771 			reg &= ~CTRL_EXT_FORCE_SMBUS;
17772 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17773 		}
17774 	}
17775 	return true;
17776 }
17777 
17778 static void
17779 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17780 {
17781 	uint32_t reg;
17782 	int i;
17783 
17784 	/* Set PHY Config Counter to 50msec */
17785 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
17786 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
17787 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
17788 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
17789 
17790 	/* Toggle LANPHYPC */
17791 	reg = CSR_READ(sc, WMREG_CTRL);
17792 	reg |= CTRL_LANPHYPC_OVERRIDE;
17793 	reg &= ~CTRL_LANPHYPC_VALUE;
17794 	CSR_WRITE(sc, WMREG_CTRL, reg);
17795 	CSR_WRITE_FLUSH(sc);
17796 	delay(1000);
17797 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
17798 	CSR_WRITE(sc, WMREG_CTRL, reg);
17799 	CSR_WRITE_FLUSH(sc);
17800 
17801 	if (sc->sc_type < WM_T_PCH_LPT)
17802 		delay(50 * 1000);
17803 	else {
17804 		i = 20;
17805 
17806 		do {
17807 			delay(5 * 1000);
17808 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
17809 		    && i--);
17810 
17811 		delay(30 * 1000);
17812 	}
17813 }
17814 
17815 static int
17816 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
17817 {
17818 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
17819 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
17820 	uint32_t rxa;
17821 	uint16_t scale = 0, lat_enc = 0;
17822 	int32_t obff_hwm = 0;
17823 	int64_t lat_ns, value;
17824 
17825 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17826 		device_xname(sc->sc_dev), __func__));
17827 
17828 	if (link) {
17829 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
17830 		uint32_t status;
17831 		uint16_t speed;
17832 		pcireg_t preg;
17833 
17834 		status = CSR_READ(sc, WMREG_STATUS);
17835 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
17836 		case STATUS_SPEED_10:
17837 			speed = 10;
17838 			break;
17839 		case STATUS_SPEED_100:
17840 			speed = 100;
17841 			break;
17842 		case STATUS_SPEED_1000:
17843 			speed = 1000;
17844 			break;
17845 		default:
17846 			device_printf(sc->sc_dev, "Unknown speed "
17847 			    "(status = %08x)\n", status);
17848 			return -1;
17849 		}
17850 
17851 		/* Rx Packet Buffer Allocation size (KB) */
17852 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
17853 
17854 		/*
17855 		 * Determine the maximum latency tolerated by the device.
17856 		 *
17857 		 * Per the PCIe spec, the tolerated latencies are encoded as
17858 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
17859 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
17860 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
17861 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
17862 		 */
17863 		lat_ns = ((int64_t)rxa * 1024 -
17864 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
17865 			+ ETHER_HDR_LEN))) * 8 * 1000;
17866 		if (lat_ns < 0)
17867 			lat_ns = 0;
17868 		else
17869 			lat_ns /= speed;
17870 		value = lat_ns;
17871 
17872 		while (value > LTRV_VALUE) {
17873 			scale++;
17874 			value = howmany(value, __BIT(5));
17875 		}
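
		/*
		 * Editorial worked example (assumed numbers, not from the
		 * original source): with rxa = 24KB, MTU = 1500 and a
		 * 1000Mbps link, lat_ns = (24 * 1024 - 2 * (1500 + 14))
		 * * 8 * 1000 / 1000 = 172384.  The loop above then divides
		 * by 2^5 twice: scale = 2, value = 169, so lat_enc encodes
		 * 169 * 2^10 ns, roughly 173us.
		 */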
17876 		if (scale > LTRV_SCALE_MAX) {
17877 			device_printf(sc->sc_dev,
17878 			    "Invalid LTR latency scale %d\n", scale);
17879 			return -1;
17880 		}
17881 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
17882 
17883 		/* Determine the maximum latency tolerated by the platform */
17884 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17885 		    WM_PCI_LTR_CAP_LPT);
17886 		max_snoop = preg & 0xffff;
17887 		max_nosnoop = preg >> 16;
17888 
17889 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
17890 
17891 		if (lat_enc > max_ltr_enc) {
17892 			lat_enc = max_ltr_enc;
17893 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
17894 			    * PCI_LTR_SCALETONS(
17895 				    __SHIFTOUT(lat_enc,
17896 					PCI_LTR_MAXSNOOPLAT_SCALE));
17897 		}
17898 
17899 		if (lat_ns) {
17900 			lat_ns *= speed * 1000;
17901 			lat_ns /= 8;
17902 			lat_ns /= 1000000000;
17903 			obff_hwm = (int32_t)(rxa - lat_ns);
17904 		}
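
		/*
		 * Editorial note: the conversion above turns the tolerated
		 * latency back into the amount of Rx buffer (in KB, the
		 * same unit as rxa) that arrives at line rate during that
		 * latency; obff_hwm is thus the buffer headroom at which
		 * OBFF may be entered.
		 */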
17905 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
17906 			device_printf(sc->sc_dev, "Invalid high water mark %d "
17907 			    "(rxa = %d, lat_ns = %d)\n",
17908 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
17909 			return -1;
17910 		}
17911 	}
17912 	/* Snoop and No-Snoop latencies the same */
17913 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
17914 	CSR_WRITE(sc, WMREG_LTRV, reg);
17915 
17916 	/* Set OBFF high water mark */
17917 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
17918 	reg |= obff_hwm;
17919 	CSR_WRITE(sc, WMREG_SVT, reg);
17920 
17921 	/* Enable OBFF */
17922 	reg = CSR_READ(sc, WMREG_SVCR);
17923 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
17924 	CSR_WRITE(sc, WMREG_SVCR, reg);
17925 
17926 	return 0;
17927 }
17928 
17929 /*
17930  * I210 Errata 25 and I211 Errata 10
17931  * Slow System Clock.
17932  *
17933  * Note that this function is called in both the FLASH and the iNVM case on
17933  * NetBSD.
17934  */
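/*
 * Editorial summary of the retry loop below: read the internal PHY's
 * PLL frequency register; if it reports the unconfigured value
 * (GS40G_PHY_PLL_UNCONF), reset the PHY with the PLL gated
 * (CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE), rewrite the autoload word with
 * INVM_PLL_WO_VAL set, bounce the function through D3hot and back so
 * the word is re-autoloaded, and try again, up to WM_MAX_PLL_TRIES.
 */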
17935 static int
17936 wm_pll_workaround_i210(struct wm_softc *sc)
17937 {
17938 	uint32_t mdicnfg, wuc;
17939 	uint32_t reg;
17940 	pcireg_t pcireg;
17941 	uint32_t pmreg;
17942 	uint16_t nvmword, tmp_nvmword;
17943 	uint16_t phyval;
17944 	bool wa_done = false;
17945 	int i, rv = 0;
17946 
17947 	/* Get Power Management cap offset */
17948 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
17949 	    &pmreg, NULL) == 0)
17950 		return -1;
17951 
17952 	/* Save WUC and MDICNFG registers */
17953 	wuc = CSR_READ(sc, WMREG_WUC);
17954 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
17955 
17956 	reg = mdicnfg & ~MDICNFG_DEST;
17957 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17958 
17959 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
17960 		/*
17961 		 * The default value of the Initialization Control Word 1
17962 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
17963 		 */
17964 		nvmword = INVM_DEFAULT_AL;
17965 	}
17966 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
17967 
17968 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
17969 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
17970 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
17971 
17972 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
17973 			rv = 0;
17974 			break; /* OK */
17975 		} else
17976 			rv = -1;
17977 
17978 		wa_done = true;
17979 		/* Directly reset the internal PHY */
17980 		reg = CSR_READ(sc, WMREG_CTRL);
17981 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
17982 
17983 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
17984 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
17985 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17986 
17987 		CSR_WRITE(sc, WMREG_WUC, 0);
17988 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
17989 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17990 
17991 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17992 		    pmreg + PCI_PMCSR);
17993 		pcireg |= PCI_PMCSR_STATE_D3;
17994 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17995 		    pmreg + PCI_PMCSR, pcireg);
17996 		delay(1000);
17997 		pcireg &= ~PCI_PMCSR_STATE_D3;
17998 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17999 		    pmreg + PCI_PMCSR, pcireg);
18000 
18001 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
18002 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18003 
18004 		/* Restore WUC register */
18005 		CSR_WRITE(sc, WMREG_WUC, wuc);
18006 	}
18007 
18008 	/* Restore MDICNFG setting */
18009 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
18010 	if (wa_done)
18011 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
18012 	return rv;
18013 }
18014 
18015 static void
18016 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
18017 {
18018 	uint32_t reg;
18019 
18020 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
18021 		device_xname(sc->sc_dev), __func__));
18022 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
18023 	    || (sc->sc_type == WM_T_PCH_CNP));
18024 
18025 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
18026 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
18027 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
18028 
18029 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
18030 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
18031 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
18032 }
18033 
18034 /* Sysctl functions */
18035 static int
18036 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
18037 {
18038 	struct sysctlnode node = *rnode;
18039 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18040 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18041 	struct wm_softc *sc = txq->txq_sc;
18042 	uint32_t reg;
18043 
18044 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
18045 	node.sysctl_data = &reg;
18046 	return sysctl_lookup(SYSCTLFN_CALL(&node));
18047 }
18048 
18049 static int
18050 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
18051 {
18052 	struct sysctlnode node = *rnode;
18053 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18054 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18055 	struct wm_softc *sc = txq->txq_sc;
18056 	uint32_t reg;
18057 
18058 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
18059 	node.sysctl_data = &reg;
18060 	return sysctl_lookup(SYSCTLFN_CALL(&node));
18061 }
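
/*
 * Editorial sketch (not part of the driver): handlers like the two
 * above are normally attached with sysctl_createv(9), passing the
 * queue as sysctl_data.  The node name and the parent node "rnode"
 * here are hypothetical.
 */
#if 0
	sysctl_createv(&sc->sc_sysctllog, 0, &rnode, NULL,
	    CTLFLAG_READONLY, CTLTYPE_INT, "tdh",
	    SYSCTL_DESCR("Transmit Descriptor Head"),
	    wm_sysctl_tdh_handler, 0, (void *)txq, 0,
	    CTL_CREATE, CTL_EOL);
#endif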
18062 
18063 #ifdef WM_DEBUG
18064 static int
18065 wm_sysctl_debug(SYSCTLFN_ARGS)
18066 {
18067 	struct sysctlnode node = *rnode;
18068 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
18069 	uint32_t dflags;
18070 	int error;
18071 
18072 	dflags = sc->sc_debug;
18073 	node.sysctl_data = &dflags;
18074 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
18075 
18076 	if (error || newp == NULL)
18077 		return error;
18078 
18079 	sc->sc_debug = dflags;
18080 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
18081 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
18082 
18083 	return 0;
18084 }
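
/*
 * Editorial usage note (the sysctl node name is hypothetical): under
 * WM_DEBUG the flags can be adjusted at runtime, e.g.
 *	sysctl -w hw.wm0.debug_flags=0x1
 * and every successful write also dumps TARC0 and TDT0 above.
 */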
18085 #endif
18086