/**************************************************************************

Copyright (c) 2001-2003, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_em.c,v 1.279 2014/03/10 04:09:53 jsg Exp $ */
/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */

#include <dev/pci/if_em.h>
#include <dev/pci/if_em_soc.h>

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define EM_DRIVER_VERSION	"6.2.9"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/
const struct pci_matchid em_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_1GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_2_5GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 }
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  em_probe(struct device *, void *, void *);
void em_attach(struct device *, struct device *, void *);
void em_defer_attach(struct device *);
int  em_detach(struct device *, int);
int  em_activate(struct device *, int);
int  em_intr(void *);
void em_start(struct ifnet *);
int  em_ioctl(struct ifnet *, u_long, caddr_t);
void em_watchdog(struct ifnet *);
void em_init(void *);
void em_stop(void *, int);
void em_media_status(struct ifnet *, struct ifmediareq *);
int  em_media_change(struct ifnet *);
int  em_flowstatus(struct em_softc *);
void em_identify_hardware(struct em_softc *);
int  em_allocate_pci_resources(struct em_softc *);
void em_free_pci_resources(struct em_softc *);
void em_local_timer(void *);
int  em_hardware_init(struct em_softc *);
void em_setup_interface(struct em_softc *);
int  em_setup_transmit_structures(struct em_softc *);
void em_initialize_transmit_unit(struct em_softc *);
int  em_setup_receive_structures(struct em_softc *);
void em_initialize_receive_unit(struct em_softc *);
void em_enable_intr(struct em_softc *);
void em_disable_intr(struct em_softc *);
void em_free_transmit_structures(struct em_softc *);
void em_free_receive_structures(struct em_softc *);
void em_update_stats_counters(struct em_softc *);
void em_txeof(struct em_softc *);
int  em_allocate_receive_structures(struct em_softc *);
int  em_allocate_transmit_structures(struct em_softc *);
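/*
 * On strict-alignment architectures the received payload must be
 * realigned so the IP header is 32-bit aligned; on other architectures
 * em_realign() compiles away to nothing (see the #define below).
 */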
#ifdef __STRICT_ALIGNMENT
void em_realign(struct em_softc *, struct mbuf *, u_int16_t *);
#else
#define em_realign(a, b, c) /* a, b, c */
#endif
int  em_rxfill(struct em_softc *);
void em_rxeof(struct em_softc *);
void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
			 struct mbuf *);
void em_transmit_checksum_setup(struct em_softc *, struct mbuf *,
				u_int32_t *, u_int32_t *);
void em_iff(struct em_softc *);
#ifdef EM_DEBUG
void em_print_hw_stats(struct em_softc *);
#endif
void em_update_link_status(struct em_softc *);
int  em_get_buf(struct em_softc *, int);
void em_enable_hw_vlans(struct em_softc *);
int  em_encap(struct em_softc *, struct mbuf *);
void em_smartspeed(struct em_softc *);
int  em_82547_fifo_workaround(struct em_softc *, int);
void em_82547_update_fifo_head(struct em_softc *, int);
int  em_82547_tx_fifo_reset(struct em_softc *);
void em_82547_move_tail(void *arg);
void em_82547_move_tail_locked(struct em_softc *);
int  em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *,
		   int);
void em_dma_free(struct em_softc *, struct em_dma_alloc *);
u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
			      PDESC_ARRAY desc_array);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach em_ca = {
	sizeof(struct em_softc), em_probe, em_attach, em_detach,
	em_activate
};

struct cfdriver em_cd = {
	NULL, "em", DV_IFNET
};

static int em_smart_pwr_down = FALSE;

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on an
 *  adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  returns 0 on no match, positive on match
 *********************************************************************/

int
em_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("em_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
	    nitems(em_devices)));
}

void
em_defer_attach(struct device *self)
{
	struct em_softc *sc = (struct em_softc *)self;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;
	void *gcu;

	if ((gcu = em_lookup_gcu(self)) == 0) {
		printf("%s: No GCU found, deferred attachment failed\n",
		    sc->sc_dv.dv_xname);

		if (sc->sc_intrhand)
			pci_intr_disestablish(pc, sc->sc_intrhand);
		sc->sc_intrhand = 0;

		em_stop(sc, 1);

		em_free_pci_resources(sc);
		em_dma_free(sc, &sc->rxdma);
		em_dma_free(sc, &sc->txdma);

		return;
	}

	sc->hw.gcu = gcu;

	em_attach_miibus(self);

	em_setup_interface(sc);

	em_update_link_status(sc);

	em_setup_link(&sc->hw);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
em_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct em_softc *sc;
	int tsize, rsize;
	int defer = 0;

	INIT_DEBUGOUT("em_attach: begin");

	sc = (struct em_softc *)self;
	sc->osdep.em_pa = *pa;

	timeout_set(&sc->timer_handle, em_local_timer, sc);
	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);

	/* Determine hardware revision */
	em_identify_hardware(sc);

	/*
	 * Only use MSI on the newer PCIe parts, with the exception
	 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
	 */
	if (sc->hw.mac_type <= em_82572)
		sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	/* Parameters (to be read from user) */
	if (sc->hw.mac_type >= em_82544) {
		sc->num_tx_desc = EM_MAX_TXD;
		sc->num_rx_desc = EM_MAX_RXD;
	} else {
		sc->num_tx_desc = EM_MAX_TXD_82543;
		sc->num_rx_desc = EM_MAX_RXD_82543;
	}
	sc->tx_int_delay = EM_TIDV;
	sc->tx_abs_int_delay = EM_TADV;
	sc->rx_int_delay = EM_RDTR;
	sc->rx_abs_int_delay = EM_RADV;
	sc->hw.autoneg = DO_AUTO_NEG;
	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	sc->hw.tbi_compatibility_en = TRUE;
	sc->rx_buffer_len = EM_RXBUFFER_2048;

	sc->hw.phy_init_script = 1;
	sc->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	sc->hw.master_slave = em_ms_hw_default;
#else
	sc->hw.master_slave = EM_MASTER_SLAVE;
#endif

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	sc->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(sc))
		goto err_pci;

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&sc->hw);

	/*
	 * Set the max frame size assuming standard Ethernet
	 * sized frames.
	 */
	switch (sc->hw.mac_type) {
		case em_82573:
		{
			uint16_t	eeprom_data = 0;

			/*
			 * 82573 only supports Jumbo frames
			 * if ASPM is disabled.
			 */
			em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
			    1, &eeprom_data);
			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
				sc->hw.max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames */
			/* FALLTHROUGH */
		}
		case em_82571:
		case em_82572:
		case em_82574:
		case em_82575:
		case em_82580:
		case em_i210:
		case em_i350:
		case em_ich9lan:
		case em_ich10lan:
		case em_80003es2lan:
			/* Limit Jumbo Frame size */
			sc->hw.max_frame_size = 9234;
			break;
		case em_pchlan:
			sc->hw.max_frame_size = 4096;
			break;
		case em_82542_rev2_0:
		case em_82542_rev2_1:
		case em_ich8lan:
			/* Adapters that do not support Jumbo frames */
			sc->hw.max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			sc->hw.max_frame_size =
			    MAX_JUMBO_FRAME_SIZE;
	}

	sc->hw.min_frame_size = ETHER_MIN_LEN + ETHER_CRC_LEN;

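	/*
	 * Ring sizing: each legacy descriptor is 16 bytes, so the
	 * EM_ROUNDUP() calls below always reserve room for the maximum
	 * ring and then pad the allocation out to whole pages.
	 */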
	if (sc->hw.mac_type >= em_82544)
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD * sizeof(struct em_tx_desc));
	else
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD_82543 * sizeof(struct em_tx_desc));
	tsize = EM_ROUNDUP(tsize, PAGE_SIZE);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;

	if (sc->hw.mac_type >= em_82544)
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD * sizeof(struct em_rx_desc));
	else
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD_82543 * sizeof(struct em_rx_desc));
	rsize = EM_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct em_rx_desc *)sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if ((defer = em_hardware_init(sc))) {
		if (defer == EAGAIN)
			config_defer(self, em_defer_attach);
		else {
			printf("%s: Unable to initialize the hardware\n",
			    sc->sc_dv.dv_xname);
			goto err_hw_init;
		}
	}

	if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
	    sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i210 ||
	    sc->hw.mac_type == em_i350) {
		uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
		sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
		    E1000_STATUS_FUNC_SHIFT;

		switch (sc->hw.bus_func) {
		case 0:
			sc->hw.swfw = E1000_SWFW_PHY0_SM;
			break;
		case 1:
			sc->hw.swfw = E1000_SWFW_PHY1_SM;
			break;
		case 2:
			sc->hw.swfw = E1000_SWFW_PHY2_SM;
			break;
		case 3:
			sc->hw.swfw = E1000_SWFW_PHY3_SM;
			break;
		}
	} else {
		sc->hw.bus_func = 0;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&sc->hw) < 0) {
		printf("%s: EEPROM read error while reading mac address\n",
		       sc->sc_dv.dv_xname);
		goto err_mac_addr;
	}

	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
	    ETHER_ADDR_LEN);

	/* Setup OS specific network interface */
	if (!defer)
		em_setup_interface(sc);

	/* Initialize statistics */
	em_clear_hw_cntrs(&sc->hw);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#endif
	sc->hw.get_link_status = 1;
	if (!defer)
		em_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&sc->hw))
		printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
		    sc->sc_dv.dv_xname);

	/* Identify 82544 on PCI-X */
	em_get_bus_info(&sc->hw);
	if (sc->hw.bus_type == em_bus_type_pcix &&
	    sc->hw.mac_type == em_82544)
		sc->pcix_82544 = TRUE;
	else
		sc->pcix_82544 = FALSE;

	sc->hw.icp_xxxx_is_link_up = FALSE;

	INIT_DEBUGOUT("em_attach: end");
	return;

err_mac_addr:
err_hw_init:
	em_dma_free(sc, &sc->rxdma);
err_rx_desc:
	em_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
em_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct em_softc *sc = ifp->if_softc;
	int		post = 0;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

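	/*
	 * Sync the descriptor ring for CPU access before walking it; the
	 * matching PREREAD|PREWRITE sync below hands it back to the
	 * hardware. The 82547 is skipped here because em_encap() does
	 * the syncs itself as part of the TX FIFO workaround path.
	 */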
	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (em_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = EM_TX_TIMEOUT;

		post = 1;
	}

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Advance the Transmit Descriptor Tail (Tdt),
		 * this tells the E1000 that this frame is
		 * available to transmit.
		 */
		if (post)
			E1000_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int		error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct ifaddr  *ifa = (struct ifaddr *)data;
	struct em_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		if (!(ifp->if_flags & IFF_UP)) {
			ifp->if_flags |= IFF_UP;
			em_init(sc);
		}
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				em_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				em_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (em_check_phy_reset_block(&sc->hw)) {
			printf("%s: Media change is blocked due to SOL/IDER session.\n",
			    sc->sc_dv.dv_xname);
			break;
		}
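		/* FALLTHROUGH */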
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(sc);
			em_iff(sc);
			if (sc->hw.mac_type == em_82542_rev2_0)
				em_initialize_receive_unit(sc);
			em_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
em_watchdog(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}
	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	em_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as the
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/

void
em_init(void *arg)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t	pba;
	int s;

	s = splnet();

	INIT_DEBUGOUT("em_init: begin");

	em_stop(sc, 0);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (sc->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		sc->tx_fifo_head = 0;
		sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571:
	case em_82572: /* Total Packet Buffer on these is 48k */
	case em_82575:
	case em_82580:
	case em_80003es2lan:
	case em_i350:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_i210:
		pba = E1000_PBA_34K;
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_82574: /* Total Packet Buffer is 40k */
		pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		break;
	case em_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case em_ich9lan:
	case em_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case em_pchlan:
	case em_pch2lan:
	case em_pch_lpt:
		pba = E1000_PBA_26K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	INIT_DEBUGOUT1("em_init: pba=%dK", pba);
	E1000_WRITE_REG(&sc->hw, PBA, pba);

	/* Get the latest MAC address; the user may have configured a LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}
	em_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_transmit_unit(sc);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_receive_unit(sc);

	/* Program promiscuous mode and multicast filters. */
	em_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->timer_handle, 1);
	em_clear_hw_cntrs(&sc->hw);
	em_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy_reset_disable = TRUE;

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/
int
em_intr(void *arg)
{
	struct em_softc	*sc = arg;
	struct ifnet	*ifp = &sc->interface_data.ac_if;
	u_int32_t	reg_icr, test_icr;
	int		refill = 0;

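	/*
	 * Reading ICR acknowledges and clears the pending interrupt
	 * causes. On 82571 and newer parts the INT_ASSERTED bit also
	 * tells us whether this device actually raised the interrupt,
	 * which matters on shared interrupt lines.
	 */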
	test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
	if (sc->hw.mac_type >= em_82571)
		test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
	if (!test_icr)
		return (0);

	if (ifp->if_flags & IFF_RUNNING) {
		em_rxeof(sc);
		em_txeof(sc);
		refill = 1;
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		timeout_del(&sc->timer_handle);
		sc->hw.get_link_status = 1;
		em_check_for_link(&sc->hw);
		em_update_link_status(sc);
		timeout_add_sec(&sc->timer_handle, 1);
	}

	if (reg_icr & E1000_ICR_RXO) {
		sc->rx_overruns++;
		refill = 1;
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		em_start(ifp);

	if (refill && em_rxfill(sc)) {
		/* Advance the Rx Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
	}

	return (1);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct em_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;
	u_int16_t gsr;

	INIT_DEBUGOUT("em_media_status: begin");

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes) {
		if (sc->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}

		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
			em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
			if (gsr & SR_1000T_MS_CONFIG_RES)
				ifmr->ifm_active |= IFM_ETH_MASTER;
		}
	}
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
em_media_change(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;
	struct ifmedia	*ifm = &sc->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_100_full;
		else
			sc->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_10_full;
		else
			sc->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	sc->hw.phy_reset_disable = FALSE;

	em_init(sc);

	return (0);
}

int
em_flowstatus(struct em_softc *sc)
{
	u_int16_t ar, lpar;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes)
		return (0);

	em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
	em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);

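	/*
	 * Resolve flow control per IEEE 802.3 Annex 28B: symmetric pause
	 * when both link partners advertise PAUSE, otherwise the ASM_DIR
	 * bits select asymmetric TX-only or RX-only pause.
	 */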
	if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
		return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
	else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_TXPAUSE);
	else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		!(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_RXPAUSE);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
em_encap(struct em_softc *sc, struct mbuf *m_head)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
	int		i, j, first, error = 0, last = 0;
	bus_dmamap_t	map;

	/* For 82544 Workaround */
	DESC_ARRAY		desc_array;
	u_int32_t		array_elements;
	u_int32_t		counter;

	struct em_buffer   *tx_buffer, *tx_buffer_mapped;
	struct em_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(sc);
		/* Do we now have at least the minimum? */
		if (sc->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * Map the packet for DMA.
	 *
	 * Capture the first descriptor index; this descriptor will
	 * have the index of the EOP, which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		goto loaderr;
	}
	EM_KASSERT(map->dm_nsegs != 0, ("em_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
		goto fail;

	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
	    sc->hw.mac_type != em_i350)
		em_transmit_checksum_setup(sc, m_head, &txd_upper, &txd_lower);
	else
		txd_upper = txd_lower = 0;

	i = sc->next_avail_tx_desc;
	if (sc->pcix_82544)
		txd_saved = i;

	for (j = 0; j < map->dm_nsegs; j++) {
		/* If the adapter is an 82544 on a PCI-X bus */
		if (sc->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
							     map->dm_segs[j].ds_len,
							     &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == sc->num_tx_desc_avail) {
					sc->next_avail_tx_desc = txd_saved;
					goto fail;
				}
				tx_buffer = &sc->tx_buffer_area[i];
				current_tx_desc = &sc->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(sc->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				last = i;
				if (++i == sc->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &sc->tx_buffer_area[i];
			current_tx_desc = &sc->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);
			last = i;
			if (++i == sc->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	sc->next_avail_tx_desc = i;
	if (sc->pcix_82544)
		sc->num_tx_desc_avail -= txd_used;
	else
		sc->num_tx_desc_avail -= map->dm_nsegs;

#if NVLAN > 0
	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->upper.fields.special =
			htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}
#endif

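	/*
	 * The mbuf and its loaded dmamap are parked on the last buffer
	 * of the packet, while the last buffer's spare map moves to the
	 * first, so em_txeof() can unload the map when the EOP
	 * descriptor completes.
	 */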
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	current_tx_desc->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (Tdt),
	 * this tells the E1000 that this frame is
	 * available to transmit.
	 */
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (sc->link_duplex == HALF_DUPLEX)
			em_82547_move_tail_locked(sc);
		else {
			E1000_WRITE_REG(&sc->hw, TDT, i);
			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
		}
	}

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	error = ENOBUFS;
loaderr:
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (error);
}

/*********************************************************************
 *
 * 82547 workaround to avoid a controller hang in a half-duplex
 * environment. The workaround is to avoid queuing a large packet that
 * would span the internal Tx FIFO ring boundary; in that case the FIFO
 * pointers must be reset, which we do only when the FIFO is quiescent.
 *
 **********************************************************************/
void
em_82547_move_tail_locked(struct em_softc *sc)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
	sw_tdt = sc->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &sc->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == sc->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(sc, length)) {
				sc->tx_fifo_wrk_cnt++;
				timeout_add(&sc->tx_fifo_timer_handle, 1);
				break;
			}
			E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(sc, length);
			length = 0;
		}
	}
}

void
em_82547_move_tail(void *arg)
{
	struct em_softc *sc = arg;
	int s;

	s = splnet();
	em_82547_move_tail_locked(sc);
	splx(s);
}

int
em_82547_fifo_workaround(struct em_softc *sc, int len)
{
	int fifo_space, fifo_pkt_len;

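	/*
	 * Account for the FIFO header and round the packet up to the
	 * FIFO's allocation granularity (EM_FIFO_HDR bytes).
	 */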
	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (sc->link_duplex == HALF_DUPLEX) {
		fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(sc))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}

void
em_82547_update_fifo_head(struct em_softc *sc, int len)
{
	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	sc->tx_fifo_head += fifo_pkt_len;
	if (sc->tx_fifo_head >= sc->tx_fifo_size)
		sc->tx_fifo_head -= sc->tx_fifo_size;
}

int
em_82547_tx_fifo_reset(struct em_softc *sc)
{
	uint32_t tctl;

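	/*
	 * Resetting is only safe when TX is fully quiescent: descriptor
	 * ring empty (TDT == TDH), FIFO head/tail pointers equal, and
	 * no packet data left in the FIFO (TDFPC == 0).
	 */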
	if ((E1000_READ_REG(&sc->hw, TDT) ==
	     E1000_READ_REG(&sc->hw, TDH)) &&
	    (E1000_READ_REG(&sc->hw, TDFT) ==
	     E1000_READ_REG(&sc->hw, TDFH)) &&
	    (E1000_READ_REG(&sc->hw, TDFTS) ==
	     E1000_READ_REG(&sc->hw, TDFHS)) &&
	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&sc->hw, TCTL);
		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&sc->hw);

		sc->tx_fifo_head = 0;
		sc->tx_fifo_reset_cnt++;

		return (TRUE);
	} else
		return (FALSE);
}

void
em_iff(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	u_int32_t reg_rctl = 0;
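	/* Flat array of 6-byte multicast addresses for the shared code */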
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ether_multi *enm;
	struct ether_multistep step;
	int i = 0;

	IOCTL_DEBUGOUT("em_iff: begin");

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&sc->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg_rctl |= E1000_RCTL_MPE;
		if (ifp->if_flags & IFF_PROMISC)
			reg_rctl |= E1000_RCTL_UPE;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
			i += ETH_LENGTH_OF_ADDRESS;

			ETHER_NEXT_MULTI(step, enm);
		}

		em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0, 1);
	}

	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&sc->hw);
	}
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
em_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct em_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#ifdef EM_DEBUG
	if (ifp->if_flags & IFF_DEBUG && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(sc);
#endif
#endif
	em_smartspeed(sc);

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
em_update_link_status(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
		if (sc->link_active == 0) {
			em_get_speed_and_duplex(&sc->hw,
						&sc->link_speed,
						&sc->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((sc->link_speed == SPEED_1000) &&
			    ((sc->hw.mac_type == em_82571) ||
			    (sc->hw.mac_type == em_82572) ||
			    (sc->hw.mac_type == em_82575) ||
			    (sc->hw.mac_type == em_82580))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
			}
			sc->link_active = 1;
			sc->smartspeed = 0;
			ifp->if_baudrate = IF_Mbps(sc->link_speed);
		}
		if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
			if (sc->link_duplex == FULL_DUPLEX)
				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			else
				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active == 1) {
			ifp->if_baudrate = sc->link_speed = 0;
			sc->link_duplex = 0;
			sc->link_active = 0;
		}
		if (ifp->if_link_state != LINK_STATE_DOWN) {
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
em_stop(void *arg, int softonly)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	INIT_DEBUGOUT("em_stop: begin");

	timeout_del(&sc->timer_handle);
	timeout_del(&sc->tx_fifo_timer_handle);

	if (!softonly) {
		em_disable_intr(sc);
		em_reset_hw(&sc->hw);
	}

	em_free_transmit_structures(sc);
	em_free_receive_structures(sc);
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
em_identify_hardware(struct em_softc *sc)
{
	u_int32_t reg;
	struct pci_attach_args *pa = &sc->osdep.em_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Identify the MAC */
	if (em_set_mac_type(&sc->hw))
		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);

	if (sc->hw.mac_type == em_pchlan)
		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;

	if (sc->hw.mac_type == em_82541 ||
	    sc->hw.mac_type == em_82541_rev_2 ||
	    sc->hw.mac_type == em_82547 ||
	    sc->hw.mac_type == em_82547_rev_2)
		sc->hw.phy_init_script = TRUE;
}

int
em_allocate_pci_resources(struct em_softc *sc)
{
	int		val, rid;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

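	/*
	 * Older adapters also expose an I/O BAR; map it so the shared
	 * code can fall back to I/O-mapped register access where needed.
	 */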
	switch (sc->hw.mac_type) {
	case em_82544:
	case em_82540:
	case em_82545:
	case em_82546:
	case em_82541:
	case em_82541_rev_2:
		/* Figure out where our I/O BAR is */
1629 		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
1630 			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
1631 			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
1632 				sc->io_rid = rid;
1633 				break;
1634 			}
1635 			rid += 4;
1636 			if (PCI_MAPREG_MEM_TYPE(val) ==
1637 			    PCI_MAPREG_MEM_TYPE_64BIT)
1638 				rid += 4;	/* skip high bits, too */
1639 		}
1640 
1641 		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
1642 		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
1643 		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
1644 			printf(": cannot find i/o space\n");
1645 			return (ENXIO);
1646 		}
1647 
1648 		sc->hw.io_base = 0;
1649 		break;
1650 	default:
1651 		break;
1652 	}
1653 
1654 	/* for ICH8 and family we need to find the flash memory */
1655 	if (IS_ICH8(sc->hw.mac_type)) {
1656 		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
1657 		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1658 			printf(": flash is not mem space\n");
1659 			return (ENXIO);
1660 		}
1661 
1662 		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
1663 		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
1664 		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
1665 			printf(": cannot find mem space\n");
1666 			return (ENXIO);
1667 		}
1668         }
1669 
1670 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
1671 		printf(": couldn't map interrupt\n");
1672 		return (ENXIO);
1673 	}
1674 
1675 	sc->osdep.dev = (struct device *)sc;
1676 	sc->hw.back = &sc->osdep;
1677 
1678 	intrstr = pci_intr_string(pc, ih);
1679 	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
1680 					      sc->sc_dv.dv_xname);
1681 	if (sc->sc_intrhand == NULL) {
1682 		printf(": couldn't establish interrupt");
1683 		if (intrstr != NULL)
1684 			printf(" at %s", intrstr);
1685 		printf("\n");
1686 		return (ENXIO);
1687 	}
1688 	printf(": %s", intrstr);
1689 
1690 	/*
1691 	 * The ICP_xxxx device has multiple, duplicate register sets for
1692 	 * use when it is being used as a network processor.  Disable those
1693 	 * registers here, as they are not necessary in this context and
1694 	 * can confuse the system.
1695 	 */
1696 	if (sc->hw.mac_type == em_icp_xxxx) {
1697 		int offset;
1698 		pcireg_t val;
1699 
1700 		if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
1701 		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
1702 			return (0);
1703 		}
1704 		offset += PCI_ST_SMIA_OFFSET;
1705 		pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
1706 		    offset, 0x06);
1707 		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
1708 		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
1709 	}
1710 	return (0);
1711 }
1712 
1713 void
1714 em_free_pci_resources(struct em_softc *sc)
1715 {
1716 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1717 	pci_chipset_tag_t	pc = pa->pa_pc;
1718 
1719 	if (sc->sc_intrhand)
1720 		pci_intr_disestablish(pc, sc->sc_intrhand);
1721 	sc->sc_intrhand = 0;
1722 
1723 	if (sc->osdep.em_flashbase)
1724 		bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
1725 				sc->osdep.em_flashsize);
1726 	sc->osdep.em_flashbase = 0;
1727 
1728 	if (sc->osdep.em_iobase)
1729 		bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
1730 				sc->osdep.em_iosize);
1731 	sc->osdep.em_iobase = 0;
1732 
1733 	if (sc->osdep.em_membase)
1734 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1735 				sc->osdep.em_memsize);
1736 	sc->osdep.em_membase = 0;
1737 }
1738 
1739 /*********************************************************************
1740  *
1741  *  Initialize the hardware to a configuration as specified by the
1742  *  em_softc structure. The controller is reset, the EEPROM is
1743  *  verified, the MAC address is set, then the shared initialization
1744  *  routines are called.
1745  *
1746  **********************************************************************/
1747 int
1748 em_hardware_init(struct em_softc *sc)
1749 {
1750 	uint32_t ret_val;
1751 	u_int16_t rx_buffer_size;
1752 
1753 	INIT_DEBUGOUT("em_hardware_init: begin");
1754 	/* Issue a global reset */
1755 	em_reset_hw(&sc->hw);
1756 
1757 	/* When hardware is reset, fifo_head is also reset */
1758 	sc->tx_fifo_head = 0;
1759 
1760 	/* Make sure we have a good EEPROM before we read from it */
1761 	if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1762 		/*
1763 		 * Some PCIe parts fail the first check because the
1764 		 * link may be in a sleep state; call it again and,
1765 		 * if it fails a second time, it's a real issue.
1766 		 */
1767 		if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1768 			printf("%s: The EEPROM Checksum Is Not Valid\n",
1769 			       sc->sc_dv.dv_xname);
1770 			return (EIO);
1771 		}
1772 	}
1773 
1774 	if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1775 		printf("%s: EEPROM read error while reading part number\n",
1776 		       sc->sc_dv.dv_xname);
1777 		return (EIO);
1778 	}
1779 
1780 	/* Set up smart power down as default off on newer adapters */
1781 	if (!em_smart_pwr_down &&
1782 	     (sc->hw.mac_type == em_82571 ||
1783 	      sc->hw.mac_type == em_82572 ||
1784 	      sc->hw.mac_type == em_82575 ||
1785 	      sc->hw.mac_type == em_82580 ||
1786 	      sc->hw.mac_type == em_i210 ||
1787 	      sc->hw.mac_type == em_i350 )) {
1788 		uint16_t phy_tmp = 0;
1789 
1790 		/* Speed up time to link by disabling smart power down */
1791 		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1792 		phy_tmp &= ~IGP02E1000_PM_SPD;
1793 		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1794 	}
1795 
1796 	/*
1797 	 * These parameters control the automatic generation (Tx) and
1798 	 * response (Rx) to Ethernet PAUSE frames.
1799 	 * - High water mark should allow for at least two frames to be
1800 	 *   received after sending an XOFF.
1801 	 * - Low water mark works best when it is very near the high water mark.
1802 	 *   This allows the receiver to restart by sending XON when it has
1803 	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
1804 	 *   restart after one full frame is pulled from the buffer.  There
1805 	 *   could be several smaller frames in the buffer and if so they will
1806 	 *   not trigger the XON until their total number reduces the buffer
1807 	 *   by 1500.
1808 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1809 	 */
1810 	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10 );
1811 
1812 	sc->hw.fc_high_water = rx_buffer_size -
1813 	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
1814 	sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
1815 	if (sc->hw.mac_type == em_80003es2lan)
1816 		sc->hw.fc_pause_time = 0xFFFF;
1817 	else
1818 		sc->hw.fc_pause_time = 1000;
1819 	sc->hw.fc_send_xon = TRUE;
1820 	sc->hw.fc = E1000_FC_FULL;
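
	/*
	 * Worked example with hypothetical values: a part with a 48KB
	 * receive packet buffer reports PBA = 0x0030, so rx_buffer_size =
	 * 0x30 << 10 = 49152 bytes.  With max_frame_size = 1518,
	 * EM_ROUNDUP(1518, 1024) = 2048, so fc_high_water = 49152 - 2048 =
	 * 47104 and fc_low_water = 47104 - 1500 = 45604.
	 */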
1821 
1822 	if ((ret_val = em_init_hw(&sc->hw)) != 0) {
1823 		if (ret_val == E1000_DEFER_INIT) {
1824 			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
1825 			return (EAGAIN);
1826 		}
1827 		printf("%s: Hardware Initialization Failed\n",
1828 		       sc->sc_dv.dv_xname);
1829 		return (EIO);
1830 	}
1831 
1832 	em_check_for_link(&sc->hw);
1833 
1834 	return (0);
1835 }
1836 
1837 /*********************************************************************
1838  *
1839  *  Setup networking device structure and register an interface.
1840  *
1841  **********************************************************************/
1842 void
1843 em_setup_interface(struct em_softc *sc)
1844 {
1845 	struct ifnet   *ifp;
1846 	u_char fiber_type = IFM_1000_SX;
1847 
1848 	INIT_DEBUGOUT("em_setup_interface: begin");
1849 
1850 	ifp = &sc->interface_data.ac_if;
1851 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1852 	ifp->if_softc = sc;
1853 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1854 	ifp->if_ioctl = em_ioctl;
1855 	ifp->if_start = em_start;
1856 	ifp->if_watchdog = em_watchdog;
1857 	ifp->if_hardmtu =
1858 		sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
1859 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1860 	IFQ_SET_READY(&ifp->if_snd);
1861 
1862 	m_clsetwms(ifp, MCLBYTES, 4, sc->num_rx_desc);
1863 
1864 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1865 
1866 #if NVLAN > 0
1867 	if (sc->hw.mac_type != em_82575 && sc->hw.mac_type != em_82580 &&
1868 	    sc->hw.mac_type != em_i210 && sc->hw.mac_type != em_i350)
1869 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1870 #endif
1871 
1872 	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
1873 	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
1874 	    sc->hw.mac_type != em_i350)
1875 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1876 
1877 	/*
1878 	 * Specify the media types supported by this adapter and register
1879 	 * callbacks to update media and link information
1880 	 */
1881 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1882 		     em_media_status);
1883 	if (sc->hw.media_type == em_media_type_fiber ||
1884 	    sc->hw.media_type == em_media_type_internal_serdes) {
1885 		if (sc->hw.mac_type == em_82545)
1886 			fiber_type = IFM_1000_LX;
1887 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
1888 			    0, NULL);
1889 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
1890 			    0, NULL);
1891 	} else {
1892 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1893 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1894 			    0, NULL);
1895 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1896 			    0, NULL);
1897 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1898 			    0, NULL);
1899 		if (sc->hw.phy_type != em_phy_ife) {
1900 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1901 				    0, NULL);
1902 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1903 		}
1904 	}
1905 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1906 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1907 
1908 	if_attach(ifp);
1909 	ether_ifattach(ifp);
1910 }
1911 
1912 int
1913 em_detach(struct device *self, int flags)
1914 {
1915 	struct em_softc *sc = (struct em_softc *)self;
1916 	struct ifnet *ifp = &sc->interface_data.ac_if;
1917 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1918 	pci_chipset_tag_t	pc = pa->pa_pc;
1919 
1920 	if (sc->sc_intrhand)
1921 		pci_intr_disestablish(pc, sc->sc_intrhand);
1922 	sc->sc_intrhand = 0;
1923 
1924 	em_stop(sc, 1);
1925 
1926 	em_free_pci_resources(sc);
1927 	em_dma_free(sc, &sc->rxdma);
1928 	em_dma_free(sc, &sc->txdma);
1929 
1930 	ether_ifdetach(ifp);
1931 	if_detach(ifp);
1932 
1933 	return (0);
1934 }
1935 
1936 int
1937 em_activate(struct device *self, int act)
1938 {
1939 	struct em_softc *sc = (struct em_softc *)self;
1940 	struct ifnet *ifp = &sc->interface_data.ac_if;
1941 	int rv = 0;
1942 
1943 	switch (act) {
1944 	case DVACT_SUSPEND:
1945 		if (ifp->if_flags & IFF_RUNNING)
1946 			em_stop(sc, 0);
1947 		/* We have no children at the moment, but we will soon */
1948 		rv = config_activate_children(self, act);
1949 		break;
1950 	case DVACT_RESUME:
1951 		if (ifp->if_flags & IFF_UP)
1952 			em_init(sc);
1953 		break;
1954 	default:
1955 		rv = config_activate_children(self, act);
1956 		break;
1957 	}
1958 	return (rv);
1959 }
1960 
1961 /*********************************************************************
1962  *
1963  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1964  *
1965  **********************************************************************/
1966 void
1967 em_smartspeed(struct em_softc *sc)
1968 {
1969 	uint16_t phy_tmp;
1970 
1971 	if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
1972 	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1973 		return;
1974 
1975 	if (sc->smartspeed == 0) {
1976 		/* If Master/Slave config fault is asserted twice,
1977 		 * we assume back-to-back */
1978 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1979 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1980 			return;
1981 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1982 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1983 			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
1984 					&phy_tmp);
1985 			if (phy_tmp & CR_1000T_MS_ENABLE) {
1986 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1987 				em_write_phy_reg(&sc->hw,
1988 						    PHY_1000T_CTRL, phy_tmp);
1989 				sc->smartspeed++;
1990 				if (sc->hw.autoneg &&
1991 				    !em_phy_setup_autoneg(&sc->hw) &&
1992 				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
1993 						       &phy_tmp)) {
1994 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
1995 						    MII_CR_RESTART_AUTO_NEG);
1996 					em_write_phy_reg(&sc->hw,
1997 							 PHY_CTRL, phy_tmp);
1998 				}
1999 			}
2000 		}
2001 		return;
2002 	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2003 		/* If still no link, perhaps using 2/3 pair cable */
2004 		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2005 		phy_tmp |= CR_1000T_MS_ENABLE;
2006 		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2007 		if (sc->hw.autoneg &&
2008 		    !em_phy_setup_autoneg(&sc->hw) &&
2009 		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
2010 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2011 				    MII_CR_RESTART_AUTO_NEG);
2012 			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
2013 		}
2014 	}
2015 	/* Restart process after EM_SMARTSPEED_MAX iterations */
2016 	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
2017 		sc->smartspeed = 0;
2018 }
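
/*
 * Timeline sketch of the counter above: at smartspeed == 0 a repeated
 * master/slave configuration fault triggers the downshift
 * (CR_1000T_MS_ENABLE is cleared and autonegotiation restarted); if
 * there is still no link by the time the counter reaches
 * EM_SMARTSPEED_DOWNSHIFT, manual master/slave is turned back on, and
 * after EM_SMARTSPEED_MAX calls the whole cycle starts over.
 */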
2019 
2020 /*
2021  * Manage DMA'able memory.
2022  */
2023 int
2024 em_dma_malloc(struct em_softc *sc, bus_size_t size,
2025     struct em_dma_alloc *dma, int mapflags)
2026 {
2027 	int r;
2028 
2029 	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
2030 	r = bus_dmamap_create(dma->dma_tag, size, 1,
2031 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
2032 	if (r != 0) {
2033 		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
2034 			"error %d\n", sc->sc_dv.dv_xname, r);
2035 		goto fail_0;
2036 	}
2037 
2038 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2039 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2040 	if (r != 0) {
2041 		printf("%s: em_dma_malloc: bus_dmamem_alloc failed; "
2042 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2043 			(unsigned long)size, r);
2044 		goto fail_1;
2045 	}
2046 
2047 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2048 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
2049 	if (r != 0) {
2050 		printf("%s: em_dma_malloc: bus_dmamem_map failed; "
2051 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2052 			(unsigned long)size, r);
2053 		goto fail_2;
2054 	}
2055 
2056 	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
2057 			    dma->dma_vaddr, size, NULL,
2058 			    mapflags | BUS_DMA_NOWAIT);
2059 	if (r != 0) {
2060 		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
2061 			"error %d\n", sc->sc_dv.dv_xname, r);
2062 		goto fail_3;
2063 	}
2064 
2065 	dma->dma_size = size;
2066 	return (0);
2067 
2068 fail_3:
2069 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2070 fail_2:
2071 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2072 fail_1:
2073 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2074 fail_0:
2075 	dma->dma_map = NULL;
2076 	dma->dma_tag = NULL;
2077 
2078 	return (r);
2079 }
2080 
2081 void
2082 em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
2083 {
2084 	if (dma->dma_tag == NULL)
2085 		return;
2086 
2087 	if (dma->dma_map != NULL) {
2088 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2089 		    dma->dma_map->dm_mapsize,
2090 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2091 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2092 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2093 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2094 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2095 	}
2096 	dma->dma_tag = NULL;
2097 }
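
/*
 * The two functions above follow the usual bus_dma(9) lifecycle:
 * bus_dmamap_create(), bus_dmamem_alloc(), bus_dmamem_map() and
 * bus_dmamap_load(), torn down in reverse order on failure or free.
 * A minimal caller sketch, with a hypothetical ring size (not
 * compiled in):
 */
#if 0
	struct em_dma_alloc ring;

	if (em_dma_malloc(sc, 4096, &ring, BUS_DMA_NOWAIT) == 0) {
		/* ring.dma_vaddr is the CPU's view of the memory,
		 * ring.dma_map->dm_segs[0].ds_addr the device's. */
		em_dma_free(sc, &ring);
	}
#endif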
2098 
2099 /*********************************************************************
2100  *
2101  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2102  *  the information needed to transmit a packet on the wire.
2103  *
2104  **********************************************************************/
2105 int
2106 em_allocate_transmit_structures(struct em_softc *sc)
2107 {
2108 	if (!(sc->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2109 	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2110 		printf("%s: Unable to allocate tx_buffer memory\n",
2111 		       sc->sc_dv.dv_xname);
2112 		return (ENOMEM);
2113 	}
2114 
2115 	return (0);
2116 }
2117 
2118 /*********************************************************************
2119  *
2120  *  Allocate and initialize transmit structures.
2121  *
2122  **********************************************************************/
2123 int
2124 em_setup_transmit_structures(struct em_softc *sc)
2125 {
2126 	struct  em_buffer *tx_buffer;
2127 	int error, i;
2128 
2129 	if ((error = em_allocate_transmit_structures(sc)) != 0)
2130 		goto fail;
2131 
2132 	bzero((void *) sc->tx_desc_base,
2133 	      (sizeof(struct em_tx_desc)) * sc->num_tx_desc);
2134 
2135 	sc->txtag = sc->osdep.em_pa.pa_dmat;
2136 
2137 	tx_buffer = sc->tx_buffer_area;
2138 	for (i = 0; i < sc->num_tx_desc; i++) {
2139 		error = bus_dmamap_create(sc->txtag, MAX_JUMBO_FRAME_SIZE,
2140 			    EM_MAX_SCATTER, MAX_JUMBO_FRAME_SIZE, 0,
2141 			    BUS_DMA_NOWAIT, &tx_buffer->map);
2142 		if (error != 0) {
2143 			printf("%s: Unable to create TX DMA map\n",
2144 			    sc->sc_dv.dv_xname);
2145 			goto fail;
2146 		}
2147 		tx_buffer++;
2148 	}
2149 
2150 	sc->next_avail_tx_desc = 0;
2151 	sc->next_tx_to_clean = 0;
2152 
2153 	/* Set number of descriptors available */
2154 	sc->num_tx_desc_avail = sc->num_tx_desc;
2155 
2156 	/* Set checksum context */
2157 	sc->active_checksum_context = OFFLOAD_NONE;
2158 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2159 	    sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2160 
2161 	return (0);
2162 
2163 fail:
2164 	em_free_transmit_structures(sc);
2165 	return (error);
2166 }
2167 
2168 /*********************************************************************
2169  *
2170  *  Enable transmit unit.
2171  *
2172  **********************************************************************/
2173 void
2174 em_initialize_transmit_unit(struct em_softc *sc)
2175 {
2176 	u_int32_t	reg_tctl, reg_tipg = 0;
2177 	u_int64_t	bus_addr;
2178 
2179 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2180 
2181 	/* Setup the Base and Length of the Tx Descriptor Ring */
2182 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
2183 	E1000_WRITE_REG(&sc->hw, TDLEN,
2184 			sc->num_tx_desc *
2185 			sizeof(struct em_tx_desc));
2186 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2187 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
2188 
2189 	/* Setup the HW Tx Head and Tail descriptor pointers */
2190 	E1000_WRITE_REG(&sc->hw, TDT, 0);
2191 	E1000_WRITE_REG(&sc->hw, TDH, 0);
2192 
2193 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2194 		     E1000_READ_REG(&sc->hw, TDBAL),
2195 		     E1000_READ_REG(&sc->hw, TDLEN));
2196 
2197 	/* Set the default values for the Tx Inter Packet Gap timer */
2198 	switch (sc->hw.mac_type) {
2199 	case em_82542_rev2_0:
2200 	case em_82542_rev2_1:
2201 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2202 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2203 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2204 		break;
2205 	case em_80003es2lan:
2206 		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2207 		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2208 		break;
2209 	default:
2210 		if (sc->hw.media_type == em_media_type_fiber ||
2211 		    sc->hw.media_type == em_media_type_internal_serdes)
2212 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2213 		else
2214 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2215 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2216 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2217 	}
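
	/*
	 * The three gap fields share one register: IPGT sits in the low
	 * bits with IPGR1 and IPGR2 packed above it at their shifts.
	 * E.g. with the usual copper defaults (assumed here) of IPGT = 8,
	 * IPGR1 = 8 and IPGR2 = 6, the register value works out to
	 * 8 | (8 << E1000_TIPG_IPGR1_SHIFT) | (6 << E1000_TIPG_IPGR2_SHIFT).
	 */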
2218 
2220 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2221 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2222 	if (sc->hw.mac_type >= em_82540)
2223 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2224 
2225 	/* Setup Transmit Descriptor Base Settings */
2226 	sc->txd_cmd = E1000_TXD_CMD_IFCS;
2227 
2228 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2229 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2230 		/* 82575/6 need to enable the TX queue and lack the IDE bit */
2231 		reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL);
2232 		reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
2233 		E1000_WRITE_REG(&sc->hw, TXDCTL, reg_tctl);
2234 	} else if (sc->tx_int_delay > 0)
2235 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2236 
2237 	/* Program the Transmit Control Register */
2238 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2239 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2240 	if (sc->hw.mac_type >= em_82571)
2241 		reg_tctl |= E1000_TCTL_MULR;
2242 	if (sc->link_duplex == FULL_DUPLEX)
2243 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2244 	else
2245 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2246 	/* This write will effectively turn on the transmit unit */
2247 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2248 }
2249 
2250 /*********************************************************************
2251  *
2252  *  Free all transmit related data structures.
2253  *
2254  **********************************************************************/
2255 void
2256 em_free_transmit_structures(struct em_softc *sc)
2257 {
2258 	struct em_buffer   *tx_buffer;
2259 	int		i;
2260 
2261 	INIT_DEBUGOUT("free_transmit_structures: begin");
2262 
2263 	if (sc->tx_buffer_area != NULL) {
2264 		tx_buffer = sc->tx_buffer_area;
2265 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2266 			if (tx_buffer->map != NULL &&
2267 			    tx_buffer->map->dm_nsegs > 0) {
2268 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
2269 				    0, tx_buffer->map->dm_mapsize,
2270 				    BUS_DMASYNC_POSTWRITE);
2271 				bus_dmamap_unload(sc->txtag,
2272 				    tx_buffer->map);
2273 			}
2274 			if (tx_buffer->m_head != NULL) {
2275 				m_freem(tx_buffer->m_head);
2276 				tx_buffer->m_head = NULL;
2277 			}
2278 			if (tx_buffer->map != NULL) {
2279 				bus_dmamap_destroy(sc->txtag,
2280 				    tx_buffer->map);
2281 				tx_buffer->map = NULL;
2282 			}
2283 		}
2284 	}
2285 	if (sc->tx_buffer_area != NULL) {
2286 		free(sc->tx_buffer_area, M_DEVBUF);
2287 		sc->tx_buffer_area = NULL;
2288 	}
2289 	sc->txtag = NULL;
2291 }
2292 
2293 /*********************************************************************
2294  *
2295  *  The offload context needs to be set when we transfer the first
2296  *  packet of a particular protocol (TCP/UDP). We change the
2297  *  context only if the protocol type changes.
2298  *
2299  **********************************************************************/
2300 void
2301 em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp,
2302     u_int32_t *txd_upper, u_int32_t *txd_lower)
2303 {
2304 	struct em_context_desc *TXD;
2305 	struct em_buffer *tx_buffer;
2306 	int curr_txd;
2307 
2308 	if (mp->m_pkthdr.csum_flags) {
2309 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
2310 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2311 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2312 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
2313 				return;
2314 			else
2315 				sc->active_checksum_context = OFFLOAD_TCP_IP;
2316 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
2317 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2318 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2319 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
2320 				return;
2321 			else
2322 				sc->active_checksum_context = OFFLOAD_UDP_IP;
2323 		} else {
2324 			*txd_upper = 0;
2325 			*txd_lower = 0;
2326 			return;
2327 		}
2328 	} else {
2329 		*txd_upper = 0;
2330 		*txd_lower = 0;
2331 		return;
2332 	}
2333 
2334 	/* If we reach this point, the checksum offload context
2335 	 * needs to be reset.
2336 	 */
2337 	curr_txd = sc->next_avail_tx_desc;
2338 	tx_buffer = &sc->tx_buffer_area[curr_txd];
2339 	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];
2340 
2341 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2342 	TXD->lower_setup.ip_fields.ipcso =
2343 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2344 	TXD->lower_setup.ip_fields.ipcse =
2345 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2346 
2347 	TXD->upper_setup.tcp_fields.tucss =
2348 	    ETHER_HDR_LEN + sizeof(struct ip);
2349 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2350 
2351 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
2352 		TXD->upper_setup.tcp_fields.tucso =
2353 		    ETHER_HDR_LEN + sizeof(struct ip) +
2354 		    offsetof(struct tcphdr, th_sum);
2355 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
2356 		TXD->upper_setup.tcp_fields.tucso =
2357 		    ETHER_HDR_LEN + sizeof(struct ip) +
2358 		    offsetof(struct udphdr, uh_sum);
2359 	}
2360 
2361 	TXD->tcp_seg_setup.data = htole32(0);
2362 	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);
2363 
2364 	tx_buffer->m_head = NULL;
2365 	tx_buffer->next_eop = -1;
2366 
2367 	if (++curr_txd == sc->num_tx_desc)
2368 		curr_txd = 0;
2369 
2370 	sc->num_tx_desc_avail--;
2371 	sc->next_avail_tx_desc = curr_txd;
2372 }
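
/*
 * Worked example for a plain IPv4/TCP frame: the IP header follows the
 * 14-byte Ethernet header, so ipcss = 14, ipcso = 14 +
 * offsetof(struct ip, ip_sum) = 24 and ipcse = 14 + 20 - 1 = 33; the
 * TCP checksum then sits at tucso = 14 + 20 +
 * offsetof(struct tcphdr, th_sum) = 50 (40 for UDP's uh_sum).
 */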
2373 
2374 /**********************************************************************
2375  *
2376  *  Examine each tx_buffer in the used queue. If the hardware is done
2377  *  processing the packet then free associated resources. The
2378  *  tx_buffer is put back on the free queue.
2379  *
2380  **********************************************************************/
2381 void
2382 em_txeof(struct em_softc *sc)
2383 {
2384 	int first, last, done, num_avail;
2385 	struct em_buffer *tx_buffer;
2386 	struct em_tx_desc   *tx_desc, *eop_desc;
2387 	struct ifnet   *ifp = &sc->interface_data.ac_if;
2388 
2389 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
2390 		return;
2391 
2392 	num_avail = sc->num_tx_desc_avail;
2393 	first = sc->next_tx_to_clean;
2394 	tx_desc = &sc->tx_desc_base[first];
2395 	tx_buffer = &sc->tx_buffer_area[first];
2396 	last = tx_buffer->next_eop;
2397 	eop_desc = &sc->tx_desc_base[last];
2398 
2399 	/*
2400 	 * Get the index of the first descriptor AFTER the EOP of
2401 	 * the first packet; that way the inner while loop can use
2402 	 * a simple comparison.
2404 	 */
2405 	if (++last == sc->num_tx_desc)
2406 		last = 0;
2407 	done = last;
2408 
2409 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2410 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2411 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2412 		/* We clean the range of the packet */
2413 		while (first != done) {
2414 			tx_desc->upper.data = 0;
2415 			tx_desc->lower.data = 0;
2416 			num_avail++;
2417 
2418 			if (tx_buffer->m_head != NULL) {
2419 				ifp->if_opackets++;
2420 				if (tx_buffer->map->dm_nsegs > 0) {
2421 					bus_dmamap_sync(sc->txtag,
2422 					    tx_buffer->map, 0,
2423 					    tx_buffer->map->dm_mapsize,
2424 					    BUS_DMASYNC_POSTWRITE);
2425 					bus_dmamap_unload(sc->txtag,
2426 					    tx_buffer->map);
2427 				}
2428 				m_freem(tx_buffer->m_head);
2429 				tx_buffer->m_head = NULL;
2430 			}
2431 			tx_buffer->next_eop = -1;
2432 
2433 			if (++first == sc->num_tx_desc)
2434 				first = 0;
2435 
2436 			tx_buffer = &sc->tx_buffer_area[first];
2437 			tx_desc = &sc->tx_desc_base[first];
2438 		}
2439 		/* See if we can continue to the next packet */
2440 		last = tx_buffer->next_eop;
2441 		if (last != -1) {
2442 			eop_desc = &sc->tx_desc_base[last];
2443 			/* Get new done point */
2444 			if (++last == sc->num_tx_desc)
2445 				last = 0;
2446 			done = last;
2447 		} else
2448 			break;
2449 	}
2450 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2451 	    sc->txdma.dma_map->dm_mapsize,
2452 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2453 
2454 	sc->next_tx_to_clean = first;
2455 
2456 	/*
2457 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2458 	 * that it is OK to send packets.
2459 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2460 	 * if some descriptors have been freed, restart the timeout.
2461 	 */
2462 	if (num_avail > EM_TX_CLEANUP_THRESHOLD)
2463 		ifp->if_flags &= ~IFF_OACTIVE;
2464 
2465 	/* All clean, turn off the timer */
2466 	if (num_avail == sc->num_tx_desc)
2467 		ifp->if_timer = 0;
2468 	/* Some cleaned, reset the timer */
2469 	else if (num_avail != sc->num_tx_desc_avail)
2470 		ifp->if_timer = EM_TX_TIMEOUT;
2471 
2472 	sc->num_tx_desc_avail = num_avail;
2473 }
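
/*
 * Ring-walk sketch with a hypothetical 8-slot ring: if a packet
 * occupies slots 2-4, `first' starts at 2 and next_eop points at 4,
 * so `done' becomes 5; once descriptor 4 reports E1000_TXD_STAT_DD,
 * slots 2, 3 and 4 are reclaimed before the next packet's EOP is
 * examined.
 */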
2474 
2475 /*********************************************************************
2476  *
2477  *  Get a buffer from system mbuf buffer pool.
2478  *
2479  **********************************************************************/
2480 int
2481 em_get_buf(struct em_softc *sc, int i)
2482 {
2483 	struct mbuf    *m;
2484 	struct em_buffer *pkt;
2485 	struct em_rx_desc *desc;
2486 	int error;
2487 
2488 	pkt = &sc->rx_buffer_area[i];
2489 	desc = &sc->rx_desc_base[i];
2490 
2491 	if (pkt->m_head != NULL) {
2492 		printf("%s: em_get_buf: slot %d already has an mbuf\n",
2493 		    sc->sc_dv.dv_xname, i);
2494 		return (ENOBUFS);
2495 	}
2496 
2497 	m = MCLGETI(NULL, M_DONTWAIT, &sc->interface_data.ac_if, MCLBYTES);
2498 	if (!m) {
2499 		sc->mbuf_cluster_failed++;
2500 		return (ENOBUFS);
2501 	}
2502 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2503 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2504 		m_adj(m, ETHER_ALIGN);
2505 
2506 	error = bus_dmamap_load_mbuf(sc->rxtag, pkt->map, m, BUS_DMA_NOWAIT);
2507 	if (error) {
2508 		m_freem(m);
2509 		return (error);
2510 	}
2511 
2512 	bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2513 	    BUS_DMASYNC_PREREAD);
2514 	pkt->m_head = m;
2515 
2516 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2517 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_POSTWRITE);
2518 
2519 	bzero(desc, sizeof(*desc));
2520 	desc->buffer_addr = htole64(pkt->map->dm_segs[0].ds_addr);
2521 
2522 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2523 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_PREWRITE);
2524 
2525 	sc->rx_ndescs++;
2526 
2527 	return (0);
2528 }
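
/*
 * The m_adj(m, ETHER_ALIGN) above trims 2 bytes from the front of the
 * cluster so the 14-byte Ethernet header ends on a 4-byte boundary and
 * the IP header behind it is 32-bit aligned (2 + 14 = 16).  It is
 * skipped for jumbo frames, where the cluster has no room to spare;
 * em_realign() below compensates for that on strict-alignment
 * architectures.
 */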
2529 
2530 /*********************************************************************
2531  *
2532  *  Allocate memory for rx_buffer structures. Since we use one
2533  *  rx_buffer per received packet, the maximum number of rx_buffer's
2534  *  rx_buffer per received packet, the maximum number of rx_buffers
2535  *  that we've allocated.
2536  *
2537  **********************************************************************/
2538 int
2539 em_allocate_receive_structures(struct em_softc *sc)
2540 {
2541 	int		i, error;
2542 	struct em_buffer *rx_buffer;
2543 
2544 	if (!(sc->rx_buffer_area = malloc(sizeof(struct em_buffer) *
2545 	    sc->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2546 		printf("%s: Unable to allocate rx_buffer memory\n",
2547 		       sc->sc_dv.dv_xname);
2548 		return (ENOMEM);
2549 	}
2550 
2551 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2552 
2553 	rx_buffer = sc->rx_buffer_area;
2554 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2555 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2556 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rx_buffer->map);
2557 		if (error != 0) {
2558 			printf("%s: em_allocate_receive_structures: "
2559 			    "bus_dmamap_create failed; error %u\n",
2560 			    sc->sc_dv.dv_xname, error);
2561 			goto fail;
2562 		}
2563 		rx_buffer->m_head = NULL;
2564 	}
2565 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
2566 	    sc->rxdma.dma_map->dm_mapsize,
2567 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2568 
2569 	return (0);
2570 
2571 fail:
2572 	em_free_receive_structures(sc);
2573 	return (error);
2574 }
2575 
2576 /*********************************************************************
2577  *
2578  *  Allocate and initialize receive structures.
2579  *
2580  **********************************************************************/
2581 int
2582 em_setup_receive_structures(struct em_softc *sc)
2583 {
2584 	bzero((void *) sc->rx_desc_base,
2585 	    (sizeof(struct em_rx_desc)) * sc->num_rx_desc);
2586 
2587 	if (em_allocate_receive_structures(sc))
2588 		return (ENOMEM);
2589 
2590 	/* Setup our descriptor pointers */
2591 	sc->next_rx_desc_to_check = 0;
2592 	sc->last_rx_desc_filled = sc->num_rx_desc - 1;
2593 	sc->rx_ndescs = 0;
2594 
2595 	em_rxfill(sc);
2596 	if (sc->rx_ndescs < 1) {
2597 		printf("%s: unable to fill any rx descriptors\n",
2598 		    sc->sc_dv.dv_xname);
2599 	}
2600 
2601 	return (0);
2602 }
2603 
2604 /*********************************************************************
2605  *
2606  *  Enable receive unit.
2607  *
2608  **********************************************************************/
2609 void
2610 em_initialize_receive_unit(struct em_softc *sc)
2611 {
2612 	u_int32_t	reg_rctl;
2613 	u_int32_t	reg_rxcsum;
2614 	u_int64_t	bus_addr;
2615 
2616 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2617 
2618 	/* Make sure receives are disabled while setting up the descriptor ring */
2619 	E1000_WRITE_REG(&sc->hw, RCTL, 0);
2620 
2621 	/* Set the Receive Delay Timer Register */
2622 	E1000_WRITE_REG(&sc->hw, RDTR,
2623 			sc->rx_int_delay | E1000_RDT_FPDB);
2624 
2625 	if (sc->hw.mac_type >= em_82540) {
2626 		if (sc->rx_int_delay)
2627 			E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
2628 
2629 		/* Set the interrupt throttling rate.  Value is calculated
2630 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2631 		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
2632 	}
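
	/*
	 * Worked example: assuming MAX_INTS_PER_SEC is 8000, DEFAULT_ITR
	 * = 1 / (8000 * 256ns) = 488 intervals of 256ns, i.e. at most
	 * one interrupt every 125us.
	 */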
2633 
2634 	/* Setup the Base and Length of the Rx Descriptor Ring */
2635 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
2636 	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
2637 			sizeof(struct em_rx_desc));
2638 	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2639 	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
2640 
2641 	/* Setup the Receive Control Register */
2642 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2643 	    E1000_RCTL_RDMTS_HALF |
2644 	    (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2645 
2646 	if (sc->hw.tbi_compatibility_on == TRUE)
2647 		reg_rctl |= E1000_RCTL_SBP;
2648 
2649 	/*
2650 	 * The i350 has a bug where it always strips the CRC whether
2651 	 * asked to or not.  So ask for stripped CRC here and
2652 	 * cope with it in em_rxeof().
2653 	 */
2654 	if (sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350)
2655 		reg_rctl |= E1000_RCTL_SECRC;
2656 
2657 	switch (sc->rx_buffer_len) {
2658 	default:
2659 	case EM_RXBUFFER_2048:
2660 		reg_rctl |= E1000_RCTL_SZ_2048;
2661 		break;
2662 	case EM_RXBUFFER_4096:
2663 		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2664 		break;
2665 	case EM_RXBUFFER_8192:
2666 		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2667 		break;
2668 	case EM_RXBUFFER_16384:
2669 		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2670 		break;
2671 	}
2672 
2673 	if (sc->hw.max_frame_size != ETHER_MAX_LEN)
2674 		reg_rctl |= E1000_RCTL_LPE;
2675 
2676 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2677 	if (sc->hw.mac_type >= em_82543) {
2678 		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
2679 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2680 		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
2681 	}
2682 
2683 	/*
2684 	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
2685 	 * long latencies are observed, like Lenovo X60.
2686 	 */
2687 	if (sc->hw.mac_type == em_82573)
2688 		E1000_WRITE_REG(&sc->hw, RDTR, 0x20);
2689 
2690 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2691 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2692 		/* 82575/6 need to enable the RX queue */
2693 		uint32_t reg;
2694 		reg = E1000_READ_REG(&sc->hw, RXDCTL);
2695 		reg |= E1000_RXDCTL_QUEUE_ENABLE;
2696 		E1000_WRITE_REG(&sc->hw, RXDCTL, reg);
2697 	}
2698 
2699 	/* Enable Receives */
2700 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
2701 
2702 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2703 	E1000_WRITE_REG(&sc->hw, RDH, 0);
2704 	E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
2705 }
2706 
2707 /*********************************************************************
2708  *
2709  *  Free receive related data structures.
2710  *
2711  **********************************************************************/
2712 void
2713 em_free_receive_structures(struct em_softc *sc)
2714 {
2715 	struct em_buffer   *rx_buffer;
2716 	int		i;
2717 
2718 	INIT_DEBUGOUT("free_receive_structures: begin");
2719 
2720 	if (sc->rx_buffer_area != NULL) {
2721 		rx_buffer = sc->rx_buffer_area;
2722 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2723 			if (rx_buffer->m_head != NULL) {
2724 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
2725 				    0, rx_buffer->map->dm_mapsize,
2726 				    BUS_DMASYNC_POSTREAD);
2727 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2728 				m_freem(rx_buffer->m_head);
2729 				rx_buffer->m_head = NULL;
2730 			}
2731 			bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2732 		}
2733 	}
2734 	if (sc->rx_buffer_area != NULL) {
2735 		free(sc->rx_buffer_area, M_DEVBUF);
2736 		sc->rx_buffer_area = NULL;
2737 	}
2738 	sc->rxtag = NULL;
2740 
2741 	if (sc->fmp != NULL) {
2742 		m_freem(sc->fmp);
2743 		sc->fmp = NULL;
2744 		sc->lmp = NULL;
2745 	}
2746 }
2747 
2748 #ifdef __STRICT_ALIGNMENT
2749 void
2750 em_realign(struct em_softc *sc, struct mbuf *m, u_int16_t *prev_len_adj)
2751 {
2752 	unsigned char tmp_align_buf[ETHER_ALIGN];
2753 	int tmp_align_buf_len = 0;
2754 
2755 	/*
2756 	 * The Ethernet payload is not 32-bit aligned when
2757 	 * Jumbo packets are enabled, so on architectures with
2758 	 * strict alignment we need to shift the entire packet
2759 	 * ETHER_ALIGN bytes. Ugh.
2760 	 */
2761 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2762 		return;
2763 
2764 	if (*prev_len_adj > sc->align_buf_len)
2765 		*prev_len_adj -= sc->align_buf_len;
2766 	else
2767 		*prev_len_adj = 0;
2768 
2769 	if (m->m_len > (MCLBYTES - ETHER_ALIGN)) {
2770 		bcopy(m->m_data + (MCLBYTES - ETHER_ALIGN),
2771 		    &tmp_align_buf, ETHER_ALIGN);
2772 		tmp_align_buf_len = m->m_len -
2773 		    (MCLBYTES - ETHER_ALIGN);
2774 		m->m_len -= ETHER_ALIGN;
2775 	}
2776 
2777 	if (m->m_len) {
2778 		bcopy(m->m_data, m->m_data + ETHER_ALIGN, m->m_len);
2779 		if (!sc->align_buf_len)
2780 			m->m_data += ETHER_ALIGN;
2781 	}
2782 
2783 	if (sc->align_buf_len) {
2784 		m->m_len += sc->align_buf_len;
2785 		bcopy(&sc->align_buf, m->m_data, sc->align_buf_len);
2786 	}
2787 
2788 	if (tmp_align_buf_len)
2789 		bcopy(&tmp_align_buf, &sc->align_buf, tmp_align_buf_len);
2790 
2791 	sc->align_buf_len = tmp_align_buf_len;
2792 }
2793 #endif /* __STRICT_ALIGNMENT */
2794 
2795 int
2796 em_rxfill(struct em_softc *sc)
2797 {
2798 	int post = 0;
2799 	int i;
2800 
2801 	i = sc->last_rx_desc_filled;
2802 
2803 	while (sc->rx_ndescs < sc->num_rx_desc) {
2804 		if (++i == sc->num_rx_desc)
2805 			i = 0;
2806 
2807 		if (em_get_buf(sc, i) != 0)
2808 			break;
2809 
2810 		sc->last_rx_desc_filled = i;
2811 		post = 1;
2812 	}
2813 
2814 	return (post);
2815 }
2816 
2817 /*********************************************************************
2818  *
2819  *  This routine executes in interrupt context. It replenishes
2820  *  the mbufs in the descriptor ring and passes data which has
2821  *  been DMA'ed into host memory up to the upper layer.
2822  *
2823  *********************************************************************/
2824 void
2825 em_rxeof(struct em_softc *sc)
2826 {
2827 	struct ifnet	    *ifp = &sc->interface_data.ac_if;
2828 	struct mbuf	    *m;
2829 	u_int8_t	    accept_frame = 0;
2830 	u_int8_t	    eop = 0;
2831 	u_int16_t	    len, desc_len, prev_len_adj;
2832 	int		    i;
2833 
2834 	/* Pointer to the receive descriptor being examined. */
2835 	struct em_rx_desc   *desc;
2836 	struct em_buffer    *pkt;
2837 	u_int8_t	    status;
2838 
2841 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2842 		return;
2843 
2844 	i = sc->next_rx_desc_to_check;
2845 
2846 	while (sc->rx_ndescs > 0) {
2847 		m = NULL;
2848 
2849 		desc = &sc->rx_desc_base[i];
2850 		pkt = &sc->rx_buffer_area[i];
2851 
2852 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2853 		    sizeof(*desc) * i, sizeof(*desc),
2854 		    BUS_DMASYNC_POSTREAD);
2855 
2856 		status = desc->status;
2857 		if (!ISSET(status, E1000_RXD_STAT_DD)) {
2858 			bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2859 			    sizeof(*desc) * i, sizeof(*desc),
2860 			    BUS_DMASYNC_PREREAD);
2861 			break;
2862 		}
2863 
2864 		/* pull the mbuf off the ring */
2865 		bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2866 		    BUS_DMASYNC_POSTREAD);
2867 		bus_dmamap_unload(sc->rxtag, pkt->map);
2868 		m = pkt->m_head;
2869 		pkt->m_head = NULL;
2870 
2871 		if (m == NULL) {
2872 			panic("em_rxeof: NULL mbuf in slot %d "
2873 			    "(nrx %d, filled %d)", i, sc->rx_ndescs,
2874 			    sc->last_rx_desc_filled);
2875 		}
2876 
2877 		m_cluncount(m, 1);
2878 		sc->rx_ndescs--;
2879 
2880 		accept_frame = 1;
2881 		prev_len_adj = 0;
2882 		desc_len = letoh16(desc->length);
2883 
2884 		if (status & E1000_RXD_STAT_EOP) {
2885 			eop = 1;
2886 			if (desc_len < ETHER_CRC_LEN) {
2887 				len = 0;
2888 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2889 			} else if (sc->hw.mac_type == em_i210 ||
2890 			    sc->hw.mac_type == em_i350)
2891 				len = desc_len;
2892 			else
2893 				len = desc_len - ETHER_CRC_LEN;
2894 		} else {
2895 			eop = 0;
2896 			len = desc_len;
2897 		}
2898 
2899 		if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2900 			u_int8_t last_byte;
2901 			u_int32_t pkt_len = desc_len;
2902 
2903 			if (sc->fmp != NULL)
2904 				pkt_len += sc->fmp->m_pkthdr.len;
2905 
2906 			last_byte = *(mtod(m, caddr_t) + desc_len - 1);
2907 			if (TBI_ACCEPT(&sc->hw, status, desc->errors,
2908 			    pkt_len, last_byte)) {
2909 #ifndef SMALL_KERNEL
2910 				em_tbi_adjust_stats(&sc->hw, &sc->stats,
2911 				    pkt_len, sc->hw.mac_addr);
2912 #endif
2913 				if (len > 0)
2914 					len--;
2915 			} else
2916 				accept_frame = 0;
2917 		}
2918 
2919 		if (accept_frame) {
2920 			/* Assign correct length to the current fragment */
2921 			m->m_len = len;
2922 
2923 			em_realign(sc, m, &prev_len_adj); /* STRICT_ALIGN */
2924 
2925 			if (sc->fmp == NULL) {
2926 				m->m_pkthdr.len = m->m_len;
2927 				sc->fmp = m;	 /* Store the first mbuf */
2928 				sc->lmp = m;
2929 			} else {
2930 				/* Chain mbuf's together */
2931 				m->m_flags &= ~M_PKTHDR;
2932 				/*
2933 				 * Adjust length of previous mbuf in chain if
2934 				 * we received less than 4 bytes in the last
2935 				 * descriptor.
2936 				 */
2937 				if (prev_len_adj > 0) {
2938 					sc->lmp->m_len -= prev_len_adj;
2939 					sc->fmp->m_pkthdr.len -= prev_len_adj;
2940 				}
2941 				sc->lmp->m_next = m;
2942 				sc->lmp = m;
2943 				sc->fmp->m_pkthdr.len += m->m_len;
2944 			}
2945 
2946 			if (eop) {
2947 				ifp->if_ipackets++;
2948 
2949 				m = sc->fmp;
2950 				m->m_pkthdr.rcvif = ifp;
2951 
2952 				em_receive_checksum(sc, desc, m);
2953 #if NVLAN > 0
2954 				if (desc->status & E1000_RXD_STAT_VP) {
2955 					m->m_pkthdr.ether_vtag =
2956 					    letoh16(desc->special);
2957 					m->m_flags |= M_VLANTAG;
2958 				}
2959 #endif
2960 #if NBPFILTER > 0
2961 				if (ifp->if_bpf) {
2962 					bpf_mtap_ether(ifp->if_bpf, m,
2963 					    BPF_DIRECTION_IN);
2964 				}
2965 #endif
2966 
2967 				ether_input_mbuf(ifp, m);
2968 
2969 				sc->fmp = NULL;
2970 				sc->lmp = NULL;
2971 			}
2972 		} else {
2973 			sc->dropped_pkts++;
2974 
2975 			if (sc->fmp != NULL) {
2976 				m_freem(sc->fmp);
2977 				sc->fmp = NULL;
2978 				sc->lmp = NULL;
2979 			}
2980 
2981 			m_freem(m);
2982 		}
2983 
2984 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2985 		    sizeof(*desc) * i, sizeof(*desc),
2986 		    BUS_DMASYNC_PREREAD);
2987 
2988 		/* Advance our pointers to the next descriptor. */
2989 		if (++i == sc->num_rx_desc)
2990 			i = 0;
2991 	}
2992 	sc->next_rx_desc_to_check = i;
2993 }
2994 
2995 /*********************************************************************
2996  *
2997  *  Verify that the hardware indicated that the checksum is valid.
2998  *  Inform the stack about the status of checksum so that stack
2999  *  Inform the stack about the status of the checksum so that the
3000  *  stack doesn't spend time verifying it again.
3001  *********************************************************************/
3002 void
3003 em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
3004     struct mbuf *mp)
3005 {
3006 	/* 82543 or newer only */
3007 	if ((sc->hw.mac_type < em_82543) ||
3008 	    /* Ignore Checksum bit is set */
3009 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3010 		mp->m_pkthdr.csum_flags = 0;
3011 		return;
3012 	}
3013 
3014 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3015 		/* Did it pass? */
3016 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3017 			/* IP Checksum Good */
3018 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3019 
3020 		} else
3021 			mp->m_pkthdr.csum_flags = 0;
3022 	}
3023 
3024 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3025 		/* Did it pass? */
3026 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
3027 			mp->m_pkthdr.csum_flags |=
3028 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3029 	}
3030 }
3031 
3032 /*
3033  * This turns on the hardware offload of VLAN
3034  * tag insertion and stripping.
3035  */
3036 void
3037 em_enable_hw_vlans(struct em_softc *sc)
3038 {
3039 	uint32_t ctrl;
3040 
3041 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
3042 	ctrl |= E1000_CTRL_VME;
3043 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
3044 }
3045 
3046 void
3047 em_enable_intr(struct em_softc *sc)
3048 {
3049 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
3050 }
3051 
3052 void
3053 em_disable_intr(struct em_softc *sc)
3054 {
3055 	/*
3056 	 * The first version of 82542 had an erratum where, when link
3057 	 * was forced, it would stay up even if the cable was disconnected.
3058 	 * Sequence errors were used to detect the disconnect and then
3059 	 * the driver would unforce the link.  This code is in the ISR.
3060 	 * For this to work correctly the Sequence error interrupt had
3061 	 * to be enabled all the time.
3062 	 */
3063 
3064 	if (sc->hw.mac_type == em_82542_rev2_0)
3065 		E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
3066 	else
3067 		E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
3068 }
3069 
3070 void
3071 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3072 {
3073 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3074 	pcireg_t val;
3075 
3076 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3077 	if (reg & 0x2) {
3078 		val &= 0x0000ffff;
3079 		val |= (*value << 16);
3080 	} else {
3081 		val &= 0xffff0000;
3082 		val |= *value;
3083 	}
3084 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3085 }
3086 
3087 void
3088 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3089 {
3090 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3091 	pcireg_t val;
3092 
3093 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3094 	if (reg & 0x2)
3095 		*value = (val >> 16) & 0xffff;
3096 	else
3097 		*value = val & 0xffff;
3098 }
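
/*
 * Worked example for the two helpers above: a 16-bit access at
 * reg = 0x06 becomes a 32-bit access at 0x04 (reg & ~0x3), and since
 * reg & 0x2 is set the upper half of that dword is used; reg = 0x04
 * would use the lower half.  A hypothetical read (not compiled in):
 */
#if 0
	uint16_t status;

	em_read_pci_cfg(&sc->hw, 0x06, &status);
#endif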
3099 
3100 void
3101 em_pci_set_mwi(struct em_hw *hw)
3102 {
3103 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3104 
3105 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3106 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3107 }
3108 
3109 void
3110 em_pci_clear_mwi(struct em_hw *hw)
3111 {
3112 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3113 
3114 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3115 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3116 }
3117 
3118 /*
3119  * We may eventually really do this, but it's unnecessary
3120  * for now, so we just return unsupported.
3121  */
3122 int32_t
3123 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3124 {
3125 	return -E1000_NOT_IMPLEMENTED;
3126 }
3127 
3128 /*********************************************************************
3129 * 82544 Coexistence issue workaround.
3130 *    There are two issues:
3131 *       1. Transmit hang.
3132 *          To detect it, the following equation can be used:
3133 *             SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3134 *          If SUM[3:0] is between 1 and 4, we will have this issue.
3135 *
3136 *       2. DAC (dual address cycle).
3137 *          Detected with the same equation:
3138 *             SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3139 *          If SUM[3:0] is between 9 and 0xC, we will have this issue.
3140 *
3141 *    WORKAROUND:
3142 *          Make sure the buffer never ends at 1, 2, 3 or 4 (hang),
3143 *          nor at 9, 0xA, 0xB or 0xC (DAC).
3144 *
3145 *********************************************************************/
3146 u_int32_t
3147 em_fill_descriptors(u_int64_t address, u_int32_t length,
3148     PDESC_ARRAY desc_array)
3149 {
3150 	u_int32_t safe_terminator;
3151 	/* The issue is sensitive to both length and address, so check
3152 	 * the length first: short buffers are always safe. */
3153 	if (length <= 4) {
3154 		desc_array->descriptor[0].address = address;
3155 		desc_array->descriptor[0].length = length;
3156 		desc_array->elements = 1;
3157 		return (desc_array->elements);
3158 	}
3159 	safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) +
3160 	    (length & 0xF)) & 0xF);
3161 	/* If the terminator does not fall between 0x1 and 0x4 or between
3162 	 * 0x9 and 0xC the buffer is safe as a single descriptor;
3163 	 * otherwise the last 4 bytes are split off into their own one. */
3164 	if (safe_terminator == 0 ||
3165 	    (safe_terminator > 4 && safe_terminator < 9) ||
3166 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
3167 		desc_array->descriptor[0].address = address;
3168 		desc_array->descriptor[0].length = length;
3169 		desc_array->elements = 1;
3170 		return (desc_array->elements);
3171 	}
3172 	desc_array->descriptor[0].address = address;
3173 	desc_array->descriptor[0].length = length - 4;
3174 	desc_array->descriptor[1].address = address + (length - 4);
3175 	desc_array->descriptor[1].length = 4;
3176 	desc_array->elements = 2;
3177 	return (desc_array->elements);
3178 }
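
/*
 * Worked example with a hypothetical buffer: address ending in 0x6,
 * length 14.  safe_terminator = ((0x6 + (14 & 0xf)) & 0xf) = 0x4,
 * inside the 0x1-0x4 hang range, so the buffer goes out as a 10-byte
 * descriptor plus a separate 4-byte descriptor (short descriptors are
 * safe, as the length <= 4 early return shows).
 */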
3179 
3180 #ifndef SMALL_KERNEL
3181 /**********************************************************************
3182  *
3183  *  Update the board statistics counters.
3184  *
3185  **********************************************************************/
3186 void
3187 em_update_stats_counters(struct em_softc *sc)
3188 {
3189 	struct ifnet   *ifp;
3190 
3191 	if (sc->hw.media_type == em_media_type_copper ||
3192 	    (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
3193 		sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
3194 		sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
3195 	}
3196 	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
3197 	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
3198 	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
3199 	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);
3200 
3201 	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
3202 	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
3203 	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
3204 	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
3205 	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
3206 	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
3207 	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
3208 	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
3209 	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
3210 	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
3211 	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
3212 	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
3213 	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
3214 	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
3215 	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
3216 	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
3217 	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
3218 	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
3219 	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
3220 	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);
3221 
3222 	/* For the 64-bit byte counters the low dword must be read first;
3223 	 * both halves clear on the read of the high dword. */
3224 
3225 	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
3226 	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
3227 	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
3228 	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);
3229 
3230 	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
3231 	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
3232 	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
3233 	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
3234 	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);
3235 
3236 	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
3237 	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
3238 	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
3239 	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);
3240 
3241 	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
3242 	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
3243 	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
3244 	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
3245 	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
3246 	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
3247 	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
3248 	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
3249 	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
3250 	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);
3251 
3252 	if (sc->hw.mac_type >= em_82543) {
3253 		sc->stats.algnerrc += E1000_READ_REG(&sc->hw, ALGNERRC);
3254 		sc->stats.rxerrc += E1000_READ_REG(&sc->hw, RXERRC);
3255 		sc->stats.tncrs += E1000_READ_REG(&sc->hw, TNCRS);
3256 		sc->stats.cexterr += E1000_READ_REG(&sc->hw, CEXTERR);
3257 		sc->stats.tsctc += E1000_READ_REG(&sc->hw, TSCTC);
3258 		sc->stats.tsctfc += E1000_READ_REG(&sc->hw, TSCTFC);
3265 	}
3266 	ifp = &sc->interface_data.ac_if;
3267 
3268 	/* Fill out the OS statistics structure */
3269 	ifp->if_collisions = sc->stats.colc;
3270 
3271 	/* Rx Errors */
3272 	ifp->if_ierrors =
3273 	    sc->dropped_pkts +
3274 	    sc->stats.rxerrc +
3275 	    sc->stats.crcerrs +
3276 	    sc->stats.algnerrc +
3277 	    sc->stats.ruc + sc->stats.roc +
3278 	    sc->stats.mpc + sc->stats.cexterr +
3279 	    sc->rx_overruns;
3280 
3281 	/* Tx Errors */
3282 	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
3283 	    sc->watchdog_events;
3284 }
3285 
3286 #ifdef EM_DEBUG
3287 /**********************************************************************
3288  *
3289  *  This routine is called only when IFF_DEBUG is enabled.
3290  *  This routine provides a way to take a look at important statistics
3291  *  maintained by the driver and hardware.
3292  *
3293  **********************************************************************/
3294 void
3295 em_print_hw_stats(struct em_softc *sc)
3296 {
3297 	const char * const unit = sc->sc_dv.dv_xname;
3298 
3299 	printf("%s: Excessive collisions = %lld\n", unit,
3300 		(long long)sc->stats.ecol);
3301 	printf("%s: Symbol errors = %lld\n", unit,
3302 		(long long)sc->stats.symerrs);
3303 	printf("%s: Sequence errors = %lld\n", unit,
3304 		(long long)sc->stats.sec);
3305 	printf("%s: Defer count = %lld\n", unit,
3306 		(long long)sc->stats.dc);
3307 
3308 	printf("%s: Missed Packets = %lld\n", unit,
3309 		(long long)sc->stats.mpc);
3310 	printf("%s: Receive No Buffers = %lld\n", unit,
3311 		(long long)sc->stats.rnbc);
3312 	/* RLEC is inaccurate on some hardware, calculate our own */
3313 	/* RLEC is inaccurate on some hardware, so calculate our own */
3314 		((long long)sc->stats.roc +
3315 		(long long)sc->stats.ruc));
3316 	printf("%s: Receive errors = %lld\n", unit,
3317 		(long long)sc->stats.rxerrc);
3318 	printf("%s: Crc errors = %lld\n", unit,
3319 		(long long)sc->stats.crcerrs);
3320 	printf("%s: Alignment errors = %lld\n", unit,
3321 		(long long)sc->stats.algnerrc);
3322 	printf("%s: Carrier extension errors = %lld\n", unit,
3323 		(long long)sc->stats.cexterr);
3324 
3325 	printf("%s: RX overruns = %ld\n", unit,
3326 		sc->rx_overruns);
3327 	printf("%s: watchdog timeouts = %ld\n", unit,
3328 		sc->watchdog_events);
3329 
3330 	printf("%s: XON Rcvd = %lld\n", unit,
3331 		(long long)sc->stats.xonrxc);
3332 	printf("%s: XON Xmtd = %lld\n", unit,
3333 		(long long)sc->stats.xontxc);
3334 	printf("%s: XOFF Rcvd = %lld\n", unit,
3335 		(long long)sc->stats.xoffrxc);
3336 	printf("%s: XOFF Xmtd = %lld\n", unit,
3337 		(long long)sc->stats.xofftxc);
3338 
3339 	printf("%s: Good Packets Rcvd = %lld\n", unit,
3340 		(long long)sc->stats.gprc);
3341 	printf("%s: Good Packets Xmtd = %lld\n", unit,
3342 		(long long)sc->stats.gptc);
3343 }
3344 #endif
3345 #endif /* !SMALL_KERNEL */
3346