xref: /openbsd/sys/dev/pci/if_em.c (revision cecf84d4)
1 /**************************************************************************
2 
3 Copyright (c) 2001-2003, Intel Corporation
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15 
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19 
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31 
32 ***************************************************************************/
33 
34 /* $OpenBSD: if_em.c,v 1.297 2015/05/12 20:20:18 kettenis Exp $ */
35 /* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */
36 
37 #include <dev/pci/if_em.h>
38 #include <dev/pci/if_em_soc.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 
44 #define EM_DRIVER_VERSION	"6.2.9"
45 
46 /*********************************************************************
47  *  PCI Device ID Table
48  *********************************************************************/
49 const struct pci_matchid em_devices[] = {
50 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
51 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
88 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
89 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
90 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
91 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
92 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
93 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
94 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
95 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
96 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
97 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
98 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
99 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
100 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
101 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
102 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
103 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
104 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
105 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
106 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
107 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
108 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
109 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
110 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
111 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
112 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
113 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
114 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
115 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
116 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
117 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
118 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
119 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
120 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
121 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
122 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
123 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
124 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
125 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
126 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
127 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
128 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
129 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
130 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
131 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
132 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
133 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER },
134 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER },
135 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES },
136 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII },
137 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_NF },
138 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_NF },
139 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER },
140 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM },
141 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V },
142 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM },
143 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_2 },
144 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_3 },
145 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V },
146 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_2 },
147 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_3 },
148 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
149 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
150 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
151 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
152 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
153 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER },
154 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII },
155 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES },
156 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE },
157 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP },
158 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
159 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
160 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
161 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
162 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
163 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_1GBPS },
164 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_2_5GBPS },
165 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_SGMII },
166 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
167 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
168 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
169 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
170 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
171 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
172 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
173 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
174 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
175 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
176 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
177 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
178 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
179 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
180 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
181 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
182 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
183 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
184 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
185 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
186 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
187 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
188 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
189 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
190 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 }
191 };
192 
193 /*********************************************************************
194  *  Function prototypes
195  *********************************************************************/
196 int  em_probe(struct device *, void *, void *);
197 void em_attach(struct device *, struct device *, void *);
198 void em_defer_attach(struct device *);
199 int  em_detach(struct device *, int);
200 int  em_activate(struct device *, int);
201 int  em_intr(void *);
202 void em_start(struct ifnet *);
203 int  em_ioctl(struct ifnet *, u_long, caddr_t);
204 void em_watchdog(struct ifnet *);
205 void em_init(void *);
206 void em_stop(void *, int);
207 void em_media_status(struct ifnet *, struct ifmediareq *);
208 int  em_media_change(struct ifnet *);
209 int  em_flowstatus(struct em_softc *);
210 void em_identify_hardware(struct em_softc *);
211 int  em_allocate_pci_resources(struct em_softc *);
212 void em_free_pci_resources(struct em_softc *);
213 void em_local_timer(void *);
214 int  em_hardware_init(struct em_softc *);
215 void em_setup_interface(struct em_softc *);
216 int  em_setup_transmit_structures(struct em_softc *);
217 void em_initialize_transmit_unit(struct em_softc *);
218 int  em_setup_receive_structures(struct em_softc *);
219 void em_initialize_receive_unit(struct em_softc *);
220 void em_enable_intr(struct em_softc *);
221 void em_disable_intr(struct em_softc *);
222 void em_free_transmit_structures(struct em_softc *);
223 void em_free_receive_structures(struct em_softc *);
224 void em_update_stats_counters(struct em_softc *);
225 void em_disable_aspm(struct em_softc *);
226 void em_txeof(struct em_softc *);
227 int  em_allocate_receive_structures(struct em_softc *);
228 int  em_allocate_transmit_structures(struct em_softc *);
229 #ifdef __STRICT_ALIGNMENT
230 void em_realign(struct em_softc *, struct mbuf *, u_int16_t *);
231 #else
232 #define em_realign(a, b, c) /* a, b, c */
233 #endif
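/*
 * Note: on strict-alignment architectures the payload of a received
 * frame must be shifted so that the IP header lands on a 4-byte
 * boundary (the 14-byte Ethernet header leaves it 2 bytes short of
 * one).  A minimal sketch of the idea, assuming the usual
 * ETHER_ALIGN (2) convention; the real em_realign() performs the
 * equivalent shift on the driver's receive mbufs:
 *
 *	memmove(buf + ETHER_ALIGN, buf, len);
 */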
234 int  em_rxfill(struct em_softc *);
235 void em_rxeof(struct em_softc *);
236 void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
237 			 struct mbuf *);
238 void em_transmit_checksum_setup(struct em_softc *, struct mbuf *,
239 				u_int32_t *, u_int32_t *);
240 void em_iff(struct em_softc *);
241 #ifdef EM_DEBUG
242 void em_print_hw_stats(struct em_softc *);
243 #endif
244 void em_update_link_status(struct em_softc *);
245 int  em_get_buf(struct em_softc *, int);
246 void em_enable_hw_vlans(struct em_softc *);
247 int  em_encap(struct em_softc *, struct mbuf *);
248 void em_smartspeed(struct em_softc *);
249 int  em_82547_fifo_workaround(struct em_softc *, int);
250 void em_82547_update_fifo_head(struct em_softc *, int);
251 int  em_82547_tx_fifo_reset(struct em_softc *);
252 void em_82547_move_tail(void *arg);
253 void em_82547_move_tail_locked(struct em_softc *);
254 int  em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *,
255 		   int);
256 void em_dma_free(struct em_softc *, struct em_dma_alloc *);
257 u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
258 			      PDESC_ARRAY desc_array);
259 
260 /*********************************************************************
261  *  OpenBSD Device Interface Entry Points
262  *********************************************************************/
263 
264 struct cfattach em_ca = {
265 	sizeof(struct em_softc), em_probe, em_attach, em_detach,
266 	em_activate
267 };
268 
269 struct cfdriver em_cd = {
270 	NULL, "em", DV_IFNET
271 };
272 
273 static int em_smart_pwr_down = FALSE;
274 
275 /*********************************************************************
276  *  Device identification routine
277  *
278  *  em_probe determines if the driver should be loaded on an
279  *  adapter, based on the PCI vendor/device ID of the adapter.
280  *
281  *  return 0 on no match, positive on match
282  *********************************************************************/
283 
284 int
285 em_probe(struct device *parent, void *match, void *aux)
286 {
287 	INIT_DEBUGOUT("em_probe: begin");
288 
289 	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
290 	    nitems(em_devices)));
291 }
292 
293 void
294 em_defer_attach(struct device *self)
295 {
296 	struct em_softc *sc = (struct em_softc *)self;
297 	struct pci_attach_args *pa = &sc->osdep.em_pa;
298 	pci_chipset_tag_t	pc = pa->pa_pc;
299 	void *gcu;
300 
301 	if ((gcu = em_lookup_gcu(self)) == 0) {
302 		printf("%s: No GCU found, defered attachment failed\n",
303 		    sc->sc_dv.dv_xname);
304 
305 		if (sc->sc_intrhand)
306 			pci_intr_disestablish(pc, sc->sc_intrhand);
307 		sc->sc_intrhand = 0;
308 
309 		em_stop(sc, 1);
310 
311 		em_free_pci_resources(sc);
312 		em_dma_free(sc, &sc->rxdma);
313 		em_dma_free(sc, &sc->txdma);
314 
315 		return;
316 	}
317 
318 	sc->hw.gcu = gcu;
319 
320 	em_attach_miibus(self);
321 
322 	em_setup_interface(sc);
323 
324 	em_update_link_status(sc);
325 
326 	em_setup_link(&sc->hw);
327 }
328 
329 /*********************************************************************
330  *  Device initialization routine
331  *
332  *  The attach entry point is called when the driver is being loaded.
333  *  This routine identifies the type of hardware, allocates all resources
334  *  and initializes the hardware.
335  *
336  *********************************************************************/
337 
338 void
339 em_attach(struct device *parent, struct device *self, void *aux)
340 {
341 	struct pci_attach_args *pa = aux;
342 	struct em_softc *sc;
343 	int tsize, rsize;
344 	int defer = 0;
345 
346 	INIT_DEBUGOUT("em_attach: begin");
347 
348 	sc = (struct em_softc *)self;
349 	sc->osdep.em_pa = *pa;
350 
351 	timeout_set(&sc->timer_handle, em_local_timer, sc);
352 	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);
353 
354 	/* Determine hardware revision */
355 	em_identify_hardware(sc);
356 
357 	/*
358 	 * Only use MSI on the newer PCIe parts, with the exception
359 	 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
360 	 */
361 	if (sc->hw.mac_type <= em_82572)
362 		sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
363 
364 	/* Parameters (to be read from user) */
365 	if (sc->hw.mac_type >= em_82544) {
366 		sc->num_tx_desc = EM_MAX_TXD;
367 		sc->num_rx_desc = EM_MAX_RXD;
368 	} else {
369 		sc->num_tx_desc = EM_MAX_TXD_82543;
370 		sc->num_rx_desc = EM_MAX_RXD_82543;
371 	}
372 	sc->tx_int_delay = EM_TIDV;
373 	sc->tx_abs_int_delay = EM_TADV;
374 	sc->rx_int_delay = EM_RDTR;
375 	sc->rx_abs_int_delay = EM_RADV;
376 	sc->hw.autoneg = DO_AUTO_NEG;
377 	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
378 	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
379 	sc->hw.tbi_compatibility_en = TRUE;
380 	sc->rx_buffer_len = EM_RXBUFFER_2048;
381 
382 	sc->hw.phy_init_script = 1;
383 	sc->hw.phy_reset_disable = FALSE;
384 
385 #ifndef EM_MASTER_SLAVE
386 	sc->hw.master_slave = em_ms_hw_default;
387 #else
388 	sc->hw.master_slave = EM_MASTER_SLAVE;
389 #endif
390 
391 	/*
392 	 * This controls when hardware reports transmit completion
393 	 * status.
394 	 */
395 	sc->hw.report_tx_early = 1;
396 
397 	if (em_allocate_pci_resources(sc))
398 		goto err_pci;
399 
400 	/* Initialize eeprom parameters */
401 	em_init_eeprom_params(&sc->hw);
402 
403 	/*
404 	 * Set the max frame size assuming standard Ethernet
405 	 * sized frames.
406 	 */
407 	switch (sc->hw.mac_type) {
408 		case em_82573:
409 		{
410 			uint16_t	eeprom_data = 0;
411 
412 			/*
413 			 * 82573 only supports Jumbo frames
414 			 * if ASPM is disabled.
415 			 */
416 			em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
417 			    1, &eeprom_data);
418 			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
419 				sc->hw.max_frame_size = ETHER_MAX_LEN;
420 				break;
421 			}
422 			/* Allow Jumbo frames */
423 			/* FALLTHROUGH */
424 		}
425 		case em_82571:
426 		case em_82572:
427 		case em_82574:
428 		case em_82575:
429 		case em_82580:
430 		case em_i210:
431 		case em_i350:
432 		case em_ich9lan:
433 		case em_ich10lan:
434 		case em_pch2lan:
435 		case em_pch_lpt:
436 		case em_80003es2lan:
437 			/* 9K Jumbo Frame size */
438 			sc->hw.max_frame_size = 9234;
439 			break;
440 		case em_pchlan:
441 			sc->hw.max_frame_size = 4096;
442 			break;
443 		case em_82542_rev2_0:
444 		case em_82542_rev2_1:
445 		case em_ich8lan:
446 			/* Adapters that do not support Jumbo frames */
447 			sc->hw.max_frame_size = ETHER_MAX_LEN;
448 			break;
449 		default:
450 			sc->hw.max_frame_size =
451 			    MAX_JUMBO_FRAME_SIZE;
452 	}
453 
454 	sc->hw.min_frame_size =
455 	    ETHER_MIN_LEN + ETHER_CRC_LEN;
456 
457 	if (sc->hw.mac_type >= em_82544)
458 	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
459 		EM_MAX_TXD * sizeof(struct em_tx_desc));
460 	else
461 	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
462 		EM_MAX_TXD_82543 * sizeof(struct em_tx_desc));
463 	tsize = EM_ROUNDUP(tsize, PAGE_SIZE);
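	/*
	 * Worked example, assuming the conventional EM_ROUNDUP(size, unit)
	 * that rounds size up to the next multiple of unit: with 256
	 * transmit descriptors of 16 bytes each, 256 * 16 = 4096 bytes,
	 * which is already a multiple of a 4K PAGE_SIZE, so tsize stays
	 * 4096.
	 */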
464 
465 	/* Allocate Transmit Descriptor ring */
466 	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
467 		printf("%s: Unable to allocate tx_desc memory\n",
468 		       sc->sc_dv.dv_xname);
469 		goto err_tx_desc;
470 	}
471 	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;
472 
473 	if (sc->hw.mac_type >= em_82544)
474 	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
475 		EM_MAX_RXD * sizeof(struct em_rx_desc));
476 	else
477 	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
478 		EM_MAX_RXD_82543 * sizeof(struct em_rx_desc));
479 	rsize = EM_ROUNDUP(rsize, PAGE_SIZE);
480 
481 	/* Allocate Receive Descriptor ring */
482 	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
483 		printf("%s: Unable to allocate rx_desc memory\n",
484 		       sc->sc_dv.dv_xname);
485 		goto err_rx_desc;
486 	}
487 	sc->rx_desc_base = (struct em_rx_desc *)sc->rxdma.dma_vaddr;
488 
489 	/* Initialize the hardware */
490 	if ((defer = em_hardware_init(sc))) {
491 		if (defer == EAGAIN)
492 			config_defer(self, em_defer_attach);
493 		else {
494 			printf("%s: Unable to initialize the hardware\n",
495 			    sc->sc_dv.dv_xname);
496 			goto err_hw_init;
497 		}
498 	}
499 
500 	if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
501 	    sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i210 ||
502 	    sc->hw.mac_type == em_i350) {
503 		uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
504 		sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
505 		    E1000_STATUS_FUNC_SHIFT;
506 
507 		switch (sc->hw.bus_func) {
508 		case 0:
509 			sc->hw.swfw = E1000_SWFW_PHY0_SM;
510 			break;
511 		case 1:
512 			sc->hw.swfw = E1000_SWFW_PHY1_SM;
513 			break;
514 		case 2:
515 			sc->hw.swfw = E1000_SWFW_PHY2_SM;
516 			break;
517 		case 3:
518 			sc->hw.swfw = E1000_SWFW_PHY3_SM;
519 			break;
520 		}
521 	} else {
522 		sc->hw.bus_func = 0;
523 	}
524 
525 	/* Copy the permanent MAC address out of the EEPROM */
526 	if (em_read_mac_addr(&sc->hw) < 0) {
527 		printf("%s: EEPROM read error while reading mac address\n",
528 		       sc->sc_dv.dv_xname);
529 		goto err_mac_addr;
530 	}
531 
532 	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
533 	    ETHER_ADDR_LEN);
534 
535 	/* Setup OS specific network interface */
536 	if (!defer)
537 		em_setup_interface(sc);
538 
539 	/* Initialize statistics */
540 	em_clear_hw_cntrs(&sc->hw);
541 #ifndef SMALL_KERNEL
542 	em_update_stats_counters(sc);
543 #endif
544 	sc->hw.get_link_status = 1;
545 	if (!defer)
546 		em_update_link_status(sc);
547 
548 	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));
549 
550 	/* Indicate SOL/IDER usage */
551 	if (em_check_phy_reset_block(&sc->hw))
552 		printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
553 		    sc->sc_dv.dv_xname);
554 
555 	/* Identify 82544 on PCI-X */
556 	em_get_bus_info(&sc->hw);
557 	if (sc->hw.bus_type == em_bus_type_pcix &&
558 	    sc->hw.mac_type == em_82544)
559 		sc->pcix_82544 = TRUE;
560 	else
561 		sc->pcix_82544 = FALSE;
562 
563 	sc->hw.icp_xxxx_is_link_up = FALSE;
564 
565 	INIT_DEBUGOUT("em_attach: end");
566 	return;
567 
568 err_mac_addr:
569 err_hw_init:
570 	em_dma_free(sc, &sc->rxdma);
571 err_rx_desc:
572 	em_dma_free(sc, &sc->txdma);
573 err_tx_desc:
574 err_pci:
575 	em_free_pci_resources(sc);
576 }
577 
578 /*********************************************************************
579  *  Transmit entry point
580  *
581  *  em_start is called by the stack to initiate a transmit.
582  *  The driver will remain in this routine as long as there are
583  *  packets to transmit and transmit resources are available.
584  *  In case resources are not available stack is notified and
585  *  the packet is requeued.
586  **********************************************************************/
587 
588 void
589 em_start(struct ifnet *ifp)
590 {
591 	struct mbuf    *m_head;
592 	struct em_softc *sc = ifp->if_softc;
593 	int		post = 0;
594 
595 	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
596 		return;
597 
598 	if (!sc->link_active)
599 		return;
600 
601 	if (sc->hw.mac_type != em_82547) {
602 		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
603 		    sc->txdma.dma_map->dm_mapsize,
604 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
605 	}
606 
607 	for (;;) {
608 		IFQ_POLL(&ifp->if_snd, m_head);
609 		if (m_head == NULL)
610 			break;
611 
612 		if (em_encap(sc, m_head)) {
613 			ifp->if_flags |= IFF_OACTIVE;
614 			break;
615 		}
616 
617 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
618 
619 #if NBPFILTER > 0
620 		/* Send a copy of the frame to the BPF listener */
621 		if (ifp->if_bpf)
622 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
623 #endif
624 
625 		/* Set timeout in case hardware has problems transmitting */
626 		ifp->if_timer = EM_TX_TIMEOUT;
627 
628 		post = 1;
629 	}
630 
631 	if (sc->hw.mac_type != em_82547) {
632 		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
633 		    sc->txdma.dma_map->dm_mapsize,
634 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
635 		/*
636 		 * Advance the Transmit Descriptor Tail (TDT);
637 		 * this tells the E1000 that these frames are
638 		 * available to transmit.
639 		 */
640 		if (post)
641 			E1000_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
642 	}
643 }
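/*
 * Design note: em_start() writes TDT once after the dequeue loop rather
 * than once per packet, so a burst of outgoing packets costs a single
 * doorbell register access instead of one per frame.
 */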
644 
645 /*********************************************************************
646  *  Ioctl entry point
647  *
648  *  em_ioctl is called when the user wants to configure the
649  *  interface.
650  *
651  *  return 0 on success, positive on failure
652  **********************************************************************/
653 
654 int
655 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
656 {
657 	int		error = 0;
658 	struct ifreq   *ifr = (struct ifreq *) data;
659 	struct ifaddr  *ifa = (struct ifaddr *)data;
660 	struct em_softc *sc = ifp->if_softc;
661 	int s;
662 
663 	s = splnet();
664 
665 	switch (command) {
666 	case SIOCSIFADDR:
667 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
668 			       "Addr)");
669 		if (!(ifp->if_flags & IFF_UP)) {
670 			ifp->if_flags |= IFF_UP;
671 			em_init(sc);
672 		}
673 		if (ifa->ifa_addr->sa_family == AF_INET)
674 			arp_ifinit(&sc->interface_data, ifa);
675 		break;
676 
677 	case SIOCSIFFLAGS:
678 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
679 		if (ifp->if_flags & IFF_UP) {
680 			if (ifp->if_flags & IFF_RUNNING)
681 				error = ENETRESET;
682 			else
683 				em_init(sc);
684 		} else {
685 			if (ifp->if_flags & IFF_RUNNING)
686 				em_stop(sc, 0);
687 		}
688 		break;
689 
690 	case SIOCSIFMEDIA:
691 		/* Check SOL/IDER usage */
692 		if (em_check_phy_reset_block(&sc->hw)) {
693 			printf("%s: Media change is blocked due to SOL/IDER session.\n",
694 			    sc->sc_dv.dv_xname);
695 			break;
696 		}
697 	case SIOCGIFMEDIA:
698 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
699 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
700 		break;
701 
702 	case SIOCGIFRXR:
703 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
704 		    NULL, MCLBYTES, &sc->rx_ring);
705 		break;
706 
707 	default:
708 		error = ether_ioctl(ifp, &sc->interface_data, command, data);
709 	}
710 
711 	if (error == ENETRESET) {
712 		if (ifp->if_flags & IFF_RUNNING) {
713 			em_disable_intr(sc);
714 			em_iff(sc);
715 			if (sc->hw.mac_type == em_82542_rev2_0)
716 				em_initialize_receive_unit(sc);
717 			em_enable_intr(sc);
718 		}
719 		error = 0;
720 	}
721 
722 	splx(s);
723 	return (error);
724 }
725 
726 /*********************************************************************
727  *  Watchdog entry point
728  *
729  *  This routine is called whenever the hardware quits transmitting.
730  *
731  **********************************************************************/
732 
733 void
734 em_watchdog(struct ifnet *ifp)
735 {
736 	struct em_softc *sc = ifp->if_softc;
737 
738 	/* If we are in this routine because of pause frames, then
739 	 * don't reset the hardware.
740 	 */
741 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
742 		ifp->if_timer = EM_TX_TIMEOUT;
743 		return;
744 	}
745 	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
746 
747 	em_init(sc);
748 
749 	sc->watchdog_events++;
750 }
751 
752 /*********************************************************************
753  *  Init entry point
754  *
755  *  This routine is used in two ways. It is used by the stack as
756  *  the init entry point in the network interface structure. It is
757  *  also used
757  *  by the driver as a hw/sw initialization routine to get to a
758  *  consistent state.
759  *
760  **********************************************************************/
761 
762 void
763 em_init(void *arg)
764 {
765 	struct em_softc *sc = arg;
766 	struct ifnet   *ifp = &sc->interface_data.ac_if;
767 	uint32_t	pba;
768 	int s;
769 
770 	s = splnet();
771 
772 	INIT_DEBUGOUT("em_init: begin");
773 
774 	em_stop(sc, 0);
775 
776 	/*
777 	 * Packet Buffer Allocation (PBA)
778 	 * Writing PBA sets the receive portion of the buffer;
779 	 * the remainder is used for the transmit buffer.
780 	 *
781 	 * Devices before the 82547 had a Packet Buffer of 64K.
782 	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
783 	 * After the 82547 the buffer was reduced to 40K.
784 	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
785 	 *   Note: default does not leave enough room for Jumbo Frame >10k.
786 	 */
787 	switch (sc->hw.mac_type) {
788 	case em_82547:
789 	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
790 		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
791 			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
792 		else
793 			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
794 		sc->tx_fifo_head = 0;
795 		sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
796 		sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
797 		break;
798 	case em_82571:
799 	case em_82572: /* Total Packet Buffer on these is 48k */
800 	case em_82575:
801 	case em_82580:
802 	case em_80003es2lan:
803 	case em_i350:
804 		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
805 		break;
806 	case em_i210:
807 		pba = E1000_PBA_34K;
808 		break;
809 	case em_82573: /* 82573: Total Packet Buffer is 32K */
810 		/* Jumbo frames not supported */
811 		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
812 		break;
813 	case em_82574: /* Total Packet Buffer is 40k */
814 		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
815 		break;
816 	case em_ich8lan:
817 		pba = E1000_PBA_8K;
818 		break;
819 	case em_ich9lan:
820 	case em_ich10lan:
821 		/* Boost Receive side for jumbo frames */
822 		if (sc->hw.max_frame_size > EM_RXBUFFER_4096)
823 			pba = E1000_PBA_14K;
824 		else
825 			pba = E1000_PBA_10K;
826 		break;
827 	case em_pchlan:
828 	case em_pch2lan:
829 	case em_pch_lpt:
830 		pba = E1000_PBA_26K;
831 		break;
832 	default:
833 		/* Devices before 82547 had a Packet Buffer of 64K.   */
834 		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
835 			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
836 		else
837 			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
838 	}
839 	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
840 	E1000_WRITE_REG(&sc->hw, PBA, pba);
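	/*
	 * Worked example for the 82547 case above, assuming the PBA
	 * constants are expressed in KB and EM_PBA_BYTES_SHIFT converts
	 * KB to bytes: with pba = E1000_PBA_30K, the transmit FIFO gets
	 * the remaining (40K - 30K) = 10K of the 40K packet buffer, so
	 * tx_fifo_size is 10 * 1024 = 10240 bytes and tx_head_addr
	 * points at the 30K mark.
	 */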
841 
842 	/* Get the latest MAC address; the user may have configured a LAA */
843 	bcopy(sc->interface_data.ac_enaddr, sc->hw.mac_addr,
844 	      ETHER_ADDR_LEN);
845 
846 	/* Initialize the hardware */
847 	if (em_hardware_init(sc)) {
848 		printf("%s: Unable to initialize the hardware\n",
849 		       sc->sc_dv.dv_xname);
850 		splx(s);
851 		return;
852 	}
853 	em_update_link_status(sc);
854 
855 	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
856 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
857 		em_enable_hw_vlans(sc);
858 
859 	/* Prepare transmit descriptors and buffers */
860 	if (em_setup_transmit_structures(sc)) {
861 		printf("%s: Could not setup transmit structures\n",
862 		       sc->sc_dv.dv_xname);
863 		em_stop(sc, 0);
864 		splx(s);
865 		return;
866 	}
867 	em_initialize_transmit_unit(sc);
868 
869 	/* Prepare receive descriptors and buffers */
870 	if (em_setup_receive_structures(sc)) {
871 		printf("%s: Could not setup receive structures\n",
872 		       sc->sc_dv.dv_xname);
873 		em_stop(sc, 0);
874 		splx(s);
875 		return;
876 	}
877 	em_initialize_receive_unit(sc);
878 
879 	/* Program promiscuous mode and multicast filters. */
880 	em_iff(sc);
881 
882 	ifp->if_flags |= IFF_RUNNING;
883 	ifp->if_flags &= ~IFF_OACTIVE;
884 
885 	timeout_add_sec(&sc->timer_handle, 1);
886 	em_clear_hw_cntrs(&sc->hw);
887 	em_enable_intr(sc);
888 
889 	/* Don't reset the phy next time init gets called */
890 	sc->hw.phy_reset_disable = TRUE;
891 
892 	splx(s);
893 }
894 
895 /*********************************************************************
896  *
897  *  Interrupt Service routine
898  *
899  **********************************************************************/
900 int
901 em_intr(void *arg)
902 {
903 	struct em_softc	*sc = arg;
904 	struct ifnet	*ifp = &sc->interface_data.ac_if;
905 	u_int32_t	reg_icr, test_icr;
906 	int		refill = 0;
907 
908 	test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
909 	if (sc->hw.mac_type >= em_82571)
910 		test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
911 	if (!test_icr)
912 		return (0);
913 
914 	if (ifp->if_flags & IFF_RUNNING) {
915 		em_rxeof(sc);
916 		em_txeof(sc);
917 		refill = 1;
918 	}
919 
920 	/* Link status change */
921 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
922 		sc->hw.get_link_status = 1;
923 		em_check_for_link(&sc->hw);
924 		em_update_link_status(sc);
925 	}
926 
927 	if (reg_icr & E1000_ICR_RXO) {
928 		sc->rx_overruns++;
929 		refill = 1;
930 	}
931 
932 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
933 		em_start(ifp);
934 
935 	if (refill && em_rxfill(sc)) {
936 		/* Advance the Rx Queue #0 "Tail Pointer". */
937 		E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
938 	}
939 
940 	return (1);
941 }
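/*
 * Note: reading ICR clears the asserted interrupt causes, so the register
 * is sampled exactly once per interrupt.  On em_82571 and newer parts the
 * E1000_ICR_INT_ASSERTED test lets the handler bail out early when a
 * shared interrupt line fired on behalf of some other device.
 */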
942 
943 /*********************************************************************
944  *
945  *  Media Ioctl callback
946  *
947  *  This routine is called whenever the user queries the status of
948  *  the interface using ifconfig.
949  *
950  **********************************************************************/
951 void
952 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
953 {
954 	struct em_softc *sc = ifp->if_softc;
955 	u_char fiber_type = IFM_1000_SX;
956 	u_int16_t gsr;
957 
958 	INIT_DEBUGOUT("em_media_status: begin");
959 
960 	em_check_for_link(&sc->hw);
961 	em_update_link_status(sc);
962 
963 	ifmr->ifm_status = IFM_AVALID;
964 	ifmr->ifm_active = IFM_ETHER;
965 
966 	if (!sc->link_active) {
967 		ifmr->ifm_active |= IFM_NONE;
968 		return;
969 	}
970 
971 	ifmr->ifm_status |= IFM_ACTIVE;
972 
973 	if (sc->hw.media_type == em_media_type_fiber ||
974 	    sc->hw.media_type == em_media_type_internal_serdes) {
975 		if (sc->hw.mac_type == em_82545)
976 			fiber_type = IFM_1000_LX;
977 		ifmr->ifm_active |= fiber_type | IFM_FDX;
978 	} else {
979 		switch (sc->link_speed) {
980 		case 10:
981 			ifmr->ifm_active |= IFM_10_T;
982 			break;
983 		case 100:
984 			ifmr->ifm_active |= IFM_100_TX;
985 			break;
986 		case 1000:
987 			ifmr->ifm_active |= IFM_1000_T;
988 			break;
989 		}
990 
991 		if (sc->link_duplex == FULL_DUPLEX)
992 			ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
993 		else
994 			ifmr->ifm_active |= IFM_HDX;
995 
996 		if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
997 			em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
998 			if (gsr & SR_1000T_MS_CONFIG_RES)
999 				ifmr->ifm_active |= IFM_ETH_MASTER;
1000 		}
1001 	}
1002 }
1003 
1004 /*********************************************************************
1005  *
1006  *  Media Ioctl callback
1007  *
1008  *  This routine is called when the user changes speed/duplex using
1009  *  media/mediaopt options with ifconfig.
1010  *
1011  **********************************************************************/
1012 int
1013 em_media_change(struct ifnet *ifp)
1014 {
1015 	struct em_softc *sc = ifp->if_softc;
1016 	struct ifmedia	*ifm = &sc->media;
1017 
1018 	INIT_DEBUGOUT("em_media_change: begin");
1019 
1020 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1021 		return (EINVAL);
1022 
1023 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1024 	case IFM_AUTO:
1025 		sc->hw.autoneg = DO_AUTO_NEG;
1026 		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1027 		break;
1028 	case IFM_1000_LX:
1029 	case IFM_1000_SX:
1030 	case IFM_1000_T:
1031 		sc->hw.autoneg = DO_AUTO_NEG;
1032 		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1033 		break;
1034 	case IFM_100_TX:
1035 		sc->hw.autoneg = FALSE;
1036 		sc->hw.autoneg_advertised = 0;
1037 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1038 			sc->hw.forced_speed_duplex = em_100_full;
1039 		else
1040 			sc->hw.forced_speed_duplex = em_100_half;
1041 		break;
1042 	case IFM_10_T:
1043 		sc->hw.autoneg = FALSE;
1044 		sc->hw.autoneg_advertised = 0;
1045 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1046 			sc->hw.forced_speed_duplex = em_10_full;
1047 		else
1048 			sc->hw.forced_speed_duplex = em_10_half;
1049 		break;
1050 	default:
1051 		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
1052 	}
1053 
1054 	/*
1055 	 * As the speed/duplex settings may have changed we need to
1056 	 * reset the PHY.
1057 	 */
1058 	sc->hw.phy_reset_disable = FALSE;
1059 
1060 	em_init(sc);
1061 
1062 	return (0);
1063 }
1064 
1065 int
1066 em_flowstatus(struct em_softc *sc)
1067 {
1068 	u_int16_t ar, lpar;
1069 
1070 	if (sc->hw.media_type == em_media_type_fiber ||
1071 	    sc->hw.media_type == em_media_type_internal_serdes)
1072 		return (0);
1073 
1074 	em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
1075 	em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);
1076 
1077 	if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
1078 		return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
1079 	else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
1080 		(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
1081 		return (IFM_FLOW|IFM_ETH_TXPAUSE);
1082 	else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
1083 		!(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
1084 		return (IFM_FLOW|IFM_ETH_RXPAUSE);
1085 
1086 	return (0);
1087 }
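/*
 * The checks above implement the IEEE 802.3 Annex 28B pause resolution
 * between our advertisement (ar) and the link partner's (lpar), roughly:
 *
 *	local PAUSE/ASM_DIR	remote PAUSE/ASM_DIR	result
 *	1 / x			1 / x			TX and RX pause
 *	0 / 1			1 / 1			TX pause only
 *	1 / 1			0 / 1			RX pause only
 *	anything else					no pause
 */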
1088 
1089 /*********************************************************************
1090  *
1091  *  This routine maps the mbufs to tx descriptors.
1092  *
1093  *  return 0 on success, positive on failure
1094  **********************************************************************/
1095 int
1096 em_encap(struct em_softc *sc, struct mbuf *m_head)
1097 {
1098 	u_int32_t	txd_upper;
1099 	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
1100 	int		i, j, first, error = 0, last = 0;
1101 	bus_dmamap_t	map;
1102 
1103 	/* For 82544 Workaround */
1104 	DESC_ARRAY		desc_array;
1105 	u_int32_t		array_elements;
1106 	u_int32_t		counter;
1107 
1108 	struct em_buffer   *tx_buffer, *tx_buffer_mapped;
1109 	struct em_tx_desc *current_tx_desc = NULL;
1110 
1111 	/*
1112 	 * Force a cleanup if the number of available TX
1113 	 * descriptors hits the threshold
1114 	 */
1115 	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1116 		em_txeof(sc);
1117 		/* Do we now have at least the minimum? */
1118 		if (sc->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1119 			sc->no_tx_desc_avail1++;
1120 			return (ENOBUFS);
1121 		}
1122 	}
1123 
1124 	if (sc->hw.mac_type == em_82547) {
1125 		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1126 		    sc->txdma.dma_map->dm_mapsize,
1127 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1128 	}
1129 
1130 	/*
1131 	 * Map the packet for DMA.
1132 	 *
1133 	 * Capture the first descriptor index;
1134 	 * this descriptor will have the index
1135 	 * of the EOP, which is the only one that
1136 	 * gets a DONE bit writeback.
1137 	 */
1138 	first = sc->next_avail_tx_desc;
1139 	tx_buffer = &sc->tx_buffer_area[first];
1140 	tx_buffer_mapped = tx_buffer;
1141 	map = tx_buffer->map;
1142 
1143 	error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
1144 	switch (error) {
1145 	case 0:
1146 		break;
1147 	case EFBIG:
1148 		if ((error = m_defrag(m_head, M_DONTWAIT)) == 0 &&
1149 		    (error = bus_dmamap_load_mbuf(sc->txtag, map, m_head,
1150 		     BUS_DMA_NOWAIT)) == 0)
1151 			break;
1152 
1153 		/* FALLTHROUGH */
1154 	default:
1155 		sc->no_tx_dma_setup++;
1156 		goto loaderr;
1157 	}
1158 
1159 	EM_KASSERT(map->dm_nsegs != 0, ("em_encap: empty packet"));
1160 
1161 	if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
1162 		goto fail;
1163 
1164 	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
1165 	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
1166 	    sc->hw.mac_type != em_i350)
1167 		em_transmit_checksum_setup(sc, m_head, &txd_upper, &txd_lower);
1168 	else
1169 		txd_upper = txd_lower = 0;
1170 
1171 	i = sc->next_avail_tx_desc;
1172 	if (sc->pcix_82544)
1173 		txd_saved = i;
1174 
1175 	for (j = 0; j < map->dm_nsegs; j++) {
1176 		/* If sc is 82544 and on PCI-X bus */
1177 		if (sc->pcix_82544) {
1178 			/*
1179 			 * Check the Address and Length combination and
1180 			 * split the data accordingly
1181 			 */
1182 			array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
1183 							     map->dm_segs[j].ds_len,
1184 							     &desc_array);
1185 			for (counter = 0; counter < array_elements; counter++) {
1186 				if (txd_used == sc->num_tx_desc_avail) {
1187 					sc->next_avail_tx_desc = txd_saved;
1188 					goto fail;
1189 				}
1190 				tx_buffer = &sc->tx_buffer_area[i];
1191 				current_tx_desc = &sc->tx_desc_base[i];
1192 				current_tx_desc->buffer_addr = htole64(
1193 					desc_array.descriptor[counter].address);
1194 				current_tx_desc->lower.data = htole32(
1195 					(sc->txd_cmd | txd_lower |
1196 					 (u_int16_t)desc_array.descriptor[counter].length));
1197 				current_tx_desc->upper.data = htole32((txd_upper));
1198 				last = i;
1199 				if (++i == sc->num_tx_desc)
1200 					i = 0;
1201 
1202 				tx_buffer->m_head = NULL;
1203 				tx_buffer->next_eop = -1;
1204 				txd_used++;
1205 			}
1206 		} else {
1207 			tx_buffer = &sc->tx_buffer_area[i];
1208 			current_tx_desc = &sc->tx_desc_base[i];
1209 
1210 			current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
1211 			current_tx_desc->lower.data = htole32(
1212 				sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
1213 			current_tx_desc->upper.data = htole32(txd_upper);
1214 			last = i;
1215 			if (++i == sc->num_tx_desc)
1216 				i = 0;
1217 
1218 			tx_buffer->m_head = NULL;
1219 			tx_buffer->next_eop = -1;
1220 		}
1221 	}
1222 
1223 	sc->next_avail_tx_desc = i;
1224 	if (sc->pcix_82544)
1225 		sc->num_tx_desc_avail -= txd_used;
1226 	else
1227 		sc->num_tx_desc_avail -= map->dm_nsegs;
1228 
1229 #if NVLAN > 0
1230 	/* Find out if we are in VLAN mode */
1231 	if (m_head->m_flags & M_VLANTAG) {
1232 		/* Set the VLAN id */
1233 		current_tx_desc->upper.fields.special =
1234 			htole16(m_head->m_pkthdr.ether_vtag);
1235 
1236 		/* Tell hardware to add tag */
1237 		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1238 	}
1239 #endif
1240 
1241 	tx_buffer->m_head = m_head;
1242 	tx_buffer_mapped->map = tx_buffer->map;
1243 	tx_buffer->map = map;
1244 	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
1245 	    BUS_DMASYNC_PREWRITE);
1246 
1247 	/*
1248 	 * Last Descriptor of Packet
1249 	 * needs End Of Packet (EOP)
1250 	 * and Report Status (RS)
1251 	 */
1252 	current_tx_desc->lower.data |=
1253 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1254 
1255 	/*
1256 	 * Keep track in the first buffer which
1257 	 * descriptor will be written back
1258 	 */
1259 	tx_buffer = &sc->tx_buffer_area[first];
1260 	tx_buffer->next_eop = last;
1261 
1262 	/*
1263 	 * Advance the Transmit Descriptor Tail (TDT);
1264 	 * this tells the E1000 that this frame is
1265 	 * available to transmit.
1266 	 */
1267 	if (sc->hw.mac_type == em_82547) {
1268 		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1269 		    sc->txdma.dma_map->dm_mapsize,
1270 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1271 		if (sc->link_duplex == HALF_DUPLEX)
1272 			em_82547_move_tail_locked(sc);
1273 		else {
1274 			E1000_WRITE_REG(&sc->hw, TDT, i);
1275 			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
1276 		}
1277 	}
1278 
1279 	return (0);
1280 
1281 fail:
1282 	sc->no_tx_desc_avail2++;
1283 	bus_dmamap_unload(sc->txtag, map);
1284 	error = ENOBUFS;
1285 loaderr:
1286 	if (sc->hw.mac_type == em_82547) {
1287 		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1288 		    sc->txdma.dma_map->dm_mapsize,
1289 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1290 	}
1291 	return (error);
1292 }
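/*
 * Note on the PCI-X workaround above: on an 82544 sitting on a PCI-X
 * bus, certain DMA address/length combinations can hang the controller,
 * so em_fill_descriptors() splits each such segment into a small array
 * of safe sub-descriptors before they are written to the ring.
 */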
1293 
1294 /*********************************************************************
1295  *
1296  * 82547 workaround to avoid a controller hang in a half-duplex environment.
1297  * The workaround is to avoid queuing a large packet that would span
1298  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1299  * in this case. We do that only when the FIFO is quiescent.
1300  *
1301  **********************************************************************/
1302 void
1303 em_82547_move_tail_locked(struct em_softc *sc)
1304 {
1305 	uint16_t hw_tdt;
1306 	uint16_t sw_tdt;
1307 	struct em_tx_desc *tx_desc;
1308 	uint16_t length = 0;
1309 	boolean_t eop = 0;
1310 
1311 	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
1312 	sw_tdt = sc->next_avail_tx_desc;
1313 
1314 	while (hw_tdt != sw_tdt) {
1315 		tx_desc = &sc->tx_desc_base[hw_tdt];
1316 		length += tx_desc->lower.flags.length;
1317 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1318 		if (++hw_tdt == sc->num_tx_desc)
1319 			hw_tdt = 0;
1320 
1321 		if (eop) {
1322 			if (em_82547_fifo_workaround(sc, length)) {
1323 				sc->tx_fifo_wrk_cnt++;
1324 				timeout_add(&sc->tx_fifo_timer_handle, 1);
1325 				break;
1326 			}
1327 			E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
1328 			em_82547_update_fifo_head(sc, length);
1329 			length = 0;
1330 		}
1331 	}
1332 }
1333 
1334 void
1335 em_82547_move_tail(void *arg)
1336 {
1337 	struct em_softc *sc = arg;
1338 	int s;
1339 
1340 	s = splnet();
1341 	em_82547_move_tail_locked(sc);
1342 	splx(s);
1343 }
1344 
1345 int
1346 em_82547_fifo_workaround(struct em_softc *sc, int len)
1347 {
1348 	int fifo_space, fifo_pkt_len;
1349 
1350 	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1351 
1352 	if (sc->link_duplex == HALF_DUPLEX) {
1353 		fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;
1354 
1355 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1356 			if (em_82547_tx_fifo_reset(sc))
1357 				return (0);
1358 			else
1359 				return (1);
1360 		}
1361 	}
1362 
1363 	return (0);
1364 }
1365 
1366 void
1367 em_82547_update_fifo_head(struct em_softc *sc, int len)
1368 {
1369 	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1370 
1371 	/* tx_fifo_head is always 16 byte aligned */
1372 	sc->tx_fifo_head += fifo_pkt_len;
1373 	if (sc->tx_fifo_head >= sc->tx_fifo_size)
1374 		sc->tx_fifo_head -= sc->tx_fifo_size;
1375 }
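/*
 * Worked example, assuming EM_FIFO_HDR is the 16-byte per-packet FIFO
 * header: a 64-byte frame consumes EM_ROUNDUP(64 + 16, 16) = 80 bytes
 * of FIFO space, and the head simply wraps modulo tx_fifo_size.
 */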
1376 
1377 int
1378 em_82547_tx_fifo_reset(struct em_softc *sc)
1379 {
1380 	uint32_t tctl;
1381 
1382 	if ((E1000_READ_REG(&sc->hw, TDT) ==
1383 	     E1000_READ_REG(&sc->hw, TDH)) &&
1384 	    (E1000_READ_REG(&sc->hw, TDFT) ==
1385 	     E1000_READ_REG(&sc->hw, TDFH)) &&
1386 	    (E1000_READ_REG(&sc->hw, TDFTS) ==
1387 	     E1000_READ_REG(&sc->hw, TDFHS)) &&
1388 	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {
1389 
1390 		/* Disable TX unit */
1391 		tctl = E1000_READ_REG(&sc->hw, TCTL);
1392 		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);
1393 
1394 		/* Reset FIFO pointers */
1395 		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
1396 		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
1397 		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
1398 		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);
1399 
1400 		/* Re-enable TX unit */
1401 		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
1402 		E1000_WRITE_FLUSH(&sc->hw);
1403 
1404 		sc->tx_fifo_head = 0;
1405 		sc->tx_fifo_reset_cnt++;
1406 
1407 		return (TRUE);
1408 	} else
1409 		return (FALSE);
1410 }
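/*
 * The reset above is only safe while the FIFO is quiescent, which is why
 * TRUE is returned only when the descriptor ring is drained (TDT == TDH),
 * the FIFO head/tail and saved head/tail pointers agree, and the FIFO
 * packet count (TDFPC) is zero.
 */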
1411 
1412 void
1413 em_iff(struct em_softc *sc)
1414 {
1415 	struct ifnet *ifp = &sc->interface_data.ac_if;
1416 	struct arpcom *ac = &sc->interface_data;
1417 	u_int32_t reg_rctl = 0;
1418 	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1419 	struct ether_multi *enm;
1420 	struct ether_multistep step;
1421 	int i = 0;
1422 
1423 	IOCTL_DEBUGOUT("em_iff: begin");
1424 
1425 	if (sc->hw.mac_type == em_82542_rev2_0) {
1426 		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1427 		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1428 			em_pci_clear_mwi(&sc->hw);
1429 		reg_rctl |= E1000_RCTL_RST;
1430 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1431 		msec_delay(5);
1432 	}
1433 
1434 	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1435 	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
1436 	ifp->if_flags &= ~IFF_ALLMULTI;
1437 
1438 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1439 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1440 		ifp->if_flags |= IFF_ALLMULTI;
1441 		reg_rctl |= E1000_RCTL_MPE;
1442 		if (ifp->if_flags & IFF_PROMISC)
1443 			reg_rctl |= E1000_RCTL_UPE;
1444 	} else {
1445 		ETHER_FIRST_MULTI(step, ac, enm);
1446 		while (enm != NULL) {
1447 			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
1448 			i += ETH_LENGTH_OF_ADDRESS;
1449 
1450 			ETHER_NEXT_MULTI(step, enm);
1451 		}
1452 
1453 		em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0, 1);
1454 	}
1455 
1456 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1457 
1458 	if (sc->hw.mac_type == em_82542_rev2_0) {
1459 		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1460 		reg_rctl &= ~E1000_RCTL_RST;
1461 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1462 		msec_delay(5);
1463 		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1464 			em_pci_set_mwi(&sc->hw);
1465 	}
1466 }
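/*
 * Note: on the 82542 rev2.0 the receiver must be held in reset
 * (E1000_RCTL_RST) while the multicast table is rewritten, with MWI
 * temporarily disabled around the operation; both are restored above
 * once the new filter is in place.
 */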
1467 
1468 /*********************************************************************
1469  *  Timer routine
1470  *
1471  *  This routine checks for link status and updates statistics.
1472  *
1473  **********************************************************************/
1474 
1475 void
1476 em_local_timer(void *arg)
1477 {
1478 	struct ifnet   *ifp;
1479 	struct em_softc *sc = arg;
1480 	int s;
1481 
1482 	ifp = &sc->interface_data.ac_if;
1483 
1484 	s = splnet();
1485 
1486 #ifndef SMALL_KERNEL
1487 	em_update_stats_counters(sc);
1488 #ifdef EM_DEBUG
1489 	if (ifp->if_flags & IFF_DEBUG && ifp->if_flags & IFF_RUNNING)
1490 		em_print_hw_stats(sc);
1491 #endif
1492 #endif
1493 	em_smartspeed(sc);
1494 
1495 	timeout_add_sec(&sc->timer_handle, 1);
1496 
1497 	splx(s);
1498 }
1499 
1500 void
1501 em_update_link_status(struct em_softc *sc)
1502 {
1503 	struct ifnet *ifp = &sc->interface_data.ac_if;
1504 
1505 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
1506 		if (sc->link_active == 0) {
1507 			em_get_speed_and_duplex(&sc->hw,
1508 						&sc->link_speed,
1509 						&sc->link_duplex);
1510 			/* Check if we may set SPEED_MODE bit on PCI-E */
1511 			if ((sc->link_speed == SPEED_1000) &&
1512 			    ((sc->hw.mac_type == em_82571) ||
1513 			    (sc->hw.mac_type == em_82572) ||
1514 			    (sc->hw.mac_type == em_82575) ||
1515 			    (sc->hw.mac_type == em_82580))) {
1516 				int tarc0;
1517 
1518 				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
1519 				tarc0 |= SPEED_MODE_BIT;
1520 				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
1521 			}
1522 			sc->link_active = 1;
1523 			sc->smartspeed = 0;
1524 			ifp->if_baudrate = IF_Mbps(sc->link_speed);
1525 		}
1526 		if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
1527 			if (sc->link_duplex == FULL_DUPLEX)
1528 				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1529 			else
1530 				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
1531 			if_link_state_change(ifp);
1532 		}
1533 	} else {
1534 		if (sc->link_active == 1) {
1535 			ifp->if_baudrate = sc->link_speed = 0;
1536 			sc->link_duplex = 0;
1537 			sc->link_active = 0;
1538 		}
1539 		if (ifp->if_link_state != LINK_STATE_DOWN) {
1540 			ifp->if_link_state = LINK_STATE_DOWN;
1541 			if_link_state_change(ifp);
1542 		}
1543 	}
1544 }
1545 
1546 /*********************************************************************
1547  *
1548  *  This routine disables all traffic on the adapter by issuing a
1549  *  global reset on the MAC and deallocates TX/RX buffers.
1550  *
1551  **********************************************************************/
1552 
1553 void
1554 em_stop(void *arg, int softonly)
1555 {
1556 	struct em_softc *sc = arg;
1557 	struct ifnet   *ifp = &sc->interface_data.ac_if;
1558 
1559 	/* Tell the stack that the interface is no longer active */
1560 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1561 	ifp->if_timer = 0;
1562 
1563 	INIT_DEBUGOUT("em_stop: begin");
1564 
1565 	timeout_del(&sc->timer_handle);
1566 	timeout_del(&sc->tx_fifo_timer_handle);
1567 
1568 	if (!softonly) {
1569 		em_disable_intr(sc);
1570 		em_reset_hw(&sc->hw);
1571 	}
1572 
1573 	em_free_transmit_structures(sc);
1574 	em_free_receive_structures(sc);
1575 }
1576 
1577 /*********************************************************************
1578  *
1579  *  Determine hardware revision.
1580  *
1581  **********************************************************************/
1582 void
1583 em_identify_hardware(struct em_softc *sc)
1584 {
1585 	u_int32_t reg;
1586 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1587 
1588 	/* Make sure our PCI config space has the necessary stuff set */
1589 	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
1590 					    PCI_COMMAND_STATUS_REG);
1591 
1592 	/* Save off the information about this board */
1593 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1594 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1595 
1596 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1597 	sc->hw.revision_id = PCI_REVISION(reg);
1598 
1599 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1600 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1601 	sc->hw.subsystem_id = PCI_PRODUCT(reg);
1602 
1603 	/* Identify the MAC */
1604 	if (em_set_mac_type(&sc->hw))
1605 		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);
1606 
1607 	if (sc->hw.mac_type == em_pchlan)
1608 		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;
1609 
1610 	if (sc->hw.mac_type == em_82541 ||
1611 	    sc->hw.mac_type == em_82541_rev_2 ||
1612 	    sc->hw.mac_type == em_82547 ||
1613 	    sc->hw.mac_type == em_82547_rev_2)
1614 		sc->hw.phy_init_script = TRUE;
1615 }
1616 
1617 int
1618 em_allocate_pci_resources(struct em_softc *sc)
1619 {
1620 	int		val, rid;
1621 	pci_intr_handle_t	ih;
1622 	const char		*intrstr = NULL;
1623 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1624 	pci_chipset_tag_t	pc = pa->pa_pc;
1625 
1626 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
1627 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1628 		printf(": mmba is not mem space\n");
1629 		return (ENXIO);
1630 	}
1631 	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
1632 	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
1633 	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
1634 		printf(": cannot find mem space\n");
1635 		return (ENXIO);
1636 	}
1637 
1638 	switch (sc->hw.mac_type) {
1639 	case em_82544:
1640 	case em_82540:
1641 	case em_82545:
1642 	case em_82546:
1643 	case em_82541:
1644 	case em_82541_rev_2:
1645 		/* Figure out where our I/O BAR is. */
1646 		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
1647 			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
1648 			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
1649 				sc->io_rid = rid;
1650 				break;
1651 			}
1652 			rid += 4;
1653 			if (PCI_MAPREG_MEM_TYPE(val) ==
1654 			    PCI_MAPREG_MEM_TYPE_64BIT)
1655 				rid += 4;	/* skip high bits, too */
1656 		}
1657 
1658 		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
1659 		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
1660 		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
1661 			printf(": cannot find i/o space\n");
1662 			return (ENXIO);
1663 		}
1664 
1665 		sc->hw.io_base = 0;
1666 		break;
1667 	default:
1668 		break;
1669 	}
1670 
1671 	/* for ICH8 and family we need to find the flash memory */
1672 	if (IS_ICH8(sc->hw.mac_type)) {
1673 		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
1674 		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1675 			printf(": flash is not mem space\n");
1676 			return (ENXIO);
1677 		}
1678 
1679 		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
1680 		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
1681 		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
1682 			printf(": cannot find mem space\n");
1683 			return (ENXIO);
1684 		}
1685 	}
1686 
1687 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
1688 		printf(": couldn't map interrupt\n");
1689 		return (ENXIO);
1690 	}
1691 
1692 	sc->osdep.dev = (struct device *)sc;
1693 	sc->hw.back = &sc->osdep;
1694 
1695 	intrstr = pci_intr_string(pc, ih);
1696 	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
1697 					      sc->sc_dv.dv_xname);
1698 	if (sc->sc_intrhand == NULL) {
1699 		printf(": couldn't establish interrupt");
1700 		if (intrstr != NULL)
1701 			printf(" at %s", intrstr);
1702 		printf("\n");
1703 		return (ENXIO);
1704 	}
1705 	printf(": %s", intrstr);
1706 
1707 	/*
1708 	 * The ICP_xxxx device has multiple, duplicate register sets for
1709 	 * use when it is being used as a network processor.  Disable those
1710 	 * registers here, as they are not necessary in this context and
1711 	 * can confuse the system.
1712 	 */
1713 	if (sc->hw.mac_type == em_icp_xxxx) {
1714 		int offset;
1715 		pcireg_t val;
1716 
1717 		if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
1718 		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
1719 			return (0);
1720 		}
1721 		offset += PCI_ST_SMIA_OFFSET;
1722 		pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
1723 		    offset, 0x06);
1724 		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
1725 		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
1726 	}
1727 	return (0);
1728 }
1729 
1730 void
1731 em_free_pci_resources(struct em_softc *sc)
1732 {
1733 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1734 	pci_chipset_tag_t	pc = pa->pa_pc;
1735 
1736 	if (sc->sc_intrhand)
1737 		pci_intr_disestablish(pc, sc->sc_intrhand);
1738 	sc->sc_intrhand = 0;
1739 
1740 	if (sc->osdep.em_flashbase)
1741 		bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
1742 				sc->osdep.em_flashsize);
1743 	sc->osdep.em_flashbase = 0;
1744 
1745 	if (sc->osdep.em_iobase)
1746 		bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
1747 				sc->osdep.em_iosize);
1748 	sc->osdep.em_iobase = 0;
1749 
1750 	if (sc->osdep.em_membase)
1751 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1752 				sc->osdep.em_memsize);
1753 	sc->osdep.em_membase = 0;
1754 }
1755 
1756 /*********************************************************************
1757  *
1758  *  Initialize the hardware to a configuration as specified by the
1759  *  em_softc structure. The controller is reset, the EEPROM is
1760  *  verified, the MAC address is set, then the shared initialization
1761  *  routines are called.
1762  *
1763  **********************************************************************/
1764 int
1765 em_hardware_init(struct em_softc *sc)
1766 {
1767 	uint32_t ret_val;
1768 	u_int16_t rx_buffer_size;
1769 
1770 	INIT_DEBUGOUT("em_hardware_init: begin");
1771 	/* Issue a global reset */
1772 	em_reset_hw(&sc->hw);
1773 
1774 	/* When hardware is reset, fifo_head is also reset */
1775 	sc->tx_fifo_head = 0;
1776 
1777 	/* Make sure we have a good EEPROM before we read from it */
1778 	if (em_get_flash_presence_i210(&sc->hw) &&
1779 	    em_validate_eeprom_checksum(&sc->hw) < 0) {
1780 		/*
1781 		 * Some PCIe parts fail the first check due to
1782 		 * the link being in sleep state; call it again.
1783 		 * If it fails a second time, it's a real issue.
1784 		 */
1785 		if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1786 			printf("%s: The EEPROM Checksum Is Not Valid\n",
1787 			       sc->sc_dv.dv_xname);
1788 			return (EIO);
1789 		}
1790 	}
1791 
1792 	if (em_get_flash_presence_i210(&sc->hw) &&
1793 	    em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1794 		printf("%s: EEPROM read error while reading part number\n",
1795 		       sc->sc_dv.dv_xname);
1796 		return (EIO);
1797 	}
1798 
1799 	/* Set up smart power down as default off on newer adapters */
1800 	if (!em_smart_pwr_down &&
1801 	     (sc->hw.mac_type == em_82571 ||
1802 	      sc->hw.mac_type == em_82572 ||
1803 	      sc->hw.mac_type == em_82575 ||
1804 	      sc->hw.mac_type == em_82580 ||
1805 	      sc->hw.mac_type == em_i210 ||
1806 	      sc->hw.mac_type == em_i350 )) {
1807 		uint16_t phy_tmp = 0;
1808 
1809 		/* Speed up time to link by disabling smart power down */
1810 		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1811 		phy_tmp &= ~IGP02E1000_PM_SPD;
1812 		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1813 	}
1814 
1815 	/*
1816 	 * These parameters control the automatic generation (Tx) and
1817 	 * response (Rx) to Ethernet PAUSE frames.
1818 	 * - High water mark should allow for at least two frames to be
1819 	 *   received after sending an XOFF.
1820 	 * - Low water mark works best when it is very near the high water mark.
1821 	 *   This allows the receiver to restart by sending XON when it has
1822 	 *   drained a bit.  Here we use an arbitary value of 1500 which will
1823 	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
1824 	 *   could be several smaller frames in the buffer and if so they will
1825 	 *   not trigger the XON until their total number reduces the buffer
1826 	 *   by 1500.
1827 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1828 	 */
1829 	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10 );
1830 
1831 	sc->hw.fc_high_water = rx_buffer_size -
1832 	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
1833 	sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
1834 	if (sc->hw.mac_type == em_80003es2lan)
1835 		sc->hw.fc_pause_time = 0xFFFF;
1836 	else
1837 		sc->hw.fc_pause_time = 1000;
1838 	sc->hw.fc_send_xon = TRUE;
1839 	sc->hw.fc = E1000_FC_FULL;
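	/*
	 * Worked example (hypothetical values): a PBA of 0x0030 means a
	 * 48 KB Rx packet buffer, so rx_buffer_size = 0x30 << 10 = 49152.
	 * With a 1518-byte max frame, EM_ROUNDUP(1518, 1024) = 2048, so
	 * fc_high_water = 49152 - 2048 = 47104 and fc_low_water = 45604.
	 */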
1840 
1841 	em_disable_aspm(sc);
1842 
1843 	if ((ret_val = em_init_hw(&sc->hw)) != 0) {
1844 		if (ret_val == E1000_DEFER_INIT) {
1845 			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
1846 			return (EAGAIN);
1847 		}
1848 		printf("%s: Hardware Initialization Failed\n",
1849 		       sc->sc_dv.dv_xname);
1850 		return (EIO);
1851 	}
1852 
1853 	em_check_for_link(&sc->hw);
1854 
1855 	return (0);
1856 }
1857 
1858 /*********************************************************************
1859  *
1860  *  Setup networking device structure and register an interface.
1861  *
1862  **********************************************************************/
1863 void
1864 em_setup_interface(struct em_softc *sc)
1865 {
1866 	struct ifnet   *ifp;
1867 	u_char fiber_type = IFM_1000_SX;
1868 
1869 	INIT_DEBUGOUT("em_setup_interface: begin");
1870 
1871 	ifp = &sc->interface_data.ac_if;
1872 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1873 	ifp->if_softc = sc;
1874 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1875 	ifp->if_ioctl = em_ioctl;
1876 	ifp->if_start = em_start;
1877 	ifp->if_watchdog = em_watchdog;
1878 	ifp->if_hardmtu =
1879 		sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
1880 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1881 	IFQ_SET_READY(&ifp->if_snd);
1882 
1883 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1884 
1885 #if NVLAN > 0
1886 	if (sc->hw.mac_type != em_82575 && sc->hw.mac_type != em_82580 &&
1887 	    sc->hw.mac_type != em_i210 && sc->hw.mac_type != em_i350)
1888 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1889 #endif
1890 
1891 	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
1892 	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
1893 	    sc->hw.mac_type != em_i350)
1894 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1895 
1896 	/*
1897 	 * Specify the media types supported by this adapter and register
1898 	 * callbacks to update media and link information
1899 	 */
1900 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1901 		     em_media_status);
1902 	if (sc->hw.media_type == em_media_type_fiber ||
1903 	    sc->hw.media_type == em_media_type_internal_serdes) {
1904 		if (sc->hw.mac_type == em_82545)
1905 			fiber_type = IFM_1000_LX;
1906 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
1907 			    0, NULL);
1908 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
1909 			    0, NULL);
1910 	} else {
1911 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1912 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1913 			    0, NULL);
1914 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1915 			    0, NULL);
1916 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1917 			    0, NULL);
1918 		if (sc->hw.phy_type != em_phy_ife) {
1919 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1920 				    0, NULL);
1921 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1922 		}
1923 	}
1924 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1925 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1926 
1927 	if_attach(ifp);
1928 	ether_ifattach(ifp);
1929 }
1930 
1931 int
1932 em_detach(struct device *self, int flags)
1933 {
1934 	struct em_softc *sc = (struct em_softc *)self;
1935 	struct ifnet *ifp = &sc->interface_data.ac_if;
1936 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1937 	pci_chipset_tag_t	pc = pa->pa_pc;
1938 
1939 	if (sc->sc_intrhand)
1940 		pci_intr_disestablish(pc, sc->sc_intrhand);
1941 	sc->sc_intrhand = 0;
1942 
1943 	em_stop(sc, 1);
1944 
1945 	em_free_pci_resources(sc);
1946 	em_dma_free(sc, &sc->rxdma);
1947 	em_dma_free(sc, &sc->txdma);
1948 
1949 	ether_ifdetach(ifp);
1950 	if_detach(ifp);
1951 
1952 	return (0);
1953 }
1954 
1955 int
1956 em_activate(struct device *self, int act)
1957 {
1958 	struct em_softc *sc = (struct em_softc *)self;
1959 	struct ifnet *ifp = &sc->interface_data.ac_if;
1960 	int rv = 0;
1961 
1962 	switch (act) {
1963 	case DVACT_SUSPEND:
1964 		if (ifp->if_flags & IFF_RUNNING)
1965 			em_stop(sc, 0);
1966 		/* We have no children at the moment, but we will soon */
1967 		rv = config_activate_children(self, act);
1968 		break;
1969 	case DVACT_RESUME:
1970 		if (ifp->if_flags & IFF_UP)
1971 			em_init(sc);
1972 		break;
1973 	default:
1974 		rv = config_activate_children(self, act);
1975 		break;
1976 	}
1977 	return (rv);
1978 }
1979 
1980 /*********************************************************************
1981  *
1982  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1983  *
1984  **********************************************************************/
1985 void
1986 em_smartspeed(struct em_softc *sc)
1987 {
1988 	uint16_t phy_tmp;
1989 
1990 	if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
1991 	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1992 		return;
1993 
1994 	if (sc->smartspeed == 0) {
1995 		/* If the Master/Slave config fault is asserted on two
1996 		 * consecutive reads, assume the fault is real */
1997 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1998 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1999 			return;
2000 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2001 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2002 			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
2003 					&phy_tmp);
2004 			if (phy_tmp & CR_1000T_MS_ENABLE) {
2005 				phy_tmp &= ~CR_1000T_MS_ENABLE;
2006 				em_write_phy_reg(&sc->hw,
2007 						    PHY_1000T_CTRL, phy_tmp);
2008 				sc->smartspeed++;
2009 				if (sc->hw.autoneg &&
2010 				    !em_phy_setup_autoneg(&sc->hw) &&
2011 				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
2012 						       &phy_tmp)) {
2013 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2014 						    MII_CR_RESTART_AUTO_NEG);
2015 					em_write_phy_reg(&sc->hw,
2016 							 PHY_CTRL, phy_tmp);
2017 				}
2018 			}
2019 		}
2020 		return;
2021 	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2022 		/* If still no link, perhaps using 2/3 pair cable */
2023 		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2024 		phy_tmp |= CR_1000T_MS_ENABLE;
2025 		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2026 		if (sc->hw.autoneg &&
2027 		    !em_phy_setup_autoneg(&sc->hw) &&
2028 		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
2029 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2030 				    MII_CR_RESTART_AUTO_NEG);
2031 			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
2032 		}
2033 	}
2034 	/* Restart process after EM_SMARTSPEED_MAX iterations */
2035 	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
2036 		sc->smartspeed = 0;
2037 }
2038 
2039 /*
2040  * Manage DMA'able memory.
2041  */
2042 int
2043 em_dma_malloc(struct em_softc *sc, bus_size_t size,
2044     struct em_dma_alloc *dma, int mapflags)
2045 {
2046 	int r;
2047 
2048 	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
2049 	r = bus_dmamap_create(dma->dma_tag, size, 1,
2050 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
2051 	if (r != 0) {
2052 		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
2053 			"error %u\n", sc->sc_dv.dv_xname, r);
2054 		goto fail_0;
2055 	}
2056 
2057 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2058 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2059 	if (r != 0) {
2060 		printf("%s: em_dma_malloc: bus_dmamem_alloc failed; "
2061 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2062 			(unsigned long)size, r);
2063 		goto fail_1;
2064 	}
2065 
2066 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2067 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
2068 	if (r != 0) {
2069 		printf("%s: em_dma_malloc: bus_dmamem_map failed; "
2070 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2071 			(unsigned long)size, r);
2072 		goto fail_2;
2073 	}
2074 
2075 	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
2076 			    dma->dma_vaddr, size, NULL,
2077 			    mapflags | BUS_DMA_NOWAIT);
2078 	if (r != 0) {
2079 		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
2080 			"error %u\n", sc->sc_dv.dv_xname, r);
2081 		goto fail_3;
2082 	}
2083 
2084 	dma->dma_size = size;
2085 	return (0);
2086 
2087 fail_3:
2088 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2089 fail_2:
2090 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2091 fail_1:
2092 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2093 fail_0:
2094 	dma->dma_map = NULL;
2095 	dma->dma_tag = NULL;
2096 
2097 	return (r);
2098 }
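/*
 * Usage sketch (hypothetical caller, mirroring how the descriptor rings
 * are allocated elsewhere in this driver): request a DMA region for a
 * ring, bail out on failure, and pair it with em_dma_free() on teardown.
 *
 *	if (em_dma_malloc(sc, sc->num_tx_desc * sizeof(struct em_tx_desc),
 *	    &sc->txdma, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	...
 *	em_dma_free(sc, &sc->txdma);
 */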
2099 
2100 void
2101 em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
2102 {
2103 	if (dma->dma_tag == NULL)
2104 		return;
2105 
2106 	if (dma->dma_map != NULL) {
2107 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2108 		    dma->dma_map->dm_mapsize,
2109 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2110 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2111 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2112 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2113 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2114 	}
2115 	dma->dma_tag = NULL;
2116 }
2117 
2118 /*********************************************************************
2119  *
2120  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2121  *  the information needed to transmit a packet on the wire.
2122  *
2123  **********************************************************************/
2124 int
2125 em_allocate_transmit_structures(struct em_softc *sc)
2126 {
2127 	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
2128 	    sizeof(struct em_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2129 		printf("%s: Unable to allocate tx_buffer memory\n",
2130 		       sc->sc_dv.dv_xname);
2131 		return (ENOMEM);
2132 	}
2133 
2134 	return (0);
2135 }
2136 
2137 /*********************************************************************
2138  *
2139  *  Allocate and initialize transmit structures.
2140  *
2141  **********************************************************************/
2142 int
2143 em_setup_transmit_structures(struct em_softc *sc)
2144 {
2145 	struct  em_buffer *tx_buffer;
2146 	int error, i;
2147 
2148 	if ((error = em_allocate_transmit_structures(sc)) != 0)
2149 		goto fail;
2150 
2151 	bzero((void *) sc->tx_desc_base,
2152 	      (sizeof(struct em_tx_desc)) * sc->num_tx_desc);
2153 
2154 	sc->txtag = sc->osdep.em_pa.pa_dmat;
2155 
2156 	tx_buffer = sc->tx_buffer_area;
2157 	for (i = 0; i < sc->num_tx_desc; i++) {
2158 		error = bus_dmamap_create(sc->txtag, MAX_JUMBO_FRAME_SIZE,
2159 			    EM_MAX_SCATTER, MAX_JUMBO_FRAME_SIZE, 0,
2160 			    BUS_DMA_NOWAIT, &tx_buffer->map);
2161 		if (error != 0) {
2162 			printf("%s: Unable to create TX DMA map\n",
2163 			    sc->sc_dv.dv_xname);
2164 			goto fail;
2165 		}
2166 		tx_buffer++;
2167 	}
2168 
2169 	sc->next_avail_tx_desc = 0;
2170 	sc->next_tx_to_clean = 0;
2171 
2172 	/* Set number of descriptors available */
2173 	sc->num_tx_desc_avail = sc->num_tx_desc;
2174 
2175 	/* Set checksum context */
2176 	sc->active_checksum_context = OFFLOAD_NONE;
2177 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2178 	    sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2179 
2180 	return (0);
2181 
2182 fail:
2183 	em_free_transmit_structures(sc);
2184 	return (error);
2185 }
2186 
2187 /*********************************************************************
2188  *
2189  *  Enable transmit unit.
2190  *
2191  **********************************************************************/
2192 void
2193 em_initialize_transmit_unit(struct em_softc *sc)
2194 {
2195 	u_int32_t	reg_tctl, reg_tipg = 0;
2196 	u_int64_t	bus_addr;
2197 
2198 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2199 
2200 	/* Setup the Base and Length of the Tx Descriptor Ring */
2201 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
2202 	E1000_WRITE_REG(&sc->hw, TDLEN,
2203 			sc->num_tx_desc *
2204 			sizeof(struct em_tx_desc));
2205 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2206 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
2207 
2208 	/* Setup the HW Tx Head and Tail descriptor pointers */
2209 	E1000_WRITE_REG(&sc->hw, TDT, 0);
2210 	E1000_WRITE_REG(&sc->hw, TDH, 0);
2211 
2212 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2213 		     E1000_READ_REG(&sc->hw, TDBAL),
2214 		     E1000_READ_REG(&sc->hw, TDLEN));
2215 
2216 	/* Set the default values for the Tx Inter Packet Gap timer */
2217 	switch (sc->hw.mac_type) {
2218 	case em_82542_rev2_0:
2219 	case em_82542_rev2_1:
2220 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2221 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2222 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2223 		break;
2224 	case em_80003es2lan:
2225 		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2226 		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2227 		break;
2228 	default:
2229 		if (sc->hw.media_type == em_media_type_fiber ||
2230 		    sc->hw.media_type == em_media_type_internal_serdes)
2231 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2232 		else
2233 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2234 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2235 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2236 	}
2237 
2238 
2239 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2240 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2241 	if (sc->hw.mac_type >= em_82540)
2242 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2243 
2244 	/* Setup Transmit Descriptor Base Settings */
2245 	sc->txd_cmd = E1000_TXD_CMD_IFCS;
2246 
2247 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2248 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2249 		/* 82575/6 need to enable the TX queue and lack the IDE bit */
2250 		reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL);
2251 		reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
2252 		E1000_WRITE_REG(&sc->hw, TXDCTL, reg_tctl);
2253 	} else if (sc->tx_int_delay > 0)
2254 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2255 
2256 	/* Program the Transmit Control Register */
2257 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2258 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2259 	if (sc->hw.mac_type >= em_82571)
2260 		reg_tctl |= E1000_TCTL_MULR;
2261 	if (sc->link_duplex == FULL_DUPLEX)
2262 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2263 	else
2264 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2265 	/* This write will effectively turn on the transmit unit */
2266 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2267 }
2268 
2269 /*********************************************************************
2270  *
2271  *  Free all transmit related data structures.
2272  *
2273  **********************************************************************/
2274 void
2275 em_free_transmit_structures(struct em_softc *sc)
2276 {
2277 	struct em_buffer   *tx_buffer;
2278 	int		i;
2279 
2280 	INIT_DEBUGOUT("free_transmit_structures: begin");
2281 
2282 	if (sc->tx_buffer_area != NULL) {
2283 		tx_buffer = sc->tx_buffer_area;
2284 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2285 			if (tx_buffer->map != NULL &&
2286 			    tx_buffer->map->dm_nsegs > 0) {
2287 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
2288 				    0, tx_buffer->map->dm_mapsize,
2289 				    BUS_DMASYNC_POSTWRITE);
2290 				bus_dmamap_unload(sc->txtag,
2291 				    tx_buffer->map);
2292 			}
2293 			if (tx_buffer->m_head != NULL) {
2294 				m_freem(tx_buffer->m_head);
2295 				tx_buffer->m_head = NULL;
2296 			}
2297 			if (tx_buffer->map != NULL) {
2298 				bus_dmamap_destroy(sc->txtag,
2299 				    tx_buffer->map);
2300 				tx_buffer->map = NULL;
2301 			}
2302 		}
2303 	}
2304 	if (sc->tx_buffer_area != NULL) {
2305 		free(sc->tx_buffer_area, M_DEVBUF, 0);
2306 		sc->tx_buffer_area = NULL;
2307 	}
2308 	if (sc->txtag != NULL)
2309 		sc->txtag = NULL;
2310 }
2311 
2312 /*********************************************************************
2313  *
2314  *  The offload context needs to be set when we transfer the first
2315  *  packet of a particular protocol (TCP/UDP). We change the
2316  *  context only if the protocol type changes.
2317  *
2318  **********************************************************************/
2319 void
2320 em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp,
2321     u_int32_t *txd_upper, u_int32_t *txd_lower)
2322 {
2323 	struct em_context_desc *TXD;
2324 	struct em_buffer *tx_buffer;
2325 	int curr_txd;
2326 
2327 	if (mp->m_pkthdr.csum_flags) {
2328 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
2329 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2330 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2331 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
2332 				return;
2333 			else
2334 				sc->active_checksum_context = OFFLOAD_TCP_IP;
2335 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
2336 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2337 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2338 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
2339 				return;
2340 			else
2341 				sc->active_checksum_context = OFFLOAD_UDP_IP;
2342 		} else {
2343 			*txd_upper = 0;
2344 			*txd_lower = 0;
2345 			return;
2346 		}
2347 	} else {
2348 		*txd_upper = 0;
2349 		*txd_lower = 0;
2350 		return;
2351 	}
2352 
2353 	/* If we reach this point, the checksum offload context
2354 	 * needs to be reset.
2355 	 */
2356 	curr_txd = sc->next_avail_tx_desc;
2357 	tx_buffer = &sc->tx_buffer_area[curr_txd];
2358 	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];
2359 
2360 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2361 	TXD->lower_setup.ip_fields.ipcso =
2362 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2363 	TXD->lower_setup.ip_fields.ipcse =
2364 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2365 
2366 	TXD->upper_setup.tcp_fields.tucss =
2367 	    ETHER_HDR_LEN + sizeof(struct ip);
2368 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2369 
2370 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
2371 		TXD->upper_setup.tcp_fields.tucso =
2372 		    ETHER_HDR_LEN + sizeof(struct ip) +
2373 		    offsetof(struct tcphdr, th_sum);
2374 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
2375 		TXD->upper_setup.tcp_fields.tucso =
2376 		    ETHER_HDR_LEN + sizeof(struct ip) +
2377 		    offsetof(struct udphdr, uh_sum);
2378 	}
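	/*
	 * For a standard IPv4 frame (14-byte Ethernet header, 20-byte IP
	 * header, no options) the offsets above work out to: ipcss = 14,
	 * ipcso = 24 (ip_sum), ipcse = 33, tucss = 34, and tucso = 50
	 * (th_sum) for TCP or 40 (uh_sum) for UDP.
	 */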
2379 
2380 	TXD->tcp_seg_setup.data = htole32(0);
2381 	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);
2382 
2383 	tx_buffer->m_head = NULL;
2384 	tx_buffer->next_eop = -1;
2385 
2386 	if (++curr_txd == sc->num_tx_desc)
2387 		curr_txd = 0;
2388 
2389 	sc->num_tx_desc_avail--;
2390 	sc->next_avail_tx_desc = curr_txd;
2391 }
2392 
2393 /**********************************************************************
2394  *
2395  *  Examine each tx_buffer in the used queue. If the hardware is done
2396  *  processing the packet then free associated resources. The
2397  *  tx_buffer is put back on the free queue.
2398  *
2399  **********************************************************************/
2400 void
2401 em_txeof(struct em_softc *sc)
2402 {
2403 	int first, last, done, num_avail;
2404 	struct em_buffer *tx_buffer;
2405 	struct em_tx_desc   *tx_desc, *eop_desc;
2406 	struct ifnet   *ifp = &sc->interface_data.ac_if;
2407 
2408 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
2409 		return;
2410 
2411 	num_avail = sc->num_tx_desc_avail;
2412 	first = sc->next_tx_to_clean;
2413 	tx_desc = &sc->tx_desc_base[first];
2414 	tx_buffer = &sc->tx_buffer_area[first];
2415 	last = tx_buffer->next_eop;
2416 	eop_desc = &sc->tx_desc_base[last];
2417 
2418 	/*
2419 	 * Get the index of the first descriptor
2420 	 * AFTER the EOP of the first packet, so the
2421 	 * inner while loop can rely on a simple
2422 	 * equality comparison.
2423 	 */
2424 	if (++last == sc->num_tx_desc)
2425 		last = 0;
2426 	done = last;
2427 
2428 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2429 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2430 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2431 		/* We clean the range of the packet */
2432 		while (first != done) {
2433 			tx_desc->upper.data = 0;
2434 			tx_desc->lower.data = 0;
2435 			num_avail++;
2436 
2437 			if (tx_buffer->m_head != NULL) {
2438 				ifp->if_opackets++;
2439 				if (tx_buffer->map->dm_nsegs > 0) {
2440 					bus_dmamap_sync(sc->txtag,
2441 					    tx_buffer->map, 0,
2442 					    tx_buffer->map->dm_mapsize,
2443 					    BUS_DMASYNC_POSTWRITE);
2444 					bus_dmamap_unload(sc->txtag,
2445 					    tx_buffer->map);
2446 				}
2447 				m_freem(tx_buffer->m_head);
2448 				tx_buffer->m_head = NULL;
2449 			}
2450 			tx_buffer->next_eop = -1;
2451 
2452 			if (++first == sc->num_tx_desc)
2453 				first = 0;
2454 
2455 			tx_buffer = &sc->tx_buffer_area[first];
2456 			tx_desc = &sc->tx_desc_base[first];
2457 		}
2458 		/* See if we can continue to the next packet */
2459 		last = tx_buffer->next_eop;
2460 		if (last != -1) {
2461 			eop_desc = &sc->tx_desc_base[last];
2462 			/* Get new done point */
2463 			if (++last == sc->num_tx_desc)
2464 				last = 0;
2465 			done = last;
2466 		} else
2467 			break;
2468 	}
2469 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2470 	    sc->txdma.dma_map->dm_mapsize,
2471 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2472 
2473 	sc->next_tx_to_clean = first;
2474 
2475 	/*
2476 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2477 	 * that it is OK to send packets.
2478 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2479 	 * if some descriptors have been freed, restart the timeout.
2480 	 */
2481 	if (num_avail > EM_TX_CLEANUP_THRESHOLD)
2482 		ifp->if_flags &= ~IFF_OACTIVE;
2483 
2484 	/* All clean, turn off the timer */
2485 	if (num_avail == sc->num_tx_desc)
2486 		ifp->if_timer = 0;
2487 	/* Some cleaned, reset the timer */
2488 	else if (num_avail != sc->num_tx_desc_avail)
2489 		ifp->if_timer = EM_TX_TIMEOUT;
2490 
2491 	sc->num_tx_desc_avail = num_avail;
2492 }
2493 
2494 /*********************************************************************
2495  *
2496  *  Get a buffer from system mbuf buffer pool.
2497  *
2498  **********************************************************************/
2499 int
2500 em_get_buf(struct em_softc *sc, int i)
2501 {
2502 	struct mbuf    *m;
2503 	struct em_buffer *pkt;
2504 	struct em_rx_desc *desc;
2505 	int error;
2506 
2507 	pkt = &sc->rx_buffer_area[i];
2508 	desc = &sc->rx_desc_base[i];
2509 
2510 	if (pkt->m_head != NULL) {
2511 		printf("%s: em_get_buf: slot %d already has an mbuf\n",
2512 		    sc->sc_dv.dv_xname, i);
2513 		return (ENOBUFS);
2514 	}
2515 
2516 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
2517 	if (!m) {
2518 		sc->mbuf_cluster_failed++;
2519 		return (ENOBUFS);
2520 	}
2521 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2522 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2523 		m_adj(m, ETHER_ALIGN);
2524 
2525 	error = bus_dmamap_load_mbuf(sc->rxtag, pkt->map, m, BUS_DMA_NOWAIT);
2526 	if (error) {
2527 		m_freem(m);
2528 		return (error);
2529 	}
2530 
2531 	bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2532 	    BUS_DMASYNC_PREREAD);
2533 	pkt->m_head = m;
2534 
2535 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2536 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_POSTWRITE);
2537 
2538 	bzero(desc, sizeof(*desc));
2539 	desc->buffer_addr = htole64(pkt->map->dm_segs[0].ds_addr);
2540 
2541 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2542 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_PREWRITE);
2543 
2544 	return (0);
2545 }
2546 
2547 /*********************************************************************
2548  *
2549  *  Allocate memory for rx_buffer structures. Since we use one
2550  *  rx_buffer per received packet, the maximum number of rx_buffers
2551  *  that we'll need is equal to the number of receive descriptors
2552  *  that we've allocated.
2553  *
2554  **********************************************************************/
2555 int
2556 em_allocate_receive_structures(struct em_softc *sc)
2557 {
2558 	int		i, error;
2559 	struct em_buffer *rx_buffer;
2560 
2561 	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
2562 	    sizeof(struct em_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2563 		printf("%s: Unable to allocate rx_buffer memory\n",
2564 		       sc->sc_dv.dv_xname);
2565 		return (ENOMEM);
2566 	}
2567 
2568 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2569 
2570 	rx_buffer = sc->rx_buffer_area;
2571 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2572 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2573 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rx_buffer->map);
2574 		if (error != 0) {
2575 			printf("%s: em_allocate_receive_structures: "
2576 			    "bus_dmamap_create failed; error %u\n",
2577 			    sc->sc_dv.dv_xname, error);
2578 			goto fail;
2579 		}
2580 		rx_buffer->m_head = NULL;
2581 	}
2582 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
2583 	    sc->rxdma.dma_map->dm_mapsize,
2584 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2585 
2586 	return (0);
2587 
2588 fail:
2589 	em_free_receive_structures(sc);
2590 	return (error);
2591 }
2592 
2593 /*********************************************************************
2594  *
2595  *  Allocate and initialize receive structures.
2596  *
2597  **********************************************************************/
2598 int
2599 em_setup_receive_structures(struct em_softc *sc)
2600 {
2601 	struct ifnet *ifp = &sc->interface_data.ac_if;
2602 	u_int lwm;
2603 
2604 	memset(sc->rx_desc_base, 0,
2605 	    sizeof(struct em_rx_desc) * sc->num_rx_desc);
2606 
2607 	if (em_allocate_receive_structures(sc))
2608 		return (ENOMEM);
2609 
2610 	/* Setup our descriptor pointers */
2611 	sc->next_rx_desc_to_check = 0;
2612 	sc->last_rx_desc_filled = sc->num_rx_desc - 1;
2613 
2614 	lwm = max(4, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1));
2615 	if_rxr_init(&sc->rx_ring, lwm, sc->num_rx_desc);
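	/*
	 * Example: with a 1500-byte MTU and 2 KB clusters the low
	 * watermark is max(4, 2 * (0 + 1)) = 4 descriptors, while a
	 * 9000-byte jumbo MTU gives max(4, 2 * (4 + 1)) = 10.
	 */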
2616 
2617 	if (em_rxfill(sc) == 0) {
2618 		printf("%s: unable to fill any rx descriptors\n",
2619 		    sc->sc_dv.dv_xname);
2620 	}
2621 
2622 	return (0);
2623 }
2624 
2625 /*********************************************************************
2626  *
2627  *  Enable receive unit.
2628  *
2629  **********************************************************************/
2630 void
2631 em_initialize_receive_unit(struct em_softc *sc)
2632 {
2633 	u_int32_t	reg_rctl;
2634 	u_int32_t	reg_rxcsum;
2635 	u_int64_t	bus_addr;
2636 
2637 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2638 
2639 	/* Make sure receives are disabled while setting up the descriptor ring */
2640 	E1000_WRITE_REG(&sc->hw, RCTL, 0);
2641 
2642 	/* Set the Receive Delay Timer Register */
2643 	E1000_WRITE_REG(&sc->hw, RDTR,
2644 			sc->rx_int_delay | E1000_RDT_FPDB);
2645 
2646 	if (sc->hw.mac_type >= em_82540) {
2647 		if (sc->rx_int_delay)
2648 			E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
2649 
2650 		/* Set the interrupt throttling rate.  Value is calculated
2651 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2652 		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
2653 	}
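	/*
	 * Worked example: assuming MAX_INTS_PER_SEC is 8000, the target
	 * interval is 125 us, so DEFAULT_ITR = 125000 ns / 256 ns ~= 488
	 * (the ITR register counts in 256 ns increments).
	 */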
2654 
2655 	/* Setup the Base and Length of the Rx Descriptor Ring */
2656 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
2657 	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
2658 			sizeof(struct em_rx_desc));
2659 	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2660 	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
2661 
2662 	/* Setup the Receive Control Register */
2663 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2664 	    E1000_RCTL_RDMTS_HALF |
2665 	    (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2666 
2667 	if (sc->hw.tbi_compatibility_on == TRUE)
2668 		reg_rctl |= E1000_RCTL_SBP;
2669 
2670 	/*
2671 	 * The i210/i350 have a bug where they always strip the CRC
2672 	 * whether asked to or not.  So ask for a stripped CRC here
2673 	 * and cope with it in em_rxeof().
2674 	 */
2675 	if (sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350)
2676 		reg_rctl |= E1000_RCTL_SECRC;
2677 
2678 	switch (sc->rx_buffer_len) {
2679 	default:
2680 	case EM_RXBUFFER_2048:
2681 		reg_rctl |= E1000_RCTL_SZ_2048;
2682 		break;
2683 	case EM_RXBUFFER_4096:
2684 		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2685 		break;
2686 	case EM_RXBUFFER_8192:
2687 		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2688 		break;
2689 	case EM_RXBUFFER_16384:
2690 		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2691 		break;
2692 	}
2693 
2694 	if (sc->hw.max_frame_size != ETHER_MAX_LEN)
2695 		reg_rctl |= E1000_RCTL_LPE;
2696 
2697 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2698 	if (sc->hw.mac_type >= em_82543) {
2699 		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
2700 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2701 		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
2702 	}
2703 
2704 	/*
2705 	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
2706 	 * long latencies are observed, like Lenovo X60.
2707 	 */
2708 	if (sc->hw.mac_type == em_82573)
2709 		E1000_WRITE_REG(&sc->hw, RDTR, 0x20);
2710 
2711 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2712 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2713 		/* 82575/6 need to enable the RX queue */
2714 		uint32_t reg;
2715 		reg = E1000_READ_REG(&sc->hw, RXDCTL);
2716 		reg |= E1000_RXDCTL_QUEUE_ENABLE;
2717 		E1000_WRITE_REG(&sc->hw, RXDCTL, reg);
2718 	}
2719 
2720 	/* Enable Receives */
2721 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
2722 
2723 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2724 	E1000_WRITE_REG(&sc->hw, RDH, 0);
2725 	E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
2726 }
2727 
2728 /*********************************************************************
2729  *
2730  *  Free receive related data structures.
2731  *
2732  **********************************************************************/
2733 void
2734 em_free_receive_structures(struct em_softc *sc)
2735 {
2736 	struct em_buffer   *rx_buffer;
2737 	int		i;
2738 
2739 	INIT_DEBUGOUT("free_receive_structures: begin");
2740 
2741 	if (sc->rx_buffer_area != NULL) {
2742 		rx_buffer = sc->rx_buffer_area;
2743 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2744 			if (rx_buffer->m_head != NULL) {
2745 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
2746 				    0, rx_buffer->map->dm_mapsize,
2747 				    BUS_DMASYNC_POSTREAD);
2748 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2749 				m_freem(rx_buffer->m_head);
2750 				rx_buffer->m_head = NULL;
2751 			}
2752 			bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2753 		}
2754 	}
2755 	if (sc->rx_buffer_area != NULL) {
2756 		free(sc->rx_buffer_area, M_DEVBUF, 0);
2757 		sc->rx_buffer_area = NULL;
2758 	}
2759 	if (sc->rxtag != NULL)
2760 		sc->rxtag = NULL;
2761 
2762 	if (sc->fmp != NULL) {
2763 		m_freem(sc->fmp);
2764 		sc->fmp = NULL;
2765 		sc->lmp = NULL;
2766 	}
2767 }
2768 
2769 #ifdef __STRICT_ALIGNMENT
2770 void
2771 em_realign(struct em_softc *sc, struct mbuf *m, u_int16_t *prev_len_adj)
2772 {
2773 	unsigned char tmp_align_buf[ETHER_ALIGN];
2774 	int tmp_align_buf_len = 0;
2775 
2776 	/*
2777 	 * The Ethernet payload is not 32-bit aligned when
2778 	 * Jumbo packets are enabled, so on architectures with
2779 	 * strict alignment we need to shift the entire packet
2780 	 * ETHER_ALIGN bytes. Ugh.
2781 	 */
2782 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2783 		return;
2784 
2785 	if (*prev_len_adj > sc->align_buf_len)
2786 		*prev_len_adj -= sc->align_buf_len;
2787 	else
2788 		*prev_len_adj = 0;
2789 
2790 	if (m->m_len > (MCLBYTES - ETHER_ALIGN)) {
2791 		bcopy(m->m_data + (MCLBYTES - ETHER_ALIGN),
2792 		    &tmp_align_buf, ETHER_ALIGN);
2793 		tmp_align_buf_len = m->m_len -
2794 		    (MCLBYTES - ETHER_ALIGN);
2795 		m->m_len -= ETHER_ALIGN;
2796 	}
2797 
2798 	if (m->m_len) {
2799 		bcopy(m->m_data, m->m_data + ETHER_ALIGN, m->m_len);
2800 		if (!sc->align_buf_len)
2801 			m->m_data += ETHER_ALIGN;
2802 	}
2803 
2804 	if (sc->align_buf_len) {
2805 		m->m_len += sc->align_buf_len;
2806 		bcopy(&sc->align_buf, m->m_data, sc->align_buf_len);
2807 	}
2808 
2809 	if (tmp_align_buf_len)
2810 		bcopy(&tmp_align_buf, &sc->align_buf, tmp_align_buf_len);
2811 
2812 	sc->align_buf_len = tmp_align_buf_len;
2813 }
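/*
 * Sketch of the carry scheme above (hypothetical jumbo receive): when a
 * frame spans several full 2 KB clusters, shifting a full cluster by
 * ETHER_ALIGN would push its last two bytes out, so they are parked in
 * align_buf and prepended to the next cluster instead.  The reassembled
 * chain is byte-for-byte identical, but the IP header now starts on a
 * 32-bit boundary.
 */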
2814 #endif /* __STRICT_ALIGNMENT */
2815 
2816 int
2817 em_rxfill(struct em_softc *sc)
2818 {
2819 	u_int slots;
2820 	int post = 0;
2821 	int i;
2822 
2823 	i = sc->last_rx_desc_filled;
2824 
2825 	for (slots = if_rxr_get(&sc->rx_ring, sc->num_rx_desc);
2826 	    slots > 0; slots--) {
2827 		if (++i == sc->num_rx_desc)
2828 			i = 0;
2829 
2830 		if (em_get_buf(sc, i) != 0)
2831 			break;
2832 
2833 		sc->last_rx_desc_filled = i;
2834 		post = 1;
2835 	}
2836 
2837 	if_rxr_put(&sc->rx_ring, slots);
2838 
2839 	return (post);
2840 }
2841 
2842 /*********************************************************************
2843  *
2844  *  This routine executes in interrupt context. It replenishes
2845  *  the mbufs in the descriptor ring and passes data that has been
2846  *  DMA'ed into host memory up to the upper layers.
2847  *
2848  *********************************************************************/
2849 void
2850 em_rxeof(struct em_softc *sc)
2851 {
2852 	struct ifnet	    *ifp = &sc->interface_data.ac_if;
2853 	struct mbuf_list    ml = MBUF_LIST_INITIALIZER();
2854 	struct mbuf	    *m;
2855 	u_int8_t	    accept_frame = 0;
2856 	u_int8_t	    eop = 0;
2857 	u_int16_t	    len, desc_len, prev_len_adj;
2858 	int		    i;
2859 
2860 	/* Pointer to the receive descriptor being examined. */
2861 	struct em_rx_desc   *desc;
2862 	struct em_buffer    *pkt;
2863 	u_int8_t	    status;
2864 
2865 	if (if_rxr_inuse(&sc->rx_ring) == 0)
2866 		return;
2867 
2868 	i = sc->next_rx_desc_to_check;
2869 
2870 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2871 	    0, sizeof(*desc) * sc->num_rx_desc,
2872 	    BUS_DMASYNC_POSTREAD);
2873 
2874 	do {
2875 		m = NULL;
2876 
2877 		desc = &sc->rx_desc_base[i];
2878 		pkt = &sc->rx_buffer_area[i];
2879 
2880 		status = desc->status;
2881 		if (!ISSET(status, E1000_RXD_STAT_DD))
2882 			break;
2883 
2884 		/* pull the mbuf off the ring */
2885 		bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2886 		    BUS_DMASYNC_POSTREAD);
2887 		bus_dmamap_unload(sc->rxtag, pkt->map);
2888 		m = pkt->m_head;
2889 		pkt->m_head = NULL;
2890 
2891 		if (m == NULL) {
2892 			panic("em_rxeof: NULL mbuf in slot %d "
2893 			    "(nrx %d, filled %d)", i,
2894 			    if_rxr_inuse(&sc->rx_ring),
2895 			    sc->last_rx_desc_filled);
2896 		}
2897 
2898 		if_rxr_put(&sc->rx_ring, 1);
2899 
2900 		accept_frame = 1;
2901 		prev_len_adj = 0;
2902 		desc_len = letoh16(desc->length);
2903 
2904 		if (status & E1000_RXD_STAT_EOP) {
2905 			eop = 1;
2906 			if (desc_len < ETHER_CRC_LEN) {
2907 				len = 0;
2908 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2909 			} else if (sc->hw.mac_type == em_i210 ||
2910 			    sc->hw.mac_type == em_i350)
2911 				len = desc_len;
2912 			else
2913 				len = desc_len - ETHER_CRC_LEN;
2914 		} else {
2915 			eop = 0;
2916 			len = desc_len;
2917 		}
2918 
2919 		if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2920 			u_int8_t last_byte;
2921 			u_int32_t pkt_len = desc_len;
2922 
2923 			if (sc->fmp != NULL)
2924 				pkt_len += sc->fmp->m_pkthdr.len;
2925 
2926 			last_byte = *(mtod(m, caddr_t) + desc_len - 1);
2927 			if (TBI_ACCEPT(&sc->hw, status, desc->errors,
2928 			    pkt_len, last_byte)) {
2929 #ifndef SMALL_KERNEL
2930 				em_tbi_adjust_stats(&sc->hw, &sc->stats,
2931 				    pkt_len, sc->hw.mac_addr);
2932 #endif
2933 				if (len > 0)
2934 					len--;
2935 			} else
2936 				accept_frame = 0;
2937 		}
2938 
2939 		if (accept_frame) {
2940 			/* Assign correct length to the current fragment */
2941 			m->m_len = len;
2942 
2943 			em_realign(sc, m, &prev_len_adj); /* STRICT_ALIGN */
2944 
2945 			if (sc->fmp == NULL) {
2946 				m->m_pkthdr.len = m->m_len;
2947 				sc->fmp = m;	 /* Store the first mbuf */
2948 				sc->lmp = m;
2949 			} else {
2950 				/* Chain mbuf's together */
2951 				m->m_flags &= ~M_PKTHDR;
2952 				/*
2953 				 * Adjust length of previous mbuf in chain if
2954 				 * we received less than 4 bytes in the last
2955 				 * descriptor.
2956 				 */
2957 				if (prev_len_adj > 0) {
2958 					sc->lmp->m_len -= prev_len_adj;
2959 					sc->fmp->m_pkthdr.len -= prev_len_adj;
2960 				}
2961 				sc->lmp->m_next = m;
2962 				sc->lmp = m;
2963 				sc->fmp->m_pkthdr.len += m->m_len;
2964 			}
2965 
2966 			if (eop) {
2967 				ifp->if_ipackets++;
2968 
2969 				m = sc->fmp;
2970 
2971 				em_receive_checksum(sc, desc, m);
2972 #if NVLAN > 0
2973 				if (desc->status & E1000_RXD_STAT_VP) {
2974 					m->m_pkthdr.ether_vtag =
2975 					    letoh16(desc->special);
2976 					m->m_flags |= M_VLANTAG;
2977 				}
2978 #endif
2979 				ml_enqueue(&ml, m);
2980 
2981 				sc->fmp = NULL;
2982 				sc->lmp = NULL;
2983 			}
2984 		} else {
2985 			sc->dropped_pkts++;
2986 
2987 			if (sc->fmp != NULL) {
2988 				m_freem(sc->fmp);
2989 				sc->fmp = NULL;
2990 				sc->lmp = NULL;
2991 			}
2992 
2993 			m_freem(m);
2994 		}
2995 
2996 		/* Advance our pointers to the next descriptor. */
2997 		if (++i == sc->num_rx_desc)
2998 			i = 0;
2999 	} while (if_rxr_inuse(&sc->rx_ring) > 0);
3000 
3001 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
3002 	    0, sizeof(*desc) * sc->num_rx_desc,
3003 	    BUS_DMASYNC_PREREAD);
3004 
3005 	if_input(ifp, &ml);
3006 
3007 	sc->next_rx_desc_to_check = i;
3008 }
3009 
3010 /*********************************************************************
3011  *
3012  *  Verify that the hardware indicated that the checksum is valid.
3013  *  Inform the stack about the checksum status so that the stack
3014  *  doesn't spend time verifying it again.
3015  *
3016  *********************************************************************/
3017 void
3018 em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
3019     struct mbuf *mp)
3020 {
3021 	/* 82543 or newer only */
3022 	if ((sc->hw.mac_type < em_82543) ||
3023 	    /* Ignore Checksum bit is set */
3024 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3025 		mp->m_pkthdr.csum_flags = 0;
3026 		return;
3027 	}
3028 
3029 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3030 		/* Did it pass? */
3031 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3032 			/* IP Checksum Good */
3033 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3034 
3035 		} else
3036 			mp->m_pkthdr.csum_flags = 0;
3037 	}
3038 
3039 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3040 		/* Did it pass? */
3041 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
3042 			mp->m_pkthdr.csum_flags |=
3043 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3044 	}
3045 }
3046 
3047 /*
3048  * This turns on the hardware offload of the VLAN
3049  * tag insertion and stripping.
3050  */
3051 void
3052 em_enable_hw_vlans(struct em_softc *sc)
3053 {
3054 	uint32_t ctrl;
3055 
3056 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
3057 	ctrl |= E1000_CTRL_VME;
3058 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
3059 }
3060 
3061 void
3062 em_enable_intr(struct em_softc *sc)
3063 {
3064 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
3065 }
3066 
3067 void
3068 em_disable_intr(struct em_softc *sc)
3069 {
3070 	/*
3071 	 * The first version of the 82542 had an erratum where, when link
3072 	 * was forced, it would stay up even if the cable was disconnected.
3073 	 * Sequence errors were used to detect the disconnect and then
3074 	 * the driver would unforce the link.  This code is in the ISR.
3075 	 * For this to work correctly the Sequence error interrupt had
3076 	 * to be enabled all the time.
3077 	 */
3078 
3079 	if (sc->hw.mac_type == em_82542_rev2_0)
3080 		E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
3081 	else
3082 		E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
3083 }
3084 
3085 void
3086 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3087 {
3088 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3089 	pcireg_t val;
3090 
3091 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3092 	if (reg & 0x2) {
3093 		val &= 0x0000ffff;
3094 		val |= (*value << 16);
3095 	} else {
3096 		val &= 0xffff0000;
3097 		val |= *value;
3098 	}
3099 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3100 }
3101 
3102 void
3103 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3104 {
3105 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3106 	pcireg_t val;
3107 
3108 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3109 	if (reg & 0x2)
3110 		*value = (val >> 16) & 0xffff;
3111 	else
3112 		*value = val & 0xffff;
3113 }
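/*
 * Example for the two helpers above: a 16-bit access to the PCI status
 * register (offset 0x06) becomes a 32-bit access to the dword at 0x04,
 * with the upper half shifted or masked in, since config space is read
 * and written a dword at a time here.
 */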
3114 
3115 void
3116 em_pci_set_mwi(struct em_hw *hw)
3117 {
3118 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3119 
3120 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3121 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3122 }
3123 
3124 void
3125 em_pci_clear_mwi(struct em_hw *hw)
3126 {
3127 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3128 
3129 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3130 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3131 }
3132 
3133 /*
3134  * We may eventually really do this, but it's unnecessary
3135  * for now so we just return unsupported.
3136  */
3137 int32_t
3138 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3139 {
3140 	return (-E1000_NOT_IMPLEMENTED);
3141 }
3142 
3143 /*********************************************************************
3144 * 82544 Coexistence issue workaround.
3145 *    There are two issues.
3146 *       1. Transmit Hang issue.
3147 *    To detect this issue, the following equation can be used:
3148 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3149 *          If SUM[3:0] is between 1 and 4, we will have this issue.
3150 *
3151 *       2. DAC issue.
3152 *    To detect this issue, the following equation can be used:
3153 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3154 *          If SUM[3:0] is between 9 and 0xC, we will have this issue.
3155 *
3156 *
3157 *    WORKAROUND:
3158 *          Make sure the ending address is not 1, 2, 3, 4 (hang) or
3159 *          9, a, b, c (DAC).
3160 **********************************************************************/
3161 u_int32_t
3162 em_fill_descriptors(u_int64_t address, u_int32_t length,
3163     PDESC_ARRAY desc_array)
3164 {
3165 	/* The issue is sensitive to both length and address. */
3166 	/* Check the address first... */
3167 	u_int32_t safe_terminator;
3168 	if (length <= 4) {
3169 		desc_array->descriptor[0].address = address;
3170 		desc_array->descriptor[0].length = length;
3171 		desc_array->elements = 1;
3172 		return (desc_array->elements);
3173 	}
3174 	safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3175 	/* If it is not in the 0x1-0x4 (hang) or 0x9-0xC (DAC) ranges, no split is needed. */
3176 	if (safe_terminator == 0 ||
3177 	    (safe_terminator > 4 &&
3178 	    safe_terminator < 9) ||
3179 	    (safe_terminator > 0xC &&
3180 	    safe_terminator <= 0xF)) {
3181 		desc_array->descriptor[0].address = address;
3182 		desc_array->descriptor[0].length = length;
3183 		desc_array->elements = 1;
3184 		return (desc_array->elements);
3185 	}
3186 
3187 	desc_array->descriptor[0].address = address;
3188 	desc_array->descriptor[0].length = length - 4;
3189 	desc_array->descriptor[1].address = address + (length - 4);
3190 	desc_array->descriptor[1].length = 4;
3191 	desc_array->elements = 2;
3192 	return (desc_array->elements);
3193 }
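/*
 * Worked example (hypothetical values): a buffer whose address has
 * ADDR[2:0] = 0 and whose length ends in 2 gives safe_terminator =
 * (0 + 2) & 0xF = 2, inside the 1-4 hang range, so the buffer is split
 * into a (length - 4)-byte descriptor plus a trailing 4-byte one.
 * Buffers of four bytes or less are always passed through unchanged.
 */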
3194 
3195 /*
3196  * Disable the PCIe L0s and L1 link states.
3197  */
3198 void
3199 em_disable_aspm(struct em_softc *sc)
3200 {
3201 	int offset;
3202 	pcireg_t val;
3203 
3204 	switch (sc->hw.mac_type) {
3205 	case em_82571:
3206 	case em_82572:
3207 	case em_82573:
3208 	case em_82574:
3209 		break;
3210 	default:
3211 		return;
3212 	}
3213 
3214 	if (!pci_get_capability(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3215 	    PCI_CAP_PCIEXPRESS, &offset, NULL))
3216 		return;
3217 
3218 	/* Disable PCIe Active State Power Management (ASPM). */
3219 	val = pci_conf_read(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3220 	    offset + PCI_PCIE_LCSR);
3221 
3222 	switch (sc->hw.mac_type) {
3223 	case em_82571:
3224 	case em_82572:
3225 		val &= ~PCI_PCIE_LCSR_ASPM_L1;
3226 		break;
3227 	case em_82573:
3228 	case em_82574:
3229 		val &= ~(PCI_PCIE_LCSR_ASPM_L0S |
3230 		    PCI_PCIE_LCSR_ASPM_L1);
3231 		break;
3232 	default:
3233 		break;
3234 	}
3235 
3236 	pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3237 	    offset + PCI_PCIE_LCSR, val);
3238 }
3239 
3240 #ifndef SMALL_KERNEL
3241 /**********************************************************************
3242  *
3243  *  Update the board statistics counters.
3244  *
3245  **********************************************************************/
3246 void
3247 em_update_stats_counters(struct em_softc *sc)
3248 {
3249 	struct ifnet   *ifp = &sc->interface_data.ac_if;
3250 
3251 	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
3252 	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
3253 	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);
3254 
3255 	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
3256 	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
3257 
3258 	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
3259 	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
3260 
3261 	if (sc->hw.mac_type >= em_82543) {
3262 		sc->stats.algnerrc +=
3263 		E1000_READ_REG(&sc->hw, ALGNERRC);
3264 		sc->stats.rxerrc +=
3265 		E1000_READ_REG(&sc->hw, RXERRC);
3266 		sc->stats.cexterr +=
3267 		E1000_READ_REG(&sc->hw, CEXTERR);
3268 	}
3269 
3270 #ifdef EM_DEBUG
3271 	if (sc->hw.media_type == em_media_type_copper ||
3272 	    (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
3273 		sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
3274 		sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
3275 	}
3276 	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
3277 
3278 	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
3279 	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
3280 	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
3281 	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
3282 	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
3283 	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
3284 	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
3285 	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
3286 	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
3287 	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
3288 	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
3289 	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
3290 	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
3291 	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
3292 	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
3293 	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
3294 	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
3295 	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);
3296 
3297 	/* For the 64-bit byte counters the low dword must be read first. */
3298 	/* Both registers clear on the read of the high dword */
3299 
3300 	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
3301 	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
3302 	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
3303 	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);
3304 
3305 	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
3306 	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
3307 	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);
3308 
3309 	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
3310 	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
3311 	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
3312 	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);
3313 
3314 	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
3315 	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
3316 	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
3317 	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
3318 	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
3319 	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
3320 	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
3321 	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
3322 	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
3323 	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);
3324 
3325 	if (sc->hw.mac_type >= em_82543) {
3326 		sc->stats.tncrs +=
3327 		E1000_READ_REG(&sc->hw, TNCRS);
3328 		sc->stats.tsctc +=
3329 		E1000_READ_REG(&sc->hw, TSCTC);
3330 		sc->stats.tsctfc +=
3331 		E1000_READ_REG(&sc->hw, TSCTFC);
3332 	}
3333 #endif
3334 
3335 	/* Fill out the OS statistics structure */
3336 	ifp->if_collisions = sc->stats.colc;
3337 
3338 	/* Rx Errors */
3339 	ifp->if_ierrors =
3340 	    sc->dropped_pkts +
3341 	    sc->stats.rxerrc +
3342 	    sc->stats.crcerrs +
3343 	    sc->stats.algnerrc +
3344 	    sc->stats.ruc + sc->stats.roc +
3345 	    sc->stats.mpc + sc->stats.cexterr +
3346 	    sc->rx_overruns;
3347 
3348 	/* Tx Errors */
3349 	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
3350 	    sc->watchdog_events;
3351 }
3352 
3353 #ifdef EM_DEBUG
3354 /**********************************************************************
3355  *
3356  *  This routine is called only when IFF_DEBUG is enabled.
3357  *  This routine provides a way to take a look at important statistics
3358  *  maintained by the driver and hardware.
3359  *
3360  **********************************************************************/
3361 void
3362 em_print_hw_stats(struct em_softc *sc)
3363 {
3364 	const char * const unit = sc->sc_dv.dv_xname;
3365 
3366 	printf("%s: Excessive collisions = %lld\n", unit,
3367 		(long long)sc->stats.ecol);
3368 	printf("%s: Symbol errors = %lld\n", unit,
3369 		(long long)sc->stats.symerrs);
3370 	printf("%s: Sequence errors = %lld\n", unit,
3371 		(long long)sc->stats.sec);
3372 	printf("%s: Defer count = %lld\n", unit,
3373 		(long long)sc->stats.dc);
3374 
3375 	printf("%s: Missed Packets = %lld\n", unit,
3376 		(long long)sc->stats.mpc);
3377 	printf("%s: Receive No Buffers = %lld\n", unit,
3378 		(long long)sc->stats.rnbc);
3379 	/* RLEC is inaccurate on some hardware, calculate our own */
3380 	printf("%s: Receive Length Errors = %lld\n", unit,
3381 		((long long)sc->stats.roc +
3382 		(long long)sc->stats.ruc));
3383 	printf("%s: Receive errors = %lld\n", unit,
3384 		(long long)sc->stats.rxerrc);
3385 	printf("%s: Crc errors = %lld\n", unit,
3386 		(long long)sc->stats.crcerrs);
3387 	printf("%s: Alignment errors = %lld\n", unit,
3388 		(long long)sc->stats.algnerrc);
3389 	printf("%s: Carrier extension errors = %lld\n", unit,
3390 		(long long)sc->stats.cexterr);
3391 
3392 	printf("%s: RX overruns = %ld\n", unit,
3393 		sc->rx_overruns);
3394 	printf("%s: watchdog timeouts = %ld\n", unit,
3395 		sc->watchdog_events);
3396 
3397 	printf("%s: XON Rcvd = %lld\n", unit,
3398 		(long long)sc->stats.xonrxc);
3399 	printf("%s: XON Xmtd = %lld\n", unit,
3400 		(long long)sc->stats.xontxc);
3401 	printf("%s: XOFF Rcvd = %lld\n", unit,
3402 		(long long)sc->stats.xoffrxc);
3403 	printf("%s: XOFF Xmtd = %lld\n", unit,
3404 		(long long)sc->stats.xofftxc);
3405 
3406 	printf("%s: Good Packets Rcvd = %lld\n", unit,
3407 		(long long)sc->stats.gprc);
3408 	printf("%s: Good Packets Xmtd = %lld\n", unit,
3409 		(long long)sc->stats.gptc);
3410 }
3411 #endif
3412 #endif /* !SMALL_KERNEL */
3413