xref: /dragonfly/sys/dev/netif/em/if_em.c (revision 222a27c4)
1 /**************************************************************************
2 
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4 
5 Copyright (c) 2001-2003, Intel Corporation
6 All rights reserved.
7 
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10 
11  1. Redistributions of source code must retain the above copyright notice,
12     this list of conditions and the following disclaimer.
13 
14  2. Redistributions in binary form must reproduce the above copyright
15     notice, this list of conditions and the following disclaimer in the
16     documentation and/or other materials provided with the distribution.
17 
18  3. Neither the name of the Intel Corporation nor the names of its
19     contributors may be used to endorse or promote products derived from
20     this software without specific prior written permission.
21 
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33 
34 ***************************************************************************/
35 
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.29 2005/02/14 17:11:12 joerg Exp $*/
38 
39 #include "if_em.h"
40 #include <net/ifq_var.h>
41 
42 /*********************************************************************
43  *  Set this to one to display debug statistics
44  *********************************************************************/
45 int             em_display_debug_stats = 0;
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 
51 char em_driver_version[] = "1.7.25";
52 
53 
54 /*********************************************************************
55  *  PCI Device ID Table
56  *
57  *  Used by probe to select the devices this driver supports.
58  *  The last field stores an index into em_strings.
59  *  The last entry must be all 0s.
60  *
61  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62  *********************************************************************/
63 
64 static em_vendor_info_t em_vendor_info_array[] =
65 {
66 	/* Intel(R) PRO/1000 Network Connection */
67 	{ 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
68 	{ 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
69 	{ 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
70 	{ 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
71 	{ 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
72 	{ 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
73 	{ 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
74 	{ 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
75 	{ 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
76 	{ 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
77 	{ 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
78 	{ 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
79 	{ 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
80 	{ 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
81 	{ 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
82 	{ 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
83 	{ 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
84 	{ 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
85 	{ 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
86 	{ 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
87 	{ 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
88 	{ 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
89 	{ 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
90 	{ 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
91 	{ 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
92 	{ 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
93 	{ 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
94 	{ 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
95 	{ 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
96 	{ 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
97 	{ 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
98 	{ 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
99 	/* required last entry */
100 	{ 0, 0, 0, 0, 0}
101 };
102 
103 /*********************************************************************
104  *  Table of branding strings for all supported NICs.
105  *********************************************************************/
106 
107 static const char *em_strings[] = {
108 	"Intel(R) PRO/1000 Network Connection"
109 };
110 
111 /*********************************************************************
112  *  Function prototypes
113  *********************************************************************/
114 static int	em_probe(device_t);
115 static int	em_attach(device_t);
116 static int	em_detach(device_t);
117 static int	em_shutdown(device_t);
118 static void	em_intr(void *);
119 static void	em_start(struct ifnet *);
120 static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
121 static void	em_watchdog(struct ifnet *);
122 static void	em_init(void *);
123 static void	em_stop(void *);
124 static void	em_media_status(struct ifnet *, struct ifmediareq *);
125 static int	em_media_change(struct ifnet *);
126 static void	em_identify_hardware(struct adapter *);
127 static void	em_local_timer(void *);
128 static int	em_hardware_init(struct adapter *);
129 static void	em_setup_interface(device_t, struct adapter *);
130 static int	em_setup_transmit_structures(struct adapter *);
131 static void	em_initialize_transmit_unit(struct adapter *);
132 static int	em_setup_receive_structures(struct adapter *);
133 static void	em_initialize_receive_unit(struct adapter *);
134 static void	em_enable_intr(struct adapter *);
135 static void	em_disable_intr(struct adapter *);
136 static void	em_free_transmit_structures(struct adapter *);
137 static void	em_free_receive_structures(struct adapter *);
138 static void	em_update_stats_counters(struct adapter *);
139 static void	em_clean_transmit_interrupts(struct adapter *);
140 static int	em_allocate_receive_structures(struct adapter *);
141 static int	em_allocate_transmit_structures(struct adapter *);
142 static void	em_process_receive_interrupts(struct adapter *, int);
143 static void	em_receive_checksum(struct adapter *, struct em_rx_desc *,
144 				    struct mbuf *);
145 static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
146 					   uint32_t *, uint32_t *);
147 static void	em_set_promisc(struct adapter *);
148 static void	em_disable_promisc(struct adapter *);
149 static void	em_set_multi(struct adapter *);
150 static void	em_print_hw_stats(struct adapter *);
151 static void	em_print_link_status(struct adapter *);
152 static int	em_get_buf(int i, struct adapter *, struct mbuf *, int how);
153 static void	em_enable_vlans(struct adapter *);
154 static int	em_encap(struct adapter *, struct mbuf *);
155 static void	em_smartspeed(struct adapter *);
156 static int	em_82547_fifo_workaround(struct adapter *, int);
157 static void	em_82547_update_fifo_head(struct adapter *, int);
158 static int	em_82547_tx_fifo_reset(struct adapter *);
159 static void	em_82547_move_tail(void *arg);
160 static int	em_dma_malloc(struct adapter *, bus_size_t,
161 			      struct em_dma_alloc *, int);
162 static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
163 static void	em_print_debug_info(struct adapter *);
164 static int	em_is_valid_ether_addr(uint8_t *);
165 static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
166 static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
167 static uint32_t	em_fill_descriptors(uint64_t address, uint32_t length,
168 				   PDESC_ARRAY desc_array);
169 static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
170 static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
171 static void	em_add_int_delay_sysctl(struct adapter *, const char *,
172 					const char *,
173 					struct em_int_delay_info *, int, int);
174 
175 /*********************************************************************
176  *  FreeBSD Device Interface Entry Points
177  *********************************************************************/
178 
179 static device_method_t em_methods[] = {
180 	/* Device interface */
181 	DEVMETHOD(device_probe, em_probe),
182 	DEVMETHOD(device_attach, em_attach),
183 	DEVMETHOD(device_detach, em_detach),
184 	DEVMETHOD(device_shutdown, em_shutdown),
185 	{0, 0}
186 };
187 
188 static driver_t em_driver = {
189 	"em", em_methods, sizeof(struct adapter),
190 };
191 
192 static devclass_t em_devclass;
193 
194 DECLARE_DUMMY_MODULE(if_em);
195 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
196 
197 /*********************************************************************
198  *  Tunable default values.
199  *********************************************************************/
200 
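/*
 * The EM_TIDV/EM_RDTR/EM_TADV/EM_RADV register defaults are expressed in
 * hardware ticks of 1.024 usec each; these macros convert between
 * microseconds and register ticks, rounding to the nearest value.
 */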
201 #define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
202 #define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
203 
204 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
205 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
206 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
207 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
208 static int em_int_throttle_ceil = 10000;
209 
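/*
 * The defaults above can be overridden at boot time through the hw.em.*
 * loader tunables below; em_attach() later exposes per-adapter copies as
 * sysctl nodes.
 */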
210 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
211 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
212 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
213 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
214 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
215 
216 /*********************************************************************
217  *  Device identification routine
218  *
219  *  em_probe determines whether the driver should be loaded on the
220  *  adapter based on the PCI vendor/device ID of the adapter.
221  *
222  *  return 0 on success, positive on failure
223  *********************************************************************/
224 
225 static int
226 em_probe(device_t dev)
227 {
228 	em_vendor_info_t *ent;
229 
230 	uint16_t pci_vendor_id = 0;
231 	uint16_t pci_device_id = 0;
232 	uint16_t pci_subvendor_id = 0;
233 	uint16_t pci_subdevice_id = 0;
234 	char adapter_name[60];
235 
236 	INIT_DEBUGOUT("em_probe: begin");
237 
238 	pci_vendor_id = pci_get_vendor(dev);
239 	if (pci_vendor_id != EM_VENDOR_ID)
240 		return(ENXIO);
241 
242 	pci_device_id = pci_get_device(dev);
243 	pci_subvendor_id = pci_get_subvendor(dev);
244 	pci_subdevice_id = pci_get_subdevice(dev);
245 
246 	ent = em_vendor_info_array;
247 	while (ent->vendor_id != 0) {
248 		if ((pci_vendor_id == ent->vendor_id) &&
249 		    (pci_device_id == ent->device_id) &&
250 
251 		    ((pci_subvendor_id == ent->subvendor_id) ||
252 		     (ent->subvendor_id == PCI_ANY_ID)) &&
253 
254 		    ((pci_subdevice_id == ent->subdevice_id) ||
255 		     (ent->subdevice_id == PCI_ANY_ID))) {
256 			snprintf(adapter_name, sizeof(adapter_name),
257 				 "%s, Version - %s",  em_strings[ent->index],
258 				 em_driver_version);
259 			device_set_desc_copy(dev, adapter_name);
260 			return(0);
261 		}
262 		ent++;
263 	}
264 
265 	return(ENXIO);
266 }
267 
268 /*********************************************************************
269  *  Device initialization routine
270  *
271  *  The attach entry point is called when the driver is being loaded.
272  *  This routine identifies the type of hardware, allocates all resources
273  *  and initializes the hardware.
274  *
275  *  return 0 on success, positive on failure
276  *********************************************************************/
277 
278 static int
279 em_attach(device_t dev)
280 {
281 	struct adapter *adapter;
282 	int tsize, rsize;
283 	int i, val, rid;
284 	int error = 0;
285 
286 	INIT_DEBUGOUT("em_attach: begin");
287 
288 	adapter = device_get_softc(dev);
289 
290 	bzero(adapter, sizeof(struct adapter));
291 
292 	callout_init(&adapter->timer);
293 	callout_init(&adapter->tx_fifo_timer);
294 
295 	adapter->dev = dev;
296 	adapter->osdep.dev = dev;
297 
298 	/* SYSCTL stuff */
299 	sysctl_ctx_init(&adapter->sysctl_ctx);
300 	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
301 					       SYSCTL_STATIC_CHILDREN(_hw),
302 					       OID_AUTO,
303 					       device_get_nameunit(dev),
304 					       CTLFLAG_RD,
305 					       0, "");
306 
307 	if (adapter->sysctl_tree == NULL) {
308 		error = EIO;
309 		goto fail;
310 	}
311 
312 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
313 			SYSCTL_CHILDREN(adapter->sysctl_tree),
314 			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
315 			(void *)adapter, 0,
316 			em_sysctl_debug_info, "I", "Debug Information");
317 
318 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
319 			SYSCTL_CHILDREN(adapter->sysctl_tree),
320 			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
321 			(void *)adapter, 0,
322 			em_sysctl_stats, "I", "Statistics");
323 
324 	/* Determine hardware revision */
325 	em_identify_hardware(adapter);
326 
327 	/* Set up some sysctls for the tunable interrupt delays */
328 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
329 				"receive interrupt delay in usecs",
330 				&adapter->rx_int_delay,
331 				E1000_REG_OFFSET(&adapter->hw, RDTR),
332 				em_rx_int_delay_dflt);
333 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
334 				"transmit interrupt delay in usecs",
335 				&adapter->tx_int_delay,
336 				E1000_REG_OFFSET(&adapter->hw, TIDV),
337 				em_tx_int_delay_dflt);
338 	if (adapter->hw.mac_type >= em_82540) {
339 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
340 					"receive interrupt delay limit in usecs",
341 					&adapter->rx_abs_int_delay,
342 					E1000_REG_OFFSET(&adapter->hw, RADV),
343 					em_rx_abs_int_delay_dflt);
344 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
345 					"transmit interrupt delay limit in usecs",
346 					&adapter->tx_abs_int_delay,
347 					E1000_REG_OFFSET(&adapter->hw, TADV),
348 					em_tx_abs_int_delay_dflt);
349 		SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
350 			SYSCTL_CHILDREN(adapter->sysctl_tree),
351 			OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
352 			adapter, 0, em_sysctl_int_throttle, "I", NULL);
353 	}
354 
355 	/* Parameters (to be read from user) */
356 	adapter->num_tx_desc = EM_MAX_TXD;
357 	adapter->num_rx_desc = EM_MAX_RXD;
358 	adapter->hw.autoneg = DO_AUTO_NEG;
359 	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
360 	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
361 	adapter->hw.tbi_compatibility_en = TRUE;
362 	adapter->rx_buffer_len = EM_RXBUFFER_2048;
363 
364 	/*
365 	 * These parameters control the automatic generation (Tx) of, and
366 	 * response (Rx) to, Ethernet PAUSE frames.
367 	 */
368 	adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
369 	adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
370 	adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
371 	adapter->hw.fc_send_xon   = TRUE;
372 	adapter->hw.fc = em_fc_full;
373 
374 	adapter->hw.phy_init_script = 1;
375 	adapter->hw.phy_reset_disable = FALSE;
376 
377 #ifndef EM_MASTER_SLAVE
378 	adapter->hw.master_slave = em_ms_hw_default;
379 #else
380 	adapter->hw.master_slave = EM_MASTER_SLAVE;
381 #endif
382 
383 	/*
384 	 * Set the max frame size assuming standard Ethernet-sized
385 	 * frames.
386 	 */
387 	adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
388 
389 	adapter->hw.min_frame_size =
390 	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
391 
392 	/*
393 	 * This controls when hardware reports transmit completion
394 	 * status.
395 	 */
396 	adapter->hw.report_tx_early = 1;
397 
398 	rid = EM_MMBA;
399 	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
400 						     &rid, RF_ACTIVE);
401 	if (!(adapter->res_memory)) {
402 		device_printf(dev, "Unable to allocate bus resource: memory\n");
403 		error = ENXIO;
404 		goto fail;
405 	}
406 	adapter->osdep.mem_bus_space_tag =
407 	    rman_get_bustag(adapter->res_memory);
408 	adapter->osdep.mem_bus_space_handle =
409 	    rman_get_bushandle(adapter->res_memory);
410 	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
411 
412 	if (adapter->hw.mac_type > em_82543) {
413 		/* Figure out where our I/O BAR is */
414 		rid = EM_MMBA;
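		/*
		 * Scan the BARs starting at EM_MMBA in 4-byte steps; a BAR
		 * with bit 0 set decodes I/O space, which is the resource
		 * needed for register access through the I/O window.
		 */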
415 		for (i = 0; i < 5; i++) {
416 			val = pci_read_config(dev, rid, 4);
417 			if (val & 0x00000001) {
418 				adapter->io_rid = rid;
419 				break;
420 			}
421 			rid += 4;
422 		}
423 
424 		adapter->res_ioport = bus_alloc_resource_any(dev,
425 		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
426 		if (!(adapter->res_ioport)) {
427 			device_printf(dev, "Unable to allocate bus resource: ioport\n");
428 			error = ENXIO;
429 			goto fail;
430 		}
431 
432 		adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
433 		adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
434 	}
435 
436 	rid = 0x0;
437 	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
438 	    &rid, RF_SHAREABLE | RF_ACTIVE);
439 	if (!(adapter->res_interrupt)) {
440 		device_printf(dev, "Unable to allocate bus resource: interrupt\n");
441 		error = ENXIO;
442 		goto fail;
443 	}
444 
445 	adapter->hw.back = &adapter->osdep;
446 
447 	/* Initialize eeprom parameters */
448 	em_init_eeprom_params(&adapter->hw);
449 
450 	tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
451 
452 	/* Allocate Transmit Descriptor ring */
453 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
454 		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
455 		error = ENOMEM;
456 		goto fail;
457 	}
458 	adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
459 
460 	rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
461 
462 	/* Allocate Receive Descriptor ring */
463 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
464 		device_printf(dev, "Unable to allocate rx_desc memory\n");
465 		error = ENOMEM;
466 		goto fail;
467 	}
468 	adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
469 
470 	/* Initialize the hardware */
471 	if (em_hardware_init(adapter)) {
472 		device_printf(dev, "Unable to initialize the hardware\n");
473 		error = EIO;
474 		goto fail;
475 	}
476 
477 	/* Copy the permanent MAC address out of the EEPROM */
478 	if (em_read_mac_addr(&adapter->hw) < 0) {
479 		device_printf(dev, "EEPROM read error while reading mac address\n");
480 		error = EIO;
481 		goto fail;
482 	}
483 
484 	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
485 		device_printf(dev, "Invalid mac address\n");
486 		error = EIO;
487 		goto fail;
488 	}
489 
490 	/* Setup OS specific network interface */
491 	em_setup_interface(dev, adapter);
492 
493 	/* Initialize statistics */
494 	em_clear_hw_cntrs(&adapter->hw);
495 	em_update_stats_counters(adapter);
496 	adapter->hw.get_link_status = 1;
497 	em_check_for_link(&adapter->hw);
498 
499 	/* Print the link status */
500 	if (adapter->link_active == 1) {
501 		em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
502 					&adapter->link_duplex);
503 		device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
504 		    adapter->link_speed,
505 		    adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
506 	} else
507 		device_printf(dev, "Speed: N/A, Duplex:N/A\n");
508 
509 	/* Identify 82544 on PCIX */
510 	em_get_bus_info(&adapter->hw);
511 	if (adapter->hw.bus_type == em_bus_type_pcix &&
512 	    adapter->hw.mac_type == em_82544)
513 		adapter->pcix_82544 = TRUE;
514 	else
515 		adapter->pcix_82544 = FALSE;
516 
517 	error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET,
518 			   (void (*)(void *)) em_intr, adapter,
519 			   &adapter->int_handler_tag);
520 	if (error) {
521 		device_printf(dev, "Error registering interrupt handler!\n");
522 		ether_ifdetach(&adapter->interface_data.ac_if);
523 		goto fail;
524 	}
525 
526 	INIT_DEBUGOUT("em_attach: end");
527 	return(0);
528 
529 fail:
530 	em_detach(dev);
531 	return(error);
532 }
533 
534 /*********************************************************************
535  *  Device removal routine
536  *
537  *  The detach entry point is called when the driver is being removed.
538  *  This routine stops the adapter and deallocates all the resources
539  *  that were allocated for driver operation.
540  *
541  *  return 0 on success, positive on failure
542  *********************************************************************/
543 
544 static int
545 em_detach(device_t dev)
546 {
547 	struct adapter * adapter = device_get_softc(dev);
548 	int s;
549 
550 	INIT_DEBUGOUT("em_detach: begin");
551 	s = splimp();
552 
553 	adapter->in_detach = 1;
554 
555 	if (device_is_attached(dev)) {
556 		em_stop(adapter);
557 		em_phy_hw_reset(&adapter->hw);
558 		ether_ifdetach(&adapter->interface_data.ac_if);
559 	}
560 	bus_generic_detach(dev);
561 
562 	if (adapter->res_interrupt != NULL) {
563 		bus_teardown_intr(dev, adapter->res_interrupt,
564 				  adapter->int_handler_tag);
565 		bus_release_resource(dev, SYS_RES_IRQ, 0,
566 				     adapter->res_interrupt);
567 	}
568 	if (adapter->res_memory != NULL) {
569 		bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
570 				     adapter->res_memory);
571 	}
572 
573 	if (adapter->res_ioport != NULL) {
574 		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
575 				     adapter->res_ioport);
576 	}
577 
578 	/* Free Transmit Descriptor ring */
579 	if (adapter->tx_desc_base != NULL) {
580 		em_dma_free(adapter, &adapter->txdma);
581 		adapter->tx_desc_base = NULL;
582 	}
583 
584 	/* Free Receive Descriptor ring */
585 	if (adapter->rx_desc_base != NULL) {
586 		em_dma_free(adapter, &adapter->rxdma);
587 		adapter->rx_desc_base = NULL;
588 	}
589 
590 	adapter->sysctl_tree = NULL;
591 	sysctl_ctx_free(&adapter->sysctl_ctx);
592 
593 	splx(s);
594 	return(0);
595 }
596 
597 /*********************************************************************
598  *
599  *  Shutdown entry point
600  *
601  **********************************************************************/
602 
603 static int
604 em_shutdown(device_t dev)
605 {
606 	struct adapter *adapter = device_get_softc(dev);
607 	em_stop(adapter);
608 	return(0);
609 }
610 
611 /*********************************************************************
612  *  Transmit entry point
613  *
614  *  em_start is called by the stack to initiate a transmit.
615  *  The driver will remain in this routine as long as there are
616  *  packets to transmit and transmit resources are available.
617  *  If resources are not available, the stack is notified and
618  *  the packet is requeued.
619  **********************************************************************/
620 
621 static void
622 em_start(struct ifnet *ifp)
623 {
624 	int s;
625 	struct mbuf *m_head;
626 	struct adapter *adapter = ifp->if_softc;
627 
628 	if (!adapter->link_active)
629 		return;
630 
631 	s = splimp();
632 	while (!ifq_is_empty(&ifp->if_snd)) {
633 		m_head = ifq_poll(&ifp->if_snd);
634 
635 		if (m_head == NULL)
636 			break;
637 
638 		if (em_encap(adapter, m_head)) {
639 			ifp->if_flags |= IFF_OACTIVE;
640 			break;
641 		}
642 		m_head = ifq_dequeue(&ifp->if_snd);
643 
644 		/* Send a copy of the frame to the BPF listener */
645 		BPF_MTAP(ifp, m_head);
646 
647 		/* Set timeout in case hardware has problems transmitting */
648 		ifp->if_timer = EM_TX_TIMEOUT;
649 	}
650 	splx(s);
651 }
652 
653 /*********************************************************************
654  *  Ioctl entry point
655  *
656  *  em_ioctl is called when the user wants to configure the
657  *  interface.
658  *
659  *  return 0 on success, positive on failure
660  **********************************************************************/
661 
662 static int
663 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
664 {
665 	int s, mask, error = 0;
666 	struct ifreq *ifr = (struct ifreq *) data;
667 	struct adapter *adapter = ifp->if_softc;
668 
669 	s = splimp();
670 
671 	if (adapter->in_detach)
672 		goto out;
673 
674 	switch (command) {
675 	case SIOCSIFADDR:
676 	case SIOCGIFADDR:
677 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
678 		ether_ioctl(ifp, command, data);
679 		break;
680 	case SIOCSIFMTU:
681 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
682 		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
683 			error = EINVAL;
684 		} else {
685 			ifp->if_mtu = ifr->ifr_mtu;
686 			adapter->hw.max_frame_size =
687 			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
688 			em_init(adapter);
689 		}
690 		break;
691 	case SIOCSIFFLAGS:
692 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
693 		if (ifp->if_flags & IFF_UP) {
694 			if (!(ifp->if_flags & IFF_RUNNING))
695 				em_init(adapter);
696 			em_disable_promisc(adapter);
697 			em_set_promisc(adapter);
698 		} else {
699 			if (ifp->if_flags & IFF_RUNNING)
700 				em_stop(adapter);
701 		}
702 		break;
703 	case SIOCADDMULTI:
704 	case SIOCDELMULTI:
705 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
706 		if (ifp->if_flags & IFF_RUNNING) {
707 			em_disable_intr(adapter);
708 			em_set_multi(adapter);
709 			if (adapter->hw.mac_type == em_82542_rev2_0)
710 				em_initialize_receive_unit(adapter);
711 #ifdef DEVICE_POLLING
712 			if (!(ifp->if_flags & IFF_POLLING))
713 #endif
714 				em_enable_intr(adapter);
715 		}
716 		break;
717 	case SIOCSIFMEDIA:
718 	case SIOCGIFMEDIA:
719 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
720 		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
721 		break;
722 	case SIOCSIFCAP:
723 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
724 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
725 		if (mask & IFCAP_HWCSUM) {
726 			if (IFCAP_HWCSUM & ifp->if_capenable)
727 				ifp->if_capenable &= ~IFCAP_HWCSUM;
728 			else
729 				ifp->if_capenable |= IFCAP_HWCSUM;
730 			if (ifp->if_flags & IFF_RUNNING)
731 				em_init(adapter);
732 		}
733 		break;
734 	default:
735 		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
736 		error = EINVAL;
737 	}
738 
739 out:
740 	splx(s);
741 	return(error);
742 }
743 
744 /*********************************************************************
745  *  Watchdog entry point
746  *
747  *  This routine is called whenever hardware quits transmitting.
748  *
749  **********************************************************************/
750 
751 static void
752 em_watchdog(struct ifnet *ifp)
753 {
754 	struct adapter * adapter;
755 	adapter = ifp->if_softc;
756 
757 	/* If we are in this routine because of pause frames, then
758 	 * don't reset the hardware.
759 	 */
760 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
761 		ifp->if_timer = EM_TX_TIMEOUT;
762 		return;
763 	}
764 
765 	if (em_check_for_link(&adapter->hw))
766 		if_printf(ifp, "watchdog timeout -- resetting\n");
767 
768 	ifp->if_flags &= ~IFF_RUNNING;
769 
770 	em_init(adapter);
771 
772 	ifp->if_oerrors++;
773 }
774 
775 /*********************************************************************
776  *  Init entry point
777  *
778  *  This routine is used in two ways. It is used by the stack as
779  *  the init entry point in the network interface structure. It is also used
780  *  by the driver as a hw/sw initialization routine to get to a
781  *  consistent state.
782  *
783  *  This routine returns nothing; failures are reported via if_printf().
784  **********************************************************************/
785 
786 static void
787 em_init(void *arg)
788 {
789 	int s;
790 	struct adapter *adapter = arg;
791 	struct ifnet *ifp = &adapter->interface_data.ac_if;
792 
793 	INIT_DEBUGOUT("em_init: begin");
794 
795 	s = splimp();
796 
797 	em_stop(adapter);
798 
799 	/* Get the latest mac address, User can use a LAA */
800 	bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
801 	      ETHER_ADDR_LEN);
802 
803 	/* Initialize the hardware */
804 	if (em_hardware_init(adapter)) {
805 		if_printf(ifp, "Unable to initialize the hardware\n");
806 		splx(s);
807 		return;
808 	}
809 
810 	em_enable_vlans(adapter);
811 
812 	/* Prepare transmit descriptors and buffers */
813 	if (em_setup_transmit_structures(adapter)) {
814 		if_printf(ifp, "Could not setup transmit structures\n");
815 		em_stop(adapter);
816 		splx(s);
817 		return;
818 	}
819 	em_initialize_transmit_unit(adapter);
820 
821 	/* Setup Multicast table */
822 	em_set_multi(adapter);
823 
824 	/* Prepare receive descriptors and buffers */
825 	if (em_setup_receive_structures(adapter)) {
826 		if_printf(ifp, "Could not setup receive structures\n");
827 		em_stop(adapter);
828 		splx(s);
829 		return;
830 	}
831 	em_initialize_receive_unit(adapter);
832 
833 	/* Don't lose promiscuous settings */
834 	em_set_promisc(adapter);
835 
836 	ifp->if_flags |= IFF_RUNNING;
837 	ifp->if_flags &= ~IFF_OACTIVE;
838 
839 	if (adapter->hw.mac_type >= em_82543) {
840 		if (ifp->if_capenable & IFCAP_TXCSUM)
841 			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
842 		else
843 			ifp->if_hwassist = 0;
844 	}
845 
846 	callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
847 	em_clear_hw_cntrs(&adapter->hw);
848 #ifdef DEVICE_POLLING
849 	/*
850 	 * Only enable interrupts if we are not polling; make sure
851 	 * they are off otherwise.
852 	 */
853 	if (ifp->if_flags & IFF_POLLING)
854 		em_disable_intr(adapter);
855 	else
856 #endif /* DEVICE_POLLING */
857 		em_enable_intr(adapter);
858 
859 	/* Don't reset the phy next time init gets called */
860 	adapter->hw.phy_reset_disable = TRUE;
861 
862 	splx(s);
863 }
864 
865 #ifdef DEVICE_POLLING
866 static poll_handler_t em_poll;
867 
868 static void
869 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
870 {
871 	struct adapter *adapter = ifp->if_softc;
872 	uint32_t reg_icr;
873 
874 	if (cmd == POLL_DEREGISTER) {       /* final call, enable interrupts */
875 		em_enable_intr(adapter);
876 		return;
877 	}
878 	if (cmd == POLL_AND_CHECK_STATUS) {
879 		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
880 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
881 			callout_stop(&adapter->timer);
882 			adapter->hw.get_link_status = 1;
883 			em_check_for_link(&adapter->hw);
884 			em_print_link_status(adapter);
885 			callout_reset(&adapter->timer, 2*hz, em_local_timer,
886 				      adapter);
887 		}
888 	}
889 	if (ifp->if_flags & IFF_RUNNING) {
890 		em_process_receive_interrupts(adapter, count);
891 		em_clean_transmit_interrupts(adapter);
892 	}
893 
894 	if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
895 		em_start(ifp);
896 }
897 #endif /* DEVICE_POLLING */
898 
899 /*********************************************************************
900  *
901  *  Interrupt Service routine
902  *
903  **********************************************************************/
904 static void
905 em_intr(void *arg)
906 {
907 	uint32_t reg_icr;
908 	struct ifnet *ifp;
909 	struct adapter *adapter = arg;
910 
911 	ifp = &adapter->interface_data.ac_if;
912 
913 #ifdef DEVICE_POLLING
914 	if (ifp->if_flags & IFF_POLLING)
915 		return;
916 
917 	if (ether_poll_register(em_poll, ifp)) {
918 		em_disable_intr(adapter);
919 		em_poll(ifp, 0, 1);
920 		return;
921 	}
922 #endif /* DEVICE_POLLING */
923 
924 	reg_icr = E1000_READ_REG(&adapter->hw, ICR);
925 	if (!reg_icr)
926 		return;
927 
928 	/* Link status change */
929 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
930 		callout_stop(&adapter->timer);
931 		adapter->hw.get_link_status = 1;
932 		em_check_for_link(&adapter->hw);
933 		em_print_link_status(adapter);
934 		callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
935 	}
936 
937 	/*
938 	 * note: do not attempt to improve efficiency by looping.  This
939 	 * only results in unnecessary piecemeal collection of received
940 	 * packets and unnecessary piecemeal cleanups of the transmit ring.
941 	 */
942 	if (ifp->if_flags & IFF_RUNNING) {
943 		em_process_receive_interrupts(adapter, -1);
944 		em_clean_transmit_interrupts(adapter);
945 	}
946 
947 	if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
948 		em_start(ifp);
949 }
950 
951 /*********************************************************************
952  *
953  *  Media Ioctl callback
954  *
955  *  This routine is called whenever the user queries the status of
956  *  the interface using ifconfig.
957  *
958  **********************************************************************/
959 static void
960 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
961 {
962 	struct adapter * adapter = ifp->if_softc;
963 
964 	INIT_DEBUGOUT("em_media_status: begin");
965 
966 	em_check_for_link(&adapter->hw);
967 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
968 		if (adapter->link_active == 0) {
969 			em_get_speed_and_duplex(&adapter->hw,
970 						&adapter->link_speed,
971 						&adapter->link_duplex);
972 			adapter->link_active = 1;
973 		}
974 	} else {
975 		if (adapter->link_active == 1) {
976 			adapter->link_speed = 0;
977 			adapter->link_duplex = 0;
978 			adapter->link_active = 0;
979 		}
980 	}
981 
982 	ifmr->ifm_status = IFM_AVALID;
983 	ifmr->ifm_active = IFM_ETHER;
984 
985 	if (!adapter->link_active)
986 		return;
987 
988 	ifmr->ifm_status |= IFM_ACTIVE;
989 
990 	if (adapter->hw.media_type == em_media_type_fiber) {
991 		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
992 	} else {
993 		switch (adapter->link_speed) {
994 		case 10:
995 			ifmr->ifm_active |= IFM_10_T;
996 			break;
997 		case 100:
998 			ifmr->ifm_active |= IFM_100_TX;
999 			break;
1000 		case 1000:
1001 			ifmr->ifm_active |= IFM_1000_T;
1002 			break;
1003 		}
1004 		if (adapter->link_duplex == FULL_DUPLEX)
1005 			ifmr->ifm_active |= IFM_FDX;
1006 		else
1007 			ifmr->ifm_active |= IFM_HDX;
1008 	}
1009 }
1010 
1011 /*********************************************************************
1012  *
1013  *  Media Ioctl callback
1014  *
1015  *  This routine is called when the user changes speed/duplex using
1016  *  the media/mediaopt options with ifconfig.
1017  *
1018  **********************************************************************/
1019 static int
1020 em_media_change(struct ifnet *ifp)
1021 {
1022 	struct adapter * adapter = ifp->if_softc;
1023 	struct ifmedia  *ifm = &adapter->media;
1024 
1025 	INIT_DEBUGOUT("em_media_change: begin");
1026 
1027 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1028 		return(EINVAL);
1029 
1030 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1031 	case IFM_AUTO:
1032 		adapter->hw.autoneg = DO_AUTO_NEG;
1033 		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1034 		break;
1035 	case IFM_1000_SX:
1036 	case IFM_1000_T:
1037 		adapter->hw.autoneg = DO_AUTO_NEG;
1038 		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1039 		break;
1040 	case IFM_100_TX:
1041 		adapter->hw.autoneg = FALSE;
1042 		adapter->hw.autoneg_advertised = 0;
1043 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1044 			adapter->hw.forced_speed_duplex = em_100_full;
1045 		else
1046 			adapter->hw.forced_speed_duplex	= em_100_half;
1047 		break;
1048 	case IFM_10_T:
1049 		adapter->hw.autoneg = FALSE;
1050 		adapter->hw.autoneg_advertised = 0;
1051 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1052 			adapter->hw.forced_speed_duplex = em_10_full;
1053 		else
1054 			adapter->hw.forced_speed_duplex	= em_10_half;
1055 		break;
1056 	default:
1057 		if_printf(ifp, "Unsupported media type\n");
1058 	}
1059 	/*
1060 	 * As the speed/duplex settings may have changed we need to
1061 	 * reset the PHY.
1062 	 */
1063 	adapter->hw.phy_reset_disable = FALSE;
1064 
1065 	em_init(adapter);
1066 
1067 	return(0);
1068 }
1069 
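/*
 * bus_dmamap_load_mbuf() callback: record how many DMA segments the mbuf
 * chain mapped to and copy the segment array into the caller's struct em_q
 * so that em_encap() can build one TX descriptor per segment.
 */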
1070 static void
1071 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1072 	 int error)
1073 {
1074 	struct em_q *q = arg;
1075 
1076 	if (error)
1077 		return;
1078 	KASSERT(nsegs <= EM_MAX_SCATTER,
1079 		("Too many DMA segments returned when mapping tx packet"));
1080 	q->nsegs = nsegs;
1081 	bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1082 }
1083 
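/*
 * 82547 TX FIFO workaround constants: the FIFO-head accounting charges each
 * packet its length plus a 16-byte FIFO header, rounded up to a 16-byte
 * boundary; the TX FIFO is 0x2800 bytes and its pointers are reset to 0xf00.
 */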
1084 #define EM_FIFO_HDR              0x10
1085 #define EM_82547_PKT_THRESH      0x3e0
1086 #define EM_82547_TX_FIFO_SIZE    0x2800
1087 #define EM_82547_TX_FIFO_BEGIN   0xf00
1088 /*********************************************************************
1089  *
1090  *  This routine maps the mbufs to tx descriptors.
1091  *
1092  *  return 0 on success, positive on failure
1093  **********************************************************************/
1094 static int
1095 em_encap(struct adapter *adapter, struct mbuf *m_head)
1096 {
1097 	uint32_t txd_upper;
1098 	uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1099 	int i, j, error;
1100 	uint64_t address;
1101 
1102 	/* For 82544 Workaround */
1103 	DESC_ARRAY desc_array;
1104 	uint32_t array_elements;
1105 	uint32_t counter;
1106 
1107 	struct ifvlan *ifv = NULL;
1108 	struct em_q q;
1109 	struct em_buffer *tx_buffer = NULL;
1110 	struct em_tx_desc *current_tx_desc = NULL;
1111 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1112 
1113 	/*
1114 	 * Force a cleanup if the number of available TX descriptors
1115 	 * drops to the cleanup threshold
1116 	 */
1117 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1118 		em_clean_transmit_interrupts(adapter);
1119 		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1120 			adapter->no_tx_desc_avail1++;
1121 			return(ENOBUFS);
1122 		}
1123 	}
1124 	/*
1125 	 * Map the packet for DMA.
1126 	 */
1127 	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1128 		adapter->no_tx_map_avail++;
1129 		return(ENOMEM);
1130 	}
1131 	error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1132 				     &q, BUS_DMA_NOWAIT);
1133 	if (error != 0) {
1134 		adapter->no_tx_dma_setup++;
1135 		bus_dmamap_destroy(adapter->txtag, q.map);
1136 		return(error);
1137 	}
1138 	KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1139 
1140 	if (q.nsegs > adapter->num_tx_desc_avail) {
1141 		adapter->no_tx_desc_avail2++;
1142 		bus_dmamap_unload(adapter->txtag, q.map);
1143 		bus_dmamap_destroy(adapter->txtag, q.map);
1144 		return(ENOBUFS);
1145 	}
1146 
1147 	if (ifp->if_hwassist > 0) {
1148 		em_transmit_checksum_setup(adapter,  m_head,
1149 					   &txd_upper, &txd_lower);
1150 	}
1151 	else
1152 		txd_upper = txd_lower = 0;
1153 
1154 	/* Find out if we are in vlan mode */
1155 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1156 	    m_head->m_pkthdr.rcvif != NULL &&
1157 	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1158 		ifv = m_head->m_pkthdr.rcvif->if_softc;
1159 
1160 	i = adapter->next_avail_tx_desc;
1161 	if (adapter->pcix_82544) {
1162 		txd_saved = i;
1163 		txd_used = 0;
1164 	}
1165 	for (j = 0; j < q.nsegs; j++) {
1166 		/* If adapter is 82544 and on PCIX bus */
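		/*
		 * em_fill_descriptors() checks the segment's address/length
		 * combination against the 82544/PCI-X limitation and splits
		 * it where necessary; each resulting piece gets its own
		 * TX descriptor below.
		 */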
1167 		if (adapter->pcix_82544) {
1168 			array_elements = 0;
1169 			address = htole64(q.segs[j].ds_addr);
1170 			/*
1171 			 * Check the Address and Length combination and
1172 			 * split the data accordingly
1173 			 */
1174 			array_elements = em_fill_descriptors(address,
1175 							     htole32(q.segs[j].ds_len),
1176 							     &desc_array);
1177 			for (counter = 0; counter < array_elements; counter++) {
1178 				if (txd_used == adapter->num_tx_desc_avail) {
1179 					adapter->next_avail_tx_desc = txd_saved;
1180 					adapter->no_tx_desc_avail2++;
1181 					bus_dmamap_unload(adapter->txtag, q.map);
1182 					bus_dmamap_destroy(adapter->txtag, q.map);
1183 					return(ENOBUFS);
1184 				}
1185 				tx_buffer = &adapter->tx_buffer_area[i];
1186 				current_tx_desc = &adapter->tx_desc_base[i];
1187 				current_tx_desc->buffer_addr = htole64(
1188 				    desc_array.descriptor[counter].address);
1189 				current_tx_desc->lower.data = htole32(
1190 				    (adapter->txd_cmd | txd_lower |
1191 				    (uint16_t)desc_array.descriptor[counter].length));
1192 				current_tx_desc->upper.data = htole32(txd_upper);
1193 				if (++i == adapter->num_tx_desc)
1194 					i = 0;
1195 
1196 				tx_buffer->m_head = NULL;
1197 				txd_used++;
1198 			}
1199 		} else {
1200 			tx_buffer = &adapter->tx_buffer_area[i];
1201 			current_tx_desc = &adapter->tx_desc_base[i];
1202 
1203 			current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1204 			current_tx_desc->lower.data = htole32(
1205 				adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1206 			current_tx_desc->upper.data = htole32(txd_upper);
1207 
1208 			if (++i == adapter->num_tx_desc)
1209 				i = 0;
1210 
1211 			tx_buffer->m_head = NULL;
1212 		}
1213 	}
1214 
1215 	adapter->next_avail_tx_desc = i;
1216 	if (adapter->pcix_82544)
1217 		adapter->num_tx_desc_avail -= txd_used;
1218 	else
1219 		adapter->num_tx_desc_avail -= q.nsegs;
1220 
1221 	if (ifv != NULL) {
1222 		/* Set the vlan id */
1223 		current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1224 
1225 		/* Tell hardware to add tag */
1226 		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1227 	}
1228 
1229 	tx_buffer->m_head = m_head;
1230 	tx_buffer->map = q.map;
1231 	bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1232 
1233 	/*
1234 	 * Last Descriptor of Packet needs End Of Packet (EOP)
1235 	 */
1236 	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1237 
1238 	/*
1239 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1240 	 * that this frame is available to transmit.
1241 	 */
1242 	if (adapter->hw.mac_type == em_82547 &&
1243 	    adapter->link_duplex == HALF_DUPLEX) {
1244 		em_82547_move_tail(adapter);
1245 	} else {
1246 		E1000_WRITE_REG(&adapter->hw, TDT, i);
1247 		if (adapter->hw.mac_type == em_82547) {
1248 			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1249 		}
1250 	}
1251 
1252 	return(0);
1253 }
1254 
1255 /*********************************************************************
1256  *
1257  * 82547 workaround to avoid controller hang in half-duplex environment.
1258  * The workaround is to avoid queuing a large packet that would span
1259  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1260  * in this case. We do that only when FIFO is quiescent.
1261  *
1262  **********************************************************************/
1263 static void
1264 em_82547_move_tail(void *arg)
1265 {
1266 	int s;
1267 	struct adapter *adapter = arg;
1268 	uint16_t hw_tdt;
1269 	uint16_t sw_tdt;
1270 	struct em_tx_desc *tx_desc;
1271 	uint16_t length = 0;
1272 	boolean_t eop = 0;
1273 
1274 	s = splimp();
1275 	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1276 	sw_tdt = adapter->next_avail_tx_desc;
1277 
1278 	while (hw_tdt != sw_tdt) {
1279 		tx_desc = &adapter->tx_desc_base[hw_tdt];
1280 		length += tx_desc->lower.flags.length;
1281 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1282 		if (++hw_tdt == adapter->num_tx_desc)
1283 			hw_tdt = 0;
1284 
1285 		if (eop) {
1286 			if (em_82547_fifo_workaround(adapter, length)) {
1287 				adapter->tx_fifo_wrk++;
1288 				callout_reset(&adapter->tx_fifo_timer, 1,
1289 					em_82547_move_tail, adapter);
1290 				break;
1291 			}
1292 			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1293 			em_82547_update_fifo_head(adapter, length);
1294 			length = 0;
1295 		}
1296 	}
1297 	splx(s);
1298 }
1299 
1300 static int
1301 em_82547_fifo_workaround(struct adapter *adapter, int len)
1302 {
1303 	int fifo_space, fifo_pkt_len;
1304 
1305 	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1306 
1307 	if (adapter->link_duplex == HALF_DUPLEX) {
1308 		fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1309 
1310 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1311 			if (em_82547_tx_fifo_reset(adapter))
1312 				return(0);
1313 			else
1314 				return(1);
1315 		}
1316 	}
1317 
1318 	return(0);
1319 }
1320 
1321 static void
1322 em_82547_update_fifo_head(struct adapter *adapter, int len)
1323 {
1324 	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1325 
1326 	/* tx_fifo_head is always 16-byte aligned */
1327 	adapter->tx_fifo_head += fifo_pkt_len;
1328 	if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1329 		adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1330 }
1331 
1332 static int
1333 em_82547_tx_fifo_reset(struct adapter *adapter)
1334 {
1335 	uint32_t tctl;
1336 
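	/*
	 * Only reset the FIFO pointers while the TX path is idle: the
	 * descriptor ring head and tail match, the FIFO head/tail (and
	 * their saved copies) match, and the FIFO packet count is zero.
	 */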
1337 	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1338 	      E1000_READ_REG(&adapter->hw, TDH)) &&
1339 	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1340 	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1341 	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1342 	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1343 	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1344 
1345 		/* Disable TX unit */
1346 		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1347 		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1348 
1349 		/* Reset FIFO pointers */
1350 		E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1351 		E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1352 		E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1353 		E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1354 
1355 		/* Re-enable TX unit */
1356 		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1357 		E1000_WRITE_FLUSH(&adapter->hw);
1358 
1359 		adapter->tx_fifo_head = 0;
1360 		adapter->tx_fifo_reset++;
1361 
1362 		return(TRUE);
1363 	}
1364 	else {
1365 		return(FALSE);
1366 	}
1367 }
1368 
1369 static void
1370 em_set_promisc(struct adapter *adapter)
1371 {
1372 	uint32_t reg_rctl;
1373 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1374 
1375 	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1376 
1377 	if (ifp->if_flags & IFF_PROMISC) {
1378 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1379 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1380 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1381 		reg_rctl |= E1000_RCTL_MPE;
1382 		reg_rctl &= ~E1000_RCTL_UPE;
1383 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1384 	}
1385 }
1386 
1387 static void
1388 em_disable_promisc(struct adapter *adapter)
1389 {
1390 	uint32_t reg_rctl;
1391 
1392 	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1393 
1394 	reg_rctl &=  (~E1000_RCTL_UPE);
1395 	reg_rctl &=  (~E1000_RCTL_MPE);
1396 	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1397 }
1398 
1399 /*********************************************************************
1400  *  Multicast Update
1401  *
1402  *  This routine is called whenever the multicast address list is updated.
1403  *
1404  **********************************************************************/
1405 
1406 static void
1407 em_set_multi(struct adapter *adapter)
1408 {
1409 	uint32_t reg_rctl = 0;
1410 	uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1411 	struct ifmultiaddr *ifma;
1412 	int mcnt = 0;
1413 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1414 
1415 	IOCTL_DEBUGOUT("em_set_multi: begin");
1416 
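	/*
	 * On the 82542 rev2.0 the receiver is placed in reset (and MWI is
	 * turned off) while the multicast table array is rewritten; it is
	 * taken back out of reset further below.
	 */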
1417 	if (adapter->hw.mac_type == em_82542_rev2_0) {
1418 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1419 		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1420 			em_pci_clear_mwi(&adapter->hw);
1421 		reg_rctl |= E1000_RCTL_RST;
1422 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1423 		msec_delay(5);
1424 	}
1425 
1426 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1427 		if (ifma->ifma_addr->sa_family != AF_LINK)
1428 			continue;
1429 
1430 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1431 			break;
1432 
1433 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1434 		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1435 		mcnt++;
1436 	}
1437 
1438 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1439 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1440 		reg_rctl |= E1000_RCTL_MPE;
1441 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1442 	} else
1443 		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1444 
1445 	if (adapter->hw.mac_type == em_82542_rev2_0) {
1446 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1447 		reg_rctl &= ~E1000_RCTL_RST;
1448 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1449 		msec_delay(5);
1450 		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1451 			em_pci_set_mwi(&adapter->hw);
1452 	}
1453 }
1454 
1455 /*********************************************************************
1456  *  Timer routine
1457  *
1458  *  This routine checks for link status and updates statistics.
1459  *
1460  **********************************************************************/
1461 
1462 static void
1463 em_local_timer(void *arg)
1464 {
1465 	int s;
1466 	struct ifnet *ifp;
1467 	struct adapter *adapter = arg;
1468 	ifp = &adapter->interface_data.ac_if;
1469 
1470 	s = splimp();
1471 
1472 	em_check_for_link(&adapter->hw);
1473 	em_print_link_status(adapter);
1474 	em_update_stats_counters(adapter);
1475 	if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1476 		em_print_hw_stats(adapter);
1477 	em_smartspeed(adapter);
1478 
1479 	callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
1480 
1481 	splx(s);
1482 }
1483 
1484 static void
1485 em_print_link_status(struct adapter *adapter)
1486 {
1487 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1488 		if (adapter->link_active == 0) {
1489 			em_get_speed_and_duplex(&adapter->hw,
1490 						&adapter->link_speed,
1491 						&adapter->link_duplex);
1492 			device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1493 			       adapter->link_speed,
1494 			       ((adapter->link_duplex == FULL_DUPLEX) ?
1495 				"Full Duplex" : "Half Duplex"));
1496 			adapter->link_active = 1;
1497 			adapter->smartspeed = 0;
1498 		}
1499 	} else {
1500 		if (adapter->link_active == 1) {
1501 			adapter->link_speed = 0;
1502 			adapter->link_duplex = 0;
1503 			device_printf(adapter->dev, "Link is Down\n");
1504 			adapter->link_active = 0;
1505 		}
1506 	}
1507 }
1508 
1509 /*********************************************************************
1510  *
1511  *  This routine disables all traffic on the adapter by issuing a
1512  *  global reset on the MAC and deallocates TX/RX buffers.
1513  *
1514  **********************************************************************/
1515 
1516 static void
1517 em_stop(void *arg)
1518 {
1519 	struct ifnet   *ifp;
1520 	struct adapter * adapter = arg;
1521 	ifp = &adapter->interface_data.ac_if;
1522 
1523 	INIT_DEBUGOUT("em_stop: begin");
1524 	em_disable_intr(adapter);
1525 	em_reset_hw(&adapter->hw);
1526 	callout_stop(&adapter->timer);
1527 	callout_stop(&adapter->tx_fifo_timer);
1528 	em_free_transmit_structures(adapter);
1529 	em_free_receive_structures(adapter);
1530 
1531 	/* Tell the stack that the interface is no longer active */
1532 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1533 	ifp->if_timer = 0;
1534 }
1535 
1536 /*********************************************************************
1537  *
1538  *  Determine hardware revision.
1539  *
1540  **********************************************************************/
1541 static void
1542 em_identify_hardware(struct adapter * adapter)
1543 {
1544 	device_t dev = adapter->dev;
1545 
1546 	/* Make sure our PCI config space has the necessary stuff set */
1547 	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1548 	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1549 	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1550 		device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1551 		adapter->hw.pci_cmd_word |=
1552 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1553 		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1554 	}
1555 
1556 	/* Save off the information about this board */
1557 	adapter->hw.vendor_id = pci_get_vendor(dev);
1558 	adapter->hw.device_id = pci_get_device(dev);
1559 	adapter->hw.revision_id = pci_get_revid(dev);
1560 	adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1561 	adapter->hw.subsystem_id = pci_get_subdevice(dev);
1562 
1563 	/* Identify the MAC */
1564 	if (em_set_mac_type(&adapter->hw))
1565 		device_printf(dev, "Unknown MAC Type\n");
1566 
1567 	if (adapter->hw.mac_type == em_82541 ||
1568 	    adapter->hw.mac_type == em_82541_rev_2 ||
1569 	    adapter->hw.mac_type == em_82547 ||
1570 	    adapter->hw.mac_type == em_82547_rev_2)
1571 		adapter->hw.phy_init_script = TRUE;
1572 }
1573 
1574 /*********************************************************************
1575  *
1576  *  Initialize the hardware to a configuration as specified by the
1577  *  adapter structure. The controller is reset, the EEPROM is
1578  *  verified, the MAC address is set, then the shared initialization
1579  *  routines are called.
1580  *
1581  **********************************************************************/
1582 static int
1583 em_hardware_init(struct adapter *adapter)
1584 {
1585 	INIT_DEBUGOUT("em_hardware_init: begin");
1586 	/* Issue a global reset */
1587 	em_reset_hw(&adapter->hw);
1588 
1589 	/* When hardware is reset, fifo_head is also reset */
1590 	adapter->tx_fifo_head = 0;
1591 
1592 	/* Make sure we have a good EEPROM before we read from it */
1593 	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1594 		device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1595 		return(EIO);
1596 	}
1597 
1598 	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1599 		device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1600 		return(EIO);
1601 	}
1602 
1603 	if (em_init_hw(&adapter->hw) < 0) {
1604 		device_printf(adapter->dev, "Hardware Initialization Failed");
1605 		return(EIO);
1606 	}
1607 
1608 	em_check_for_link(&adapter->hw);
1609 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1610 		adapter->link_active = 1;
1611 	else
1612 		adapter->link_active = 0;
1613 
1614 	if (adapter->link_active) {
1615 		em_get_speed_and_duplex(&adapter->hw,
1616 					&adapter->link_speed,
1617 					&adapter->link_duplex);
1618 	} else {
1619 		adapter->link_speed = 0;
1620 		adapter->link_duplex = 0;
1621 	}
1622 
1623 	return(0);
1624 }
1625 
1626 /*********************************************************************
1627  *
1628  *  Setup networking device structure and register an interface.
1629  *
1630  **********************************************************************/
1631 static void
1632 em_setup_interface(device_t dev, struct adapter *adapter)
1633 {
1634 	struct ifnet   *ifp;
1635 	INIT_DEBUGOUT("em_setup_interface: begin");
1636 
1637 	ifp = &adapter->interface_data.ac_if;
1638 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1639 	ifp->if_mtu = ETHERMTU;
1640 	ifp->if_baudrate = 1000000000;
1641 	ifp->if_init =  em_init;
1642 	ifp->if_softc = adapter;
1643 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1644 	ifp->if_ioctl = em_ioctl;
1645 	ifp->if_start = em_start;
1646 	ifp->if_watchdog = em_watchdog;
1647 	ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1648 	ifq_set_ready(&ifp->if_snd);
1649 
1650 	ether_ifattach(ifp, adapter->hw.mac_addr);
1651 
1652 	if (adapter->hw.mac_type >= em_82543) {
1653 		ifp->if_capabilities = IFCAP_HWCSUM;
1654 		ifp->if_capenable = ifp->if_capabilities;
1655 	}
1656 
1657 	/*
1658 	 * Tell the upper layer(s) we support long frames.
1659 	 */
1660 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1661 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1662 
1663 	/*
1664 	 * Specify the media types supported by this adapter and register
1665 	 * callbacks to update media and link information
1666 	 */
1667 	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1668 		     em_media_status);
1669 	if (adapter->hw.media_type == em_media_type_fiber) {
1670 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1671 			    0, NULL);
1672 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1673 			    0, NULL);
1674 	} else {
1675 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1676 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1677 			    0, NULL);
1678 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1679 			    0, NULL);
1680 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1681 			    0, NULL);
1682 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1683 			    0, NULL);
1684 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1685 	}
1686 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1687 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1688 }
1689 
1690 /*********************************************************************
1691  *
1692  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1693  *
1694  **********************************************************************/
1695 static void
1696 em_smartspeed(struct adapter *adapter)
1697 {
1698 	uint16_t phy_tmp;
1699 
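	/*
	 * Only run the workaround while the link is down, the PHY is an IGP
	 * part, autonegotiation is enabled and 1000BASE-T full duplex is
	 * being advertised; otherwise there is nothing to downshift.
	 */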
1700 	if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1701 	    !adapter->hw.autoneg ||
1702 	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1703 		return;
1704 
1705 	if (adapter->smartspeed == 0) {
1706 		/*
1707 		 * If Master/Slave config fault is asserted twice,
1708 		 * we assume back-to-back.
1709 		 */
1710 		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1711 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1712 			return;
1713 		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1714 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1715 			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1716 					&phy_tmp);
1717 			if (phy_tmp & CR_1000T_MS_ENABLE) {
1718 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1719 				em_write_phy_reg(&adapter->hw,
1720 						 PHY_1000T_CTRL, phy_tmp);
1721 				adapter->smartspeed++;
1722 				if (adapter->hw.autoneg &&
1723 				    !em_phy_setup_autoneg(&adapter->hw) &&
1724 				    !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1725 						     &phy_tmp)) {
1726 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
1727 						    MII_CR_RESTART_AUTO_NEG);
1728 					em_write_phy_reg(&adapter->hw,
1729 							 PHY_CTRL, phy_tmp);
1730 				}
1731 			}
1732 		}
1733 		return;
1734 	} else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1735 		/* If still no link, perhaps using 2/3 pair cable */
1736 		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1737 		phy_tmp |= CR_1000T_MS_ENABLE;
1738 		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1739 		if (adapter->hw.autoneg &&
1740 		    !em_phy_setup_autoneg(&adapter->hw) &&
1741 		    !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1742 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
1743 				    MII_CR_RESTART_AUTO_NEG);
1744 			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1745 		}
1746 	}
1747 	/* Restart process after EM_SMARTSPEED_MAX iterations */
1748 	if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1749 		adapter->smartspeed = 0;
1750 }
1751 
1752 /*
1753  * Manage DMA'able memory.
1754  */
1755 static void
1756 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1757 {
1758 	if (error)
1759 		return;
1760 	*(bus_addr_t*) arg = segs->ds_addr;
1761 }
1762 
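/*
 * em_dma_malloc() allocates a DMA'able region in three steps: create a
 * bus_dma tag describing the constraints, allocate wired memory against
 * that tag, and load the map so em_dmamap_cb() can record the bus address.
 */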
1763 static int
1764 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1765 	      struct em_dma_alloc *dma, int mapflags)
1766 {
1767 	int r;
1768 	device_t dev = adapter->dev;
1769 
1770 	r = bus_dma_tag_create(NULL,                    /* parent */
1771 			       PAGE_SIZE, 0,            /* alignment, bounds */
1772 			       BUS_SPACE_MAXADDR,       /* lowaddr */
1773 			       BUS_SPACE_MAXADDR,       /* highaddr */
1774 			       NULL, NULL,              /* filter, filterarg */
1775 			       size,                    /* maxsize */
1776 			       1,                       /* nsegments */
1777 			       size,                    /* maxsegsize */
1778 			       BUS_DMA_ALLOCNOW,        /* flags */
1779 			       &dma->dma_tag);
1780 	if (r != 0) {
1781 		device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1782 			      "error %u\n", r);
1783 		goto fail_0;
1784 	}
1785 
1786 	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1787 			     BUS_DMA_NOWAIT, &dma->dma_map);
1788 	if (r != 0) {
1789 		device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1790 			      "size %llu, error %d\n", (unsigned long long)size, r);
1791 		goto fail_1;
1792 	}
1793 
1794 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1795 			    size,
1796 			    em_dmamap_cb,
1797 			    &dma->dma_paddr,
1798 			    mapflags | BUS_DMA_NOWAIT);
1799 	if (r != 0) {
1800 		device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1801 			      "error %u\n", r);
1802 		goto fail_3;
1803 	}
1804 
1805 	dma->dma_size = size;
1806 	return(0);
1807 
1808 fail_3:
1809 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1810 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1811 fail_1:
1812 	bus_dma_tag_destroy(dma->dma_tag);
1813 fail_0:
1814 	dma->dma_map = NULL;
1815 	dma->dma_tag = NULL;
1816 	return(r);
1817 }
1818 
1819 static void
1820 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1821 {
1822 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1823 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1824 	bus_dma_tag_destroy(dma->dma_tag);
1825 }
1826 
1827 /*********************************************************************
1828  *
1829  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1830  *  the information needed to transmit a packet on the wire.
1831  *
1832  **********************************************************************/
1833 static int
1834 em_allocate_transmit_structures(struct adapter * adapter)
1835 {
1836 	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1837 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1838 	if (adapter->tx_buffer_area == NULL) {
1839 		device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1840 		return(ENOMEM);
1841 	}
1842 
1843 	return(0);
1844 }
1845 
1846 /*********************************************************************
1847  *
1848  *  Allocate and initialize transmit structures.
1849  *
1850  **********************************************************************/
1851 static int
1852 em_setup_transmit_structures(struct adapter * adapter)
1853 {
1854 	/*
1855 	 * Setup DMA descriptor areas.
1856 	 */
1857 	if (bus_dma_tag_create(NULL,                    /* parent */
1858 			       1, 0,			/* alignment, bounds */
1859 			       BUS_SPACE_MAXADDR,       /* lowaddr */
1860 			       BUS_SPACE_MAXADDR,       /* highaddr */
1861 			       NULL, NULL,              /* filter, filterarg */
1862 			       MCLBYTES * 8,            /* maxsize */
1863 			       EM_MAX_SCATTER,          /* nsegments */
1864 			       MCLBYTES * 8,            /* maxsegsize */
1865 			       BUS_DMA_ALLOCNOW,        /* flags */
1866 			       &adapter->txtag)) {
1867 		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1868 		return(ENOMEM);
1869 	}
1870 
1871 	if (em_allocate_transmit_structures(adapter))
1872 		return(ENOMEM);
1873 
1874 	bzero((void *) adapter->tx_desc_base,
1875 	      (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1876 
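	/*
	 * next_avail_tx_desc is where the next frame's descriptors will be
	 * written; oldest_used_tx_desc is where em_clean_transmit_interrupts()
	 * resumes reclaiming completed descriptors.
	 */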
1877 	adapter->next_avail_tx_desc = 0;
1878 	adapter->oldest_used_tx_desc = 0;
1879 
1880 	/* Set number of descriptors available */
1881 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
1882 
1883 	/* Set checksum context */
1884 	adapter->active_checksum_context = OFFLOAD_NONE;
1885 
1886 	return(0);
1887 }
1888 
1889 /*********************************************************************
1890  *
1891  *  Enable transmit unit.
1892  *
1893  **********************************************************************/
1894 static void
1895 em_initialize_transmit_unit(struct adapter * adapter)
1896 {
1897 	uint32_t reg_tctl;
1898 	uint32_t reg_tipg = 0;
1899 	uint64_t bus_addr;
1900 
1901 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1902 
1903 	/* Setup the Base and Length of the Tx Descriptor Ring */
1904 	bus_addr = adapter->txdma.dma_paddr;
1905 	E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1906 	E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1907 	E1000_WRITE_REG(&adapter->hw, TDLEN,
1908 			adapter->num_tx_desc * sizeof(struct em_tx_desc));
1909 
1910 	/* Setup the HW Tx Head and Tail descriptor pointers */
1911 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
1912 	E1000_WRITE_REG(&adapter->hw, TDT, 0);
1913 
1914 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1915 		     E1000_READ_REG(&adapter->hw, TDBAL),
1916 		     E1000_READ_REG(&adapter->hw, TDLEN));
1917 
1918 	/* Set the default values for the Tx Inter Packet Gap timer */
1919 	switch (adapter->hw.mac_type) {
1920 	case em_82542_rev2_0:
1921 	case em_82542_rev2_1:
1922 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
1923 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1924 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1925 		break;
1926 	default:
1927 		if (adapter->hw.media_type == em_media_type_fiber)
1928 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1929 		else
1930 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1931 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1932 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1933 	}
1934 
1935 	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1936 	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1937 	if (adapter->hw.mac_type >= em_82540)
1938 		E1000_WRITE_REG(&adapter->hw, TADV,
1939 				adapter->tx_abs_int_delay.value);
1940 
1941 	/* Program the Transmit Control Register */
1942 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1943 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1944 	if (adapter->link_duplex == 1)
1945 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1946 	else
1947 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1948 	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1949 
1950 	/* Setup Transmit Descriptor Settings for this adapter */
1951 	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
1952 
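	/*
	 * IFCS inserts the Ethernet FCS and RS asks the hardware to report
	 * descriptor completion status; request a delayed interrupt (IDE)
	 * only when a TX interrupt delay has been configured.
	 */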
1953 	if (adapter->tx_int_delay.value > 0)
1954 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1955 }
1956 
1957 /*********************************************************************
1958  *
1959  *  Free all transmit related data structures.
1960  *
1961  **********************************************************************/
1962 static void
1963 em_free_transmit_structures(struct adapter * adapter)
1964 {
1965 	struct em_buffer *tx_buffer;
1966 	int i;
1967 
1968 	INIT_DEBUGOUT("free_transmit_structures: begin");
1969 
1970 	if (adapter->tx_buffer_area != NULL) {
1971 		tx_buffer = adapter->tx_buffer_area;
1972 		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1973 			if (tx_buffer->m_head != NULL) {
1974 				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1975 				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1976 				m_freem(tx_buffer->m_head);
1977 			}
1978 			tx_buffer->m_head = NULL;
1979 		}
1980 	}
1981 	if (adapter->tx_buffer_area != NULL) {
1982 		free(adapter->tx_buffer_area, M_DEVBUF);
1983 		adapter->tx_buffer_area = NULL;
1984 	}
1985 	if (adapter->txtag != NULL) {
1986 		bus_dma_tag_destroy(adapter->txtag);
1987 		adapter->txtag = NULL;
1988 	}
1989 }
1990 
1991 /*********************************************************************
1992  *
1993  *  The offload context needs to be set when we transfer the first
1994  *  packet of a particular protocol (TCP/UDP). We change the
1995  *  context only if the protocol type changes.
1996  *
1997  **********************************************************************/
1998 static void
1999 em_transmit_checksum_setup(struct adapter * adapter,
2000 			   struct mbuf *mp,
2001 			   uint32_t *txd_upper,
2002 			   uint32_t *txd_lower)
2003 {
2004 	struct em_context_desc *TXD;
2005 	struct em_buffer *tx_buffer;
2006 	int curr_txd;
2007 
2008 	if (mp->m_pkthdr.csum_flags) {
2009 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2010 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2011 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2012 			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2013 				return;
2014 			else
2015 				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2016 		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2017 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2018 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2019 			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2020 				return;
2021 			else
2022 				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2023 		} else {
2024 			*txd_upper = 0;
2025 			*txd_lower = 0;
2026 			return;
2027 		}
2028 	} else {
2029 		*txd_upper = 0;
2030 		*txd_lower = 0;
2031 		return;
2032 	}
2033 
2034 	/*
2035 	 * If we reach this point, the checksum offload context needs to be reset.
2036 	 */
2037 	curr_txd = adapter->next_avail_tx_desc;
2038 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2039 	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2040 
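	/*
	 * Context descriptor layout: ipcss/ipcso/ipcse give the start, the
	 * checksum-field offset and the end for the IP header checksum;
	 * tucss/tucso/tucse do the same for the TCP/UDP checksum.
	 */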
2041 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2042 	TXD->lower_setup.ip_fields.ipcso =
2043 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2044 	TXD->lower_setup.ip_fields.ipcse =
2045 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2046 
2047 	TXD->upper_setup.tcp_fields.tucss =
2048 	    ETHER_HDR_LEN + sizeof(struct ip);
2049 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2050 
2051 	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2052 		TXD->upper_setup.tcp_fields.tucso =
2053 		    ETHER_HDR_LEN + sizeof(struct ip) +
2054 		    offsetof(struct tcphdr, th_sum);
2055 	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2056 		TXD->upper_setup.tcp_fields.tucso =
2057 			ETHER_HDR_LEN + sizeof(struct ip) +
2058 			offsetof(struct udphdr, uh_sum);
2059 	}
2060 
2061 	TXD->tcp_seg_setup.data = htole32(0);
2062 	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2063 
2064 	tx_buffer->m_head = NULL;
2065 
2066 	if (++curr_txd == adapter->num_tx_desc)
2067 		curr_txd = 0;
2068 
2069 	adapter->num_tx_desc_avail--;
2070 	adapter->next_avail_tx_desc = curr_txd;
2071 }
2072 
2073 /**********************************************************************
2074  *
2075  *  Examine each tx_buffer in the used queue. If the hardware is done
2076  *  processing the packet then free associated resources. The
2077  *  tx_buffer is put back on the free queue.
2078  *
2079  **********************************************************************/
2080 
2081 static void
2082 em_clean_transmit_interrupts(struct adapter *adapter)
2083 {
2084 	int s;
2085 	int i, num_avail;
2086 	struct em_buffer *tx_buffer;
2087 	struct em_tx_desc *tx_desc;
2088 	struct ifnet *ifp = &adapter->interface_data.ac_if;
2089 
2090 	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2091 		return;
2092 
2093 	s = splimp();
2094 #ifdef DBG_STATS
2095 	adapter->clean_tx_interrupts++;
2096 #endif
2097 	num_avail = adapter->num_tx_desc_avail;
2098 	i = adapter->oldest_used_tx_desc;
2099 
2100 	tx_buffer = &adapter->tx_buffer_area[i];
2101 	tx_desc = &adapter->tx_desc_base[i];
2102 
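	/*
	 * The DD (descriptor done) bit is set by the hardware when it has
	 * finished with a descriptor and written back its status.
	 */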
2103 	while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2104 		tx_desc->upper.data = 0;
2105 		num_avail++;
2106 
2107 		if (tx_buffer->m_head) {
2108 			ifp->if_opackets++;
2109 			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2110 					BUS_DMASYNC_POSTWRITE);
2111 			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2112 			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2113 
2114 			m_freem(tx_buffer->m_head);
2115 			tx_buffer->m_head = NULL;
2116 		}
2117 
2118 		if (++i == adapter->num_tx_desc)
2119 			i = 0;
2120 
2121 		tx_buffer = &adapter->tx_buffer_area[i];
2122 		tx_desc = &adapter->tx_desc_base[i];
2123 	}
2124 
2125 	adapter->oldest_used_tx_desc = i;
2126 
2127 	/*
2128 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2129 	 * that it is OK to send packets.
2130 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2131 	 * if some descriptors have been freed, restart the timeout.
2132 	 */
2133 	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2134 		ifp->if_flags &= ~IFF_OACTIVE;
2135 		if (num_avail == adapter->num_tx_desc)
2136 			ifp->if_timer = 0;
2137 		else if (num_avail == adapter->num_tx_desc_avail)
2138 			ifp->if_timer = EM_TX_TIMEOUT;
2139 	}
2140 	adapter->num_tx_desc_avail = num_avail;
2141 	splx(s);
2142 }
2143 
2144 /*********************************************************************
2145  *
2146  *  Get a buffer from system mbuf buffer pool.
2147  *
2148  **********************************************************************/
2149 static int
2150 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2151 {
2152 	struct mbuf *mp = nmp;
2153 	struct em_buffer *rx_buffer;
2154 	struct ifnet *ifp;
2155 	bus_addr_t paddr;
2156 	int error;
2157 
2158 	ifp = &adapter->interface_data.ac_if;
2159 
2160 	if (mp == NULL) {
2161 		mp = m_getcl(how, MT_DATA, M_PKTHDR);
2162 		if (mp == NULL) {
2163 			adapter->mbuf_cluster_failed++;
2164 			return(ENOBUFS);
2165 		}
2166 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2167 	} else {
2168 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2169 		mp->m_data = mp->m_ext.ext_buf;
2170 		mp->m_next = NULL;
2171 	}
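	/*
	 * m_adj(mp, ETHER_ALIGN) trims 2 bytes from the front of the cluster
	 * so the IP header following the 14-byte Ethernet header ends up
	 * 32-bit aligned.
	 */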
2172 	if (ifp->if_mtu <= ETHERMTU)
2173 		m_adj(mp, ETHER_ALIGN);
2174 
2175 	rx_buffer = &adapter->rx_buffer_area[i];
2176 
2177 	/*
2178 	 * Using memory from the mbuf cluster pool, invoke the
2179 	 * bus_dma machinery to arrange the memory mapping.
2180 	 */
2181 	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2182 				mtod(mp, void *), mp->m_len,
2183 				em_dmamap_cb, &paddr, 0);
2184 	if (error) {
2185 		m_free(mp);
2186 		return(error);
2187 	}
2188 	rx_buffer->m_head = mp;
2189 	adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2190 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2191 
2192 	return(0);
2193 }
2194 
2195 /*********************************************************************
2196  *
2197  *  Allocate memory for rx_buffer structures. Since we use one
2198  *  rx_buffer per received packet, the maximum number of rx_buffers
2199  *  that we'll need is equal to the number of receive descriptors
2200  *  that we've allocated.
2201  *
2202  **********************************************************************/
2203 static int
2204 em_allocate_receive_structures(struct adapter *adapter)
2205 {
2206 	int i, error, size;
2207 	struct em_buffer *rx_buffer;
2208 
2209 	size = adapter->num_rx_desc * sizeof(struct em_buffer);
2210 	adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2211 
2212 	error = bus_dma_tag_create(NULL,		/* parent */
2213 				   1, 0,		/* alignment, bounds */
2214 				   BUS_SPACE_MAXADDR,	/* lowaddr */
2215 				   BUS_SPACE_MAXADDR,	/* highaddr */
2216 				   NULL, NULL,		/* filter, filterarg */
2217 				   MCLBYTES,		/* maxsize */
2218 				   1,			/* nsegments */
2219 				   MCLBYTES,		/* maxsegsize */
2220 				   BUS_DMA_ALLOCNOW,	/* flags */
2221 				   &adapter->rxtag);
2222 	if (error != 0) {
2223 		device_printf(adapter->dev, "em_allocate_receive_structures: "
2224 			      "bus_dma_tag_create failed; error %u\n", error);
2225 		goto fail_0;
2226 	}
2227 
2228 	rx_buffer = adapter->rx_buffer_area;
2229 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2230 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2231 					  &rx_buffer->map);
2232 		if (error != 0) {
2233 			device_printf(adapter->dev,
2234 				      "em_allocate_receive_structures: "
2235 				      "bus_dmamap_create failed; error %u\n",
2236 				      error);
2237 			goto fail_1;
2238 		}
2239 	}
2240 
2241 	for (i = 0; i < adapter->num_rx_desc; i++) {
2242 		error = em_get_buf(i, adapter, NULL, MB_WAIT);
2243 		if (error != 0) {
2244 			adapter->rx_buffer_area[i].m_head = NULL;
2245 			adapter->rx_desc_base[i].buffer_addr = 0;
2246 			return(error);
2247 		}
2248 	}
2249 
2250 	return(0);
2251 
2252 fail_1:
2253 	bus_dma_tag_destroy(adapter->rxtag);
2254 fail_0:
2255 	adapter->rxtag = NULL;
2256 	free(adapter->rx_buffer_area, M_DEVBUF);
2257 	adapter->rx_buffer_area = NULL;
2258 	return(error);
2259 }
2260 
2261 /*********************************************************************
2262  *
2263  *  Allocate and initialize receive structures.
2264  *
2265  **********************************************************************/
2266 static int
2267 em_setup_receive_structures(struct adapter *adapter)
2268 {
2269 	bzero((void *) adapter->rx_desc_base,
2270 	      (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2271 
2272 	if (em_allocate_receive_structures(adapter))
2273 		return(ENOMEM);
2274 
2275 	/* Setup our descriptor pointers */
2276 	adapter->next_rx_desc_to_check = 0;
2277 	return(0);
2278 }
2279 
2280 /*********************************************************************
2281  *
2282  *  Enable receive unit.
2283  *
2284  **********************************************************************/
2285 static void
2286 em_initialize_receive_unit(struct adapter *adapter)
2287 {
2288 	uint32_t reg_rctl;
2289 	uint32_t reg_rxcsum;
2290 	struct ifnet *ifp;
2291 	uint64_t bus_addr;
2292 
2293 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2294 
2295 	ifp = &adapter->interface_data.ac_if;
2296 
2297 	/* Make sure receives are disabled while setting up the descriptor ring */
2298 	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2299 
2300 	/* Set the Receive Delay Timer Register */
2301 	E1000_WRITE_REG(&adapter->hw, RDTR,
2302 			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2303 
2304 	if (adapter->hw.mac_type >= em_82540) {
2305 		E1000_WRITE_REG(&adapter->hw, RADV,
2306 				adapter->rx_abs_int_delay.value);
2307 
2308 		/* Set the interrupt throttling rate in 256ns increments */
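		/*
		 * For example, em_int_throttle_ceil = 10000 interrupts/s
		 * programs 1000000000 / 256 / 10000 = 390 (256ns units),
		 * i.e. roughly one interrupt every 100us at most.
		 */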
2309 		if (em_int_throttle_ceil) {
2310 			E1000_WRITE_REG(&adapter->hw, ITR,
2311 				1000000000 / 256 / em_int_throttle_ceil);
2312 		} else {
2313 			E1000_WRITE_REG(&adapter->hw, ITR, 0);
2314 		}
2315 	}
2316 
2317 	/* Setup the Base and Length of the Rx Descriptor Ring */
2318 	bus_addr = adapter->rxdma.dma_paddr;
2319 	E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2320 	E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2321 	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2322 			sizeof(struct em_rx_desc));
2323 
2324 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2325 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2326 	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2327 
2328 	/* Setup the Receive Control Register */
2329 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2330 		   E1000_RCTL_RDMTS_HALF |
2331 		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2332 
2333 	if (adapter->hw.tbi_compatibility_on == TRUE)
2334 		reg_rctl |= E1000_RCTL_SBP;
2335 
2336 	switch (adapter->rx_buffer_len) {
2337 	default:
2338 	case EM_RXBUFFER_2048:
2339 		reg_rctl |= E1000_RCTL_SZ_2048;
2340 		break;
2341 	case EM_RXBUFFER_4096:
2342 		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2343 		break;
2344 	case EM_RXBUFFER_8192:
2345 		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2346 		break;
2347 	case EM_RXBUFFER_16384:
2348 		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2349 		break;
2350 	}
2351 
2352 	if (ifp->if_mtu > ETHERMTU)
2353 		reg_rctl |= E1000_RCTL_LPE;
2354 
2355 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2356 	if ((adapter->hw.mac_type >= em_82543) &&
2357 	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2358 		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2359 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2360 		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2361 	}
2362 
2363 	/* Enable Receives */
2364 	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2365 }
2366 
2367 /*********************************************************************
2368  *
2369  *  Free receive related data structures.
2370  *
2371  **********************************************************************/
2372 static void
2373 em_free_receive_structures(struct adapter *adapter)
2374 {
2375 	struct em_buffer *rx_buffer;
2376 	int i;
2377 
2378 	INIT_DEBUGOUT("free_receive_structures: begin");
2379 
2380 	if (adapter->rx_buffer_area != NULL) {
2381 		rx_buffer = adapter->rx_buffer_area;
2382 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2383 			if (rx_buffer->map != NULL) {
2384 				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2385 				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2386 			}
2387 			if (rx_buffer->m_head != NULL)
2388 				m_freem(rx_buffer->m_head);
2389 			rx_buffer->m_head = NULL;
2390 		}
2391 	}
2392 	if (adapter->rx_buffer_area != NULL) {
2393 		free(adapter->rx_buffer_area, M_DEVBUF);
2394 		adapter->rx_buffer_area = NULL;
2395 	}
2396 	if (adapter->rxtag != NULL) {
2397 		bus_dma_tag_destroy(adapter->rxtag);
2398 		adapter->rxtag = NULL;
2399 	}
2400 }
2401 
2402 /*********************************************************************
2403  *
2404  *  This routine executes in interrupt context. It replenishes
2405  *  the mbufs in the descriptor ring and passes data that has been
2406  *  DMA'ed into host memory up to the upper layer.
2407  *
2408  *  We loop at most count times if count is > 0, or until done if
2409  *  count < 0.
2410  *
2411  *********************************************************************/
2412 static void
2413 em_process_receive_interrupts(struct adapter *adapter, int count)
2414 {
2415 	struct ifnet *ifp;
2416 	struct mbuf *mp;
2417 	uint8_t accept_frame = 0;
2418 	uint8_t eop = 0;
2419 	uint16_t len, desc_len, prev_len_adj;
2420 	int i;
2421 
2422 	/* Pointer to the receive descriptor being examined. */
2423 	struct em_rx_desc *current_desc;
2424 
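	/*
	 * adapter->fmp and adapter->lmp track the first and last mbuf of a
	 * frame that spans several descriptors, so fragments can be chained
	 * until the EOP descriptor arrives.
	 */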
2425 	ifp = &adapter->interface_data.ac_if;
2426 	i = adapter->next_rx_desc_to_check;
2427 	current_desc = &adapter->rx_desc_base[i];
2428 
2429 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2430 #ifdef DBG_STATS
2431 		adapter->no_pkts_avail++;
2432 #endif
2433 		return;
2434 	}
2435 	while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2436 		mp = adapter->rx_buffer_area[i].m_head;
2437 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2438 				BUS_DMASYNC_POSTREAD);
2439 
2440 		accept_frame = 1;
2441 		prev_len_adj = 0;
2442 		desc_len = le16toh(current_desc->length);
2443 		if (current_desc->status & E1000_RXD_STAT_EOP) {
2444 			count--;
2445 			eop = 1;
2446 			if (desc_len < ETHER_CRC_LEN) {
2447 				len = 0;
2448 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2449 			} else {
2451 				len = desc_len - ETHER_CRC_LEN;
2452 			}
2453 		} else {
2454 			eop = 0;
2455 			len = desc_len;
2456 		}
2457 
2458 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2459 			uint8_t last_byte;
2460 			uint32_t pkt_len = desc_len;
2461 
2462 			if (adapter->fmp != NULL)
2463 				pkt_len += adapter->fmp->m_pkthdr.len;
2464 
2465 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2466 
2467 			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2468 				       current_desc->errors,
2469 				       pkt_len, last_byte)) {
2470 				em_tbi_adjust_stats(&adapter->hw,
2471 						    &adapter->stats,
2472 						    pkt_len,
2473 						    adapter->hw.mac_addr);
2474 				if (len > 0)
2475 					len--;
2476 			}
2477 			else {
2478 				accept_frame = 0;
2479 			}
2480 		}
2481 
2482 		if (accept_frame) {
2483 			if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2484 				adapter->dropped_pkts++;
2485 				em_get_buf(i, adapter, mp, MB_DONTWAIT);
2486 				if (adapter->fmp != NULL)
2487 					m_freem(adapter->fmp);
2488 				adapter->fmp = NULL;
2489 				adapter->lmp = NULL;
2490 				break;
2491 			}
2492 
2493 			/* Assign correct length to the current fragment */
2494 			mp->m_len = len;
2495 
2496 			if (adapter->fmp == NULL) {
2497 				mp->m_pkthdr.len = len;
2498 				adapter->fmp = mp;	 /* Store the first mbuf */
2499 				adapter->lmp = mp;
2500 			} else {
2501 				/* Chain mbufs together */
2502 				mp->m_flags &= ~M_PKTHDR;
2503 				/*
2504 				 * Adjust length of previous mbuf in chain if we
2505 				 * received less than 4 bytes in the last descriptor.
2506 				 */
2507 				if (prev_len_adj > 0) {
2508 					adapter->lmp->m_len -= prev_len_adj;
2509 					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2510 				}
2511 				adapter->lmp->m_next = mp;
2512 				adapter->lmp = adapter->lmp->m_next;
2513 				adapter->fmp->m_pkthdr.len += len;
2514 			}
2515 
2516 			if (eop) {
2517 				adapter->fmp->m_pkthdr.rcvif = ifp;
2518 				ifp->if_ipackets++;
2519 
2520 				em_receive_checksum(adapter, current_desc,
2521 						    adapter->fmp);
2522 				if (current_desc->status & E1000_RXD_STAT_VP)
2523 					VLAN_INPUT_TAG(adapter->fmp,
2524 						       (current_desc->special &
2525 							E1000_RXD_SPC_VLAN_MASK));
2526 				else
2527 					(*ifp->if_input)(ifp, adapter->fmp);
2528 				adapter->fmp = NULL;
2529 				adapter->lmp = NULL;
2530 			}
2531 		} else {
2532 			adapter->dropped_pkts++;
2533 			em_get_buf(i, adapter, mp, MB_DONTWAIT);
2534 			if (adapter->fmp != NULL)
2535 				m_freem(adapter->fmp);
2536 			adapter->fmp = NULL;
2537 			adapter->lmp = NULL;
2538 		}
2539 
2540 		/* Zero out the receive descriptor's status */
2541 		current_desc->status = 0;
2542 
2543 		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2544 		E1000_WRITE_REG(&adapter->hw, RDT, i);
2545 
2546 		/* Advance our pointers to the next descriptor */
2547 		if (++i == adapter->num_rx_desc) {
2548 			i = 0;
2549 			current_desc = adapter->rx_desc_base;
2550 		} else
2551 			current_desc++;
2552 	}
2553 	adapter->next_rx_desc_to_check = i;
2554 }
2555 
2556 /*********************************************************************
2557  *
2558  *  Verify that the hardware indicated that the checksum is valid.
2559  *  Inform the stack about the status of checksum so that stack
2560  *  doesn't spend time verifying the checksum.
2561  *
2562  *********************************************************************/
2563 static void
2564 em_receive_checksum(struct adapter *adapter,
2565 		    struct em_rx_desc *rx_desc,
2566 		    struct mbuf *mp)
2567 {
2568 	/* 82543 or newer only */
2569 	if ((adapter->hw.mac_type < em_82543) ||
2570 	    /* Ignore Checksum bit is set */
2571 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2572 		mp->m_pkthdr.csum_flags = 0;
2573 		return;
2574 	}
2575 
2576 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2577 		/* Did it pass? */
2578 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2579 			/* IP Checksum Good */
2580 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2581 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2582 		} else {
2583 			mp->m_pkthdr.csum_flags = 0;
2584 		}
2585 	}
2586 
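	/*
	 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff is the
	 * stack's idiom for "the TCP/UDP checksum has been fully verified".
	 */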
2587 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2588 		/* Did it pass? */
2589 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2590 			mp->m_pkthdr.csum_flags |=
2591 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2592 			mp->m_pkthdr.csum_data = htons(0xffff);
2593 		}
2594 	}
2595 }
2596 
2597 
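/*
 * Program the 802.1Q ethertype into VET and set CTRL.VME so the MAC
 * recognizes VLAN-tagged frames.
 */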
2598 static void
2599 em_enable_vlans(struct adapter *adapter)
2600 {
2601 	uint32_t ctrl;
2602 
2603 	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2604 
2605 	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2606 	ctrl |= E1000_CTRL_VME;
2607 	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2608 }
2609 
2610 static void
2611 em_enable_intr(struct adapter *adapter)
2612 {
2613 	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2614 }
2615 
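/*
 * Mask all interrupt causes except the receive sequence error (RXSEQ)
 * interrupt, which is left unmasked.
 */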
2616 static void
2617 em_disable_intr(struct adapter *adapter)
2618 {
2619 	E1000_WRITE_REG(&adapter->hw, IMC,
2620 			(0xffffffff & ~E1000_IMC_RXSEQ));
2621 }
2622 
2623 static int
2624 em_is_valid_ether_addr(uint8_t *addr)
2625 {
2626 	static const char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2627 
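	/*
	 * Reject multicast/broadcast addresses (the I/G bit in the first
	 * octet is set) and the all-zero address; neither is a valid
	 * station address.
	 */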
2628 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2629 		return(FALSE);
2630 	else
2631 		return(TRUE);
2632 }
2633 
2634 void
2635 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2636 {
2637 	pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2638 }
2639 
2640 void
2641 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2642 {
2643 	*value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2644 }
2645 
2646 void
2647 em_pci_set_mwi(struct em_hw *hw)
2648 {
2649 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2650 			 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2651 }
2652 
2653 void
2654 em_pci_clear_mwi(struct em_hw *hw)
2655 {
2656 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2657 			 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2658 }
2659 
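/*
 * Indirect register access through the I/O BAR: the register offset is
 * written to the address window at offset 0 and the data is then moved
 * through the data window at offset 4.
 */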
2660 uint32_t
2661 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2662 {
2663 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2664 	return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2665 }
2666 
2667 void
2668 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2669 {
2670 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2671 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2672 }
2673 
2674 /*********************************************************************
2675  * 82544 Coexistence issue workaround.
2676  *    There are 2 issues.
2677  *	1. Transmit Hang issue.
2678  *    To detect this issue, the following equation can be used:
2679  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2680  *          If SUM[3:0] is between 1 and 4, this issue occurs.
2681  *
2682  *	2. DAC issue.
2683  *    To detect this issue, the following equation can be used:
2684  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2685  *          If SUM[3:0] is between 9 and c, this issue occurs.
2686  *
2687  *
2688  *    WORKAROUND:
2689  *          Make sure the transfer does not end at an address whose low
2690  *          nibble is 1, 2, 3 or 4 (hang) or 9, a, b or c (DAC).
2691  *
2692  ************************************************************************/
2693 static uint32_t
2694 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2695 {
2696 	/* The issue is sensitive to both the length and the address, */
2697 	/* so examine the address first. */
2698 	uint32_t safe_terminator;
2699 	if (length <= 4) {
2700 		desc_array->descriptor[0].address = address;
2701 		desc_array->descriptor[0].length = length;
2702 		desc_array->elements = 1;
2703 		return(desc_array->elements);
2704 	}
2705 	safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2706 	/* If it does not fall into the 0x1-0x4 or 0x9-0xC ranges, a single descriptor is safe */
2707 	if (safe_terminator == 0 ||
2708 	    (safe_terminator > 4 && safe_terminator < 9) ||
2709 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2710 		desc_array->descriptor[0].address = address;
2711 		desc_array->descriptor[0].length = length;
2712 		desc_array->elements = 1;
2713 		return(desc_array->elements);
2714 	}
2715 
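	/*
	 * Unsafe terminator: split the buffer so the last 4 bytes go into a
	 * second descriptor and neither descriptor ends on a problematic
	 * address.
	 */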
2716 	desc_array->descriptor[0].address = address;
2717 	desc_array->descriptor[0].length = length - 4;
2718 	desc_array->descriptor[1].address = address + (length - 4);
2719 	desc_array->descriptor[1].length = 4;
2720 	desc_array->elements = 2;
2721 	return(desc_array->elements);
2722 }
2723 
2724 /**********************************************************************
2725  *
2726  *  Update the board statistics counters.
2727  *
2728  **********************************************************************/
2729 static void
2730 em_update_stats_counters(struct adapter *adapter)
2731 {
2732 	struct ifnet   *ifp;
2733 
2734 	if (adapter->hw.media_type == em_media_type_copper ||
2735 	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2736 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2737 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2738 	}
2739 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2740 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2741 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2742 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2743 
2744 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2745 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2746 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2747 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2748 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2749 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2750 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2751 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2752 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2753 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2754 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2755 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2756 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2757 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2758 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2759 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2760 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2761 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2762 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2763 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2764 
2765 	/* For the 64-bit byte counters the low dword must be read first. */
2766 	/* Both registers clear on the read of the high dword */
2767 
2768 	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2769 	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2770 	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2771 	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2772 
2773 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2774 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2775 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2776 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2777 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2778 
2779 	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2780 	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2781 	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2782 	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2783 
2784 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2785 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2786 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2787 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2788 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2789 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2790 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2791 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2792 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2793 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2794 
2795 	if (adapter->hw.mac_type >= em_82543) {
2796 		adapter->stats.algnerrc +=
2797 		    E1000_READ_REG(&adapter->hw, ALGNERRC);
2798 		adapter->stats.rxerrc +=
2799 		    E1000_READ_REG(&adapter->hw, RXERRC);
2800 		adapter->stats.tncrs +=
2801 		    E1000_READ_REG(&adapter->hw, TNCRS);
2802 		adapter->stats.cexterr +=
2803 		    E1000_READ_REG(&adapter->hw, CEXTERR);
2804 		adapter->stats.tsctc +=
2805 		    E1000_READ_REG(&adapter->hw, TSCTC);
2806 		adapter->stats.tsctfc +=
2807 		    E1000_READ_REG(&adapter->hw, TSCTFC);
2808 	}
2809 	ifp = &adapter->interface_data.ac_if;
2810 
2811 	/* Fill out the OS statistics structure */
2812 	ifp->if_ibytes = adapter->stats.gorcl;
2813 	ifp->if_obytes = adapter->stats.gotcl;
2814 	ifp->if_imcasts = adapter->stats.mprc;
2815 	ifp->if_collisions = adapter->stats.colc;
2816 
2817 	/* Rx Errors */
2818 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2819 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
2820 	    adapter->stats.rlec + adapter->stats.rnbc +
2821 	    adapter->stats.mpc + adapter->stats.cexterr;
2822 
2823 	/* Tx Errors */
2824 	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2825 }
2826 
2827 
2828 /**********************************************************************
2829  *
2830  *  This routine is called only when em_display_debug_stats is enabled.
2831  *  This routine provides a way to take a look at important statistics
2832  *  maintained by the driver and hardware.
2833  *
2834  **********************************************************************/
2835 static void
2836 em_print_debug_info(struct adapter *adapter)
2837 {
2838 	device_t dev = adapter->dev;
2839 	uint8_t *hw_addr = adapter->hw.hw_addr;
2840 
2841 	device_printf(dev, "Adapter hardware address = %p\n", hw_addr);
2842 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2843 		      E1000_READ_REG(&adapter->hw, TIDV),
2844 		      E1000_READ_REG(&adapter->hw, TADV));
2845 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2846 		      E1000_READ_REG(&adapter->hw, RDTR),
2847 		      E1000_READ_REG(&adapter->hw, RADV));
2848 #ifdef DBG_STATS
2849 	device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2850 	device_printf(dev, "CleanTxInterrupts = %ld\n",
2851 		      adapter->clean_tx_interrupts);
2852 #endif
2853 	device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2854 		      (long long)adapter->tx_fifo_wrk,
2855 		      (long long)adapter->tx_fifo_reset);
2856 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2857 		      E1000_READ_REG(&adapter->hw, TDH),
2858 		      E1000_READ_REG(&adapter->hw, TDT));
2859 	device_printf(dev, "Num Tx descriptors avail = %d\n",
2860 		      adapter->num_tx_desc_avail);
2861 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2862 		      adapter->no_tx_desc_avail1);
2863 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2864 		      adapter->no_tx_desc_avail2);
2865 	device_printf(dev, "Std mbuf failed = %ld\n",
2866 		      adapter->mbuf_alloc_failed);
2867 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
2868 		      adapter->mbuf_cluster_failed);
2869 	device_printf(dev, "Driver dropped packets = %ld\n",
2870 		      adapter->dropped_pkts);
2871 }
2872 
2873 static void
2874 em_print_hw_stats(struct adapter *adapter)
2875 {
2876 	device_t dev = adapter->dev;
2877 
2878 	device_printf(dev, "Adapter: %p\n", adapter);
2879 
2880 	device_printf(dev, "Excessive collisions = %lld\n",
2881 		      (long long)adapter->stats.ecol);
2882 	device_printf(dev, "Symbol errors = %lld\n",
2883 		      (long long)adapter->stats.symerrs);
2884 	device_printf(dev, "Sequence errors = %lld\n",
2885 		      (long long)adapter->stats.sec);
2886 	device_printf(dev, "Defer count = %lld\n",
2887 		      (long long)adapter->stats.dc);
2888 
2889 	device_printf(dev, "Missed Packets = %lld\n",
2890 		      (long long)adapter->stats.mpc);
2891 	device_printf(dev, "Receive No Buffers = %lld\n",
2892 		      (long long)adapter->stats.rnbc);
2893 	device_printf(dev, "Receive length errors = %lld\n",
2894 		      (long long)adapter->stats.rlec);
2895 	device_printf(dev, "Receive errors = %lld\n",
2896 		      (long long)adapter->stats.rxerrc);
2897 	device_printf(dev, "Crc errors = %lld\n",
2898 		      (long long)adapter->stats.crcerrs);
2899 	device_printf(dev, "Alignment errors = %lld\n",
2900 		      (long long)adapter->stats.algnerrc);
2901 	device_printf(dev, "Carrier extension errors = %lld\n",
2902 		      (long long)adapter->stats.cexterr);
2903 
2904 	device_printf(dev, "XON Rcvd = %lld\n",
2905 		      (long long)adapter->stats.xonrxc);
2906 	device_printf(dev, "XON Xmtd = %lld\n",
2907 		      (long long)adapter->stats.xontxc);
2908 	device_printf(dev, "XOFF Rcvd = %lld\n",
2909 		      (long long)adapter->stats.xoffrxc);
2910 	device_printf(dev, "XOFF Xmtd = %lld\n",
2911 		      (long long)adapter->stats.xofftxc);
2912 
2913 	device_printf(dev, "Good Packets Rcvd = %lld\n",
2914 		      (long long)adapter->stats.gprc);
2915 	device_printf(dev, "Good Packets Xmtd = %lld\n",
2916 		      (long long)adapter->stats.gptc);
2917 }
2918 
2919 static int
2920 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2921 {
2922 	int error;
2923 	int result;
2924 	struct adapter *adapter;
2925 
2926 	result = -1;
2927 	error = sysctl_handle_int(oidp, &result, 0, req);
2928 
2929 	if (error || !req->newptr)
2930 		return(error);
2931 
2932 	if (result == 1) {
2933 		adapter = (struct adapter *)arg1;
2934 		em_print_debug_info(adapter);
2935 	}
2936 
2937 	return(error);
2938 }
2939 
2940 static int
2941 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2942 {
2943 	int error;
2944 	int result;
2945 	struct adapter *adapter;
2946 
2947 	result = -1;
2948 	error = sysctl_handle_int(oidp, &result, 0, req);
2949 
2950 	if (error || !req->newptr)
2951 		return(error);
2952 
2953 	if (result == 1) {
2954 		adapter = (struct adapter *)arg1;
2955 		em_print_hw_stats(adapter);
2956 	}
2957 
2958 	return(error);
2959 }
2960 
2961 static int
2962 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2963 {
2964 	struct em_int_delay_info *info;
2965 	struct adapter *adapter;
2966 	uint32_t regval;
2967 	int error;
2968 	int usecs;
2969 	int ticks;
2970 	int s;
2971 
2972 	info = (struct em_int_delay_info *)arg1;
2973 	adapter = info->adapter;
2974 	usecs = info->value;
2975 	error = sysctl_handle_int(oidp, &usecs, 0, req);
2976 	if (error != 0 || req->newptr == NULL)
2977 		return(error);
2978 	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
2979 		return(EINVAL);
2980 	info->value = usecs;
2981 	ticks = E1000_USECS_TO_TICKS(usecs);
2982 
2983 	s = splimp();
2984 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
2985 	regval = (regval & ~0xffff) | (ticks & 0xffff);
2986 	/* Handle a few special cases. */
2987 	switch (info->offset) {
2988 	case E1000_RDTR:
2989 	case E1000_82542_RDTR:
2990 		regval |= E1000_RDT_FPDB;
2991 		break;
2992 	case E1000_TIDV:
2993 	case E1000_82542_TIDV:
2994 		if (ticks == 0) {
2995 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
2996 			/* Don't write 0 into the TIDV register. */
2997 			regval++;
2998 		} else
2999 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3000 		break;
3001 	}
3002 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3003 	splx(s);
3004 	return(0);
3005 }
3006 
3007 static void
3008 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3009 			const char *description, struct em_int_delay_info *info,
3010 			int offset, int value)
3011 {
3012 	info->adapter = adapter;
3013 	info->offset = offset;
3014 	info->value = value;
3015 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3016 			SYSCTL_CHILDREN(adapter->sysctl_tree),
3017 			OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3018 			info, 0, em_sysctl_int_delay, "I", description);
3019 }
3020 
3021 static int
3022 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3023 {
3024 	struct adapter *adapter = (void *)arg1;
3025 	int error;
3026 	int throttle;
3027 
3028 	throttle = em_int_throttle_ceil;
3029 	error = sysctl_handle_int(oidp, &throttle, 0, req);
3030 	if (error || req->newptr == NULL)
3031 		return error;
3032 	if (throttle < 0 || throttle > 1000000000 / 256)
3033 		return EINVAL;
3034 	if (throttle) {
3035 		/*
3036 		 * Set the interrupt throttling rate in 256ns increments,
3037 		 * recalculate sysctl value assignment to get exact frequency.
3038 		 */
3039 		throttle = 1000000000 / 256 / throttle;
3040 		em_int_throttle_ceil = 1000000000 / 256 / throttle;
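		/*
		 * Example: requesting 9999 interrupts/s yields 3906250 / 9999
		 * = 390 ticks, and the recalculated ceiling reported back is
		 * 3906250 / 390 = 10016 interrupts/s.
		 */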
3041 		crit_enter();
3042 		E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3043 		crit_exit();
3044 	} else {
3045 		em_int_throttle_ceil = 0;
3046 		crit_enter();
3047 		E1000_WRITE_REG(&adapter->hw, ITR, 0);
3048 		crit_exit();
3049 	}
3050 	device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3051 			em_int_throttle_ceil);
3052 	return 0;
3053 }
3054 
3055