xref: /dragonfly/sys/dev/netif/em/if_em.c (revision 86fe9e07)
1 /**************************************************************************
2 
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4 
5 Copyright (c) 2001-2003, Intel Corporation
6 All rights reserved.
7 
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10 
11  1. Redistributions of source code must retain the above copyright notice,
12     this list of conditions and the following disclaimer.
13 
14  2. Redistributions in binary form must reproduce the above copyright
15     notice, this list of conditions and the following disclaimer in the
16     documentation and/or other materials provided with the distribution.
17 
18  3. Neither the name of the Intel Corporation nor the names of its
19     contributors may be used to endorse or promote products derived from
20     this software without specific prior written permission.
21 
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33 
34 ***************************************************************************/
35 
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.19 2004/07/23 07:16:25 joerg Exp $*/
38 
39 #include <dev/netif/em/if_em.h>
40 
41 /*********************************************************************
42  *  Set this to one to display debug statistics
43  *********************************************************************/
44 int             em_display_debug_stats = 0;
45 
46 /*********************************************************************
47  *  Driver version
48  *********************************************************************/
49 
50 char em_driver_version[] = "1.7.25";
51 
52 
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select the devices to attach to
57  *  Last field stores an index into em_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62 
63 static em_vendor_info_t em_vendor_info_array[] =
64 {
65 	/* Intel(R) PRO/1000 Network Connection */
66 	{ 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
67 	{ 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
68 	{ 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
69 	{ 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
70 	{ 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
71 	{ 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
72 	{ 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
73 	{ 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
74 	{ 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
75 	{ 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
76 	{ 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
77 	{ 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
78 	{ 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
79 	{ 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
80 	{ 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
81 	{ 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
82 	{ 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
83 	{ 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
84 	{ 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
85 	{ 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
86 	{ 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
87 	{ 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
88 	{ 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
89 	{ 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
90 	{ 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
91 	{ 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
92 	{ 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
93 	{ 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
94 	{ 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
95 	{ 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
96 	{ 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
97 	{ 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
98 	/* required last entry */
99 	{ 0, 0, 0, 0, 0}
100 };
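/*
 * A sketch of what supporting an additional PRO/1000 variant would look
 * like: a new row goes in front of the all-zero terminator above, e.g.
 * (hypothetical device ID, shown purely for illustration)
 *
 *	{ 0x8086, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0},
 *
 * where the trailing 0 is the index into em_strings[] used by em_probe()
 * for the branding string.
 */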
101 
102 /*********************************************************************
103  *  Table of branding strings for all supported NICs.
104  *********************************************************************/
105 
106 static const char *em_strings[] = {
107 	"Intel(R) PRO/1000 Network Connection"
108 };
109 
110 /*********************************************************************
111  *  Function prototypes
112  *********************************************************************/
113 static int	em_probe(device_t);
114 static int	em_attach(device_t);
115 static int	em_detach(device_t);
116 static int	em_shutdown(device_t);
117 static void	em_intr(void *);
118 static void	em_start(struct ifnet *);
119 static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
120 static void	em_watchdog(struct ifnet *);
121 static void	em_init(void *);
122 static void	em_stop(void *);
123 static void	em_media_status(struct ifnet *, struct ifmediareq *);
124 static int	em_media_change(struct ifnet *);
125 static void	em_identify_hardware(struct adapter *);
126 static void	em_local_timer(void *);
127 static int	em_hardware_init(struct adapter *);
128 static void	em_setup_interface(device_t, struct adapter *);
129 static int	em_setup_transmit_structures(struct adapter *);
130 static void	em_initialize_transmit_unit(struct adapter *);
131 static int	em_setup_receive_structures(struct adapter *);
132 static void	em_initialize_receive_unit(struct adapter *);
133 static void	em_enable_intr(struct adapter *);
134 static void	em_disable_intr(struct adapter *);
135 static void	em_free_transmit_structures(struct adapter *);
136 static void	em_free_receive_structures(struct adapter *);
137 static void	em_update_stats_counters(struct adapter *);
138 static void	em_clean_transmit_interrupts(struct adapter *);
139 static int	em_allocate_receive_structures(struct adapter *);
140 static int	em_allocate_transmit_structures(struct adapter *);
141 static void	em_process_receive_interrupts(struct adapter *, int);
142 static void	em_receive_checksum(struct adapter *, struct em_rx_desc *,
143 				    struct mbuf *);
144 static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
145 					   uint32_t *, uint32_t *);
146 static void	em_set_promisc(struct adapter *);
147 static void	em_disable_promisc(struct adapter *);
148 static void	em_set_multi(struct adapter *);
149 static void	em_print_hw_stats(struct adapter *);
150 static void	em_print_link_status(struct adapter *);
151 static int	em_get_buf(int i, struct adapter *, struct mbuf *);
152 static void	em_enable_vlans(struct adapter *);
153 static int	em_encap(struct adapter *, struct mbuf *);
154 static void	em_smartspeed(struct adapter *);
155 static int	em_82547_fifo_workaround(struct adapter *, int);
156 static void	em_82547_update_fifo_head(struct adapter *, int);
157 static int	em_82547_tx_fifo_reset(struct adapter *);
158 static void	em_82547_move_tail(void *arg);
159 static int	em_dma_malloc(struct adapter *, bus_size_t,
160 			      struct em_dma_alloc *, int);
161 static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
162 static void	em_print_debug_info(struct adapter *);
163 static int	em_is_valid_ether_addr(uint8_t *);
164 static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
165 static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
166 static uint32_t	em_fill_descriptors(uint64_t address, uint32_t length,
167 				   PDESC_ARRAY desc_array);
168 static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
169 static void	em_add_int_delay_sysctl(struct adapter *, const char *,
170 					const char *,
171 					struct em_int_delay_info *, int, int);
172 
173 /*********************************************************************
174  *  FreeBSD Device Interface Entry Points
175  *********************************************************************/
176 
177 static device_method_t em_methods[] = {
178 	/* Device interface */
179 	DEVMETHOD(device_probe, em_probe),
180 	DEVMETHOD(device_attach, em_attach),
181 	DEVMETHOD(device_detach, em_detach),
182 	DEVMETHOD(device_shutdown, em_shutdown),
183 	{0, 0}
184 };
185 
186 static driver_t em_driver = {
187 	"em", em_methods, sizeof(struct adapter),
188 };
189 
190 static devclass_t em_devclass;
191 
192 DECLARE_DUMMY_MODULE(if_em);
193 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
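/*
 * Usage sketch: when built as a module the driver is conventionally loaded
 * as "if_em" (e.g. kldload if_em, or if_em_load="YES" in loader.conf),
 * while "device em" in the kernel configuration links it statically.  The
 * exact spellings above are the customary ones and shown only as an
 * illustration.
 */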
194 
195 /*********************************************************************
196  *  Tunable default values.
197  *********************************************************************/
198 
199 #define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
200 #define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
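/*
 * Worked example of the conversion macros above (as the 1024 factor
 * implies, one hardware tick is 1.024 usec):
 *
 *	E1000_TICKS_TO_USECS(64) = (1024 * 64 + 500) / 1000 = 66 usecs
 *	E1000_USECS_TO_TICKS(66) = (1000 * 66 + 512) / 1024 = 64 ticks
 *
 * The +500 and +512 terms round to the nearest unit rather than truncating.
 */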
201 
202 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
203 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
204 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
205 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
206 
207 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
208 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
209 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
210 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
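/*
 * The defaults above can be overridden at boot time through the loader
 * tunables registered here, e.g. in /boot/loader.conf (values are in
 * microseconds; the numbers below are purely illustrative, not
 * recommendations):
 *
 *	hw.em.tx_int_delay="66"
 *	hw.em.rx_int_delay="0"
 *	hw.em.tx_abs_int_delay="66"
 *	hw.em.rx_abs_int_delay="66"
 */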
211 
212 /*********************************************************************
213  *  Device identification routine
214  *
215  *  em_probe determines if the driver should be loaded on
216  *  adapter based on PCI vendor/device id of the adapter.
217  *
218  *  return 0 on success, positive on failure
219  *********************************************************************/
220 
221 static int
222 em_probe(device_t dev)
223 {
224 	em_vendor_info_t *ent;
225 
226 	uint16_t pci_vendor_id = 0;
227 	uint16_t pci_device_id = 0;
228 	uint16_t pci_subvendor_id = 0;
229 	uint16_t pci_subdevice_id = 0;
230 	char adapter_name[60];
231 
232 	INIT_DEBUGOUT("em_probe: begin");
233 
234 	pci_vendor_id = pci_get_vendor(dev);
235 	if (pci_vendor_id != EM_VENDOR_ID)
236 		return(ENXIO);
237 
238 	pci_device_id = pci_get_device(dev);
239 	pci_subvendor_id = pci_get_subvendor(dev);
240 	pci_subdevice_id = pci_get_subdevice(dev);
241 
242 	ent = em_vendor_info_array;
243 	while (ent->vendor_id != 0) {
244 		if ((pci_vendor_id == ent->vendor_id) &&
245 		    (pci_device_id == ent->device_id) &&
246 
247 		    ((pci_subvendor_id == ent->subvendor_id) ||
248 		     (ent->subvendor_id == PCI_ANY_ID)) &&
249 
250 		    ((pci_subdevice_id == ent->subdevice_id) ||
251 		     (ent->subdevice_id == PCI_ANY_ID))) {
252 			snprintf(adapter_name, sizeof(adapter_name),
253 				 "%s, Version - %s",  em_strings[ent->index],
254 				 em_driver_version);
255 			device_set_desc_copy(dev, adapter_name);
256 			return(0);
257 		}
258 		ent++;
259 	}
260 
261 	return(ENXIO);
262 }
263 
264 /*********************************************************************
265  *  Device initialization routine
266  *
267  *  The attach entry point is called when the driver is being loaded.
268  *  This routine identifies the type of hardware, allocates all resources
269  *  and initializes the hardware.
270  *
271  *  return 0 on success, positive on failure
272  *********************************************************************/
273 
274 static int
275 em_attach(device_t dev)
276 {
277 	struct adapter *adapter;
278 	int tsize, rsize;
279 	int i, val, rid;
280 	int error = 0;
281 
282 	INIT_DEBUGOUT("em_attach: begin");
283 
284 	adapter = device_get_softc(dev);
285 
286 	bzero(adapter, sizeof(struct adapter));
287 
288 	callout_init(&adapter->timer);
289 	callout_init(&adapter->tx_fifo_timer);
290 
291 	adapter->dev = dev;
292 	adapter->osdep.dev = dev;
293 
294 	/* SYSCTL stuff */
295 	sysctl_ctx_init(&adapter->sysctl_ctx);
296 	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
297 					       SYSCTL_STATIC_CHILDREN(_hw),
298 					       OID_AUTO,
299 					       device_get_nameunit(dev),
300 					       CTLFLAG_RD,
301 					       0, "");
302 
303 	if (adapter->sysctl_tree == NULL) {
304 		error = EIO;
305 		goto fail;
306 	}
307 
308 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
309 			SYSCTL_CHILDREN(adapter->sysctl_tree),
310 			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
311 			(void *)adapter, 0,
312 			em_sysctl_debug_info, "I", "Debug Information");
313 
314 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
315 			SYSCTL_CHILDREN(adapter->sysctl_tree),
316 			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
317 			(void *)adapter, 0,
318 			em_sysctl_stats, "I", "Statistics");
319 
320 	/* Determine hardware revision */
321 	em_identify_hardware(adapter);
322 
323 	/* Set up some sysctls for the tunable interrupt delays */
324 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
325 				"receive interrupt delay in usecs",
326 				&adapter->rx_int_delay,
327 				E1000_REG_OFFSET(&adapter->hw, RDTR),
328 				em_rx_int_delay_dflt);
329 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
330 				"transmit interrupt delay in usecs",
331 				&adapter->tx_int_delay,
332 				E1000_REG_OFFSET(&adapter->hw, TIDV),
333 				em_tx_int_delay_dflt);
334 	if (adapter->hw.mac_type >= em_82540) {
335 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
336 					"receive interrupt delay limit in usecs",
337 					&adapter->rx_abs_int_delay,
338 					E1000_REG_OFFSET(&adapter->hw, RADV),
339 					em_rx_abs_int_delay_dflt);
340 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
341 					"transmit interrupt delay limit in usecs",
342 					&adapter->tx_abs_int_delay,
343 					E1000_REG_OFFSET(&adapter->hw, TADV),
344 					em_tx_abs_int_delay_dflt);
345 	}
346 
347 	/* Parameters (to be read from user) */
348 	adapter->num_tx_desc = EM_MAX_TXD;
349 	adapter->num_rx_desc = EM_MAX_RXD;
350 	adapter->hw.autoneg = DO_AUTO_NEG;
351 	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
352 	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
353 	adapter->hw.tbi_compatibility_en = TRUE;
354 	adapter->rx_buffer_len = EM_RXBUFFER_2048;
355 
356 	/*
357 	 * These parameters control the automatic generation (Tx) of, and
358 	 * response (Rx) to, Ethernet PAUSE frames.
359 	 */
360 	adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
361 	adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
362 	adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
363 	adapter->hw.fc_send_xon   = TRUE;
364 	adapter->hw.fc = em_fc_full;
365 
366 	adapter->hw.phy_init_script = 1;
367 	adapter->hw.phy_reset_disable = FALSE;
368 
369 #ifndef EM_MASTER_SLAVE
370 	adapter->hw.master_slave = em_ms_hw_default;
371 #else
372 	adapter->hw.master_slave = EM_MASTER_SLAVE;
373 #endif
374 
375 	/*
376 	 * Set the max frame size assuming standard ethernet
377 	 * sized frames
378 	 */
379 	adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
380 
381 	adapter->hw.min_frame_size =
382 	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
383 
384 	/*
385 	 * This controls when hardware reports transmit completion
386 	 * status.
387 	 */
388 	adapter->hw.report_tx_early = 1;
389 
390 	rid = EM_MMBA;
391 	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
392 						     &rid, RF_ACTIVE);
393 	if (!(adapter->res_memory)) {
394 		device_printf(dev, "Unable to allocate bus resource: memory\n");
395 		error = ENXIO;
396 		goto fail;
397 	}
398 	adapter->osdep.mem_bus_space_tag =
399 	    rman_get_bustag(adapter->res_memory);
400 	adapter->osdep.mem_bus_space_handle =
401 	    rman_get_bushandle(adapter->res_memory);
402 	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
403 
404 	if (adapter->hw.mac_type > em_82543) {
405 		/* Figure out where our I/O BAR is */
406 		rid = EM_MMBA;
407 		for (i = 0; i < 5; i++) {
408 			val = pci_read_config(dev, rid, 4);
409 			if (val & 0x00000001) {
410 				adapter->io_rid = rid;
411 				break;
412 			}
413 			rid += 4;
414 		}
415 
416 		adapter->res_ioport = bus_alloc_resource_any(dev,
417 		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
418 		if (!(adapter->res_ioport)) {
419 			device_printf(dev, "Unable to allocate bus resource: ioport\n");
420 			error = ENXIO;
421 			goto fail;
422 		}
423 
424 		adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
425 		adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
426 	}
427 
428 	rid = 0x0;
429 	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
430 	    &rid, RF_SHAREABLE | RF_ACTIVE);
431 	if (!(adapter->res_interrupt)) {
432 		device_printf(dev, "Unable to allocate bus resource: interrupt\n");
433 		error = ENXIO;
434 		goto fail;
435 	}
436 
437 	adapter->hw.back = &adapter->osdep;
438 
439 	/* Initialize eeprom parameters */
440 	em_init_eeprom_params(&adapter->hw);
441 
442 	tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
443 
444 	/* Allocate Transmit Descriptor ring */
445 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
446 		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
447 		error = ENOMEM;
448 		goto fail;
449 	}
450 	adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
451 
452 	rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
453 
454 	/* Allocate Receive Descriptor ring */
455 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
456 		device_printf(dev, "Unable to allocate rx_desc memory\n");
457 		error = ENOMEM;
458 		goto fail;
459 	}
460 	adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
461 
462 	/* Initialize the hardware */
463 	if (em_hardware_init(adapter)) {
464 		device_printf(dev, "Unable to initialize the hardware\n");
465 		error = EIO;
466 		goto fail;
467 	}
468 
469 	/* Copy the permanent MAC address out of the EEPROM */
470 	if (em_read_mac_addr(&adapter->hw) < 0) {
471 		device_printf(dev, "EEPROM read error while reading mac address\n");
472 		error = EIO;
473 		goto fail;
474 	}
475 
476 	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
477 		device_printf(dev, "Invalid mac address\n");
478 		error = EIO;
479 		goto fail;
480 	}
481 
482 	/* Setup OS specific network interface */
483 	em_setup_interface(dev, adapter);
484 
485 	/* Initialize statistics */
486 	em_clear_hw_cntrs(&adapter->hw);
487 	em_update_stats_counters(adapter);
488 	adapter->hw.get_link_status = 1;
489 	em_check_for_link(&adapter->hw);
490 
491 	/* Print the link status */
492 	if (adapter->link_active == 1) {
493 		em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
494 					&adapter->link_duplex);
495 		device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
496 		    adapter->link_speed,
497 		    adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
498 	} else
499 		device_printf(dev, "Speed: N/A, Duplex: N/A\n");
500 
501 	/* Identify 82544 on PCIX */
502 	em_get_bus_info(&adapter->hw);
503 	if (adapter->hw.bus_type == em_bus_type_pcix &&
504 	    adapter->hw.mac_type == em_82544)
505 		adapter->pcix_82544 = TRUE;
506 	else
507 		adapter->pcix_82544 = FALSE;
508 
509 	error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET,
510 			   (void (*)(void *)) em_intr, adapter,
511 			   &adapter->int_handler_tag);
512 	if (error) {
513 		device_printf(dev, "Error registering interrupt handler!\n");
514 		ether_ifdetach(&adapter->interface_data.ac_if);
515 		goto fail;
516 	}
517 
518 	INIT_DEBUGOUT("em_attach: end");
519 	return(0);
520 
521 fail:
522 	em_detach(dev);
523 	return(error);
524 }
525 
526 /*********************************************************************
527  *  Device removal routine
528  *
529  *  The detach entry point is called when the driver is being removed.
530  *  This routine stops the adapter and deallocates all the resources
531  *  that were allocated for driver operation.
532  *
533  *  return 0 on success, positive on failure
534  *********************************************************************/
535 
536 static int
537 em_detach(device_t dev)
538 {
539 	struct adapter * adapter = device_get_softc(dev);
540 	int s;
541 
542 	INIT_DEBUGOUT("em_detach: begin");
543 	s = splimp();
544 
545 	adapter->in_detach = 1;
546 
547 	if (device_is_attached(dev)) {
548 		em_stop(adapter);
549 		em_phy_hw_reset(&adapter->hw);
550 		ether_ifdetach(&adapter->interface_data.ac_if);
551 	}
552 	bus_generic_detach(dev);
553 
554 	if (adapter->res_interrupt != NULL) {
555 		bus_teardown_intr(dev, adapter->res_interrupt,
556 				  adapter->int_handler_tag);
557 		bus_release_resource(dev, SYS_RES_IRQ, 0,
558 				     adapter->res_interrupt);
559 	}
560 	if (adapter->res_memory != NULL) {
561 		bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
562 				     adapter->res_memory);
563 	}
564 
565 	if (adapter->res_ioport != NULL) {
566 		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
567 				     adapter->res_ioport);
568 	}
569 
570 	/* Free Transmit Descriptor ring */
571 	if (adapter->tx_desc_base != NULL) {
572 		em_dma_free(adapter, &adapter->txdma);
573 		adapter->tx_desc_base = NULL;
574 	}
575 
576 	/* Free Receive Descriptor ring */
577 	if (adapter->rx_desc_base != NULL) {
578 		em_dma_free(adapter, &adapter->rxdma);
579 		adapter->rx_desc_base = NULL;
580 	}
581 
582 	adapter->sysctl_tree = NULL;
583 	sysctl_ctx_free(&adapter->sysctl_ctx);
584 
585 	splx(s);
586 	return(0);
587 }
588 
589 /*********************************************************************
590  *
591  *  Shutdown entry point
592  *
593  **********************************************************************/
594 
595 static int
596 em_shutdown(device_t dev)
597 {
598 	struct adapter *adapter = device_get_softc(dev);
599 	em_stop(adapter);
600 	return(0);
601 }
602 
603 /*********************************************************************
604  *  Transmit entry point
605  *
606  *  em_start is called by the stack to initiate a transmit.
607  *  The driver will remain in this routine as long as there are
608  *  packets to transmit and transmit resources are available.
609  *  If resources are not available, the stack is notified and
610  *  the packet is requeued.
611  **********************************************************************/
612 
613 static void
614 em_start(struct ifnet *ifp)
615 {
616 	int s;
617 	struct mbuf *m_head;
618 	struct adapter *adapter = ifp->if_softc;
619 
620 	if (!adapter->link_active)
621 		return;
622 
623 	s = splimp();
624 	while (ifp->if_snd.ifq_head != NULL) {
625 		IF_DEQUEUE(&ifp->if_snd, m_head);
626 
627 		if (m_head == NULL)
628 			break;
629 
630 		if (em_encap(adapter, m_head)) {
631 			ifp->if_flags |= IFF_OACTIVE;
632 			IF_PREPEND(&ifp->if_snd, m_head);
633 			break;
634 		}
635 
636 		/* Send a copy of the frame to the BPF listener */
637 		BPF_MTAP(ifp, m_head);
638 
639 		/* Set timeout in case hardware has problems transmitting */
640 		ifp->if_timer = EM_TX_TIMEOUT;
641 	}
642 	splx(s);
643 }
644 
645 /*********************************************************************
646  *  Ioctl entry point
647  *
648  *  em_ioctl is called when the user wants to configure the
649  *  interface.
650  *
651  *  return 0 on success, positive on failure
652  **********************************************************************/
653 
654 static int
655 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
656 {
657 	int s, mask, error = 0;
658 	struct ifreq *ifr = (struct ifreq *) data;
659 	struct adapter *adapter = ifp->if_softc;
660 
661 	s = splimp();
662 
663 	if (adapter->in_detach)
664 		goto out;
665 
666 	switch (command) {
667 	case SIOCSIFADDR:
668 	case SIOCGIFADDR:
669 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
670 		ether_ioctl(ifp, command, data);
671 		break;
672 	case SIOCSIFMTU:
673 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
674 		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
675 			error = EINVAL;
676 		} else {
677 			ifp->if_mtu = ifr->ifr_mtu;
678 			adapter->hw.max_frame_size =
679 			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
680 			em_init(adapter);
681 		}
682 		break;
683 	case SIOCSIFFLAGS:
684 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
685 		if (ifp->if_flags & IFF_UP) {
686 			if (!(ifp->if_flags & IFF_RUNNING))
687 				em_init(adapter);
688 			em_disable_promisc(adapter);
689 			em_set_promisc(adapter);
690 		} else {
691 			if (ifp->if_flags & IFF_RUNNING)
692 				em_stop(adapter);
693 		}
694 		break;
695 	case SIOCADDMULTI:
696 	case SIOCDELMULTI:
697 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
698 		if (ifp->if_flags & IFF_RUNNING) {
699 			em_disable_intr(adapter);
700 			em_set_multi(adapter);
701 			if (adapter->hw.mac_type == em_82542_rev2_0)
702 				em_initialize_receive_unit(adapter);
703 #ifdef DEVICE_POLLING
704 			if (!(ifp->if_flags & IFF_POLLING))
705 #endif
706 				em_enable_intr(adapter);
707 		}
708 		break;
709 	case SIOCSIFMEDIA:
710 	case SIOCGIFMEDIA:
711 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
712 		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
713 		break;
714 	case SIOCSIFCAP:
715 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
716 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
717 		if (mask & IFCAP_HWCSUM) {
718 			if (IFCAP_HWCSUM & ifp->if_capenable)
719 				ifp->if_capenable &= ~IFCAP_HWCSUM;
720 			else
721 				ifp->if_capenable |= IFCAP_HWCSUM;
722 			if (ifp->if_flags & IFF_RUNNING)
723 				em_init(adapter);
724 		}
725 		break;
726 	default:
727 		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
728 		error = EINVAL;
729 	}
730 
731 out:
732 	splx(s);
733 	return(error);
734 }
735 
736 /*********************************************************************
737  *  Watchdog entry point
738  *
739  *  This routine is called whenever hardware quits transmitting.
740  *
741  **********************************************************************/
742 
743 static void
744 em_watchdog(struct ifnet *ifp)
745 {
746 	struct adapter * adapter;
747 	adapter = ifp->if_softc;
748 
749 	/* If we are in this routine because of pause frames, then
750 	 * don't reset the hardware.
751 	 */
752 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
753 		ifp->if_timer = EM_TX_TIMEOUT;
754 		return;
755 	}
756 
757 	if (em_check_for_link(&adapter->hw))
758 		if_printf(ifp, "watchdog timeout -- resetting\n");
759 
760 	ifp->if_flags &= ~IFF_RUNNING;
761 
762 	em_init(adapter);
763 
764 	ifp->if_oerrors++;
765 }
766 
767 /*********************************************************************
768  *  Init entry point
769  *
770  *  This routine is used in two ways. It is used by the stack as
771  *  the init entry point in the network interface structure. It is also used
772  *  by the driver as a hw/sw initialization routine to get to a
773  *  consistent state.
774  *
776  **********************************************************************/
777 
778 static void
779 em_init(void *arg)
780 {
781 	int s;
782 	struct adapter *adapter = arg;
783 	struct ifnet *ifp = &adapter->interface_data.ac_if;
784 
785 	INIT_DEBUGOUT("em_init: begin");
786 
787 	s = splimp();
788 
789 	em_stop(adapter);
790 
791 	/* Get the latest mac address, User can use a LAA */
792 	bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
793 	      ETHER_ADDR_LEN);
794 
795 	/* Initialize the hardware */
796 	if (em_hardware_init(adapter)) {
797 		if_printf(ifp, "Unable to initialize the hardware\n");
798 		splx(s);
799 		return;
800 	}
801 
802 	em_enable_vlans(adapter);
803 
804 	/* Prepare transmit descriptors and buffers */
805 	if (em_setup_transmit_structures(adapter)) {
806 		if_printf(ifp, "Could not setup transmit structures\n");
807 		em_stop(adapter);
808 		splx(s);
809 		return;
810 	}
811 	em_initialize_transmit_unit(adapter);
812 
813 	/* Setup Multicast table */
814 	em_set_multi(adapter);
815 
816 	/* Prepare receive descriptors and buffers */
817 	if (em_setup_receive_structures(adapter)) {
818 		if_printf(ifp, "Could not setup receive structures\n");
819 		em_stop(adapter);
820 		splx(s);
821 		return;
822 	}
823 	em_initialize_receive_unit(adapter);
824 
825 	/* Don't lose promiscuous settings */
826 	em_set_promisc(adapter);
827 
828 	ifp->if_flags |= IFF_RUNNING;
829 	ifp->if_flags &= ~IFF_OACTIVE;
830 
831 	if (adapter->hw.mac_type >= em_82543) {
832 		if (ifp->if_capenable & IFCAP_TXCSUM)
833 			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
834 		else
835 			ifp->if_hwassist = 0;
836 	}
837 
838 	callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
839 	em_clear_hw_cntrs(&adapter->hw);
840 #ifdef DEVICE_POLLING
841 	/*
842 	 * Only enable interrupts if we are not polling; make sure
843 	 * they are off otherwise.
844 	 */
845 	if (ifp->if_flags & IFF_POLLING)
846 		em_disable_intr(adapter);
847 	else
848 #endif /* DEVICE_POLLING */
849 		em_enable_intr(adapter);
850 
851 	/* Don't reset the phy next time init gets called */
852 	adapter->hw.phy_reset_disable = TRUE;
853 
854 	splx(s);
855 }
856 
857 #ifdef DEVICE_POLLING
858 static poll_handler_t em_poll;
859 
860 static void
861 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
862 {
863 	struct adapter *adapter = ifp->if_softc;
864 	uint32_t reg_icr;
865 
866 	if (cmd == POLL_DEREGISTER) {       /* final call, enable interrupts */
867 		em_enable_intr(adapter);
868 		return;
869 	}
870 	if (cmd == POLL_AND_CHECK_STATUS) {
871 		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
872 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
873 			callout_stop(&adapter->timer);
874 			adapter->hw.get_link_status = 1;
875 			em_check_for_link(&adapter->hw);
876 			em_print_link_status(adapter);
877 			callout_reset(&adapter->timer, 2*hz, em_local_timer,
878 				      adapter);
879 		}
880 	}
881 	if (ifp->if_flags & IFF_RUNNING) {
882 		em_process_receive_interrupts(adapter, count);
883 		em_clean_transmit_interrupts(adapter);
884 	}
885 
886 	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
887 		em_start(ifp);
888 }
889 #endif /* DEVICE_POLLING */
890 
891 /*********************************************************************
892  *
893  *  Interrupt Service routine
894  *
895  **********************************************************************/
896 static void
897 em_intr(void *arg)
898 {
899 	uint32_t loop_cnt = EM_MAX_INTR;
900 	uint32_t reg_icr;
901 	struct ifnet *ifp;
902 	struct adapter *adapter = arg;
903 
904 	ifp = &adapter->interface_data.ac_if;
905 
906 #ifdef DEVICE_POLLING
907 	if (ifp->if_flags & IFF_POLLING)
908 		return;
909 
910 	if (ether_poll_register(em_poll, ifp)) {
911 		em_disable_intr(adapter);
912 		em_poll(ifp, 0, 1);
913 		return;
914 	}
915 #endif /* DEVICE_POLLING */
916 
917 	reg_icr = E1000_READ_REG(&adapter->hw, ICR);
918 	if (!reg_icr)
919 		return;
920 
921 	/* Link status change */
922 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
923 		callout_stop(&adapter->timer);
924 		adapter->hw.get_link_status = 1;
925 		em_check_for_link(&adapter->hw);
926 		em_print_link_status(adapter);
927 		callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
928 	}
929 
930 	while (loop_cnt > 0) {
931 		if (ifp->if_flags & IFF_RUNNING) {
932 			em_process_receive_interrupts(adapter, -1);
933 			em_clean_transmit_interrupts(adapter);
934 		}
935 		loop_cnt--;
936 	}
937 
938 	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
939 		em_start(ifp);
940 }
941 
942 /*********************************************************************
943  *
944  *  Media Ioctl callback
945  *
946  *  This routine is called whenever the user queries the status of
947  *  the interface using ifconfig.
948  *
949  **********************************************************************/
950 static void
951 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
952 {
953 	struct adapter * adapter = ifp->if_softc;
954 
955 	INIT_DEBUGOUT("em_media_status: begin");
956 
957 	em_check_for_link(&adapter->hw);
958 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
959 		if (adapter->link_active == 0) {
960 			em_get_speed_and_duplex(&adapter->hw,
961 						&adapter->link_speed,
962 						&adapter->link_duplex);
963 			adapter->link_active = 1;
964 		}
965 	} else {
966 		if (adapter->link_active == 1) {
967 			adapter->link_speed = 0;
968 			adapter->link_duplex = 0;
969 			adapter->link_active = 0;
970 		}
971 	}
972 
973 	ifmr->ifm_status = IFM_AVALID;
974 	ifmr->ifm_active = IFM_ETHER;
975 
976 	if (!adapter->link_active)
977 		return;
978 
979 	ifmr->ifm_status |= IFM_ACTIVE;
980 
981 	if (adapter->hw.media_type == em_media_type_fiber) {
982 		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
983 	} else {
984 		switch (adapter->link_speed) {
985 		case 10:
986 			ifmr->ifm_active |= IFM_10_T;
987 			break;
988 		case 100:
989 			ifmr->ifm_active |= IFM_100_TX;
990 			break;
991 		case 1000:
992 			ifmr->ifm_active |= IFM_1000_TX;
993 			break;
994 		}
995 		if (adapter->link_duplex == FULL_DUPLEX)
996 			ifmr->ifm_active |= IFM_FDX;
997 		else
998 			ifmr->ifm_active |= IFM_HDX;
999 	}
1000 }
1001 
1002 /*********************************************************************
1003  *
1004  *  Media Ioctl callback
1005  *
1006  *  This routine is called when the user changes speed/duplex using
1007  *  the media/mediaopt options with ifconfig.
1008  *
1009  **********************************************************************/
1010 static int
1011 em_media_change(struct ifnet *ifp)
1012 {
1013 	struct adapter * adapter = ifp->if_softc;
1014 	struct ifmedia  *ifm = &adapter->media;
1015 
1016 	INIT_DEBUGOUT("em_media_change: begin");
1017 
1018 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1019 		return(EINVAL);
1020 
1021 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1022 	case IFM_AUTO:
1023 		adapter->hw.autoneg = DO_AUTO_NEG;
1024 		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1025 		break;
1026 	case IFM_1000_SX:
1027 	case IFM_1000_TX:
1028 		adapter->hw.autoneg = DO_AUTO_NEG;
1029 		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1030 		break;
1031 	case IFM_100_TX:
1032 		adapter->hw.autoneg = FALSE;
1033 		adapter->hw.autoneg_advertised = 0;
1034 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1035 			adapter->hw.forced_speed_duplex = em_100_full;
1036 		else
1037 			adapter->hw.forced_speed_duplex	= em_100_half;
1038 		break;
1039 	case IFM_10_T:
1040 		adapter->hw.autoneg = FALSE;
1041 		adapter->hw.autoneg_advertised = 0;
1042 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1043 			adapter->hw.forced_speed_duplex = em_10_full;
1044 		else
1045 			adapter->hw.forced_speed_duplex	= em_10_half;
1046 		break;
1047 	default:
1048 		if_printf(ifp, "Unsupported media type\n");
1049 	}
1050 	/*
1051 	 * As the speed/duplex settings may have changed we need to
1052 	 * reset the PHY.
1053 	 */
1054 	adapter->hw.phy_reset_disable = FALSE;
1055 
1056 	em_init(adapter);
1057 
1058 	return(0);
1059 }
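/*
 * For reference, the cases handled above correspond to ifconfig(8) media
 * requests such as (the interface name is only an example):
 *
 *	ifconfig em0 media autoselect
 *	ifconfig em0 media 100baseTX mediaopt full-duplex
 *	ifconfig em0 media 10baseT/UTP mediaopt half-duplex
 */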
1060 
1061 static void
1062 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1063 	 int error)
1064 {
1065 	struct em_q *q = arg;
1066 
1067 	if (error)
1068 		return;
1069 	KASSERT(nsegs <= EM_MAX_SCATTER,
1070 		("Too many DMA segments returned when mapping tx packet"));
1071 	q->nsegs = nsegs;
1072 	bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1073 }
1074 
1075 #define EM_FIFO_HDR              0x10
1076 #define EM_82547_PKT_THRESH      0x3e0
1077 #define EM_82547_TX_FIFO_SIZE    0x2800
1078 #define EM_82547_TX_FIFO_BEGIN   0xf00
1079 /*********************************************************************
1080  *
1081  *  This routine maps the mbufs to tx descriptors.
1082  *
1083  *  return 0 on success, positive on failure
1084  **********************************************************************/
1085 static int
1086 em_encap(struct adapter *adapter, struct mbuf *m_head)
1087 {
1088 	uint32_t txd_upper;
1089 	uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1090 	int i, j, error;
1091 	uint64_t address;
1092 
1093 	/* For 82544 Workaround */
1094 	DESC_ARRAY desc_array;
1095 	uint32_t array_elements;
1096 	uint32_t counter;
1097 
1098 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1099 	struct ifvlan *ifv = NULL;
1100 #else
1101 	struct m_tag *mtag;
1102 #endif
1103 	struct em_q q;
1104 	struct em_buffer *tx_buffer = NULL;
1105 	struct em_tx_desc *current_tx_desc = NULL;
1106 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1107 
1108 	/*
1109 	 * Force a cleanup if the number of available TX descriptors
1110 	 * hits the threshold.
1111 	 */
1112 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1113 		em_clean_transmit_interrupts(adapter);
1114 		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1115 			adapter->no_tx_desc_avail1++;
1116 			return(ENOBUFS);
1117 		}
1118 	}
1119 	/*
1120 	 * Map the packet for DMA.
1121 	 */
1122 	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1123 		adapter->no_tx_map_avail++;
1124 		return(ENOMEM);
1125 	}
1126 	error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1127 				     &q, BUS_DMA_NOWAIT);
1128 	if (error != 0) {
1129 		adapter->no_tx_dma_setup++;
1130 		bus_dmamap_destroy(adapter->txtag, q.map);
1131 		return(error);
1132 	}
1133 	KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1134 
1135 	if (q.nsegs > adapter->num_tx_desc_avail) {
1136 		adapter->no_tx_desc_avail2++;
1137 		bus_dmamap_unload(adapter->txtag, q.map);
1138 		bus_dmamap_destroy(adapter->txtag, q.map);
1139 		return(ENOBUFS);
1140 	}
1141 
1142 	if (ifp->if_hwassist > 0) {
1143 		em_transmit_checksum_setup(adapter, m_head,
1144 					   &txd_upper, &txd_lower);
1145 	} else {
1146 		txd_upper = txd_lower = 0;
1147 	}
1148 
1149 	/* Find out if we are in vlan mode */
1150 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1151 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1152 	    m_head->m_pkthdr.rcvif != NULL &&
1153 	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1154 		ifv = m_head->m_pkthdr.rcvif->if_softc;
1155 #else
1156 	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1157 #endif
1158 
1159 	i = adapter->next_avail_tx_desc;
1160 	if (adapter->pcix_82544) {
1161 		txd_saved = i;
1162 		txd_used = 0;
1163 	}
1164 	for (j = 0; j < q.nsegs; j++) {
1165 		/* If adapter is 82544 and on PCIX bus */
1166 		if (adapter->pcix_82544) {
1167 			array_elements = 0;
1168 			address = htole64(q.segs[j].ds_addr);
1169 			/*
1170 			 * Check the Address and Length combination and
1171 			 * split the data accordingly
1172 			 */
1173 			array_elements = em_fill_descriptors(address,
1174 							     htole32(q.segs[j].ds_len),
1175 							     &desc_array);
1176 			for (counter = 0; counter < array_elements; counter++) {
1177 				if (txd_used == adapter->num_tx_desc_avail) {
1178 					adapter->next_avail_tx_desc = txd_saved;
1179 					adapter->no_tx_desc_avail2++;
1180 					bus_dmamap_unload(adapter->txtag, q.map);
1181 					bus_dmamap_destroy(adapter->txtag, q.map);
1182 					return(ENOBUFS);
1183 				}
1184 				tx_buffer = &adapter->tx_buffer_area[i];
1185 				current_tx_desc = &adapter->tx_desc_base[i];
1186 				current_tx_desc->buffer_addr = htole64(
1187 				    desc_array.descriptor[counter].address);
1188 				current_tx_desc->lower.data = htole32(
1189 				    (adapter->txd_cmd | txd_lower |
1190 				    (uint16_t)desc_array.descriptor[counter].length));
1191 				current_tx_desc->upper.data = htole32((txd_upper));
1192 				if (++i == adapter->num_tx_desc)
1193 					i = 0;
1194 
1195 				tx_buffer->m_head = NULL;
1196 				txd_used++;
1197 			}
1198 		} else {
1199 			tx_buffer = &adapter->tx_buffer_area[i];
1200 			current_tx_desc = &adapter->tx_desc_base[i];
1201 
1202 			current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1203 			current_tx_desc->lower.data = htole32(
1204 				adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1205 			current_tx_desc->upper.data = htole32(txd_upper);
1206 
1207 			if (++i == adapter->num_tx_desc)
1208 				i = 0;
1209 
1210 			tx_buffer->m_head = NULL;
1211 		}
1212 	}
1213 
1214 	adapter->next_avail_tx_desc = i;
1215 	if (adapter->pcix_82544)
1216 		adapter->num_tx_desc_avail -= txd_used;
1217 	else
1218 		adapter->num_tx_desc_avail -= q.nsegs;
1219 
1220 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1221 	if (ifv != NULL) {
1222 		/* Set the vlan id */
1223 		current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1224 #else
1225 	if (mtag != NULL) {
1226 		/* Set the vlan id */
1227 		current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1228 #endif
1229 
1230 		/* Tell hardware to add tag */
1231 		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1232 	}
1233 
1234 	tx_buffer->m_head = m_head;
1235 	tx_buffer->map = q.map;
1236 	bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1237 
1238 	/*
1239 	 * Last Descriptor of Packet needs End Of Packet (EOP)
1240 	 */
1241 	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1242 
1243 	/*
1244 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1245 	 * that this frame is available to transmit.
1246 	 */
1247 	if (adapter->hw.mac_type == em_82547 &&
1248 	    adapter->link_duplex == HALF_DUPLEX) {
1249 		em_82547_move_tail(adapter);
1250 	} else {
1251 		E1000_WRITE_REG(&adapter->hw, TDT, i);
1252 		if (adapter->hw.mac_type == em_82547) {
1253 			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1254 		}
1255 	}
1256 
1257 	return(0);
1258 }
1259 
1260 /*********************************************************************
1261  *
1262  * 82547 workaround to avoid controller hang in half-duplex environment.
1263  * The workaround is to avoid queuing a large packet that would span
1264  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1265  * in this case. We do that only when the FIFO is quiescent.
1266  *
1267  **********************************************************************/
1268 static void
1269 em_82547_move_tail(void *arg)
1270 {
1271 	int s;
1272 	struct adapter *adapter = arg;
1273 	uint16_t hw_tdt;
1274 	uint16_t sw_tdt;
1275 	struct em_tx_desc *tx_desc;
1276 	uint16_t length = 0;
1277 	boolean_t eop = 0;
1278 
1279 	s = splimp();
1280 	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1281 	sw_tdt = adapter->next_avail_tx_desc;
1282 
1283 	while (hw_tdt != sw_tdt) {
1284 		tx_desc = &adapter->tx_desc_base[hw_tdt];
1285 		length += tx_desc->lower.flags.length;
1286 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1287 		if (++hw_tdt == adapter->num_tx_desc)
1288 			hw_tdt = 0;
1289 
1290 		if (eop) {
1291 			if (em_82547_fifo_workaround(adapter, length)) {
1292 				adapter->tx_fifo_wrk++;
1293 				callout_reset(&adapter->tx_fifo_timer, 1,
1294 					em_82547_move_tail, adapter);
1295 				break;
1296 			}
1297 			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1298 			em_82547_update_fifo_head(adapter, length);
1299 			length = 0;
1300 		}
1301 	}
1302 	splx(s);
1303 }
1304 
1305 static int
1306 em_82547_fifo_workaround(struct adapter *adapter, int len)
1307 {
1308 	int fifo_space, fifo_pkt_len;
1309 
1310 	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1311 
1312 	if (adapter->link_duplex == HALF_DUPLEX) {
1313 		fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1314 
1315 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1316 			if (em_82547_tx_fifo_reset(adapter))
1317 				return(0);
1318 			else
1319 				return(1);
1320 		}
1321 	}
1322 
1323 	return(0);
1324 }
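/*
 * A sketch of the arithmetic above, assuming EM_ROUNDUP() rounds its first
 * argument up to a multiple of the second: a 1514-byte frame plus the
 * 16-byte FIFO header occupies EM_ROUNDUP(1514 + 0x10, 0x10) = 1536 bytes
 * of the 0x2800-byte TX FIFO.  If tx_fifo_head were at 0x2600, only
 * 0x200 (512) bytes would remain before the wrap, and since
 * 1536 >= EM_82547_PKT_THRESH (0x3e0 = 992) + 512 = 1504, the FIFO reset
 * path would be attempted before the frame is queued.
 */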
1325 
1326 static void
1327 em_82547_update_fifo_head(struct adapter *adapter, int len)
1328 {
1329 	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1330 
1331 	/* tx_fifo_head is always 16 byte aligned */
1332 	adapter->tx_fifo_head += fifo_pkt_len;
1333 	if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1334 		adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1335 }
1336 
1337 static int
1338 em_82547_tx_fifo_reset(struct adapter *adapter)
1339 {
1340 	uint32_t tctl;
1341 
1342 	if ((E1000_READ_REG(&adapter->hw, TDT) ==
1343 	      E1000_READ_REG(&adapter->hw, TDH)) &&
1344 	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1345 	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1346 	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1347 	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1348 	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1349 
1350 		/* Disable TX unit */
1351 		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1352 		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1353 
1354 		/* Reset FIFO pointers */
1355 		E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1356 		E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1357 		E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1358 		E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1359 
1360 		/* Re-enable TX unit */
1361 		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1362 		E1000_WRITE_FLUSH(&adapter->hw);
1363 
1364 		adapter->tx_fifo_head = 0;
1365 		adapter->tx_fifo_reset++;
1366 
1367 		return(TRUE);
1368 	}
1369 	} else {
1371 	}
1372 }
1373 
1374 static void
1375 em_set_promisc(struct adapter *adapter)
1376 {
1377 	uint32_t reg_rctl;
1378 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1379 
1380 	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1381 
1382 	if (ifp->if_flags & IFF_PROMISC) {
1383 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1384 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1385 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1386 		reg_rctl |= E1000_RCTL_MPE;
1387 		reg_rctl &= ~E1000_RCTL_UPE;
1388 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1389 	}
1390 }
1391 
1392 static void
1393 em_disable_promisc(struct adapter *adapter)
1394 {
1395 	uint32_t reg_rctl;
1396 
1397 	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1398 
1399 	reg_rctl &=  (~E1000_RCTL_UPE);
1400 	reg_rctl &=  (~E1000_RCTL_MPE);
1401 	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1402 }
1403 
1404 /*********************************************************************
1405  *  Multicast Update
1406  *
1407  *  This routine is called whenever the multicast address list is updated.
1408  *
1409  **********************************************************************/
1410 
1411 static void
1412 em_set_multi(struct adapter *adapter)
1413 {
1414 	uint32_t reg_rctl = 0;
1415 	uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1416 	struct ifmultiaddr *ifma;
1417 	int mcnt = 0;
1418 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1419 
1420 	IOCTL_DEBUGOUT("em_set_multi: begin");
1421 
1422 	if (adapter->hw.mac_type == em_82542_rev2_0) {
1423 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1424 		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1425 			em_pci_clear_mwi(&adapter->hw);
1426 		reg_rctl |= E1000_RCTL_RST;
1427 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1428 		msec_delay(5);
1429 	}
1430 
1431 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1432 		if (ifma->ifma_addr->sa_family != AF_LINK)
1433 			continue;
1434 
1435 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1436 			break;
1437 
1438 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1439 		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1440 		mcnt++;
1441 	}
1442 
1443 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1444 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1445 		reg_rctl |= E1000_RCTL_MPE;
1446 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1447 	} else
1448 		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1449 
1450 	if (adapter->hw.mac_type == em_82542_rev2_0) {
1451 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1452 		reg_rctl &= ~E1000_RCTL_RST;
1453 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1454 		msec_delay(5);
1455 		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1456 			em_pci_set_mwi(&adapter->hw);
1457 	}
1458 }
1459 
1460 /*********************************************************************
1461  *  Timer routine
1462  *
1463  *  This routine checks for link status and updates statistics.
1464  *
1465  **********************************************************************/
1466 
1467 static void
1468 em_local_timer(void *arg)
1469 {
1470 	int s;
1471 	struct ifnet *ifp;
1472 	struct adapter *adapter = arg;
1473 	ifp = &adapter->interface_data.ac_if;
1474 
1475 	s = splimp();
1476 
1477 	em_check_for_link(&adapter->hw);
1478 	em_print_link_status(adapter);
1479 	em_update_stats_counters(adapter);
1480 	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1481 		em_print_hw_stats(adapter);
1482 	em_smartspeed(adapter);
1483 
1484 	callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
1485 
1486 	splx(s);
1487 }
1488 
1489 static void
1490 em_print_link_status(struct adapter *adapter)
1491 {
1492 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1493 		if (adapter->link_active == 0) {
1494 			em_get_speed_and_duplex(&adapter->hw,
1495 						&adapter->link_speed,
1496 						&adapter->link_duplex);
1497 			device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1498 			       adapter->link_speed,
1499 			       ((adapter->link_duplex == FULL_DUPLEX) ?
1500 				"Full Duplex" : "Half Duplex"));
1501 			adapter->link_active = 1;
1502 			adapter->smartspeed = 0;
1503 		}
1504 	} else {
1505 		if (adapter->link_active == 1) {
1506 			adapter->link_speed = 0;
1507 			adapter->link_duplex = 0;
1508 			device_printf(adapter->dev, "Link is Down\n");
1509 			adapter->link_active = 0;
1510 		}
1511 	}
1512 }
1513 
1514 /*********************************************************************
1515  *
1516  *  This routine disables all traffic on the adapter by issuing a
1517  *  global reset on the MAC and deallocates TX/RX buffers.
1518  *
1519  **********************************************************************/
1520 
1521 static void
1522 em_stop(void *arg)
1523 {
1524 	struct ifnet   *ifp;
1525 	struct adapter * adapter = arg;
1526 	ifp = &adapter->interface_data.ac_if;
1527 
1528 	INIT_DEBUGOUT("em_stop: begin");
1529 	em_disable_intr(adapter);
1530 	em_reset_hw(&adapter->hw);
1531 	callout_stop(&adapter->timer);
1532 	callout_stop(&adapter->tx_fifo_timer);
1533 	em_free_transmit_structures(adapter);
1534 	em_free_receive_structures(adapter);
1535 
1536 	/* Tell the stack that the interface is no longer active */
1537 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1538 	ifp->if_timer = 0;
1539 }
1540 
1541 /*********************************************************************
1542  *
1543  *  Determine hardware revision.
1544  *
1545  **********************************************************************/
1546 static void
1547 em_identify_hardware(struct adapter * adapter)
1548 {
1549 	device_t dev = adapter->dev;
1550 
1551 	/* Make sure our PCI config space has the necessary stuff set */
1552 	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1553 	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1554 	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1555 		device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1556 		adapter->hw.pci_cmd_word |=
1557 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1558 		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1559 	}
1560 
1561 	/* Save off the information about this board */
1562 	adapter->hw.vendor_id = pci_get_vendor(dev);
1563 	adapter->hw.device_id = pci_get_device(dev);
1564 	adapter->hw.revision_id = pci_get_revid(dev);
1565 	adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1566 	adapter->hw.subsystem_id = pci_get_subdevice(dev);
1567 
1568 	/* Identify the MAC */
1569 	if (em_set_mac_type(&adapter->hw))
1570 		device_printf(dev, "Unknown MAC Type\n");
1571 
1572 	if (adapter->hw.mac_type == em_82541 ||
1573 	    adapter->hw.mac_type == em_82541_rev_2 ||
1574 	    adapter->hw.mac_type == em_82547 ||
1575 	    adapter->hw.mac_type == em_82547_rev_2)
1576 		adapter->hw.phy_init_script = TRUE;
1577 }
1578 
1579 /*********************************************************************
1580  *
1581  *  Initialize the hardware to a configuration as specified by the
1582  *  adapter structure. The controller is reset, the EEPROM is
1583  *  verified, the MAC address is set, then the shared initialization
1584  *  routines are called.
1585  *
1586  **********************************************************************/
1587 static int
1588 em_hardware_init(struct adapter *adapter)
1589 {
1590 	INIT_DEBUGOUT("em_hardware_init: begin");
1591 	/* Issue a global reset */
1592 	em_reset_hw(&adapter->hw);
1593 
1594 	/* When hardware is reset, fifo_head is also reset */
1595 	adapter->tx_fifo_head = 0;
1596 
1597 	/* Make sure we have a good EEPROM before we read from it */
1598 	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1599 		device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1600 		return(EIO);
1601 	}
1602 
1603 	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1604 		device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1605 		return(EIO);
1606 	}
1607 
1608 	if (em_init_hw(&adapter->hw) < 0) {
1609 		device_printf(adapter->dev, "Hardware Initialization Failed\n");
1610 		return(EIO);
1611 	}
1612 
1613 	em_check_for_link(&adapter->hw);
1614 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1615 		adapter->link_active = 1;
1616 	else
1617 		adapter->link_active = 0;
1618 
1619 	if (adapter->link_active) {
1620 		em_get_speed_and_duplex(&adapter->hw,
1621 					&adapter->link_speed,
1622 					&adapter->link_duplex);
1623 	} else {
1624 		adapter->link_speed = 0;
1625 		adapter->link_duplex = 0;
1626 	}
1627 
1628 	return(0);
1629 }
1630 
1631 /*********************************************************************
1632  *
1633  *  Setup networking device structure and register an interface.
1634  *
1635  **********************************************************************/
1636 static void
1637 em_setup_interface(device_t dev, struct adapter *adapter)
1638 {
1639 	struct ifnet   *ifp;
1640 	INIT_DEBUGOUT("em_setup_interface: begin");
1641 
1642 	ifp = &adapter->interface_data.ac_if;
1643 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1644 	ifp->if_mtu = ETHERMTU;
1645 	ifp->if_baudrate = 1000000000;
1646 	ifp->if_init =  em_init;
1647 	ifp->if_softc = adapter;
1648 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1649 	ifp->if_ioctl = em_ioctl;
1650 	ifp->if_start = em_start;
1651 	ifp->if_watchdog = em_watchdog;
1652 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1653 
1654 	ether_ifattach(ifp, adapter->hw.mac_addr);
1655 
1656 	if (adapter->hw.mac_type >= em_82543) {
1657 		ifp->if_capabilities = IFCAP_HWCSUM;
1658 		ifp->if_capenable = ifp->if_capabilities;
1659 	}
1660 
1661 	/*
1662 	 * Tell the upper layer(s) we support long frames.
1663 	 */
1664 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1665 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
1666         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1667 #endif
1668 
1669 	/*
1670 	 * Specify the media types supported by this adapter and register
1671 	 * callbacks to update media and link information
1672 	 */
1673 	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1674 		     em_media_status);
1675 	if (adapter->hw.media_type == em_media_type_fiber) {
1676 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1677 			    0, NULL);
1678 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1679 			    0, NULL);
1680 	} else {
1681 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1682 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1683 			    0, NULL);
1684 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1685 			    0, NULL);
1686 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1687 			    0, NULL);
1688 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX | IFM_FDX,
1689 			    0, NULL);
1690 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX, 0, NULL);
1691 	}
1692 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1693 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1694 }
1695 
1696 /*********************************************************************
1697  *
1698  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1699  *
1700  **********************************************************************/
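/*
 * Rough sketch of the logic below (based on the code, not on Intel
 * documentation): when 1000BASE-T autonegotiation keeps reporting a
 * Master/Slave configuration fault, the PHY's manual Master/Slave enable
 * is cleared and autonegotiation is restarted; if there is still no link
 * after EM_SMARTSPEED_DOWNSHIFT passes, the manual setting is turned back
 * on, and after EM_SMARTSPEED_MAX passes the counter wraps so the whole
 * sequence can start over.
 */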
1701 static void
1702 em_smartspeed(struct adapter *adapter)
1703 {
1704 	uint16_t phy_tmp;
1705 
1706 	if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1707 	    !adapter->hw.autoneg ||
1708 	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1709 		return;
1710 
1711 	if (adapter->smartspeed == 0) {
1712 		/*
1713 		 * If Master/Slave config fault is asserted twice,
1714 		 * we assume back-to-back.
1715 		 */
1716 		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1717 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1718 			return;
1719 		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1720 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1721 			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1722 					&phy_tmp);
1723 			if (phy_tmp & CR_1000T_MS_ENABLE) {
1724 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1725 				em_write_phy_reg(&adapter->hw,
1726 						 PHY_1000T_CTRL, phy_tmp);
1727 				adapter->smartspeed++;
1728 				if (adapter->hw.autoneg &&
1729 				    !em_phy_setup_autoneg(&adapter->hw) &&
1730 				    !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1731 						     &phy_tmp)) {
1732 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
1733 						    MII_CR_RESTART_AUTO_NEG);
1734 					em_write_phy_reg(&adapter->hw,
1735 							 PHY_CTRL, phy_tmp);
1736 				}
1737 			}
1738 		}
1739 		return;
1740 	} else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1741 		/* If still no link, perhaps using 2/3 pair cable */
1742 		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1743 		phy_tmp |= CR_1000T_MS_ENABLE;
1744 		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1745 		if (adapter->hw.autoneg &&
1746 		    !em_phy_setup_autoneg(&adapter->hw) &&
1747 		    !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1748 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
1749 				    MII_CR_RESTART_AUTO_NEG);
1750 			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1751 		}
1752 	}
1753 	/* Restart process after EM_SMARTSPEED_MAX iterations */
1754 	if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1755 		adapter->smartspeed = 0;
1756 }
1757 
1758 /*
1759  * Manage DMA'able memory.
1760  */
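/*
 * Overview (descriptive comment only): em_dmamap_cb is the
 * bus_dmamap_load() callback; for the single-segment loads used here it
 * simply records the segment's bus address in the caller-supplied
 * bus_addr_t.  em_dma_malloc() follows the usual busdma sequence of
 * creating a tag, creating a map, allocating memory and loading the map,
 * so dma_vaddr/dma_paddr end up describing one contiguous DMA-able area.
 */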
1761 static void
1762 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1763 {
1764 	if (error)
1765 		return;
1766 	*(bus_addr_t*) arg = segs->ds_addr;
1767 }
1768 
1769 static int
1770 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1771 	      struct em_dma_alloc *dma, int mapflags)
1772 {
1773 	int r;
1774 	device_t dev = adapter->dev;
1775 
1776 	r = bus_dma_tag_create(NULL,                    /* parent */
1777 			       PAGE_SIZE, 0,            /* alignment, bounds */
1778 			       BUS_SPACE_MAXADDR,       /* lowaddr */
1779 			       BUS_SPACE_MAXADDR,       /* highaddr */
1780 			       NULL, NULL,              /* filter, filterarg */
1781 			       size,                    /* maxsize */
1782 			       1,                       /* nsegments */
1783 			       size,                    /* maxsegsize */
1784 			       BUS_DMA_ALLOCNOW,        /* flags */
1785 			       &dma->dma_tag);
1786 	if (r != 0) {
1787 		device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1788 			      "error %u\n", r);
1789 		goto fail_0;
1790 	}
1791 
1792 	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
1793 	if (r != 0) {
1794 		device_printf(dev, "em_dma_malloc: bus_dmamap_create failed; "
1795 			      "error %u\n", r);
1796 		goto fail_1;
1797 	}
1798 
1799 	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1800 			     BUS_DMA_NOWAIT, &dma->dma_map);
1801 	if (r != 0) {
1802 		device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1803 			      "size %llu, error %d\n", (unsigned long long)size, r);
1804 		goto fail_2;
1805 	}
1806 
1807 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1808 			    size,
1809 			    em_dmamap_cb,
1810 			    &dma->dma_paddr,
1811 			    mapflags | BUS_DMA_NOWAIT);
1812 	if (r != 0) {
1813 		device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1814 			      "error %u\n", r);
1815 		goto fail_3;
1816 	}
1817 
1818 	dma->dma_size = size;
1819 	return(0);
1820 
1821 fail_3:
1822 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1823 fail_2:
1824 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1825 fail_1:
1826 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1827 	bus_dma_tag_destroy(dma->dma_tag);
1828 fail_0:
1829 	dma->dma_map = NULL;
1830 	dma->dma_tag = NULL;
1831 	return(r);
1832 }
1833 
1834 static void
1835 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1836 {
1837 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1838 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1839 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1840 	bus_dma_tag_destroy(dma->dma_tag);
1841 }
1842 
1843 /*********************************************************************
1844  *
1845  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1846  *  the information needed to transmit a packet on the wire.
1847  *
1848  **********************************************************************/
1849 static int
1850 em_allocate_transmit_structures(struct adapter * adapter)
1851 {
1852 	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1853 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1854 	if (adapter->tx_buffer_area == NULL) {
1855 		device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1856 		return(ENOMEM);
1857 	}
1858 
1859 	return(0);
1860 }
1861 
1862 /*********************************************************************
1863  *
1864  *  Allocate and initialize transmit structures.
1865  *
1866  **********************************************************************/
1867 static int
1868 em_setup_transmit_structures(struct adapter * adapter)
1869 {
1870 	/*
1871 	 * Setup DMA descriptor areas.
1872 	 */
1873 	if (bus_dma_tag_create(NULL,                    /* parent */
1874 			       PAGE_SIZE, 0,            /* alignment, bounds */
1875 			       BUS_SPACE_MAXADDR,       /* lowaddr */
1876 			       BUS_SPACE_MAXADDR,       /* highaddr */
1877 			       NULL, NULL,              /* filter, filterarg */
1878 			       MCLBYTES * 8,            /* maxsize */
1879 			       EM_MAX_SCATTER,          /* nsegments */
1880 			       MCLBYTES * 8,            /* maxsegsize */
1881 			       BUS_DMA_ALLOCNOW,        /* flags */
1882 			       &adapter->txtag)) {
1883 		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1884 		return(ENOMEM);
1885 	}
1886 
1887 	if (em_allocate_transmit_structures(adapter))
1888 		return(ENOMEM);
1889 
1890 	bzero((void *) adapter->tx_desc_base,
1891 	      (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1892 
1893 	adapter->next_avail_tx_desc = 0;
1894 	adapter->oldest_used_tx_desc = 0;
1895 
1896 	/* Set number of descriptors available */
1897 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
1898 
1899 	/* Set checksum context */
1900 	adapter->active_checksum_context = OFFLOAD_NONE;
1901 
1902 	return(0);
1903 }
1904 
1905 /*********************************************************************
1906  *
1907  *  Enable transmit unit.
1908  *
1909  **********************************************************************/
1910 static void
1911 em_initialize_transmit_unit(struct adapter * adapter)
1912 {
1913 	uint32_t reg_tctl;
1914 	uint32_t reg_tipg = 0;
1915 	uint64_t bus_addr;
1916 
1917 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1918 
1919 	/* Setup the Base and Length of the Tx Descriptor Ring */
1920 	bus_addr = adapter->txdma.dma_paddr;
1921 	E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1922 	E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1923 	E1000_WRITE_REG(&adapter->hw, TDLEN,
1924 			adapter->num_tx_desc * sizeof(struct em_tx_desc));
1925 
1926 	/* Setup the HW Tx Head and Tail descriptor pointers */
1927 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
1928 	E1000_WRITE_REG(&adapter->hw, TDT, 0);
1929 
1930 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1931 		     E1000_READ_REG(&adapter->hw, TDBAL),
1932 		     E1000_READ_REG(&adapter->hw, TDLEN));
1933 
1934 	/* Set the default values for the Tx Inter Packet Gap timer */
1935 	switch (adapter->hw.mac_type) {
1936 	case em_82542_rev2_0:
1937 	case em_82542_rev2_1:
1938 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
1939 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1940 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1941 		break;
1942 	default:
1943 		if (adapter->hw.media_type == em_media_type_fiber)
1944 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1945 		else
1946 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1947 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1948 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1949 	}
1950 
1951 	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1952 	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1953 	if (adapter->hw.mac_type >= em_82540)
1954 		E1000_WRITE_REG(&adapter->hw, TADV,
1955 				adapter->tx_abs_int_delay.value);
1956 
1957 	/* Program the Transmit Control Register */
1958 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1959 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1960 	if (adapter->link_duplex == 1)
1961 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1962 	else
1963 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1964 	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1965 
1966 	/* Setup Transmit Descriptor Settings for this adapter */
1967 	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
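	/*
	 * IFCS asks the MAC to insert the Ethernet FCS and RS asks it to
	 * report status (the DD bit), which is what lets completed
	 * descriptors be reclaimed in em_clean_transmit_interrupts().
	 */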
1968 
1969 	if (adapter->tx_int_delay.value > 0)
1970 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1971 }
1972 
1973 /*********************************************************************
1974  *
1975  *  Free all transmit related data structures.
1976  *
1977  **********************************************************************/
1978 static void
1979 em_free_transmit_structures(struct adapter * adapter)
1980 {
1981 	struct em_buffer *tx_buffer;
1982 	int i;
1983 
1984 	INIT_DEBUGOUT("free_transmit_structures: begin");
1985 
1986 	if (adapter->tx_buffer_area != NULL) {
1987 		tx_buffer = adapter->tx_buffer_area;
1988 		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1989 			if (tx_buffer->m_head != NULL) {
1990 				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1991 				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1992 				m_freem(tx_buffer->m_head);
1993 			}
1994 			tx_buffer->m_head = NULL;
1995 		}
1996 	}
1997 	if (adapter->tx_buffer_area != NULL) {
1998 		free(adapter->tx_buffer_area, M_DEVBUF);
1999 		adapter->tx_buffer_area = NULL;
2000 	}
2001 	if (adapter->txtag != NULL) {
2002 		bus_dma_tag_destroy(adapter->txtag);
2003 		adapter->txtag = NULL;
2004 	}
2005 }
2006 
2007 /*********************************************************************
2008  *
2009  *  The offload context needs to be set when we transfer the first
2010  *  packet of a particular protocol (TCP/UDP). We change the
2011  *  context only if the protocol type changes.
2012  *
2013  **********************************************************************/
2014 static void
2015 em_transmit_checksum_setup(struct adapter * adapter,
2016 			   struct mbuf *mp,
2017 			   uint32_t *txd_upper,
2018 			   uint32_t *txd_lower)
2019 {
2020 	struct em_context_desc *TXD;
2021 	struct em_buffer *tx_buffer;
2022 	int curr_txd;
2023 
2024 	if (mp->m_pkthdr.csum_flags) {
2025 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2026 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2027 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2028 			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2029 				return;
2030 			else
2031 				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2032 		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2033 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2034 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2035 			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2036 				return;
2037 			else
2038 				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2039 		} else {
2040 			*txd_upper = 0;
2041 			*txd_lower = 0;
2042 			return;
2043 		}
2044 	} else {
2045 		*txd_upper = 0;
2046 		*txd_lower = 0;
2047 		return;
2048 	}
2049 
2050 	/* If we reach this point, the checksum offload context
2051 	 * needs to be reset.
2052 	 */
2053 	curr_txd = adapter->next_avail_tx_desc;
2054 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2055 	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2056 
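	/*
	 * Field meanings, as understood from the e1000 context descriptor
	 * layout: ipcss/tucss give the offset where IP/TCP-UDP checksumming
	 * starts, ipcso/tucso where the computed checksum is inserted, and
	 * ipcse/tucse where checksumming ends (0 meaning end of packet).
	 */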
2057 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2058 	TXD->lower_setup.ip_fields.ipcso =
2059 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2060 	TXD->lower_setup.ip_fields.ipcse =
2061 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2062 
2063 	TXD->upper_setup.tcp_fields.tucss =
2064 	    ETHER_HDR_LEN + sizeof(struct ip);
2065 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2066 
2067 	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2068 		TXD->upper_setup.tcp_fields.tucso =
2069 		    ETHER_HDR_LEN + sizeof(struct ip) +
2070 		    offsetof(struct tcphdr, th_sum);
2071 	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2072 		TXD->upper_setup.tcp_fields.tucso =
2073 			ETHER_HDR_LEN + sizeof(struct ip) +
2074 			offsetof(struct udphdr, uh_sum);
2075 	}
2076 
2077 	TXD->tcp_seg_setup.data = htole32(0);
2078 	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2079 
2080 	tx_buffer->m_head = NULL;
2081 
2082 	if (++curr_txd == adapter->num_tx_desc)
2083 		curr_txd = 0;
2084 
2085 	adapter->num_tx_desc_avail--;
2086 	adapter->next_avail_tx_desc = curr_txd;
2087 }
2088 
2089 /**********************************************************************
2090  *
2091  *  Examine each tx_buffer in the used queue. If the hardware is done
2092  *  processing the packet then free associated resources. The
2093  *  tx_buffer is put back on the free queue.
2094  *
2095  **********************************************************************/
2096 static void
2097 em_clean_transmit_interrupts(struct adapter *adapter)
2098 {
2099 	int s;
2100 	int i, num_avail;
2101 	struct em_buffer *tx_buffer;
2102 	struct em_tx_desc *tx_desc;
2103 	struct ifnet *ifp = &adapter->interface_data.ac_if;
2104 
2105 	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2106 		return;
2107 
2108 	s = splimp();
2109 #ifdef DBG_STATS
2110 	adapter->clean_tx_interrupts++;
2111 #endif
2112 	num_avail = adapter->num_tx_desc_avail;
2113 	i = adapter->oldest_used_tx_desc;
2114 
2115 	tx_buffer = &adapter->tx_buffer_area[i];
2116 	tx_desc = &adapter->tx_desc_base[i];
2117 
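	/*
	 * Walk the ring starting at the oldest used descriptor; the hardware
	 * sets the DD (descriptor done) status bit on a descriptor once it
	 * has transmitted it, because E1000_TXD_CMD_RS is set in txd_cmd.
	 */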
2118 	while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2119 		tx_desc->upper.data = 0;
2120 		num_avail++;
2121 
2122 		if (tx_buffer->m_head) {
2123 			ifp->if_opackets++;
2124 			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2125 					BUS_DMASYNC_POSTWRITE);
2126 			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2127 			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2128 
2129 			m_freem(tx_buffer->m_head);
2130 			tx_buffer->m_head = NULL;
2131 		}
2132 
2133 		if (++i == adapter->num_tx_desc)
2134 			i = 0;
2135 
2136 		tx_buffer = &adapter->tx_buffer_area[i];
2137 		tx_desc = &adapter->tx_desc_base[i];
2138 	}
2139 
2140 	adapter->oldest_used_tx_desc = i;
2141 
2142 	/*
2143 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2144 	 * that it is OK to send packets.
2145 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2146 	 * if some descriptors have been freed, restart the timeout.
2147 	 */
2148 	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2149 		ifp->if_flags &= ~IFF_OACTIVE;
2150 		if (num_avail == adapter->num_tx_desc)
2151 			ifp->if_timer = 0;
2152 		else if (num_avail == adapter->num_tx_desc_avail)
2153 			ifp->if_timer = EM_TX_TIMEOUT;
2154 	}
2155 	adapter->num_tx_desc_avail = num_avail;
2156 	splx(s);
2157 }
2158 
2159 /*********************************************************************
2160  *
2161  *  Get a buffer from system mbuf buffer pool.
2162  *
2163  **********************************************************************/
2164 static int
2165 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp)
2166 {
2167 	struct mbuf *mp = nmp;
2168 	struct em_buffer *rx_buffer;
2169 	struct ifnet *ifp;
2170 	bus_addr_t paddr;
2171 	int error;
2172 
2173 	ifp = &adapter->interface_data.ac_if;
2174 
2175 	if (mp == NULL) {
2176 		mp = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
2177 		if (mp == NULL) {
2178 			adapter->mbuf_cluster_failed++;
2179 			return(ENOBUFS);
2180 		}
2181 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2182 	} else {
2183 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2184 		mp->m_data = mp->m_ext.ext_buf;
2185 		mp->m_next = NULL;
2186 	}
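	/*
	 * For standard-sized frames, trim ETHER_ALIGN (2) bytes from the
	 * front of the cluster so the IP header that follows the 14-byte
	 * Ethernet header lands on a 4-byte boundary; the adjustment is
	 * skipped for jumbo MTUs.
	 */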
2187 	if (ifp->if_mtu <= ETHERMTU)
2188 		m_adj(mp, ETHER_ALIGN);
2189 
2190 	rx_buffer = &adapter->rx_buffer_area[i];
2191 
2192 	/*
2193 	 * Using memory from the mbuf cluster pool, invoke the
2194 	 * bus_dma machinery to arrange the memory mapping.
2195 	 */
2196 	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2197 				mtod(mp, void *), mp->m_len,
2198 				em_dmamap_cb, &paddr, 0);
2199 	if (error) {
2200 		m_free(mp);
2201 		return(error);
2202 	}
2203 	rx_buffer->m_head = mp;
2204 	adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2205 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2206 
2207 	return(0);
2208 }
2209 
2210 /*********************************************************************
2211  *
2212  *  Allocate memory for rx_buffer structures. Since we use one
2213  *  rx_buffer per received packet, the maximum number of rx_buffers
2214  *  that we'll need is equal to the number of receive descriptors
2215  *  that we've allocated.
2216  *
2217  **********************************************************************/
2218 static int
2219 em_allocate_receive_structures(struct adapter *adapter)
2220 {
2221 	int i, error, size;
2222 	struct em_buffer *rx_buffer;
2223 
2224 	size = adapter->num_rx_desc * sizeof(struct em_buffer);
2225 	adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2226 
2227 	error = bus_dma_tag_create(NULL,		/* parent */
2228 				   PAGE_SIZE, 0,	/* alignment, bounds */
2229 				   BUS_SPACE_MAXADDR,	/* lowaddr */
2230 				   BUS_SPACE_MAXADDR,	/* highaddr */
2231 				   NULL, NULL,		/* filter, filterarg */
2232 				   MCLBYTES,		/* maxsize */
2233 				   1,			/* nsegments */
2234 				   MCLBYTES,		/* maxsegsize */
2235 				   BUS_DMA_ALLOCNOW,	/* flags */
2236 				   &adapter->rxtag);
2237 	if (error != 0) {
2238 		device_printf(adapter->dev, "em_allocate_receive_structures: "
2239 			      "bus_dma_tag_create failed; error %u\n", error);
2240 		goto fail_0;
2241 	}
2242 
2243 	rx_buffer = adapter->rx_buffer_area;
2244 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2245 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2246 					  &rx_buffer->map);
2247 		if (error != 0) {
2248 			device_printf(adapter->dev,
2249 				      "em_allocate_receive_structures: "
2250 				      "bus_dmamap_create failed; error %u\n",
2251 				      error);
2252 			goto fail_1;
2253 		}
2254 	}
2255 
2256 	for (i = 0; i < adapter->num_rx_desc; i++) {
2257 		error = em_get_buf(i, adapter, NULL);
2258 		if (error != 0) {
2259 			adapter->rx_buffer_area[i].m_head = NULL;
2260 			adapter->rx_desc_base[i].buffer_addr = 0;
2261 			return(error);
2262 		}
2263 	}
2264 
2265 	return(0);
2266 
2267 fail_1:
2268 	bus_dma_tag_destroy(adapter->rxtag);
2269 fail_0:
2270 	adapter->rxtag = NULL;
2271 	free(adapter->rx_buffer_area, M_DEVBUF);
2272 	adapter->rx_buffer_area = NULL;
2273 	return(error);
2274 }
2275 
2276 /*********************************************************************
2277  *
2278  *  Allocate and initialize receive structures.
2279  *
2280  **********************************************************************/
2281 static int
2282 em_setup_receive_structures(struct adapter *adapter)
2283 {
2284 	bzero((void *) adapter->rx_desc_base,
2285 	      (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2286 
2287 	if (em_allocate_receive_structures(adapter))
2288 		return(ENOMEM);
2289 
2290 	/* Setup our descriptor pointers */
2291 	adapter->next_rx_desc_to_check = 0;
2292 	return(0);
2293 }
2294 
2295 /*********************************************************************
2296  *
2297  *  Enable receive unit.
2298  *
2299  **********************************************************************/
2300 static void
2301 em_initialize_receive_unit(struct adapter *adapter)
2302 {
2303 	uint32_t reg_rctl;
2304 	uint32_t reg_rxcsum;
2305 	struct ifnet *ifp;
2306 	uint64_t bus_addr;
2307 
2308 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2309 
2310 	ifp = &adapter->interface_data.ac_if;
2311 
2312 	/* Make sure receives are disabled while setting up the descriptor ring */
2313 	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2314 
2315 	/* Set the Receive Delay Timer Register */
2316 	E1000_WRITE_REG(&adapter->hw, RDTR,
2317 			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2318 
2319 	if (adapter->hw.mac_type >= em_82540) {
2320 		E1000_WRITE_REG(&adapter->hw, RADV,
2321 				adapter->rx_abs_int_delay.value);
2322 
2323 		/* Set the interrupt throttling rate.  Value is calculated
2324 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
2325 		 */
2326 #define MAX_INTS_PER_SEC	8000
2327 #define DEFAULT_ITR		1000000000/(MAX_INTS_PER_SEC * 256)
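		/*
		 * Worked example: with MAX_INTS_PER_SEC = 8000 this is
		 * 1000000000 / (8000 * 256) = 488 ITR units.  Each unit is
		 * 256ns, so the minimum interval between interrupts becomes
		 * about 488 * 256ns ~= 125us, i.e. roughly 8000 ints/sec.
		 */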
2328 		E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2329 	}
2330 
2331 	/* Setup the Base and Length of the Rx Descriptor Ring */
2332 	bus_addr = adapter->rxdma.dma_paddr;
2333 	E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2334 	E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2335 	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2336 			sizeof(struct em_rx_desc));
2337 
2338 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2339 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2340 	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
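	/*
	 * Head starts at 0 and tail at the last descriptor, handing the whole
	 * ring to the hardware; the tail is advanced again in
	 * em_process_receive_interrupts() as buffers are replenished.
	 */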
2341 
2342 	/* Setup the Receive Control Register */
2343 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2344 		   E1000_RCTL_RDMTS_HALF |
2345 		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2346 
2347 	if (adapter->hw.tbi_compatibility_on == TRUE)
2348 		reg_rctl |= E1000_RCTL_SBP;
2349 
2350 	switch (adapter->rx_buffer_len) {
2351 	default:
2352 	case EM_RXBUFFER_2048:
2353 		reg_rctl |= E1000_RCTL_SZ_2048;
2354 		break;
2355 	case EM_RXBUFFER_4096:
2356 		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2357 		break;
2358 	case EM_RXBUFFER_8192:
2359 		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2360 		break;
2361 	case EM_RXBUFFER_16384:
2362 		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2363 		break;
2364 	}
2365 
2366 	if (ifp->if_mtu > ETHERMTU)
2367 		reg_rctl |= E1000_RCTL_LPE;
2368 
2369 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2370 	if ((adapter->hw.mac_type >= em_82543) &&
2371 	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2372 		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2373 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2374 		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2375 	}
2376 
2377 	/* Enable Receives */
2378 	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2379 }
2380 
2381 /*********************************************************************
2382  *
2383  *  Free receive related data structures.
2384  *
2385  **********************************************************************/
2386 static void
2387 em_free_receive_structures(struct adapter *adapter)
2388 {
2389 	struct em_buffer *rx_buffer;
2390 	int i;
2391 
2392 	INIT_DEBUGOUT("free_receive_structures: begin");
2393 
2394 	if (adapter->rx_buffer_area != NULL) {
2395 		rx_buffer = adapter->rx_buffer_area;
2396 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2397 			if (rx_buffer->map != NULL) {
2398 				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2399 				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2400 			}
2401 			if (rx_buffer->m_head != NULL)
2402 				m_freem(rx_buffer->m_head);
2403 			rx_buffer->m_head = NULL;
2404 		}
2405 	}
2406 	if (adapter->rx_buffer_area != NULL) {
2407 		free(adapter->rx_buffer_area, M_DEVBUF);
2408 		adapter->rx_buffer_area = NULL;
2409 	}
2410 	if (adapter->rxtag != NULL) {
2411 		bus_dma_tag_destroy(adapter->rxtag);
2412 		adapter->rxtag = NULL;
2413 	}
2414 }
2415 
2416 /*********************************************************************
2417  *
2418  *  This routine executes in interrupt context. It replenishes
2419  *  the mbufs in the descriptor ring and passes data which has been
2420  *  DMA'ed into host memory up to the upper layer.
2421  *
2422  *  We loop at most count times if count is > 0, or until done if
2423  *  count < 0.
2424  *
2425  *********************************************************************/
2426 static void
2427 em_process_receive_interrupts(struct adapter *adapter, int count)
2428 {
2429 	struct ifnet *ifp;
2430 	struct mbuf *mp;
2431 	uint8_t accept_frame = 0;
2432 	uint8_t eop = 0;
2433 	uint16_t len, desc_len, prev_len_adj;
2434 	int i;
2435 
2436 	/* Pointer to the receive descriptor being examined. */
2437 	struct em_rx_desc *current_desc;
2438 
2439 	ifp = &adapter->interface_data.ac_if;
2440 	i = adapter->next_rx_desc_to_check;
2441 	current_desc = &adapter->rx_desc_base[i];
2442 
2443 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2444 #ifdef DBG_STATS
2445 		adapter->no_pkts_avail++;
2446 #endif
2447 		return;
2448 	}
2449 	while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2450 		mp = adapter->rx_buffer_area[i].m_head;
2451 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2452 				BUS_DMASYNC_POSTREAD);
2453 
2454 		accept_frame = 1;
2455 		prev_len_adj = 0;
2456 		desc_len = le16toh(current_desc->length);
2457 		if (current_desc->status & E1000_RXD_STAT_EOP) {
2458 			count--;
2459 			eop = 1;
2460 			if (desc_len < ETHER_CRC_LEN) {
2461 				len = 0;
2462 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2463 			}
2464 			else {
2465 				len = desc_len - ETHER_CRC_LEN;
2466 			}
2467 		} else {
2468 			eop = 0;
2469 			len = desc_len;
2470 		}
2471 
2472 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2473 			uint8_t last_byte;
2474 			uint32_t pkt_len = desc_len;
2475 
2476 			if (adapter->fmp != NULL)
2477 				pkt_len += adapter->fmp->m_pkthdr.len;
2478 
2479 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2480 
2481 			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2482 				       current_desc->errors,
2483 				       pkt_len, last_byte)) {
2484 				em_tbi_adjust_stats(&adapter->hw,
2485 						    &adapter->stats,
2486 						    pkt_len,
2487 						    adapter->hw.mac_addr);
2488 				if (len > 0)
2489 					len--;
2490 			}
2491 			else {
2492 				accept_frame = 0;
2493 			}
2494 		}
2495 
2496 		if (accept_frame) {
2497 			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2498 				adapter->dropped_pkts++;
2499 				em_get_buf(i, adapter, mp);
2500 				if (adapter->fmp != NULL)
2501 					m_freem(adapter->fmp);
2502 				adapter->fmp = NULL;
2503 				adapter->lmp = NULL;
2504 				break;
2505 			}
2506 
2507 			/* Assign correct length to the current fragment */
2508 			mp->m_len = len;
2509 
2510 			if (adapter->fmp == NULL) {
2511 				mp->m_pkthdr.len = len;
2512 				adapter->fmp = mp;	 /* Store the first mbuf */
2513 				adapter->lmp = mp;
2514 			} else {
2515 				/* Chain mbuf's together */
2516 				mp->m_flags &= ~M_PKTHDR;
2517 				/*
2518 				 * Adjust length of previous mbuf in chain if we
2519 				 * received less than 4 bytes in the last descriptor.
2520 				 */
2521 				if (prev_len_adj > 0) {
2522 					adapter->lmp->m_len -= prev_len_adj;
2523 					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2524 				}
2525 				adapter->lmp->m_next = mp;
2526 				adapter->lmp = adapter->lmp->m_next;
2527 				adapter->fmp->m_pkthdr.len += len;
2528 			}
2529 
2530 			if (eop) {
2531 				adapter->fmp->m_pkthdr.rcvif = ifp;
2532 				ifp->if_ipackets++;
2533 
2534 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2535 				em_receive_checksum(adapter, current_desc,
2536 						    adapter->fmp);
2537 				if (current_desc->status & E1000_RXD_STAT_VP)
2538 					VLAN_INPUT_TAG(adapter->fmp,
2539 						       (current_desc->special &
2540 							E1000_RXD_SPC_VLAN_MASK));
2541 				else
2542 					(*ifp->if_input)(ifp, adapter->fmp);
2543 #else
2544 				em_receive_checksum(adapter, current_desc,
2545 						    adapter->fmp);
2546 				if (current_desc->status & E1000_RXD_STAT_VP)
2547 					VLAN_INPUT_TAG(ifp, adapter->fmp,
2548 						       (current_desc->special &
2549 							E1000_RXD_SPC_VLAN_MASK),
2550 						       adapter->fmp = NULL);
2551 
2552 				if (adapter->fmp != NULL)
2553 					(*ifp->if_input)(ifp, adapter->fmp);
2554 #endif
2555 				adapter->fmp = NULL;
2556 				adapter->lmp = NULL;
2557 			}
2558 		} else {
2559 			adapter->dropped_pkts++;
2560 			em_get_buf(i, adapter, mp);
2561 			if (adapter->fmp != NULL)
2562 				m_freem(adapter->fmp);
2563 			adapter->fmp = NULL;
2564 			adapter->lmp = NULL;
2565 		}
2566 
2567 		/* Zero out the receive descriptors status  */
2568 		current_desc->status = 0;
2569 
2570 		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2571 		E1000_WRITE_REG(&adapter->hw, RDT, i);
2572 
2573 		/* Advance our pointers to the next descriptor */
2574 		if (++i == adapter->num_rx_desc) {
2575 			i = 0;
2576 			current_desc = adapter->rx_desc_base;
2577 		} else
2578 			current_desc++;
2579 	}
2580 	adapter->next_rx_desc_to_check = i;
2581 }
2582 
2583 /*********************************************************************
2584  *
2585  *  Verify that the hardware indicated that the checksum is valid.
2586  *  Inform the stack of the checksum status so that the stack
2587  *  does not spend time verifying the checksum itself.
2588  *
2589  *********************************************************************/
2590 static void
2591 em_receive_checksum(struct adapter *adapter,
2592 		    struct em_rx_desc *rx_desc,
2593 		    struct mbuf *mp)
2594 {
2595 	/* 82543 or newer only */
2596 	if ((adapter->hw.mac_type < em_82543) ||
2597 	    /* Ignore Checksum bit is set */
2598 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2599 		mp->m_pkthdr.csum_flags = 0;
2600 		return;
2601 	}
2602 
2603 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2604 		/* Did it pass? */
2605 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2606 			/* IP Checksum Good */
2607 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2608 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2609 		} else {
2610 			mp->m_pkthdr.csum_flags = 0;
2611 		}
2612 	}
2613 
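	/*
	 * Note on the flags set below: CSUM_DATA_VALID plus CSUM_PSEUDO_HDR
	 * with csum_data set to 0xffff tells the stack that the TCP/UDP
	 * checksum (pseudo header included) has already been verified, so no
	 * software checksum pass is needed.
	 */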
2614 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2615 		/* Did it pass? */
2616 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2617 			mp->m_pkthdr.csum_flags |=
2618 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2619 			mp->m_pkthdr.csum_data = htons(0xffff);
2620 		}
2621 	}
2622 }
2623 
2624 
2625 static void
2626 em_enable_vlans(struct adapter *adapter)
2627 {
2628 	uint32_t ctrl;
2629 
2630 	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2631 
2632 	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2633 	ctrl |= E1000_CTRL_VME;
2634 	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2635 }
2636 
2637 static void
2638 em_enable_intr(struct adapter *adapter)
2639 {
2640 	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2641 }
2642 
2643 static void
2644 em_disable_intr(struct adapter *adapter)
2645 {
2646 	E1000_WRITE_REG(&adapter->hw, IMC,
2647 			(0xffffffff & ~E1000_IMC_RXSEQ));
2648 }
2649 
2650 static int
2651 em_is_valid_ether_addr(uint8_t *addr)
2652 {
2653 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2654 
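	/*
	 * Reject addresses with the multicast/broadcast bit set (low bit of
	 * the first octet) and the all-zeros address.
	 */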
2655 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2656 		return(FALSE);
2657 	else
2658 		return(TRUE);
2659 }
2660 
2661 void
2662 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2663 {
2664 	pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2665 }
2666 
2667 void
2668 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2669 {
2670 	*value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2671 }
2672 
2673 void
2674 em_pci_set_mwi(struct em_hw *hw)
2675 {
2676 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2677 			 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2678 }
2679 
2680 void
2681 em_pci_clear_mwi(struct em_hw *hw)
2682 {
2683 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2684 			 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2685 }
2686 
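/*
 * The two helpers below go through the adapter's I/O-mapped register
 * window: the register offset is written at window offset 0 (IOADDR) and
 * the data is then read or written at window offset 4 (IODATA).
 */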
2687 uint32_t
2688 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2689 {
2690 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2691 	return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2692 }
2693 
2694 void
2695 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2696 {
2697 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2698 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2699 }
2700 
2701 /*********************************************************************
2702  * 82544 Coexistence issue workaround.
2703  *    There are two issues:
2704  *	1. Transmit hang.
2705  *	   To detect this issue the following equation can be used:
2706  *	       SIZE[3:0] + ADDR[2:0] = SUM[3:0]
2707  *	   If SUM[3:0] falls between 1 and 4, this issue can occur.
2708  *
2709  *	2. DAC issue.
2710  *	   To detect this issue the same equation can be used:
2711  *	       SIZE[3:0] + ADDR[2:0] = SUM[3:0]
2712  *	   If SUM[3:0] falls between 9 and 0xC, this issue can occur.
2713  *
2714  *
2715  *    WORKAROUND:
2716  *	   Make sure the ending address (SUM[3:0]) of a buffer is never
2717  *	   1, 2, 3 or 4 (hang) nor 9, 0xA, 0xB or 0xC (DAC).
2718  *
2719  *************************************************************************/
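/*
 * Worked example (illustrative numbers only): a 0x0B-byte buffer whose
 * address has low three bits 0x6 gives (0x6 + 0xB) & 0xF = 0x1, which is
 * in the problem range, so em_fill_descriptors() below splits it into a
 * (length - 4)-byte descriptor followed by a trailing 4-byte descriptor.
 */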
2720 static uint32_t
2721 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2722 {
2723 	/* The issue is sensitive to both the buffer length and address. */
2724 	/* Handle the short-length case first, then check the address. */
2725 	uint32_t safe_terminator;
2726 	if (length <= 4) {
2727 		desc_array->descriptor[0].address = address;
2728 		desc_array->descriptor[0].length = length;
2729 		desc_array->elements = 1;
2730 		return(desc_array->elements);
2731 	}
2732 	safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2733 	/* If it does not fall in the 0x1-0x4 or 0x9-0xC ranges, no split is needed */
2734 	if (safe_terminator == 0 ||
2735 	    (safe_terminator > 4 && safe_terminator < 9) ||
2736 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2737 		desc_array->descriptor[0].address = address;
2738 		desc_array->descriptor[0].length = length;
2739 		desc_array->elements = 1;
2740 		return(desc_array->elements);
2741 	}
2742 
2743 	desc_array->descriptor[0].address = address;
2744 	desc_array->descriptor[0].length = length - 4;
2745 	desc_array->descriptor[1].address = address + (length - 4);
2746 	desc_array->descriptor[1].length = 4;
2747 	desc_array->elements = 2;
2748 	return(desc_array->elements);
2749 }
2750 
2751 /**********************************************************************
2752  *
2753  *  Update the board statistics counters.
2754  *
2755  **********************************************************************/
2756 static void
2757 em_update_stats_counters(struct adapter *adapter)
2758 {
2759 	struct ifnet   *ifp;
2760 
2761 	if (adapter->hw.media_type == em_media_type_copper ||
2762 	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2763 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2764 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2765 	}
2766 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2767 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2768 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2769 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2770 
2771 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2772 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2773 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2774 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2775 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2776 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2777 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2778 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2779 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2780 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2781 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2782 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2783 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2784 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2785 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2786 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2787 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2788 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2789 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2790 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2791 
2792 	/* For the 64-bit byte counters the low dword must be read first. */
2793 	/* Both registers clear on the read of the high dword */
2794 
2795 	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2796 	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2797 	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2798 	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2799 
2800 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2801 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2802 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2803 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2804 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2805 
2806 	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2807 	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2808 	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2809 	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2810 
2811 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2812 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2813 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2814 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2815 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2816 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2817 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2818 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2819 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2820 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2821 
2822 	if (adapter->hw.mac_type >= em_82543) {
2823 		adapter->stats.algnerrc +=
2824 		    E1000_READ_REG(&adapter->hw, ALGNERRC);
2825 		adapter->stats.rxerrc +=
2826 		    E1000_READ_REG(&adapter->hw, RXERRC);
2827 		adapter->stats.tncrs +=
2828 		    E1000_READ_REG(&adapter->hw, TNCRS);
2829 		adapter->stats.cexterr +=
2830 		    E1000_READ_REG(&adapter->hw, CEXTERR);
2831 		adapter->stats.tsctc +=
2832 		    E1000_READ_REG(&adapter->hw, TSCTC);
2833 		adapter->stats.tsctfc +=
2834 		    E1000_READ_REG(&adapter->hw, TSCTFC);
2835 	}
2836 	ifp = &adapter->interface_data.ac_if;
2837 
2838 	/* Fill out the OS statistics structure */
2839 	ifp->if_ibytes = adapter->stats.gorcl;
2840 	ifp->if_obytes = adapter->stats.gotcl;
2841 	ifp->if_imcasts = adapter->stats.mprc;
2842 	ifp->if_collisions = adapter->stats.colc;
2843 
2844 	/* Rx Errors */
2845 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2846 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
2847 	    adapter->stats.rlec + adapter->stats.rnbc +
2848 	    adapter->stats.mpc + adapter->stats.cexterr;
2849 
2850 	/* Tx Errors */
2851 	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2852 }
2853 
2854 
2855 /**********************************************************************
2856  *
2857  *  This routine is called only when em_display_debug_stats is enabled.
2858  *  This routine provides a way to take a look at important statistics
2859  *  maintained by the driver and hardware.
2860  *
2861  **********************************************************************/
2862 static void
2863 em_print_debug_info(struct adapter *adapter)
2864 {
2865 	device_t dev = adapter->dev;
2866 	uint8_t *hw_addr = adapter->hw.hw_addr;
2867 
2868 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2869 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2870 		      E1000_READ_REG(&adapter->hw, TIDV),
2871 		      E1000_READ_REG(&adapter->hw, TADV));
2872 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2873 		      E1000_READ_REG(&adapter->hw, RDTR),
2874 		      E1000_READ_REG(&adapter->hw, RADV));
2875 #ifdef DBG_STATS
2876 	device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2877 	device_printf(dev, "CleanTxInterrupts = %ld\n",
2878 		      adapter->clean_tx_interrupts);
2879 #endif
2880 	device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2881 		      (long long)adapter->tx_fifo_wrk,
2882 		      (long long)adapter->tx_fifo_reset);
2883 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2884 		      E1000_READ_REG(&adapter->hw, TDH),
2885 		      E1000_READ_REG(&adapter->hw, TDT));
2886 	device_printf(dev, "Num Tx descriptors avail = %d\n",
2887 		      adapter->num_tx_desc_avail);
2888 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2889 		      adapter->no_tx_desc_avail1);
2890 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2891 		      adapter->no_tx_desc_avail2);
2892 	device_printf(dev, "Std mbuf failed = %ld\n",
2893 		      adapter->mbuf_alloc_failed);
2894 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
2895 		      adapter->mbuf_cluster_failed);
2896 	device_printf(dev, "Driver dropped packets = %ld\n",
2897 		      adapter->dropped_pkts);
2898 }
2899 
2900 static void
2901 em_print_hw_stats(struct adapter *adapter)
2902 {
2903 	device_t dev = adapter->dev;
2904 
2905 	device_printf(dev, "Excessive collisions = %lld\n",
2906 		      (long long)adapter->stats.ecol);
2907 	device_printf(dev, "Symbol errors = %lld\n",
2908 		      (long long)adapter->stats.symerrs);
2909 	device_printf(dev, "Sequence errors = %lld\n",
2910 		      (long long)adapter->stats.sec);
2911 	device_printf(dev, "Defer count = %lld\n",
2912 		      (long long)adapter->stats.dc);
2913 
2914 	device_printf(dev, "Missed Packets = %lld\n",
2915 		      (long long)adapter->stats.mpc);
2916 	device_printf(dev, "Receive No Buffers = %lld\n",
2917 		      (long long)adapter->stats.rnbc);
2918 	device_printf(dev, "Receive length errors = %lld\n",
2919 		      (long long)adapter->stats.rlec);
2920 	device_printf(dev, "Receive errors = %lld\n",
2921 		      (long long)adapter->stats.rxerrc);
2922 	device_printf(dev, "Crc errors = %lld\n",
2923 		      (long long)adapter->stats.crcerrs);
2924 	device_printf(dev, "Alignment errors = %lld\n",
2925 		      (long long)adapter->stats.algnerrc);
2926 	device_printf(dev, "Carrier extension errors = %lld\n",
2927 		      (long long)adapter->stats.cexterr);
2928 
2929 	device_printf(dev, "XON Rcvd = %lld\n",
2930 		      (long long)adapter->stats.xonrxc);
2931 	device_printf(dev, "XON Xmtd = %lld\n",
2932 		      (long long)adapter->stats.xontxc);
2933 	device_printf(dev, "XOFF Rcvd = %lld\n",
2934 		      (long long)adapter->stats.xoffrxc);
2935 	device_printf(dev, "XOFF Xmtd = %lld\n",
2936 		      (long long)adapter->stats.xofftxc);
2937 
2938 	device_printf(dev, "Good Packets Rcvd = %lld\n",
2939 		      (long long)adapter->stats.gprc);
2940 	device_printf(dev, "Good Packets Xmtd = %lld\n",
2941 		      (long long)adapter->stats.gptc);
2942 }
2943 
2944 static int
2945 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2946 {
2947 	int error;
2948 	int result;
2949 	struct adapter *adapter;
2950 
2951 	result = -1;
2952 	error = sysctl_handle_int(oidp, &result, 0, req);
2953 
2954 	if (error || !req->newptr)
2955 		return(error);
2956 
2957 	if (result == 1) {
2958 		adapter = (struct adapter *)arg1;
2959 		em_print_debug_info(adapter);
2960 	}
2961 
2962 	return(error);
2963 }
2964 
2965 static int
2966 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2967 {
2968 	int error;
2969 	int result;
2970 	struct adapter *adapter;
2971 
2972 	result = -1;
2973 	error = sysctl_handle_int(oidp, &result, 0, req);
2974 
2975 	if (error || !req->newptr)
2976 		return(error);
2977 
2978 	if (result == 1) {
2979 		adapter = (struct adapter *)arg1;
2980 		em_print_hw_stats(adapter);
2981 	}
2982 
2983 	return(error);
2984 }
2985 
2986 static int
2987 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2988 {
2989 	struct em_int_delay_info *info;
2990 	struct adapter *adapter;
2991 	uint32_t regval;
2992 	int error;
2993 	int usecs;
2994 	int ticks;
2995 	int s;
2996 
2997 	info = (struct em_int_delay_info *)arg1;
2998 	adapter = info->adapter;
2999 	usecs = info->value;
3000 	error = sysctl_handle_int(oidp, &usecs, 0, req);
3001 	if (error != 0 || req->newptr == NULL)
3002 		return(error);
3003 	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3004 		return(EINVAL);
3005 	info->value = usecs;
3006 	ticks = E1000_USECS_TO_TICKS(usecs);
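	/*
	 * E1000_USECS_TO_TICKS()/E1000_TICKS_TO_USECS() convert between
	 * microseconds and the delay timers' native units (believed to be
	 * 1.024us per tick on this hardware); only the low 16 bits of the
	 * register carry the delay value, hence the masking below.
	 */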
3007 
3008 	s = splimp();
3009 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3010 	regval = (regval & ~0xffff) | (ticks & 0xffff);
3011 	/* Handle a few special cases. */
3012 	switch (info->offset) {
3013 	case E1000_RDTR:
3014 	case E1000_82542_RDTR:
3015 		regval |= E1000_RDT_FPDB;
3016 		break;
3017 	case E1000_TIDV:
3018 	case E1000_82542_TIDV:
3019 		if (ticks == 0) {
3020 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3021 			/* Don't write 0 into the TIDV register. */
3022 			regval++;
3023 		} else
3024 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3025 		break;
3026 	}
3027 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3028 	splx(s);
3029 	return(0);
3030 }
3031 
3032 static void
3033 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3034 			const char *description, struct em_int_delay_info *info,
3035 			int offset, int value)
3036 {
3037 	info->adapter = adapter;
3038 	info->offset = offset;
3039 	info->value = value;
3040 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3041 			SYSCTL_CHILDREN(adapter->sysctl_tree),
3042 			OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3043 			info, 0, em_sysctl_int_delay, "I", description);
3044 }
3045