xref: /freebsd/usr.sbin/bhyve/pci_e82545.c (revision f374ba41)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6  * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer
14  *    in this position and unchanged.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/types.h>
36 #ifndef WITHOUT_CAPSICUM
37 #include <sys/capsicum.h>
38 #endif
39 #include <sys/limits.h>
40 #include <sys/ioctl.h>
41 #include <sys/uio.h>
42 #include <net/ethernet.h>
43 #include <netinet/in.h>
44 #include <netinet/tcp.h>
45 
46 #ifndef WITHOUT_CAPSICUM
47 #include <capsicum_helpers.h>
48 #endif
49 #include <machine/vmm_snapshot.h>
50 
51 #include <err.h>
52 #include <errno.h>
53 #include <fcntl.h>
54 #include <md5.h>
55 #include <stdio.h>
56 #include <stdlib.h>
57 #include <string.h>
58 #include <sysexits.h>
59 #include <unistd.h>
60 #include <pthread.h>
61 #include <pthread_np.h>
62 
63 #include "e1000_regs.h"
64 #include "e1000_defines.h"
65 #include "mii.h"
66 
67 #include "bhyverun.h"
68 #include "config.h"
69 #include "debug.h"
70 #include "pci_emul.h"
71 #include "mevent.h"
72 #include "net_utils.h"
73 #include "net_backends.h"
74 
75 /* Hardware/register definitions XXX: move some to common code. */
76 #define E82545_VENDOR_ID_INTEL			0x8086
77 #define E82545_DEV_ID_82545EM_COPPER		0x100F
78 #define E82545_SUBDEV_ID			0x1008
79 
80 #define E82545_REVISION_4			4
81 
82 #define E82545_MDIC_DATA_MASK			0x0000FFFF
83 #define E82545_MDIC_OP_MASK			0x0c000000
84 #define E82545_MDIC_IE				0x20000000
85 
86 #define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
87 #define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
88 #define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
89 
90 #define E82545_BAR_REGISTER			0
91 #define E82545_BAR_REGISTER_LEN			(128*1024)
92 #define E82545_BAR_FLASH			1
93 #define E82545_BAR_FLASH_LEN			(64*1024)
94 #define E82545_BAR_IO				2
95 #define E82545_BAR_IO_LEN			8
96 
97 #define E82545_IOADDR				0x00000000
98 #define E82545_IODATA				0x00000004
99 #define E82545_IO_REGISTER_MAX			0x0001FFFF
100 #define E82545_IO_FLASH_BASE			0x00080000
101 #define E82545_IO_FLASH_MAX			0x000FFFFF
102 
103 #define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
104 #define E82545_RAR_MAX				15
105 #define E82545_MTA_MAX				127
106 #define E82545_VFTA_MAX				127
107 
108 /* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
109  * followed by 6 address bits.
110  * TODO: make opcode bits and addr bits configurable?
111  * NVM Commands - Microwire */
112 #define E82545_NVM_OPCODE_BITS	3
113 #define E82545_NVM_ADDR_BITS	6
114 #define E82545_NVM_DATA_BITS	16
115 #define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
116 #define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
117 #define E82545_NVM_OPCODE_MASK	\
118     (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
119 #define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
120 #define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
121 #define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
122 #define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
123 
124 #define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128 bytes */
125 
126 #define E1000_ICR_SRPD		0x00010000
127 
128 /* This is an arbitrary number.  There is no hard limit on the chip. */
129 #define I82545_MAX_TXSEGS	64
130 
131 /* Legacy receive descriptor */
132 struct e1000_rx_desc {
133 	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
134 	uint16_t length;	/* Length of data DMAed into data buffer */
135 	uint16_t csum;		/* Packet checksum */
136 	uint8_t	 status;       	/* Descriptor status */
137 	uint8_t  errors;	/* Descriptor Errors */
138 	uint16_t special;
139 };
140 
141 /* Transmit descriptor types */
142 #define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
143 #define E1000_TXD_TYP_L		(0)
144 #define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
145 #define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
146 
147 /* Legacy transmit descriptor */
148 struct e1000_tx_desc {
149 	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
150 	union {
151 		uint32_t data;
152 		struct {
153 			uint16_t length;  /* Data buffer length */
154 			uint8_t  cso;  /* Checksum offset */
155 			uint8_t  cmd;  /* Descriptor control */
156 		} flags;
157 	} lower;
158 	union {
159 		uint32_t data;
160 		struct {
161 			uint8_t status; /* Descriptor status */
162 			uint8_t css;  /* Checksum start */
163 			uint16_t special;
164 		} fields;
165 	} upper;
166 };
167 
168 /* Context descriptor */
169 struct e1000_context_desc {
170 	union {
171 		uint32_t ip_config;
172 		struct {
173 			uint8_t ipcss;  /* IP checksum start */
174 			uint8_t ipcso;  /* IP checksum offset */
175 			uint16_t ipcse;  /* IP checksum end */
176 		} ip_fields;
177 	} lower_setup;
178 	union {
179 		uint32_t tcp_config;
180 		struct {
181 			uint8_t tucss;  /* TCP checksum start */
182 			uint8_t tucso;  /* TCP checksum offset */
183 			uint16_t tucse;  /* TCP checksum end */
184 		} tcp_fields;
185 	} upper_setup;
186 	uint32_t cmd_and_length;
187 	union {
188 		uint32_t data;
189 		struct {
190 			uint8_t status;  /* Descriptor status */
191 			uint8_t hdr_len;  /* Header length */
192 			uint16_t mss;  /* Maximum segment size */
193 		} fields;
194 	} tcp_seg_setup;
195 };
196 
197 /* Data descriptor */
198 struct e1000_data_desc {
199 	uint64_t buffer_addr;  /* Address of the descriptor's data buffer */
200 	union {
201 		uint32_t data;
202 		struct {
203 			uint16_t length;  /* Data buffer length */
204 			uint8_t typ_len_ext;
205 			uint8_t cmd;
206 		} flags;
207 	} lower;
208 	union {
209 		uint32_t data;
210 		struct {
211 			uint8_t status;  /* Descriptor status */
212 			uint8_t popts;  /* Packet Options */
213 			uint16_t special;
214 		} fields;
215 	} upper;
216 };
217 
218 union e1000_tx_udesc {
219 	struct e1000_tx_desc td;
220 	struct e1000_context_desc cd;
221 	struct e1000_data_desc dd;
222 };
223 
224 /* Tx checksum info for a packet. */
225 struct ck_info {
226 	int	ck_valid;	/* ck_info is valid */
227 	uint8_t	ck_start;	/* start byte of cksum calculation */
228 	uint8_t	ck_off;		/* offset of cksum insertion */
229 	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
230 };
231 
232 /*
233  * Debug printf
234  */
235 static int e82545_debug = 0;
236 #define WPRINTF(msg,params...) PRINTLN("e82545: " msg, ##params)
237 #define DPRINTF(msg,params...) if (e82545_debug) WPRINTF(msg, params)
238 
239 #define	MIN(a,b) (((a)<(b))?(a):(b))
240 #define	MAX(a,b) (((a)>(b))?(a):(b))
241 
242 /* s/w representation of the RAL/RAH regs */
243 struct  eth_uni {
244 	int		eu_valid;
245 	int		eu_addrsel;
246 	struct ether_addr eu_eth;
247 };
248 
249 
250 struct e82545_softc {
251 	struct pci_devinst *esc_pi;
252 	struct vmctx	*esc_ctx;
253 	struct mevent   *esc_mevpitr;
254 	pthread_mutex_t	esc_mtx;
255 	struct ether_addr esc_mac;
256 	net_backend_t	*esc_be;
257 
258 	/* General */
259 	uint32_t	esc_CTRL;	/* x0000 device ctl */
260 	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
261 	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
262 	uint32_t	esc_FCT;	/* x0030 flow ctl type */
263 	uint32_t	esc_VET;	/* x0038 VLAN eth type */
264 	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
265 	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
266 	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
267 
268 	/* Interrupt control */
269 	int		esc_irq_asserted;
270 	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
271 	uint32_t	esc_ITR;	/* x00C4 intr throttling */
272 	uint32_t	esc_ICS;	/* x00C8 cause set */
273 	uint32_t	esc_IMS;	/* x00D0 mask set/read */
274 	uint32_t	esc_IMC;	/* x00D8 mask clear */
275 
276 	/* Transmit */
277 	union e1000_tx_udesc *esc_txdesc;
278 	struct e1000_context_desc esc_txctx;
279 	pthread_t	esc_tx_tid;
280 	pthread_cond_t	esc_tx_cond;
281 	int		esc_tx_enabled;
282 	int		esc_tx_active;
283 	uint32_t	esc_TXCW;	/* x0178 transmit config */
284 	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
285 	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
286 	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
287 	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
288 	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
289 	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
290 	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
291 	uint16_t	esc_TDH;	/* x3810 desc table head idx */
292 	uint16_t	esc_TDHr;	/* internal read version of TDH */
293 	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
294 	uint32_t	esc_TIDV;	/* x3820 intr delay */
295 	uint32_t	esc_TXDCTL;	/* x3828 desc control */
296 	uint32_t	esc_TADV;	/* x382C intr absolute delay */
297 
298 	/* L2 frame acceptance */
299 	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
300 	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
301 	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
302 
303 	/* Receive */
304 	struct e1000_rx_desc *esc_rxdesc;
305 	pthread_cond_t	esc_rx_cond;
306 	int		esc_rx_enabled;
307 	int		esc_rx_active;
308 	int		esc_rx_loopback;
309 	uint32_t	esc_RCTL;	/* x0100 receive ctl */
310 	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
311 	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
312 	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
313 	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
314 	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
315 	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
316 	uint16_t	esc_RDH;	/* x2810 desc table head idx */
317 	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
318 	uint32_t	esc_RDTR;	/* x2820 intr delay */
319 	uint32_t	esc_RXDCTL;	/* x2828 desc control */
320 	uint32_t	esc_RADV;	/* x282C intr absolute delay */
321 	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
322 	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
323 
324 	/* IO Port register access */
325 	uint32_t io_addr;
326 
327 	/* Shadow copy of MDIC */
328 	uint32_t mdi_control;
329 	/* Shadow copy of EECD */
330 	uint32_t eeprom_control;
331 	/* Latest NVM in/out */
332 	uint16_t nvm_data;
333 	uint16_t nvm_opaddr;
334 	/* stats */
335 	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
336 	uint32_t pkt_rx_by_size[6];
337 	uint32_t pkt_tx_by_size[6];
338 	uint32_t good_pkt_rx_count;
339 	uint32_t bcast_pkt_rx_count;
340 	uint32_t mcast_pkt_rx_count;
341 	uint32_t good_pkt_tx_count;
342 	uint32_t bcast_pkt_tx_count;
343 	uint32_t mcast_pkt_tx_count;
344 	uint32_t oversize_rx_count;
345 	uint32_t tso_tx_count;
346 	uint64_t good_octets_rx;
347 	uint64_t good_octets_tx;
348 	uint64_t missed_octets; /* counts missed and oversized */
349 
350 	uint8_t nvm_bits:6; /* number of bits remaining in/out */
351 	uint8_t nvm_mode:2;
352 #define E82545_NVM_MODE_OPADDR  0x0
353 #define E82545_NVM_MODE_DATAIN  0x1
354 #define E82545_NVM_MODE_DATAOUT 0x2
355 	/* EEPROM data */
356 	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
357 };
358 
359 static void e82545_reset(struct e82545_softc *sc, int dev);
360 static void e82545_rx_enable(struct e82545_softc *sc);
361 static void e82545_rx_disable(struct e82545_softc *sc);
362 static void e82545_rx_callback(int fd, enum ev_type type, void *param);
363 static void e82545_tx_start(struct e82545_softc *sc);
364 static void e82545_tx_enable(struct e82545_softc *sc);
365 static void e82545_tx_disable(struct e82545_softc *sc);
366 
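/*
 * Map a packet size to one of the six pkt_rx/tx_by_size[] statistic
 * buckets: <=64, 65-127, 128-255, 256-511, 512-1023 and >=1024 bytes.
 */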
367 static inline int __unused
368 e82545_size_stat_index(uint32_t size)
369 {
370 	if (size <= 64) {
371 		return 0;
372 	} else if (size >= 1024) {
373 		return 5;
374 	} else {
375 		/* should be 1-4 */
376 		return (fls(size) - 6);
377 	}
378 }
379 
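/*
 * Populate the emulated EEPROM with the MAC address and PCI IDs, and
 * insert a checksum word so that the image sums to NVM_SUM.
 */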
380 static void
381 e82545_init_eeprom(struct e82545_softc *sc)
382 {
383 	uint16_t checksum, i;
384 
385         /* mac addr */
386 	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
387 		(((uint16_t)sc->esc_mac.octet[1]) << 8);
388 	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
389 		(((uint16_t)sc->esc_mac.octet[3]) << 8);
390 	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
391 		(((uint16_t)sc->esc_mac.octet[5]) << 8);
392 
393 	/* pci ids */
394 	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
395 	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
396 	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
397 	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
398 
399 	/* fill in the checksum */
400         checksum = 0;
401 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
402 		checksum += sc->eeprom_data[i];
403 	}
404 	checksum = NVM_SUM - checksum;
405 	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
406 	DPRINTF("eeprom checksum: 0x%x", checksum);
407 }
408 
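/* PHY (MDI) register writes have no effect on the emulated link; only log them. */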
409 static void
410 e82545_write_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
411     uint8_t phy_addr, uint32_t data)
412 {
413 	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
414 }
415 
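/*
 * Return canned PHY (MDI) register values that report a completed
 * autonegotiation and an established full-duplex gigabit link.
 */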
416 static uint32_t
417 e82545_read_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
418     uint8_t phy_addr)
419 {
420 	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
421 	switch (reg_addr) {
422 	case PHY_STATUS:
423 		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
424 			MII_SR_AUTONEG_COMPLETE);
425 	case PHY_AUTONEG_ADV:
426 		return NWAY_AR_SELECTOR_FIELD;
427 	case PHY_LP_ABILITY:
428 		return 0;
429 	case PHY_1000T_STATUS:
430 		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
431 			SR_1000T_LOCAL_RX_STATUS);
432 	case PHY_ID1:
433 		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
434 	case PHY_ID2:
435 		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
436 	default:
437 		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
438 		return 0;
439 	}
440 	/* not reached */
441 }
442 
443 static void
444 e82545_eecd_strobe(struct e82545_softc *sc)
445 {
446 	/* Microwire state machine */
447 	/*
448 	DPRINTF("eeprom state machine strobe "
449 		"0x%x 0x%x 0x%x 0x%x",
450 		sc->nvm_mode, sc->nvm_bits,
451 		sc->nvm_opaddr, sc->nvm_data);*/
452 
453 	if (sc->nvm_bits == 0) {
454 		DPRINTF("eeprom state machine not expecting data! "
455 			"0x%x 0x%x 0x%x 0x%x",
456 			sc->nvm_mode, sc->nvm_bits,
457 			sc->nvm_opaddr, sc->nvm_data);
458 		return;
459 	}
460 	sc->nvm_bits--;
461 	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
462 		/* shifting out */
463 		if (sc->nvm_data & 0x8000) {
464 			sc->eeprom_control |= E1000_EECD_DO;
465 		} else {
466 			sc->eeprom_control &= ~E1000_EECD_DO;
467 		}
468 		sc->nvm_data <<= 1;
469 		if (sc->nvm_bits == 0) {
470 			/* read done, back to opcode mode. */
471 			sc->nvm_opaddr = 0;
472 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
473 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
474 		}
475 	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
476 		/* shifting in */
477 		sc->nvm_data <<= 1;
478 		if (sc->eeprom_control & E1000_EECD_DI) {
479 			sc->nvm_data |= 1;
480 		}
481 		if (sc->nvm_bits == 0) {
482 			/* eeprom write */
483 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
484 			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
485 			if (op != E82545_NVM_OPCODE_WRITE) {
486 				DPRINTF("Illegal eeprom write op 0x%x",
487 					sc->nvm_opaddr);
488 			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
489 				DPRINTF("Illegal eeprom write addr 0x%x",
490 					sc->nvm_opaddr);
491 			} else {
492 				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
493 				addr, sc->nvm_data);
494 				sc->eeprom_data[addr] = sc->nvm_data;
495 			}
496 			/* back to opcode mode */
497 			sc->nvm_opaddr = 0;
498 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
499 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
500 		}
501 	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
502 		sc->nvm_opaddr <<= 1;
503 		if (sc->eeprom_control & E1000_EECD_DI) {
504 			sc->nvm_opaddr |= 1;
505 		}
506 		if (sc->nvm_bits == 0) {
507 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
508 			switch (op) {
509 			case E82545_NVM_OPCODE_EWEN:
510 				DPRINTF("eeprom write enable: 0x%x",
511 					sc->nvm_opaddr);
512 				/* back to opcode mode */
513 				sc->nvm_opaddr = 0;
514 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
515 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
516 				break;
517 			case E82545_NVM_OPCODE_READ:
518 			{
519 				uint16_t addr = sc->nvm_opaddr &
520 					E82545_NVM_ADDR_MASK;
521 				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
522 				sc->nvm_bits = E82545_NVM_DATA_BITS;
523 				if (addr < E82545_NVM_EEPROM_SIZE) {
524 					sc->nvm_data = sc->eeprom_data[addr];
525 					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
526 						addr, sc->nvm_data);
527 				} else {
528 					DPRINTF("eeprom illegal read: 0x%x",
529 						sc->nvm_opaddr);
530 					sc->nvm_data = 0;
531 				}
532 				break;
533 			}
534 			case E82545_NVM_OPCODE_WRITE:
535 				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
536 				sc->nvm_bits = E82545_NVM_DATA_BITS;
537 				sc->nvm_data = 0;
538 				break;
539 			default:
540 				DPRINTF("eeprom unknown op: 0x%x",
541 					sc->nvm_opaddr);
542 				/* back to opcode mode */
543 				sc->nvm_opaddr = 0;
544 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
545 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
546 			}
547 		}
548 	} else {
549 		DPRINTF("eeprom state machine wrong state! "
550 			"0x%x 0x%x 0x%x 0x%x",
551 			sc->nvm_mode, sc->nvm_bits,
552 			sc->nvm_opaddr, sc->nvm_data);
553 	}
554 }
555 
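/*
 * Interrupt throttling timer expiry: assert the legacy interrupt if an
 * unmasked cause is still pending, otherwise cancel the timer.
 */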
556 static void
557 e82545_itr_callback(int fd __unused, enum ev_type type __unused, void *param)
558 {
559 	uint32_t new;
560 	struct e82545_softc *sc = param;
561 
562 	pthread_mutex_lock(&sc->esc_mtx);
563 	new = sc->esc_ICR & sc->esc_IMS;
564 	if (new && !sc->esc_irq_asserted) {
565 		DPRINTF("itr callback: lintr assert %x", new);
566 		sc->esc_irq_asserted = 1;
567 		pci_lintr_assert(sc->esc_pi);
568 	} else {
569 		mevent_delete(sc->esc_mevpitr);
570 		sc->esc_mevpitr = NULL;
571 	}
572 	pthread_mutex_unlock(&sc->esc_mtx);
573 }
574 
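/*
 * Set cause bits in the ICR and assert the legacy interrupt if the new
 * bits are unmasked, not already pending and not currently throttled.
 */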
575 static void
576 e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
577 {
578 	uint32_t new;
579 
580 	DPRINTF("icr assert: 0x%x", bits);
581 
582 	/*
583 	 * An interrupt is only generated if bits are set that
584 	 * aren't already in the ICR, these bits are unmasked,
585 	 * and there isn't an interrupt already pending.
586 	 */
587 	new = bits & ~sc->esc_ICR & sc->esc_IMS;
588 	sc->esc_ICR |= bits;
589 
590 	if (new == 0) {
591 		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
592 	} else if (sc->esc_mevpitr != NULL) {
593 		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
594 	} else if (!sc->esc_irq_asserted) {
595 		DPRINTF("icr assert: lintr assert %x", new);
596 		sc->esc_irq_asserted = 1;
597 		pci_lintr_assert(sc->esc_pi);
598 		if (sc->esc_ITR != 0) {
599 			sc->esc_mevpitr = mevent_add(
600 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
601 			    EVF_TIMER, e82545_itr_callback, sc);
602 		}
603 	}
604 }
605 
606 static void
607 e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
608 {
609 	uint32_t new;
610 
611 	/*
612 	 * Changing the mask may allow previously asserted
613 	 * but masked interrupt requests to generate an interrupt.
614 	 */
615 	new = bits & sc->esc_ICR & ~sc->esc_IMS;
616 	sc->esc_IMS |= bits;
617 
618 	if (new == 0) {
619 		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
620 	} else if (sc->esc_mevpitr != NULL) {
621 		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
622 	} else if (!sc->esc_irq_asserted) {
623 		DPRINTF("ims change: lintr assert %x", new);
624 		sc->esc_irq_asserted = 1;
625 		pci_lintr_assert(sc->esc_pi);
626 		if (sc->esc_ITR != 0) {
627 			sc->esc_mevpitr = mevent_add(
628 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
629 			    EVF_TIMER, e82545_itr_callback, sc);
630 		}
631 	}
632 }
633 
634 static void
635 e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
636 {
637 
638 	DPRINTF("icr deassert: 0x%x", bits);
639 	sc->esc_ICR &= ~bits;
640 
641 	/*
642 	 * If there are no longer any interrupt sources and there
643 	 * was an asserted interrupt, clear it
644 	 */
645 	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
646 		DPRINTF("icr deassert: lintr deassert %x", bits);
647 		pci_lintr_deassert(sc->esc_pi);
648 		sc->esc_irq_asserted = 0;
649 	}
650 }
651 
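/* Dispatch guest writes to the interrupt registers (ICR/ITR/ICS/IMS/IMC). */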
652 static void
653 e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
654 {
655 
656 	DPRINTF("intr_write: off %x, val %x", offset, value);
657 
658 	switch (offset) {
659 	case E1000_ICR:
660 		e82545_icr_deassert(sc, value);
661 		break;
662 	case E1000_ITR:
663 		sc->esc_ITR = value;
664 		break;
665 	case E1000_ICS:
666 		sc->esc_ICS = value;	/* not used: store for debug */
667 		e82545_icr_assert(sc, value);
668 		break;
669 	case E1000_IMS:
670 		e82545_ims_change(sc, value);
671 		break;
672 	case E1000_IMC:
673 		sc->esc_IMC = value;	/* for debug */
674 		sc->esc_IMS &= ~value;
675 		// XXX clear interrupts if all ICR bits now masked
676 		// and interrupt was pending ?
677 		break;
678 	default:
679 		break;
680 	}
681 }
682 
683 static uint32_t
684 e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
685 {
686 	uint32_t retval;
687 
688 	retval = 0;
689 
690 	DPRINTF("intr_read: off %x", offset);
691 
692 	switch (offset) {
693 	case E1000_ICR:
694 		retval = sc->esc_ICR;
695 		sc->esc_ICR = 0;
696 		e82545_icr_deassert(sc, ~0);
697 		break;
698 	case E1000_ITR:
699 		retval = sc->esc_ITR;
700 		break;
701 	case E1000_ICS:
702 		/* write-only register */
703 		break;
704 	case E1000_IMS:
705 		retval = sc->esc_IMS;
706 		break;
707 	case E1000_IMC:
708 		/* write-only register */
709 		break;
710 	default:
711 		break;
712 	}
713 
714 	return (retval);
715 }
716 
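/* Handle a device control (CTRL) write; a set RST bit triggers a software reset. */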
717 static void
718 e82545_devctl(struct e82545_softc *sc, uint32_t val)
719 {
720 
721 	sc->esc_CTRL = val & ~E1000_CTRL_RST;
722 
723 	if (val & E1000_CTRL_RST) {
724 		DPRINTF("e1k: s/w reset, ctl %x", val);
725 		e82545_reset(sc, 1);
726 	}
727 	/* XXX check for phy reset ? */
728 }
729 
730 static void
731 e82545_rx_update_rdba(struct e82545_softc *sc)
732 {
733 
734 	/* XXX verify desc base/len within phys mem range */
735 	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
736 	    sc->esc_RDBAL;
737 
738 	/* Cache host mapping of guest descriptor array */
739 	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
740 	    sc->esc_rdba, sc->esc_RDLEN);
741 }
742 
743 static void
744 e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
745 {
746 	int on;
747 
748 	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
749 
750 	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
751 	sc->esc_RCTL = val & ~0xF9204c01;
752 
753 	DPRINTF("rx_ctl - %s RCTL %x, val %x",
754 		on ? "on" : "off", sc->esc_RCTL, val);
755 
756 	/* state change requested */
757 	if (on != sc->esc_rx_enabled) {
758 		if (on) {
759 			/* Catch disallowed/unimplemented settings */
760 			//assert(!(val & E1000_RCTL_LBM_TCVR));
761 
762 			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
763 				sc->esc_rx_loopback = 1;
764 			} else {
765 				sc->esc_rx_loopback = 0;
766 			}
767 
768 			e82545_rx_update_rdba(sc);
769 			e82545_rx_enable(sc);
770 		} else {
771 			e82545_rx_disable(sc);
772 			sc->esc_rx_loopback = 0;
773 			sc->esc_rdba = 0;
774 			sc->esc_rxdesc = NULL;
775 		}
776 	}
777 }
778 
779 static void
780 e82545_tx_update_tdba(struct e82545_softc *sc)
781 {
782 
783 	/* XXX verify desc base/len within phys mem range */
784 	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
785 
786 	/* Cache host mapping of guest descriptor array */
787 	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
788             sc->esc_TDLEN);
789 }
790 
791 static void
792 e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
793 {
794 	int on;
795 
796 	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
797 
798 	/* ignore TCTL_EN settings that don't change state */
799 	if (on == sc->esc_tx_enabled)
800 		return;
801 
802 	if (on) {
803 		e82545_tx_update_tdba(sc);
804 		e82545_tx_enable(sc);
805 	} else {
806 		e82545_tx_disable(sc);
807 		sc->esc_tdba = 0;
808 		sc->esc_txdesc = NULL;
809 	}
810 
811 	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
812 	sc->esc_TCTL = val & ~0xFE800005;
813 }
814 
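/* Decode the receive buffer size selected by the RCTL BSEX/BSIZE bits. */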
815 static int
816 e82545_bufsz(uint32_t rctl)
817 {
818 
819 	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
820 	case (E1000_RCTL_SZ_2048): return (2048);
821 	case (E1000_RCTL_SZ_1024): return (1024);
822 	case (E1000_RCTL_SZ_512): return (512);
823 	case (E1000_RCTL_SZ_256): return (256);
824 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
825 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
826 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
827 	}
828 	return (256);	/* Forbidden value. */
829 }
830 
831 /* XXX one packet at a time until this is debugged */
832 static void
833 e82545_rx_callback(int fd __unused, enum ev_type type __unused, void *param)
834 {
835 	struct e82545_softc *sc = param;
836 	struct e1000_rx_desc *rxd;
837 	struct iovec vec[64];
838 	ssize_t len;
839 	int left, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
840 	uint32_t cause = 0;
841 	uint16_t *tp, tag, head;
842 
843 	pthread_mutex_lock(&sc->esc_mtx);
844 	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
845 
846 	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
847 		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
848 		    sc->esc_rx_enabled, sc->esc_rx_loopback);
849 		while (netbe_rx_discard(sc->esc_be) > 0) {
850 		}
851 		goto done1;
852 	}
853 	bufsz = e82545_bufsz(sc->esc_RCTL);
854 	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
855 	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
856 	size = sc->esc_RDLEN / 16;
857 	head = sc->esc_RDH;
858 	left = (size + sc->esc_RDT - head) % size;
859 	if (left < maxpktdesc) {
860 		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
861 		    left, maxpktdesc);
862 		while (netbe_rx_discard(sc->esc_be) > 0) {
863 		}
864 		goto done1;
865 	}
866 
867 	sc->esc_rx_active = 1;
868 	pthread_mutex_unlock(&sc->esc_mtx);
869 
870 	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
871 
872 		/* Grab rx descriptor pointed to by the head pointer */
873 		for (i = 0; i < maxpktdesc; i++) {
874 			rxd = &sc->esc_rxdesc[(head + i) % size];
875 			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
876 			    rxd->buffer_addr, bufsz);
877 			vec[i].iov_len = bufsz;
878 		}
879 		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
880 		if (len <= 0) {
881 			DPRINTF("netbe_recv() returned %zd", len);
882 			goto done;
883 		}
884 
885 		/*
886 		 * Adjust the packet length based on whether the CRC needs
887 		 * to be stripped or if the packet is less than the minimum
888 		 * eth packet size.
889 		 */
890 		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
891 			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
892 		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
893 			len += ETHER_CRC_LEN;
894 		n = (len + bufsz - 1) / bufsz;
895 
896 		DPRINTF("packet read %zd bytes, %d segs, head %d",
897 		    len, n, head);
898 
899 		/* Apply VLAN filter. */
900 		tp = (uint16_t *)vec[0].iov_base + 6;
901 		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
902 		    (ntohs(tp[0]) == sc->esc_VET)) {
903 			tag = ntohs(tp[1]) & 0x0fff;
904 			if ((sc->esc_fvlan[tag >> 5] &
905 			    (1 << (tag & 0x1f))) != 0) {
906 				DPRINTF("known VLAN %d", tag);
907 			} else {
908 				DPRINTF("unknown VLAN %d", tag);
909 				n = 0;
910 				continue;
911 			}
912 		}
913 
914 		/* Update all consumed descriptors. */
915 		for (i = 0; i < n - 1; i++) {
916 			rxd = &sc->esc_rxdesc[(head + i) % size];
917 			rxd->length = bufsz;
918 			rxd->csum = 0;
919 			rxd->errors = 0;
920 			rxd->special = 0;
921 			rxd->status = E1000_RXD_STAT_DD;
922 		}
923 		rxd = &sc->esc_rxdesc[(head + i) % size];
924 		rxd->length = len % bufsz;
925 		rxd->csum = 0;
926 		rxd->errors = 0;
927 		rxd->special = 0;
928 		/* XXX signal no checksum for now */
929 		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
930 		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
931 
932 		/* Schedule receive interrupts. */
933 		if ((uint32_t)len <= sc->esc_RSRPD) {
934 			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
935 		} else {
936 			/* XXX: RDRT and RADV timers should be here. */
937 			cause |= E1000_ICR_RXT0;
938 		}
939 
940 		head = (head + n) % size;
941 		left -= n;
942 	}
943 
944 done:
945 	pthread_mutex_lock(&sc->esc_mtx);
946 	sc->esc_rx_active = 0;
947 	if (sc->esc_rx_enabled == 0)
948 		pthread_cond_signal(&sc->esc_rx_cond);
949 
950 	sc->esc_RDH = head;
951 	/* Respect E1000_RCTL_RDMTS */
952 	left = (size + sc->esc_RDT - head) % size;
953 	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
954 		cause |= E1000_ICR_RXDMT0;
955 	/* Assert all accumulated interrupts. */
956 	if (cause != 0)
957 		e82545_icr_assert(sc, cause);
958 done1:
959 	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
960 	pthread_mutex_unlock(&sc->esc_mtx);
961 }
962 
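/* Fold a 32-bit checksum accumulator into 16 bits with end-around carry. */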
963 static uint16_t
964 e82545_carry(uint32_t sum)
965 {
966 
967 	sum = (sum & 0xFFFF) + (sum >> 16);
968 	if (sum > 0xFFFF)
969 		sum -= 0xFFFF;
970 	return (sum);
971 }
972 
973 static uint16_t
974 e82545_buf_checksum(uint8_t *buf, int len)
975 {
976 	int i;
977 	uint32_t sum = 0;
978 
979 	/* Checksum all the pairs of bytes first... */
980 	for (i = 0; i < (len & ~1); i += 2)
981 		sum += *((u_int16_t *)(buf + i));
982 
983 	/*
984 	 * If there's a single byte left over, checksum it, too.
985 	 * Network byte order is big-endian, so the remaining byte is
986 	 * the high byte.
987 	 */
988 	if (i < len)
989 		sum += htons(buf[i] << 8);
990 
991 	return (e82545_carry(sum));
992 }
993 
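/*
 * Compute the Internet checksum of 'len' bytes starting 'off' bytes into
 * the given I/O vector array.
 */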
994 static uint16_t
995 e82545_iov_checksum(struct iovec *iov, int iovcnt, unsigned int off,
996     unsigned int len)
997 {
998 	unsigned int now, odd;
999 	uint32_t sum = 0, s;
1000 
1001 	/* Skip completely unneeded vectors. */
1002 	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
1003 		off -= iov->iov_len;
1004 		iov++;
1005 		iovcnt--;
1006 	}
1007 
1008 	/* Calculate checksum of requested range. */
1009 	odd = 0;
1010 	while (len > 0 && iovcnt > 0) {
1011 		now = MIN(len, iov->iov_len - off);
1012 		s = e82545_buf_checksum((uint8_t *)iov->iov_base + off, now);
1013 		sum += odd ? (s << 8) : s;
1014 		odd ^= (now & 1);
1015 		len -= now;
1016 		off = 0;
1017 		iov++;
1018 		iovcnt--;
1019 	}
1020 
1021 	return (e82545_carry(sum));
1022 }
1023 
1024 /*
1025  * Return the transmit descriptor type.
1026  */
1027 static int
1028 e82545_txdesc_type(uint32_t lower)
1029 {
1030 	int type;
1031 
1032 	type = 0;
1033 
1034 	if (lower & E1000_TXD_CMD_DEXT)
1035 		type = lower & E1000_TXD_MASK;
1036 
1037 	return (type);
1038 }
1039 
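/*
 * Compute the checksum described by 'ck' and store its complement at
 * ck_off within the writable header held in the first vector.
 */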
1040 static void
1041 e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1042 {
1043 	uint16_t cksum;
1044 	unsigned int cklen;
1045 
1046 	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
1047 	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1048 	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1U : UINT_MAX;
1049 	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1050 	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1051 }
1052 
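/* Hand an assembled frame to the network backend, if one is attached. */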
1053 static void
1054 e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1055 {
1056 
1057 	if (sc->esc_be == NULL)
1058 		return;
1059 
1060 	(void) netbe_send(sc->esc_be, iov, iovcnt);
1061 }
1062 
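/*
 * Write back DD (descriptor done) status to processed descriptors that
 * requested it via the RS bit, noting in *tdwb whether any were updated.
 */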
1063 static void
1064 e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1065     uint16_t dsize, int *tdwb)
1066 {
1067 	union e1000_tx_udesc *dsc;
1068 
1069 	for ( ; head != tail; head = (head + 1) % dsize) {
1070 		dsc = &sc->esc_txdesc[head];
1071 		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1072 			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1073 			*tdwb = 1;
1074 		}
1075 	}
1076 }
1077 
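/*
 * Gather one packet from the descriptors between head and tail, apply any
 * requested checksum offload, VLAN insertion or TCP/UDP segmentation, and
 * hand the result to the backend.  Returns the number of descriptors
 * consumed (0 if the ring is empty) and the next head index via *rhead.
 */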
1078 static int
1079 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1080     uint16_t dsize, uint16_t *rhead, int *tdwb)
1081 {
1082 	uint8_t *hdr, *hdrp;
1083 	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1084 	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1085 	struct e1000_context_desc *cd;
1086 	struct ck_info ckinfo[2];
1087 	struct iovec *iov;
1088 	union  e1000_tx_udesc *dsc;
1089 	int desc, dtype, ntype, iovcnt, tcp, tso, paylen, seg, tiovcnt, pv;
1090 	unsigned hdrlen, vlen, pktlen, len, left, mss, now, nnow, nleft, pvoff;
1091 	uint32_t tcpsum, tcpseq;
1092 	uint16_t ipcs, tcpcs, ipid, ohead;
1093 	bool invalid;
1094 
1095 	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1096 	iovcnt = 0;
1097 	ntype = 0;
1098 	tso = 0;
1099 	pktlen = 0;
1100 	ohead = head;
1101 	invalid = false;
1102 
1103 	/* iovb[0/1] may be used for writable copy of headers. */
1104 	iov = &iovb[2];
1105 
1106 	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1107 		if (head == tail) {
1108 			*rhead = head;
1109 			return (0);
1110 		}
1111 		dsc = &sc->esc_txdesc[head];
1112 		dtype = e82545_txdesc_type(dsc->td.lower.data);
1113 
1114 		if (desc == 0) {
1115 			switch (dtype) {
1116 			case E1000_TXD_TYP_C:
1117 				DPRINTF("tx ctxt desc idx %d: %016jx "
1118 				    "%08x%08x",
1119 				    head, dsc->td.buffer_addr,
1120 				    dsc->td.upper.data, dsc->td.lower.data);
1121 				/* Save context and return */
1122 				sc->esc_txctx = dsc->cd;
1123 				goto done;
1124 			case E1000_TXD_TYP_L:
1125 				DPRINTF("tx legacy desc idx %d: %08x%08x",
1126 				    head, dsc->td.upper.data, dsc->td.lower.data);
1127 				/*
1128 				 * legacy cksum start valid in first descriptor
1129 				 */
1130 				ntype = dtype;
1131 				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1132 				break;
1133 			case E1000_TXD_TYP_D:
1134 				DPRINTF("tx data desc idx %d: %08x%08x",
1135 				    head, dsc->td.upper.data, dsc->td.lower.data);
1136 				ntype = dtype;
1137 				break;
1138 			default:
1139 				break;
1140 			}
1141 		} else {
1142 			/* Descriptor type must be consistent */
1143 			assert(dtype == ntype);
1144 			DPRINTF("tx next desc idx %d: %08x%08x",
1145 			    head, dsc->td.upper.data, dsc->td.lower.data);
1146 		}
1147 
1148 		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1149 		    dsc->dd.lower.data & 0xFFFFF;
1150 
1151 		/* Strip checksum supplied by guest. */
1152 		if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1153 		    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0) {
1154 			if (len <= 2) {
1155 				WPRINTF("final descriptor too short (%d) -- dropped",
1156 				    len);
1157 				invalid = true;
1158 			} else
1159 				len -= 2;
1160 		}
1161 
1162 		if (len > 0 && iovcnt < I82545_MAX_TXSEGS) {
1163 			iov[iovcnt].iov_base = paddr_guest2host(sc->esc_ctx,
1164 			    dsc->td.buffer_addr, len);
1165 			iov[iovcnt].iov_len = len;
1166 			iovcnt++;
1167 			pktlen += len;
1168 		}
1169 
1170 		/*
1171 		 * Pull out info that is valid in the final descriptor
1172 		 * and exit descriptor loop.
1173 		 */
1174 		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1175 			if (dtype == E1000_TXD_TYP_L) {
1176 				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1177 					ckinfo[0].ck_valid = 1;
1178 					ckinfo[0].ck_off =
1179 					    dsc->td.lower.flags.cso;
1180 					ckinfo[0].ck_len = 0;
1181 				}
1182 			} else {
1183 				cd = &sc->esc_txctx;
1184 				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1185 					tso = 1;
1186 				if (dsc->dd.upper.fields.popts &
1187 				    E1000_TXD_POPTS_IXSM)
1188 					ckinfo[0].ck_valid = 1;
1189 				if (dsc->dd.upper.fields.popts &
1190 				    E1000_TXD_POPTS_IXSM || tso) {
1191 					ckinfo[0].ck_start =
1192 					    cd->lower_setup.ip_fields.ipcss;
1193 					ckinfo[0].ck_off =
1194 					    cd->lower_setup.ip_fields.ipcso;
1195 					ckinfo[0].ck_len =
1196 					    cd->lower_setup.ip_fields.ipcse;
1197 				}
1198 				if (dsc->dd.upper.fields.popts &
1199 				    E1000_TXD_POPTS_TXSM)
1200 					ckinfo[1].ck_valid = 1;
1201 				if (dsc->dd.upper.fields.popts &
1202 				    E1000_TXD_POPTS_TXSM || tso) {
1203 					ckinfo[1].ck_start =
1204 					    cd->upper_setup.tcp_fields.tucss;
1205 					ckinfo[1].ck_off =
1206 					    cd->upper_setup.tcp_fields.tucso;
1207 					ckinfo[1].ck_len =
1208 					    cd->upper_setup.tcp_fields.tucse;
1209 				}
1210 			}
1211 			break;
1212 		}
1213 	}
1214 
1215 	if (invalid)
1216 		goto done;
1217 
1218 	if (iovcnt > I82545_MAX_TXSEGS) {
1219 		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
1220 		    iovcnt, I82545_MAX_TXSEGS);
1221 		goto done;
1222 	}
1223 
1224 	hdrlen = vlen = 0;
1225 	/* Estimate writable space for VLAN header insertion. */
1226 	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1227 	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1228 		hdrlen = ETHER_ADDR_LEN*2;
1229 		vlen = ETHER_VLAN_ENCAP_LEN;
1230 	}
1231 	if (!tso) {
1232 		/* Estimate required writable space for checksums. */
1233 		if (ckinfo[0].ck_valid)
1234 			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2U);
1235 		if (ckinfo[1].ck_valid)
1236 			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2U);
1237 		/* Round up writable space to the first vector. */
1238 		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1239 		    iov[0].iov_len < hdrlen + 100)
1240 			hdrlen = iov[0].iov_len;
1241 	} else {
1242 		/* For TSO, the header length is provided by software. */
1243 		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1244 
1245 		/*
1246 		 * Cap the header length at 240 based on 7.2.4.5 of
1247 		 * the Intel 82576EB (Rev 2.63) datasheet.
1248 		 */
1249 		if (hdrlen > 240) {
1250 			WPRINTF("TSO hdrlen too large: %d", hdrlen);
1251 			goto done;
1252 		}
1253 
1254 		/*
1255 		 * If VLAN insertion is requested, ensure the header
1256 		 * at least holds the amount of data copied during
1257 		 * VLAN insertion below.
1258 		 *
1259 		 * XXX: Realistic packets will include a full Ethernet
1260 		 * header before the IP header at ckinfo[0].ck_start,
1261 		 * but this check is sufficient to prevent
1262 		 * out-of-bounds access below.
1263 		 */
1264 		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
1265 			WPRINTF("TSO hdrlen too small for vlan insertion "
1266 			    "(%d vs %d) -- dropped", hdrlen,
1267 			    ETHER_ADDR_LEN*2);
1268 			goto done;
1269 		}
1270 
1271 		/*
1272 		 * Ensure that the header length covers the used fields
1273 		 * in the IP and TCP headers as well as the IP and TCP
1274 		 * checksums.  The following fields are accessed below:
1275 		 *
1276 		 * Header | Field | Offset | Length
1277 		 * -------+-------+--------+-------
1278 		 * IPv4   | len   | 2      | 2
1279 		 * IPv4   | ID    | 4      | 2
1280 		 * IPv6   | len   | 4      | 2
1281 		 * TCP    | seq # | 4      | 4
1282 		 * TCP    | flags | 13     | 1
1283 		 * UDP    | len   | 4      | 4
1284 		 */
1285 		if (hdrlen < ckinfo[0].ck_start + 6U ||
1286 		    hdrlen < ckinfo[0].ck_off + 2U) {
1287 			WPRINTF("TSO hdrlen too small for IP fields (%d) "
1288 			    "-- dropped", hdrlen);
1289 			goto done;
1290 		}
1291 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
1292 			if (hdrlen < ckinfo[1].ck_start + 14U) {
1293 				WPRINTF("TSO hdrlen too small for TCP fields "
1294 				    "(%d) -- dropped", hdrlen);
1295 				goto done;
1296 			}
1297 		} else {
1298 			if (hdrlen < ckinfo[1].ck_start + 8U) {
1299 				WPRINTF("TSO hdrlen too small for UDP fields "
1300 				    "(%d) -- dropped", hdrlen);
1301 				goto done;
1302 			}
1303 		}
1304 		if (ckinfo[1].ck_valid && hdrlen < ckinfo[1].ck_off + 2U) {
1305 			WPRINTF("TSO hdrlen too small for TCP/UDP fields "
1306 			    "(%d) -- dropped", hdrlen);
1307 			goto done;
1308 		}
1309 	}
1310 
1311 	if (pktlen < hdrlen + vlen) {
1312 		WPRINTF("packet too small for writable header");
1313 		goto done;
1314 	}
1315 
1316 	/* Allocate, fill and prepend writable header vector. */
1317 	if (hdrlen + vlen != 0) {
1318 		hdr = __builtin_alloca(hdrlen + vlen);
1319 		hdr += vlen;
1320 		for (left = hdrlen, hdrp = hdr; left > 0;
1321 		    left -= now, hdrp += now) {
1322 			now = MIN(left, iov->iov_len);
1323 			memcpy(hdrp, iov->iov_base, now);
1324 			iov->iov_base = (uint8_t *)iov->iov_base + now;
1325 			iov->iov_len -= now;
1326 			if (iov->iov_len == 0) {
1327 				iov++;
1328 				iovcnt--;
1329 			}
1330 		}
1331 		iov--;
1332 		iovcnt++;
1333 		iov->iov_base = hdr;
1334 		iov->iov_len = hdrlen;
1335 	} else
1336 		hdr = NULL;
1337 
1338 	/* Insert VLAN tag. */
1339 	if (vlen != 0) {
1340 		hdr -= ETHER_VLAN_ENCAP_LEN;
1341 		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1342 		hdrlen += ETHER_VLAN_ENCAP_LEN;
1343 		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1344 		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1345 		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1346 		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1347 		iov->iov_base = hdr;
1348 		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1349 		/* Correct checksum offsets after VLAN tag insertion. */
1350 		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1351 		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1352 		if (ckinfo[0].ck_len != 0)
1353 			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1354 		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1355 		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1356 		if (ckinfo[1].ck_len != 0)
1357 			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1358 	}
1359 
1360 	/* Simple non-TSO case. */
1361 	if (!tso) {
1362 		/* Calculate checksums and transmit. */
1363 		if (ckinfo[0].ck_valid)
1364 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1365 		if (ckinfo[1].ck_valid)
1366 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1367 		e82545_transmit_backend(sc, iov, iovcnt);
1368 		goto done;
1369 	}
1370 
1371 	/* Doing TSO. */
1372 	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1373 	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1374 	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1375 	DPRINTF("tx %s segmentation offload %d+%d/%u bytes %d iovs",
1376 	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1377 	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1378 	tcpseq = 0;
1379 	if (tcp)
1380 		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1381 	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1382 	tcpcs = 0;
1383 	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1384 		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1385 	pv = 1;
1386 	pvoff = 0;
1387 	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1388 		now = MIN(left, mss);
1389 
1390 		/* Construct IOVs for the segment. */
1391 		/* Include whole original header. */
1392 		tiov[0].iov_base = hdr;
1393 		tiov[0].iov_len = hdrlen;
1394 		tiovcnt = 1;
1395 		/* Include respective part of payload IOV. */
1396 		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1397 			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1398 			tiov[tiovcnt].iov_base = (uint8_t *)iov[pv].iov_base +
1399 			    pvoff;
1400 			tiov[tiovcnt++].iov_len = nnow;
1401 			if (pvoff + nnow == iov[pv].iov_len) {
1402 				pv++;
1403 				pvoff = 0;
1404 			} else
1405 				pvoff += nnow;
1406 		}
1407 		DPRINTF("tx segment %d %d+%d bytes %d iovs",
1408 		    seg, hdrlen, now, tiovcnt);
1409 
1410 		/* Update IP header. */
1411 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1412 			/* IPv4 -- set length and ID */
1413 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1414 			    htons(hdrlen - ckinfo[0].ck_start + now);
1415 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1416 			    htons(ipid + seg);
1417 		} else {
1418 			/* IPv6 -- set length */
1419 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1420 			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1421 				  now);
1422 		}
1423 
1424 		/* Update pseudo-header checksum. */
1425 		tcpsum = tcpcs;
1426 		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1427 
1428 		/* Update TCP/UDP headers. */
1429 		if (tcp) {
1430 			/* Update sequence number and FIN/PUSH flags. */
1431 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1432 			    htonl(tcpseq + paylen - left);
1433 			if (now < left) {
1434 				hdr[ckinfo[1].ck_start + 13] &=
1435 				    ~(TH_FIN | TH_PUSH);
1436 			}
1437 		} else {
1438 			/* Update payload length. */
1439 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1440 			    hdrlen - ckinfo[1].ck_start + now;
1441 		}
1442 
1443 		/* Calculate checksums and transmit. */
1444 		if (ckinfo[0].ck_valid) {
1445 			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1446 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1447 		}
1448 		if (ckinfo[1].ck_valid) {
1449 			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1450 			    e82545_carry(tcpsum);
1451 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1452 		}
1453 		e82545_transmit_backend(sc, tiov, tiovcnt);
1454 	}
1455 
1456 done:
1457 	head = (head + 1) % dsize;
1458 	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1459 
1460 	*rhead = head;
1461 	return (desc + 1);
1462 }
1463 
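/*
 * Process a bounded batch of transmit descriptors between TDH and TDT.
 * Called with the softc mutex held; the lock is dropped while packets are
 * assembled and sent, and TXDW/TXQE interrupts are asserted as needed.
 */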
1464 static void
1465 e82545_tx_run(struct e82545_softc *sc)
1466 {
1467 	uint32_t cause;
1468 	uint16_t head, rhead, tail, size;
1469 	int lim, tdwb, sent;
1470 
1471 	size = sc->esc_TDLEN / 16;
1472 	if (size == 0)
1473 		return;
1474 
1475 	head = sc->esc_TDH % size;
1476 	tail = sc->esc_TDT % size;
1477 	DPRINTF("tx_run: head %x, rhead %x, tail %x",
1478 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1479 
1480 	pthread_mutex_unlock(&sc->esc_mtx);
1481 	rhead = head;
1482 	tdwb = 0;
1483 	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1484 		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1485 		if (sent == 0)
1486 			break;
1487 		head = rhead;
1488 	}
1489 	pthread_mutex_lock(&sc->esc_mtx);
1490 
1491 	sc->esc_TDH = head;
1492 	sc->esc_TDHr = rhead;
1493 	cause = 0;
1494 	if (tdwb)
1495 		cause |= E1000_ICR_TXDW;
1496 	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1497 		cause |= E1000_ICR_TXQE;
1498 	if (cause)
1499 		e82545_icr_assert(sc, cause);
1500 
1501 	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
1502 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1503 }
1504 
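/*
 * Dedicated transmit thread: sleep until transmit is enabled and new
 * descriptors have been posted, then process the ring.
 */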
1505 static _Noreturn void *
1506 e82545_tx_thread(void *param)
1507 {
1508 	struct e82545_softc *sc = param;
1509 
1510 	pthread_mutex_lock(&sc->esc_mtx);
1511 	for (;;) {
1512 		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1513 			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1514 				break;
1515 			sc->esc_tx_active = 0;
1516 			if (sc->esc_tx_enabled == 0)
1517 				pthread_cond_signal(&sc->esc_tx_cond);
1518 			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1519 		}
1520 		sc->esc_tx_active = 1;
1521 
1522 		/* Process some tx descriptors.  Lock dropped inside. */
1523 		e82545_tx_run(sc);
1524 	}
1525 }
1526 
1527 static void
1528 e82545_tx_start(struct e82545_softc *sc)
1529 {
1530 
1531 	if (sc->esc_tx_active == 0)
1532 		pthread_cond_signal(&sc->esc_tx_cond);
1533 }
1534 
1535 static void
1536 e82545_tx_enable(struct e82545_softc *sc)
1537 {
1538 
1539 	sc->esc_tx_enabled = 1;
1540 }
1541 
1542 static void
1543 e82545_tx_disable(struct e82545_softc *sc)
1544 {
1545 
1546 	sc->esc_tx_enabled = 0;
1547 	while (sc->esc_tx_active)
1548 		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1549 }
1550 
1551 static void
1552 e82545_rx_enable(struct e82545_softc *sc)
1553 {
1554 
1555 	sc->esc_rx_enabled = 1;
1556 }
1557 
1558 static void
1559 e82545_rx_disable(struct e82545_softc *sc)
1560 {
1561 
1562 	sc->esc_rx_enabled = 0;
1563 	while (sc->esc_rx_active)
1564 		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1565 }
1566 
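/* Update the software copy of one half (RAL or RAH) of a receive address pair. */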
1567 static void
1568 e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1569 {
1570 	struct eth_uni *eu;
1571 	int idx;
1572 
1573 	idx = reg >> 1;
1574 	assert(idx < 15);
1575 
1576 	eu = &sc->esc_uni[idx];
1577 
1578 	if (reg & 0x1) {
1579 		/* RAH */
1580 		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1581 		eu->eu_addrsel = (wval >> 16) & 0x3;
1582 		eu->eu_eth.octet[5] = wval >> 8;
1583 		eu->eu_eth.octet[4] = wval;
1584 	} else {
1585 		/* RAL */
1586 		eu->eu_eth.octet[3] = wval >> 24;
1587 		eu->eu_eth.octet[2] = wval >> 16;
1588 		eu->eu_eth.octet[1] = wval >> 8;
1589 		eu->eu_eth.octet[0] = wval;
1590 	}
1591 }
1592 
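/* Read back the software copy of one half (RAL or RAH) of a receive address pair. */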
1593 static uint32_t
1594 e82545_read_ra(struct e82545_softc *sc, int reg)
1595 {
1596 	struct eth_uni *eu;
1597 	uint32_t retval;
1598 	int idx;
1599 
1600 	idx = reg >> 1;
1601 	assert(idx < 15);
1602 
1603 	eu = &sc->esc_uni[idx];
1604 
1605 	if (reg & 0x1) {
1606 		/* RAH */
1607 		retval = (eu->eu_valid << 31) |
1608 			 (eu->eu_addrsel << 16) |
1609 			 (eu->eu_eth.octet[5] << 8) |
1610 			 eu->eu_eth.octet[4];
1611 	} else {
1612 		/* RAL */
1613 		retval = (eu->eu_eth.octet[3] << 24) |
1614 			 (eu->eu_eth.octet[2] << 16) |
1615 			 (eu->eu_eth.octet[1] << 8) |
1616 			 eu->eu_eth.octet[0];
1617 	}
1618 
1619 	return (retval);
1620 }
1621 
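/* Dispatch a 4-byte aligned MMIO register write; unaligned writes are dropped. */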
1622 static void
1623 e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1624 {
1625 	int ridx;
1626 
1627 	if (offset & 0x3) {
1628 		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
1629 		return;
1630 	}
1631 	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
1632 
1633 	switch (offset) {
1634 	case E1000_CTRL:
1635 	case E1000_CTRL_DUP:
1636 		e82545_devctl(sc, value);
1637 		break;
1638 	case E1000_FCAL:
1639 		sc->esc_FCAL = value;
1640 		break;
1641 	case E1000_FCAH:
1642 		sc->esc_FCAH = value & ~0xFFFF0000;
1643 		break;
1644 	case E1000_FCT:
1645 		sc->esc_FCT = value & ~0xFFFF0000;
1646 		break;
1647 	case E1000_VET:
1648 		sc->esc_VET = value & ~0xFFFF0000;
1649 		break;
1650 	case E1000_FCTTV:
1651 		sc->esc_FCTTV = value & ~0xFFFF0000;
1652 		break;
1653 	case E1000_LEDCTL:
1654 		sc->esc_LEDCTL = value & ~0x30303000;
1655 		break;
1656 	case E1000_PBA:
1657 		sc->esc_PBA = value & 0x0000FF80;
1658 		break;
1659 	case E1000_ICR:
1660 	case E1000_ITR:
1661 	case E1000_ICS:
1662 	case E1000_IMS:
1663 	case E1000_IMC:
1664 		e82545_intr_write(sc, offset, value);
1665 		break;
1666 	case E1000_RCTL:
1667 		e82545_rx_ctl(sc, value);
1668 		break;
1669 	case E1000_FCRTL:
1670 		sc->esc_FCRTL = value & ~0xFFFF0007;
1671 		break;
1672 	case E1000_FCRTH:
1673 		sc->esc_FCRTH = value & ~0xFFFF0007;
1674 		break;
1675 	case E1000_RDBAL(0):
1676 		sc->esc_RDBAL = value & ~0xF;
1677 		if (sc->esc_rx_enabled) {
1678 			/* Apparently legal: update cached address */
1679 			e82545_rx_update_rdba(sc);
1680 		}
1681 		break;
1682 	case E1000_RDBAH(0):
1683 		assert(!sc->esc_rx_enabled);
1684 		sc->esc_RDBAH = value;
1685 		break;
1686 	case E1000_RDLEN(0):
1687 		assert(!sc->esc_rx_enabled);
1688 		sc->esc_RDLEN = value & ~0xFFF0007F;
1689 		break;
1690 	case E1000_RDH(0):
1691 		/* XXX should only ever be zero ? Range check ? */
1692 		sc->esc_RDH = value;
1693 		break;
1694 	case E1000_RDT(0):
1695 		/* XXX if this opens up the rx ring, do something ? */
1696 		sc->esc_RDT = value;
1697 		break;
1698 	case E1000_RDTR:
1699 		/* ignore FPD bit 31 */
1700 		sc->esc_RDTR = value & ~0xFFFF0000;
1701 		break;
1702 	case E1000_RXDCTL(0):
1703 		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1704 		break;
1705 	case E1000_RADV:
1706 		sc->esc_RADV = value & ~0xFFFF0000;
1707 		break;
1708 	case E1000_RSRPD:
1709 		sc->esc_RSRPD = value & ~0xFFFFF000;
1710 		break;
1711 	case E1000_RXCSUM:
1712 		sc->esc_RXCSUM = value & ~0xFFFFF800;
1713 		break;
1714 	case E1000_TXCW:
1715 		sc->esc_TXCW = value & ~0x3FFF0000;
1716 		break;
1717 	case E1000_TCTL:
1718 		e82545_tx_ctl(sc, value);
1719 		break;
1720 	case E1000_TIPG:
1721 		sc->esc_TIPG = value;
1722 		break;
1723 	case E1000_AIT:
1724 		sc->esc_AIT = value;
1725 		break;
1726 	case E1000_TDBAL(0):
1727 		sc->esc_TDBAL = value & ~0xF;
1728 		if (sc->esc_tx_enabled)
1729 			e82545_tx_update_tdba(sc);
1730 		break;
1731 	case E1000_TDBAH(0):
1732 		sc->esc_TDBAH = value;
1733 		if (sc->esc_tx_enabled)
1734 			e82545_tx_update_tdba(sc);
1735 		break;
1736 	case E1000_TDLEN(0):
1737 		sc->esc_TDLEN = value & ~0xFFF0007F;
1738 		if (sc->esc_tx_enabled)
1739 			e82545_tx_update_tdba(sc);
1740 		break;
1741 	case E1000_TDH(0):
1742 		if (sc->esc_tx_enabled) {
1743 			WPRINTF("ignoring write to TDH while transmit enabled");
1744 			break;
1745 		}
1746 		if (value != 0) {
1747 			WPRINTF("ignoring non-zero value written to TDH");
1748 			break;
1749 		}
1750 		sc->esc_TDHr = sc->esc_TDH = value;
1751 		break;
1752 	case E1000_TDT(0):
1753 		sc->esc_TDT = value;
1754 		if (sc->esc_tx_enabled)
1755 			e82545_tx_start(sc);
1756 		break;
1757 	case E1000_TIDV:
1758 		sc->esc_TIDV = value & ~0xFFFF0000;
1759 		break;
1760 	case E1000_TXDCTL(0):
1761 		//assert(!sc->esc_tx_enabled);
1762 		sc->esc_TXDCTL = value & ~0xC0C0C0;
1763 		break;
1764 	case E1000_TADV:
1765 		sc->esc_TADV = value & ~0xFFFF0000;
1766 		break;
1767 	case E1000_RAL(0) ... E1000_RAH(15):
1768 		/* convert to u32 offset */
1769 		ridx = (offset - E1000_RAL(0)) >> 2;
1770 		e82545_write_ra(sc, ridx, value);
1771 		break;
1772 	case E1000_MTA ... (E1000_MTA + (127*4)):
1773 		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1774 		break;
1775 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1776 		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1777 		break;
1778 	case E1000_EECD:
1779 	{
1780 		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
1781 		/* edge triggered low->high */
1782 		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1783 			0 : (value & E1000_EECD_SK));
1784 		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1785 					E1000_EECD_DI|E1000_EECD_REQ);
1786 		sc->eeprom_control &= ~eecd_mask;
1787 		sc->eeprom_control |= (value & eecd_mask);
1788 		/* grant/revoke immediately */
1789 		if (value & E1000_EECD_REQ) {
1790 			sc->eeprom_control |= E1000_EECD_GNT;
1791 		} else {
1792                         sc->eeprom_control &= ~E1000_EECD_GNT;
1793 		}
1794 		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1795 			e82545_eecd_strobe(sc);
1796 		}
1797 		return;
1798 	}
1799 	case E1000_MDIC:
1800 	{
1801 		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1802 						E1000_MDIC_REG_SHIFT);
1803 		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1804 						E1000_MDIC_PHY_SHIFT);
1805 		sc->mdi_control =
1806 			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1807 		if ((value & E1000_MDIC_READY) != 0) {
1808 			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
1809 			return;
1810 		}
1811 		switch (value & E82545_MDIC_OP_MASK) {
1812 		case E1000_MDIC_OP_READ:
1813 			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1814 			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1815 			break;
1816 		case E1000_MDIC_OP_WRITE:
1817 			e82545_write_mdi(sc, reg_addr, phy_addr,
1818 				value & E82545_MDIC_DATA_MASK);
1819 			break;
1820 		default:
1821 			DPRINTF("Unknown MDIC op: 0x%x", value);
1822 			return;
1823 		}
1824 		/* TODO: barrier? */
1825 		sc->mdi_control |= E1000_MDIC_READY;
1826 		if (value & E82545_MDIC_IE) {
1827 			// TODO: generate interrupt
1828 		}
1829 		return;
1830 	}
1831 	case E1000_MANC:
1832 	case E1000_STATUS:
1833 		return;
1834 	default:
1835 		DPRINTF("Unknown write register: 0x%x value:%x", offset, value);
1836 		return;
1837 	}
1838 }
1839 
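/*
 * Handle an aligned 4-byte read of a device register.  Unimplemented
 * statistics and management registers read as zero.
 */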
1840 static uint32_t
1841 e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1842 {
1843 	uint32_t retval;
1844 	int ridx;
1845 
1846 	if (offset & 0x3) {
1847 		DPRINTF("Unaligned register read offset:0x%x", offset);
1848 		return 0;
1849 	}
1850 
1851 	DPRINTF("Register read: 0x%x", offset);
1852 
1853 	switch (offset) {
1854 	case E1000_CTRL:
1855 		retval = sc->esc_CTRL;
1856 		break;
1857 	case E1000_STATUS:
1858 		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1859 		    E1000_STATUS_SPEED_1000;
1860 		break;
1861 	case E1000_FCAL:
1862 		retval = sc->esc_FCAL;
1863 		break;
1864 	case E1000_FCAH:
1865 		retval = sc->esc_FCAH;
1866 		break;
1867 	case E1000_FCT:
1868 		retval = sc->esc_FCT;
1869 		break;
1870 	case E1000_VET:
1871 		retval = sc->esc_VET;
1872 		break;
1873 	case E1000_FCTTV:
1874 		retval = sc->esc_FCTTV;
1875 		break;
1876 	case E1000_LEDCTL:
1877 		retval = sc->esc_LEDCTL;
1878 		break;
1879 	case E1000_PBA:
1880 		retval = sc->esc_PBA;
1881 		break;
1882 	case E1000_ICR:
1883 	case E1000_ITR:
1884 	case E1000_ICS:
1885 	case E1000_IMS:
1886 	case E1000_IMC:
1887 		retval = e82545_intr_read(sc, offset);
1888 		break;
1889 	case E1000_RCTL:
1890 		retval = sc->esc_RCTL;
1891 		break;
1892 	case E1000_FCRTL:
1893 		retval = sc->esc_FCRTL;
1894 		break;
1895 	case E1000_FCRTH:
1896 		retval = sc->esc_FCRTH;
1897 		break;
1898 	case E1000_RDBAL(0):
1899 		retval = sc->esc_RDBAL;
1900 		break;
1901 	case E1000_RDBAH(0):
1902 		retval = sc->esc_RDBAH;
1903 		break;
1904 	case E1000_RDLEN(0):
1905 		retval = sc->esc_RDLEN;
1906 		break;
1907 	case E1000_RDH(0):
1908 		retval = sc->esc_RDH;
1909 		break;
1910 	case E1000_RDT(0):
1911 		retval = sc->esc_RDT;
1912 		break;
1913 	case E1000_RDTR:
1914 		retval = sc->esc_RDTR;
1915 		break;
1916 	case E1000_RXDCTL(0):
1917 		retval = sc->esc_RXDCTL;
1918 		break;
1919 	case E1000_RADV:
1920 		retval = sc->esc_RADV;
1921 		break;
1922 	case E1000_RSRPD:
1923 		retval = sc->esc_RSRPD;
1924 		break;
1925 	case E1000_RXCSUM:
1926 		retval = sc->esc_RXCSUM;
1927 		break;
1928 	case E1000_TXCW:
1929 		retval = sc->esc_TXCW;
1930 		break;
1931 	case E1000_TCTL:
1932 		retval = sc->esc_TCTL;
1933 		break;
1934 	case E1000_TIPG:
1935 		retval = sc->esc_TIPG;
1936 		break;
1937 	case E1000_AIT:
1938 		retval = sc->esc_AIT;
1939 		break;
1940 	case E1000_TDBAL(0):
1941 		retval = sc->esc_TDBAL;
1942 		break;
1943 	case E1000_TDBAH(0):
1944 		retval = sc->esc_TDBAH;
1945 		break;
1946 	case E1000_TDLEN(0):
1947 		retval = sc->esc_TDLEN;
1948 		break;
1949 	case E1000_TDH(0):
1950 		retval = sc->esc_TDH;
1951 		break;
1952 	case E1000_TDT(0):
1953 		retval = sc->esc_TDT;
1954 		break;
1955 	case E1000_TIDV:
1956 		retval = sc->esc_TIDV;
1957 		break;
1958 	case E1000_TXDCTL(0):
1959 		retval = sc->esc_TXDCTL;
1960 		break;
1961 	case E1000_TADV:
1962 		retval = sc->esc_TADV;
1963 		break;
1964 	case E1000_RAL(0) ... E1000_RAH(15):
1965 		/* convert to u32 offset */
1966 		ridx = (offset - E1000_RAL(0)) >> 2;
1967 		retval = e82545_read_ra(sc, ridx);
1968 		break;
1969 	case E1000_MTA ... (E1000_MTA + (127*4)):
1970 		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
1971 		break;
1972 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1973 		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
1974 		break;
1975 	case E1000_EECD:
1976 		//DPRINTF("EECD read %x", sc->eeprom_control);
1977 		retval = sc->eeprom_control;
1978 		break;
1979 	case E1000_MDIC:
1980 		retval = sc->mdi_control;
1981 		break;
1982 	case E1000_MANC:
1983 		retval = 0;
1984 		break;
1985 	/* stats that we emulate. */
1986 	case E1000_MPC:
1987 		retval = sc->missed_pkt_count;
1988 		break;
1989 	case E1000_PRC64:
1990 		retval = sc->pkt_rx_by_size[0];
1991 		break;
1992 	case E1000_PRC127:
1993 		retval = sc->pkt_rx_by_size[1];
1994 		break;
1995 	case E1000_PRC255:
1996 		retval = sc->pkt_rx_by_size[2];
1997 		break;
1998 	case E1000_PRC511:
1999 		retval = sc->pkt_rx_by_size[3];
2000 		break;
2001 	case E1000_PRC1023:
2002 		retval = sc->pkt_rx_by_size[4];
2003 		break;
2004 	case E1000_PRC1522:
2005 		retval = sc->pkt_rx_by_size[5];
2006 		break;
2007 	case E1000_GPRC:
2008 		retval = sc->good_pkt_rx_count;
2009 		break;
2010 	case E1000_BPRC:
2011 		retval = sc->bcast_pkt_rx_count;
2012 		break;
2013 	case E1000_MPRC:
2014 		retval = sc->mcast_pkt_rx_count;
2015 		break;
2016 	case E1000_GPTC:
2017 	case E1000_TPT:
2018 		retval = sc->good_pkt_tx_count;
2019 		break;
2020 	case E1000_GORCL:
2021 		retval = (uint32_t)sc->good_octets_rx;
2022 		break;
2023 	case E1000_GORCH:
2024 		retval = (uint32_t)(sc->good_octets_rx >> 32);
2025 		break;
2026 	case E1000_TOTL:
2027 	case E1000_GOTCL:
2028 		retval = (uint32_t)sc->good_octets_tx;
2029 		break;
2030 	case E1000_TOTH:
2031 	case E1000_GOTCH:
2032 		retval = (uint32_t)(sc->good_octets_tx >> 32);
2033 		break;
2034 	case E1000_ROC:
2035 		retval = sc->oversize_rx_count;
2036 		break;
2037 	case E1000_TORL:
2038 		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
2039 		break;
2040 	case E1000_TORH:
2041 		retval = (uint32_t)((sc->good_octets_rx +
2042 		    sc->missed_octets) >> 32);
2043 		break;
2044 	case E1000_TPR:
2045 		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
2046 		    sc->oversize_rx_count;
2047 		break;
2048 	case E1000_PTC64:
2049 		retval = sc->pkt_tx_by_size[0];
2050 		break;
2051 	case E1000_PTC127:
2052 		retval = sc->pkt_tx_by_size[1];
2053 		break;
2054 	case E1000_PTC255:
2055 		retval = sc->pkt_tx_by_size[2];
2056 		break;
2057 	case E1000_PTC511:
2058 		retval = sc->pkt_tx_by_size[3];
2059 		break;
2060 	case E1000_PTC1023:
2061 		retval = sc->pkt_tx_by_size[4];
2062 		break;
2063 	case E1000_PTC1522:
2064 		retval = sc->pkt_tx_by_size[5];
2065 		break;
2066 	case E1000_MPTC:
2067 		retval = sc->mcast_pkt_tx_count;
2068 		break;
2069 	case E1000_BPTC:
2070 		retval = sc->bcast_pkt_tx_count;
2071 		break;
2072 	case E1000_TSCTC:
2073 		retval = sc->tso_tx_count;
2074 		break;
2075 	/* stats that are always 0. */
2076 	case E1000_CRCERRS:
2077 	case E1000_ALGNERRC:
2078 	case E1000_SYMERRS:
2079 	case E1000_RXERRC:
2080 	case E1000_SCC:
2081 	case E1000_ECOL:
2082 	case E1000_MCC:
2083 	case E1000_LATECOL:
2084 	case E1000_COLC:
2085 	case E1000_DC:
2086 	case E1000_TNCRS:
2087 	case E1000_SEC:
2088 	case E1000_CEXTERR:
2089 	case E1000_RLEC:
2090 	case E1000_XONRXC:
2091 	case E1000_XONTXC:
2092 	case E1000_XOFFRXC:
2093 	case E1000_XOFFTXC:
2094 	case E1000_FCRUC:
2095 	case E1000_RNBC:
2096 	case E1000_RUC:
2097 	case E1000_RFC:
2098 	case E1000_RJC:
2099 	case E1000_MGTPRC:
2100 	case E1000_MGTPDC:
2101 	case E1000_MGTPTC:
2102 	case E1000_TSCTFC:
2103 		retval = 0;
2104 		break;
2105 	default:
2106 		DPRINTF("Unknown read register: 0x%x", offset);
2107 		retval = 0;
2108 		break;
2109 	}
2110 
2111 	return (retval);
2112 }
2113 
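/*
 * BAR write handler.  The I/O BAR implements the indirect IOADDR/IODATA
 * window; the register BAR maps the register space directly.  Both paths
 * end up in e82545_write_register().
 */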
2114 static void
2115 e82545_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
2116     uint64_t value)
2117 {
2118 	struct e82545_softc *sc;
2119 
2120 	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
2121 
2122 	sc = pi->pi_arg;
2123 
2124 	pthread_mutex_lock(&sc->esc_mtx);
2125 
2126 	switch (baridx) {
2127 	case E82545_BAR_IO:
2128 		switch (offset) {
2129 		case E82545_IOADDR:
2130 			if (size != 4) {
2131 				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
2132 			} else
2133 				sc->io_addr = (uint32_t)value;
2134 			break;
2135 		case E82545_IODATA:
2136 			if (size != 4) {
2137 				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
2138 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2139 				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
2140 			} else
2141 				e82545_write_register(sc, sc->io_addr,
2142 						      (uint32_t)value);
2143 			break;
2144 		default:
2145 			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
2146 			break;
2147 		}
2148 		break;
2149 	case E82545_BAR_REGISTER:
2150 		if (size != 4) {
2151 			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
2152 		} else
2153 			e82545_write_register(sc, (uint32_t)offset,
2154 					      (uint32_t)value);
2155 		break;
2156 	default:
2157 		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
2158 			baridx, offset, value, size);
2159 	}
2160 
2161 	pthread_mutex_unlock(&sc->esc_mtx);
2162 }
2163 
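/*
 * BAR read handler; mirrors e82545_write() for the same BARs.
 */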
2164 static uint64_t
2165 e82545_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
2166 {
2167 	struct e82545_softc *sc;
2168 	uint64_t retval;
2169 
2170 	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
2171 	sc = pi->pi_arg;
2172 	retval = 0;
2173 
2174 	pthread_mutex_lock(&sc->esc_mtx);
2175 
2176 	switch (baridx) {
2177 	case E82545_BAR_IO:
2178 		switch (offset) {
2179 		case E82545_IOADDR:
2180 			if (size != 4) {
2181 				DPRINTF("Wrong io addr read sz:%d", size);
2182 			} else
2183 				retval = sc->io_addr;
2184 			break;
2185 		case E82545_IODATA:
2186 			if (size != 4) {
2187 				DPRINTF("Wrong io data read sz:%d", size);
2188 			}
2189 			if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2190 				DPRINTF("Non-register io read addr:0x%x",
2191 					sc->io_addr);
2192 			} else
2193 				retval = e82545_read_register(sc, sc->io_addr);
2194 			break;
2195 		default:
2196 			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
2197 				offset, size);
2198 			break;
2199 		}
2200 		break;
2201 	case E82545_BAR_REGISTER:
2202 		if (size != 4) {
2203 			DPRINTF("Wrong register read size:%d offset:0x%lx",
2204 				size, offset);
2205 		} else
2206 			retval = e82545_read_register(sc, (uint32_t)offset);
2207 		break;
2208 	default:
2209 		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
2210 			baridx, offset, size);
2211 		break;
2212 	}
2213 
2214 	pthread_mutex_unlock(&sc->esc_mtx);
2215 
2216 	return (retval);
2217 }
2218 
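/*
 * Reset device state.  A non-zero 'drvr' indicates a driver-initiated
 * (software) reset, which preserves the flow control, VLAN, L2 filter and
 * descriptor base registers that only a hardware reset clears.
 */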
2219 static void
2220 e82545_reset(struct e82545_softc *sc, int drvr)
2221 {
2222 	int i;
2223 
2224 	e82545_rx_disable(sc);
2225 	e82545_tx_disable(sc);
2226 
2227 	/* clear outstanding interrupts */
2228 	if (sc->esc_irq_asserted)
2229 		pci_lintr_deassert(sc->esc_pi);
2230 
2231 	/* misc */
2232 	if (!drvr) {
2233 		sc->esc_FCAL = 0;
2234 		sc->esc_FCAH = 0;
2235 		sc->esc_FCT = 0;
2236 		sc->esc_VET = 0;
2237 		sc->esc_FCTTV = 0;
2238 	}
2239 	sc->esc_LEDCTL = 0x07061302;
2240 	sc->esc_PBA = 0x00100030;
2241 
2242 	/* start nvm in opcode mode. */
2243 	sc->nvm_opaddr = 0;
2244 	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2245 	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2246 	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2247 	e82545_init_eeprom(sc);
2248 
2249 	/* interrupt */
2250 	sc->esc_ICR = 0;
2251 	sc->esc_ITR = 250;
2252 	sc->esc_ICS = 0;
2253 	sc->esc_IMS = 0;
2254 	sc->esc_IMC = 0;
2255 
2256 	/* L2 filters */
2257 	if (!drvr) {
2258 		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2259 		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2260 		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2261 
2262 		/* XXX not necessary on 82545 ?? */
2263 		sc->esc_uni[0].eu_valid = 1;
2264 		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2265 		    ETHER_ADDR_LEN);
2266 	} else {
2267 		/* Clear RAH valid bits */
2268 		for (i = 0; i < 16; i++)
2269 			sc->esc_uni[i].eu_valid = 0;
2270 	}
2271 
2272 	/* receive */
2273 	if (!drvr) {
2274 		sc->esc_RDBAL = 0;
2275 		sc->esc_RDBAH = 0;
2276 	}
2277 	sc->esc_RCTL = 0;
2278 	sc->esc_FCRTL = 0;
2279 	sc->esc_FCRTH = 0;
2280 	sc->esc_RDLEN = 0;
2281 	sc->esc_RDH = 0;
2282 	sc->esc_RDT = 0;
2283 	sc->esc_RDTR = 0;
2284 	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2285 	sc->esc_RADV = 0;
2286 	sc->esc_RXCSUM = 0;
2287 
2288 	/* transmit */
2289 	if (!drvr) {
2290 		sc->esc_TDBAL = 0;
2291 		sc->esc_TDBAH = 0;
2292 		sc->esc_TIPG = 0;
2293 		sc->esc_AIT = 0;
2294 		sc->esc_TIDV = 0;
2295 		sc->esc_TADV = 0;
2296 	}
2297 	sc->esc_tdba = 0;
2298 	sc->esc_txdesc = NULL;
2299 	sc->esc_TXCW = 0;
2300 	sc->esc_TCTL = 0;
2301 	sc->esc_TDLEN = 0;
2302 	sc->esc_TDT = 0;
2303 	sc->esc_TDHr = sc->esc_TDH = 0;
2304 	sc->esc_TXDCTL = 0;
2305 }
2306 
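/*
 * Instantiate the device: allocate the softc, start the transmit thread,
 * fill in PCI config space, allocate the BARs, set up the MAC address and
 * attach the network backend.
 */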
2307 static int
2308 e82545_init(struct pci_devinst *pi, nvlist_t *nvl)
2309 {
2310 	char nstr[80];
2311 	struct e82545_softc *sc;
2312 	const char *mac;
2313 	int err;
2314 
2315 	/* Setup our softc */
2316 	sc = calloc(1, sizeof(*sc));
2317 
2318 	pi->pi_arg = sc;
2319 	sc->esc_pi = pi;
2320 	sc->esc_ctx = pi->pi_vmctx;
2321 
2322 	pthread_mutex_init(&sc->esc_mtx, NULL);
2323 	pthread_cond_init(&sc->esc_rx_cond, NULL);
2324 	pthread_cond_init(&sc->esc_tx_cond, NULL);
2325 	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2326 	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2327 	    pi->pi_func);
2328 	pthread_set_name_np(sc->esc_tx_tid, nstr);
2329 
2330 	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2331 	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2332 	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2333 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2334 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2335 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2336 
2337 	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2338 	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2339 
2340 	/* TODO: this card also supports MSI, but the FreeBSD driver for it
2341 	 * does not, so it is not implemented here. */
2342 	pci_lintr_request(pi);
2343 
2344 	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2345 		E82545_BAR_REGISTER_LEN);
2346 	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2347 		E82545_BAR_FLASH_LEN);
2348 	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2349 		E82545_BAR_IO_LEN);
2350 
2351 	mac = get_config_value_node(nvl, "mac");
2352 	if (mac != NULL) {
2353 		err = net_parsemac(mac, sc->esc_mac.octet);
2354 		if (err) {
2355 			free(sc);
2356 			return (err);
2357 		}
2358 	} else
2359 		net_genmac(pi, sc->esc_mac.octet);
2360 
2361 	err = netbe_init(&sc->esc_be, nvl, e82545_rx_callback, sc);
2362 	if (err) {
2363 		free(sc);
2364 		return (err);
2365 	}
2366 
2367 	netbe_rx_enable(sc->esc_be);
2368 
2369 	/* H/w initiated reset */
2370 	e82545_reset(sc, 0);
2371 
2372 	return (0);
2373 }
2374 
2375 #ifdef BHYVE_SNAPSHOT
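/*
 * Save or restore the emulated device state.  The field order here defines
 * the snapshot layout, so save and restore must traverse it identically.
 */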
2376 static int
2377 e82545_snapshot(struct vm_snapshot_meta *meta)
2378 {
2379 	int i;
2380 	int ret;
2381 	struct e82545_softc *sc;
2382 	struct pci_devinst *pi;
2383 	uint64_t bitmap_value;
2384 
2385 	pi = meta->dev_data;
2386 	sc = pi->pi_arg;
2387 
2388 	/* esc_mevp and esc_mevpitr should be reinitialized at init. */
2389 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_mac, meta, ret, done);
2390 
2391 	/* General */
2392 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_CTRL, meta, ret, done);
2393 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCAL, meta, ret, done);
2394 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCAH, meta, ret, done);
2395 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCT, meta, ret, done);
2396 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_VET, meta, ret, done);
2397 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCTTV, meta, ret, done);
2398 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_LEDCTL, meta, ret, done);
2399 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_PBA, meta, ret, done);
2400 
2401 	/* Interrupt control */
2402 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_irq_asserted, meta, ret, done);
2403 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ICR, meta, ret, done);
2404 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ITR, meta, ret, done);
2405 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ICS, meta, ret, done);
2406 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_IMS, meta, ret, done);
2407 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_IMC, meta, ret, done);
2408 
2409 	/*
2410 	 * Transmit
2411 	 *
2412 	 * The union fields overlay (alias) specific bytes of the larger
2413 	 * integer variables, so snapshotting the integers covers them all.
2414 	 * e.g., ip_config = [ipcss|ipcso|ipcse0|ipcse1]
2415 	 */
2416 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.lower_setup.ip_config, meta, ret, done);
2417 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.upper_setup.tcp_config, meta, ret, done);
2418 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.cmd_and_length, meta, ret, done);
2419 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.tcp_seg_setup.data, meta, ret, done);
2420 
2421 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tx_enabled, meta, ret, done);
2422 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tx_active, meta, ret, done);
2423 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TXCW, meta, ret, done);
2424 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TCTL, meta, ret, done);
2425 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TIPG, meta, ret, done);
2426 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_AIT, meta, ret, done);
2427 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tdba, meta, ret, done);
2428 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDBAL, meta, ret, done);
2429 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDBAH, meta, ret, done);
2430 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDLEN, meta, ret, done);
2431 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDH, meta, ret, done);
2432 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDHr, meta, ret, done);
2433 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDT, meta, ret, done);
2434 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TIDV, meta, ret, done);
2435 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TXDCTL, meta, ret, done);
2436 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TADV, meta, ret, done);
2437 
2438 	/* Has a dependency on esc_TDLEN; fields reordered relative to the struct. */
2439 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->esc_txdesc, sc->esc_TDLEN,
2440 		true, meta, ret, done);
2441 
2442 	/* L2 frame acceptance */
2443 	for (i = 0; i < (int)nitems(sc->esc_uni); i++) {
2444 		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_valid, meta, ret, done);
2445 		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_addrsel, meta, ret, done);
2446 		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_eth, meta, ret, done);
2447 	}
2448 
2449 	SNAPSHOT_BUF_OR_LEAVE(sc->esc_fmcast, sizeof(sc->esc_fmcast),
2450 			      meta, ret, done);
2451 	SNAPSHOT_BUF_OR_LEAVE(sc->esc_fvlan, sizeof(sc->esc_fvlan),
2452 			      meta, ret, done);
2453 
2454 	/* Receive */
2455 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_enabled, meta, ret, done);
2456 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_active, meta, ret, done);
2457 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_loopback, meta, ret, done);
2458 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RCTL, meta, ret, done);
2459 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCRTL, meta, ret, done);
2460 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCRTH, meta, ret, done);
2461 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rdba, meta, ret, done);
2462 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDBAL, meta, ret, done);
2463 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDBAH, meta, ret, done);
2464 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDLEN, meta, ret, done);
2465 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDH, meta, ret, done);
2466 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDT, meta, ret, done);
2467 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDTR, meta, ret, done);
2468 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RXDCTL, meta, ret, done);
2469 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RADV, meta, ret, done);
2470 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RSRPD, meta, ret, done);
2471 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RXCSUM, meta, ret, done);
2472 
2473 	/* Has a dependency on esc_RDLEN; fields reordered relative to the struct. */
2474 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->esc_rxdesc, sc->esc_RDLEN,
2475 		true, meta, ret, done);
2476 
2477 	/* IO Port register access */
2478 	SNAPSHOT_VAR_OR_LEAVE(sc->io_addr, meta, ret, done);
2479 
2480 	/* Shadow copy of MDIC */
2481 	SNAPSHOT_VAR_OR_LEAVE(sc->mdi_control, meta, ret, done);
2482 
2483 	/* Shadow copy of EECD */
2484 	SNAPSHOT_VAR_OR_LEAVE(sc->eeprom_control, meta, ret, done);
2485 
2486 	/* Latest NVM in/out */
2487 	SNAPSHOT_VAR_OR_LEAVE(sc->nvm_data, meta, ret, done);
2488 	SNAPSHOT_VAR_OR_LEAVE(sc->nvm_opaddr, meta, ret, done);
2489 
2490 	/* Stats */
2491 	SNAPSHOT_VAR_OR_LEAVE(sc->missed_pkt_count, meta, ret, done);
2492 	SNAPSHOT_BUF_OR_LEAVE(sc->pkt_rx_by_size, sizeof(sc->pkt_rx_by_size),
2493 			      meta, ret, done);
2494 	SNAPSHOT_BUF_OR_LEAVE(sc->pkt_tx_by_size, sizeof(sc->pkt_tx_by_size),
2495 			      meta, ret, done);
2496 	SNAPSHOT_VAR_OR_LEAVE(sc->good_pkt_rx_count, meta, ret, done);
2497 	SNAPSHOT_VAR_OR_LEAVE(sc->bcast_pkt_rx_count, meta, ret, done);
2498 	SNAPSHOT_VAR_OR_LEAVE(sc->mcast_pkt_rx_count, meta, ret, done);
2499 	SNAPSHOT_VAR_OR_LEAVE(sc->good_pkt_tx_count, meta, ret, done);
2500 	SNAPSHOT_VAR_OR_LEAVE(sc->bcast_pkt_tx_count, meta, ret, done);
2501 	SNAPSHOT_VAR_OR_LEAVE(sc->mcast_pkt_tx_count, meta, ret, done);
2502 	SNAPSHOT_VAR_OR_LEAVE(sc->oversize_rx_count, meta, ret, done);
2503 	SNAPSHOT_VAR_OR_LEAVE(sc->tso_tx_count, meta, ret, done);
2504 	SNAPSHOT_VAR_OR_LEAVE(sc->good_octets_rx, meta, ret, done);
2505 	SNAPSHOT_VAR_OR_LEAVE(sc->good_octets_tx, meta, ret, done);
2506 	SNAPSHOT_VAR_OR_LEAVE(sc->missed_octets, meta, ret, done);
2507 
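	/*
	 * nvm_bits and nvm_mode are bitfields and thus not directly
	 * addressable, so they are copied through a plain uint64_t for
	 * the snapshot macros.
	 */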
2508 	if (meta->op == VM_SNAPSHOT_SAVE)
2509 		bitmap_value = sc->nvm_bits;
2510 	SNAPSHOT_VAR_OR_LEAVE(bitmap_value, meta, ret, done);
2511 	if (meta->op == VM_SNAPSHOT_RESTORE)
2512 		sc->nvm_bits = bitmap_value;
2513 
2514 	if (meta->op == VM_SNAPSHOT_SAVE)
2515 		bitmap_value = sc->nvm_mode;
2516 	SNAPSHOT_VAR_OR_LEAVE(bitmap_value, meta, ret, done);
2517 	if (meta->op == VM_SNAPSHOT_RESTORE)
2518 		sc->nvm_mode = bitmap_value;
2519 
2520 	/* EEPROM data */
2521 	SNAPSHOT_BUF_OR_LEAVE(sc->eeprom_data, sizeof(sc->eeprom_data),
2522 			      meta, ret, done);
2523 
2524 done:
2525 	return (ret);
2526 }
2527 #endif
2528 
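/*
 * Register the device model; it is selected on the bhyve command line by
 * the "e1000" emulation name.
 */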
2529 static const struct pci_devemu pci_de_e82545 = {
2530 	.pe_emu = 	"e1000",
2531 	.pe_init =	e82545_init,
2532 	.pe_legacy_config = netbe_legacy_config,
2533 	.pe_barwrite =	e82545_write,
2534 	.pe_barread =	e82545_read,
2535 #ifdef BHYVE_SNAPSHOT
2536 	.pe_snapshot =	e82545_snapshot,
2537 #endif
2538 };
2539 PCI_EMUL_SET(pci_de_e82545);
2540