xref: /freebsd/usr.sbin/bhyve/pci_e82545.c (revision 0957b409)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6  * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer
14  *    in this position and unchanged.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/types.h>
36 #ifndef WITHOUT_CAPSICUM
37 #include <sys/capsicum.h>
38 #endif
39 #include <sys/limits.h>
40 #include <sys/ioctl.h>
41 #include <sys/uio.h>
42 #include <net/ethernet.h>
43 #include <netinet/in.h>
44 #include <netinet/tcp.h>
45 
46 #ifndef WITHOUT_CAPSICUM
47 #include <capsicum_helpers.h>
48 #endif
49 #include <err.h>
50 #include <errno.h>
51 #include <fcntl.h>
52 #include <md5.h>
53 #include <stdio.h>
54 #include <stdlib.h>
55 #include <string.h>
56 #include <sysexits.h>
57 #include <unistd.h>
58 #include <pthread.h>
59 #include <pthread_np.h>
60 
61 #include "e1000_regs.h"
62 #include "e1000_defines.h"
63 #include "mii.h"
64 
65 #include "bhyverun.h"
66 #include "pci_emul.h"
67 #include "mevent.h"
68 
69 /* Hardware/register definitions XXX: move some to common code. */
70 #define E82545_VENDOR_ID_INTEL			0x8086
71 #define E82545_DEV_ID_82545EM_COPPER		0x100F
72 #define E82545_SUBDEV_ID			0x1008
73 
74 #define E82545_REVISION_4			4
75 
76 #define E82545_MDIC_DATA_MASK			0x0000FFFF
77 #define E82545_MDIC_OP_MASK			0x0c000000
78 #define E82545_MDIC_IE				0x20000000
79 
80 #define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
81 #define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
82 #define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
83 
84 #define E82545_BAR_REGISTER			0
85 #define E82545_BAR_REGISTER_LEN			(128*1024)
86 #define E82545_BAR_FLASH			1
87 #define E82545_BAR_FLASH_LEN			(64*1024)
88 #define E82545_BAR_IO				2
89 #define E82545_BAR_IO_LEN			8
90 
91 #define E82545_IOADDR				0x00000000
92 #define E82545_IODATA				0x00000004
93 #define E82545_IO_REGISTER_MAX			0x0001FFFF
94 #define E82545_IO_FLASH_BASE			0x00080000
95 #define E82545_IO_FLASH_MAX			0x000FFFFF
96 
97 #define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
98 #define E82545_RAR_MAX				15
99 #define E82545_MTA_MAX				127
100 #define E82545_VFTA_MAX				127
101 
102 /* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
103  * followed by 6 address bits.
104  * TODO: make opcode bits and addr bits configurable?
105  * NVM Commands - Microwire */
106 #define E82545_NVM_OPCODE_BITS	3
107 #define E82545_NVM_ADDR_BITS	6
108 #define E82545_NVM_DATA_BITS	16
109 #define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
110 #define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
111 #define E82545_NVM_OPCODE_MASK	\
112     (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
113 #define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
114 #define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
115 #define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
116 #define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
117 
118 #define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128K */
119 
120 #define E1000_ICR_SRPD		0x00010000
121 
122 /* This is an arbitrary number.  There is no hard limit on the chip. */
123 #define I82545_MAX_TXSEGS	64
124 
125 /* Legacy receive descriptor */
126 struct e1000_rx_desc {
127 	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
128 	uint16_t length;	/* Length of data DMAed into data buffer */
129 	uint16_t csum;		/* Packet checksum */
130 	uint8_t	 status;       	/* Descriptor status */
131 	uint8_t  errors;	/* Descriptor Errors */
132 	uint16_t special;
133 };
134 
135 /* Transmit descriptor types */
136 #define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
137 #define E1000_TXD_TYP_L		(0)
138 #define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
139 #define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
140 
141 /* Legacy transmit descriptor */
142 struct e1000_tx_desc {
143 	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
144 	union {
145 		uint32_t data;
146 		struct {
147 			uint16_t length;  /* Data buffer length */
148 			uint8_t  cso;  /* Checksum offset */
149 			uint8_t  cmd;  /* Descriptor control */
150 		} flags;
151 	} lower;
152 	union {
153 		uint32_t data;
154 		struct {
155 			uint8_t status; /* Descriptor status */
156 			uint8_t css;  /* Checksum start */
157 			uint16_t special;
158 		} fields;
159 	} upper;
160 };
161 
162 /* Context descriptor */
163 struct e1000_context_desc {
164 	union {
165 		uint32_t ip_config;
166 		struct {
167 			uint8_t ipcss;  /* IP checksum start */
168 			uint8_t ipcso;  /* IP checksum offset */
169 			uint16_t ipcse;  /* IP checksum end */
170 		} ip_fields;
171 	} lower_setup;
172 	union {
173 		uint32_t tcp_config;
174 		struct {
175 			uint8_t tucss;  /* TCP checksum start */
176 			uint8_t tucso;  /* TCP checksum offset */
177 			uint16_t tucse;  /* TCP checksum end */
178 		} tcp_fields;
179 	} upper_setup;
180 	uint32_t cmd_and_length;
181 	union {
182 		uint32_t data;
183 		struct {
184 			uint8_t status;  /* Descriptor status */
185 			uint8_t hdr_len;  /* Header length */
186 			uint16_t mss;  /* Maximum segment size */
187 		} fields;
188 	} tcp_seg_setup;
189 };
190 
191 /* Data descriptor */
192 struct e1000_data_desc {
193 	uint64_t buffer_addr;  /* Address of the descriptor's buffer address */
194 	union {
195 		uint32_t data;
196 		struct {
197 			uint16_t length;  /* Data buffer length */
198 			uint8_t typ_len_ext;
199 			uint8_t cmd;
200 		} flags;
201 	} lower;
202 	union {
203 		uint32_t data;
204 		struct {
205 			uint8_t status;  /* Descriptor status */
206 			uint8_t popts;  /* Packet Options */
207 			uint16_t special;
208 		} fields;
209 	} upper;
210 };
211 
212 union e1000_tx_udesc {
213 	struct e1000_tx_desc td;
214 	struct e1000_context_desc cd;
215 	struct e1000_data_desc dd;
216 };
217 
218 /* Tx checksum info for a packet. */
219 struct ck_info {
220 	int	ck_valid;	/* ck_info is valid */
221 	uint8_t	ck_start;	/* start byte of cksum calcuation */
222 	uint8_t	ck_off;		/* offset of cksum insertion */
223 	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
224 };
225 
226 /*
227  * Debug printf
228  */
229 static int e82545_debug = 0;
230 #define DPRINTF(msg,params...) if (e82545_debug) fprintf(stderr, "e82545: " msg, params)
231 #define WPRINTF(msg,params...) fprintf(stderr, "e82545: " msg, params)
232 
233 #define	MIN(a,b) (((a)<(b))?(a):(b))
234 #define	MAX(a,b) (((a)>(b))?(a):(b))
235 
236 /* s/w representation of the RAL/RAH regs */
237 struct  eth_uni {
238 	int		eu_valid;
239 	int		eu_addrsel;
240 	struct ether_addr eu_eth;
241 };
242 
243 
244 struct e82545_softc {
245 	struct pci_devinst *esc_pi;
246 	struct vmctx	*esc_ctx;
247 	struct mevent   *esc_mevp;
248 	struct mevent   *esc_mevpitr;
249 	pthread_mutex_t	esc_mtx;
250 	struct ether_addr esc_mac;
251 	int		esc_tapfd;
252 
253 	/* General */
254 	uint32_t	esc_CTRL;	/* x0000 device ctl */
255 	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
256 	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
257 	uint32_t	esc_FCT;	/* x0030 flow ctl type */
258 	uint32_t	esc_VET;	/* x0038 VLAN eth type */
259 	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
260 	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
261 	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
262 
263 	/* Interrupt control */
264 	int		esc_irq_asserted;
265 	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
266 	uint32_t	esc_ITR;	/* x00C4 intr throttling */
267 	uint32_t	esc_ICS;	/* x00C8 cause set */
268 	uint32_t	esc_IMS;	/* x00D0 mask set/read */
269 	uint32_t	esc_IMC;	/* x00D8 mask clear */
270 
271 	/* Transmit */
272 	union e1000_tx_udesc *esc_txdesc;
273 	struct e1000_context_desc esc_txctx;
274 	pthread_t	esc_tx_tid;
275 	pthread_cond_t	esc_tx_cond;
276 	int		esc_tx_enabled;
277 	int		esc_tx_active;
278 	uint32_t	esc_TXCW;	/* x0178 transmit config */
279 	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
280 	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
281 	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
282 	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
283 	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
284 	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
285 	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
286 	uint16_t	esc_TDH;	/* x3810 desc table head idx */
287 	uint16_t	esc_TDHr;	/* internal read version of TDH */
288 	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
289 	uint32_t	esc_TIDV;	/* x3820 intr delay */
290 	uint32_t	esc_TXDCTL;	/* x3828 desc control */
291 	uint32_t	esc_TADV;	/* x382C intr absolute delay */
292 
293 	/* L2 frame acceptance */
294 	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
295 	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
296 	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
297 
298 	/* Receive */
299 	struct e1000_rx_desc *esc_rxdesc;
300 	pthread_cond_t	esc_rx_cond;
301 	int		esc_rx_enabled;
302 	int		esc_rx_active;
303 	int		esc_rx_loopback;
304 	uint32_t	esc_RCTL;	/* x0100 receive ctl */
305 	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
306 	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
307 	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
308 	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
309 	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
310 	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
311 	uint16_t	esc_RDH;	/* x2810 desc table head idx */
312 	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
313 	uint32_t	esc_RDTR;	/* x2820 intr delay */
314 	uint32_t	esc_RXDCTL;	/* x2828 desc control */
315 	uint32_t	esc_RADV;	/* x282C intr absolute delay */
316 	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
317 	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
318 
319 	/* IO Port register access */
320 	uint32_t io_addr;
321 
322 	/* Shadow copy of MDIC */
323 	uint32_t mdi_control;
324 	/* Shadow copy of EECD */
325 	uint32_t eeprom_control;
326 	/* Latest NVM in/out */
327 	uint16_t nvm_data;
328 	uint16_t nvm_opaddr;
329 	/* stats */
330 	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
331 	uint32_t pkt_rx_by_size[6];
332 	uint32_t pkt_tx_by_size[6];
333 	uint32_t good_pkt_rx_count;
334 	uint32_t bcast_pkt_rx_count;
335 	uint32_t mcast_pkt_rx_count;
336 	uint32_t good_pkt_tx_count;
337 	uint32_t bcast_pkt_tx_count;
338 	uint32_t mcast_pkt_tx_count;
339 	uint32_t oversize_rx_count;
340 	uint32_t tso_tx_count;
341 	uint64_t good_octets_rx;
342 	uint64_t good_octets_tx;
343 	uint64_t missed_octets; /* counts missed and oversized */
344 
345 	uint8_t nvm_bits:6; /* number of bits remaining in/out */
346 	uint8_t nvm_mode:2;
347 #define E82545_NVM_MODE_OPADDR  0x0
348 #define E82545_NVM_MODE_DATAIN  0x1
349 #define E82545_NVM_MODE_DATAOUT 0x2
350 	/* EEPROM data */
351 	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
352 };
353 
354 static void e82545_reset(struct e82545_softc *sc, int dev);
355 static void e82545_rx_enable(struct e82545_softc *sc);
356 static void e82545_rx_disable(struct e82545_softc *sc);
357 static void e82545_tap_callback(int fd, enum ev_type type, void *param);
358 static void e82545_tx_start(struct e82545_softc *sc);
359 static void e82545_tx_enable(struct e82545_softc *sc);
360 static void e82545_tx_disable(struct e82545_softc *sc);
361 
362 static inline int
363 e82545_size_stat_index(uint32_t size)
364 {
365 	if (size <= 64) {
366 		return 0;
367 	} else if (size >= 1024) {
368 		return 5;
369 	} else {
370 		/* should be 1-4 */
371 		return (ffs(size) - 6);
372 	}
373 }
374 
375 static void
376 e82545_init_eeprom(struct e82545_softc *sc)
377 {
378 	uint16_t checksum, i;
379 
380         /* mac addr */
381 	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
382 		(((uint16_t)sc->esc_mac.octet[1]) << 8);
383 	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
384 		(((uint16_t)sc->esc_mac.octet[3]) << 8);
385 	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
386 		(((uint16_t)sc->esc_mac.octet[5]) << 8);
387 
388 	/* pci ids */
389 	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
390 	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
391 	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
392 	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
393 
394 	/* fill in the checksum */
395         checksum = 0;
396 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
397 		checksum += sc->eeprom_data[i];
398 	}
399 	checksum = NVM_SUM - checksum;
400 	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
401 	DPRINTF("eeprom checksum: 0x%x\r\n", checksum);
402 }
403 
404 static void
405 e82545_write_mdi(struct e82545_softc *sc, uint8_t reg_addr,
406 			uint8_t phy_addr, uint32_t data)
407 {
408 	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x\r\n", reg_addr, phy_addr, data);
409 }
410 
411 static uint32_t
412 e82545_read_mdi(struct e82545_softc *sc, uint8_t reg_addr,
413 			uint8_t phy_addr)
414 {
415 	//DPRINTF("Read mdi reg:0x%x phy:0x%x\r\n", reg_addr, phy_addr);
416 	switch (reg_addr) {
417 	case PHY_STATUS:
418 		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
419 			MII_SR_AUTONEG_COMPLETE);
420 	case PHY_AUTONEG_ADV:
421 		return NWAY_AR_SELECTOR_FIELD;
422 	case PHY_LP_ABILITY:
423 		return 0;
424 	case PHY_1000T_STATUS:
425 		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
426 			SR_1000T_LOCAL_RX_STATUS);
427 	case PHY_ID1:
428 		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
429 	case PHY_ID2:
430 		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
431 	default:
432 		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x\r\n", reg_addr, phy_addr);
433 		return 0;
434 	}
435 	/* not reached */
436 }
437 
438 static void
439 e82545_eecd_strobe(struct e82545_softc *sc)
440 {
441 	/* Microwire state machine */
442 	/*
443 	DPRINTF("eeprom state machine srtobe "
444 		"0x%x 0x%x 0x%x 0x%x\r\n",
445 		sc->nvm_mode, sc->nvm_bits,
446 		sc->nvm_opaddr, sc->nvm_data);*/
447 
448 	if (sc->nvm_bits == 0) {
449 		DPRINTF("eeprom state machine not expecting data! "
450 			"0x%x 0x%x 0x%x 0x%x\r\n",
451 			sc->nvm_mode, sc->nvm_bits,
452 			sc->nvm_opaddr, sc->nvm_data);
453 		return;
454 	}
455 	sc->nvm_bits--;
456 	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
457 		/* shifting out */
458 		if (sc->nvm_data & 0x8000) {
459 			sc->eeprom_control |= E1000_EECD_DO;
460 		} else {
461 			sc->eeprom_control &= ~E1000_EECD_DO;
462 		}
463 		sc->nvm_data <<= 1;
464 		if (sc->nvm_bits == 0) {
465 			/* read done, back to opcode mode. */
466 			sc->nvm_opaddr = 0;
467 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
468 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
469 		}
470 	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
471 		/* shifting in */
472 		sc->nvm_data <<= 1;
473 		if (sc->eeprom_control & E1000_EECD_DI) {
474 			sc->nvm_data |= 1;
475 		}
476 		if (sc->nvm_bits == 0) {
477 			/* eeprom write */
478 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
479 			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
480 			if (op != E82545_NVM_OPCODE_WRITE) {
481 				DPRINTF("Illegal eeprom write op 0x%x\r\n",
482 					sc->nvm_opaddr);
483 			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
484 				DPRINTF("Illegal eeprom write addr 0x%x\r\n",
485 					sc->nvm_opaddr);
486 			} else {
487 				DPRINTF("eeprom write eeprom[0x%x] = 0x%x\r\n",
488 				addr, sc->nvm_data);
489 				sc->eeprom_data[addr] = sc->nvm_data;
490 			}
491 			/* back to opcode mode */
492 			sc->nvm_opaddr = 0;
493 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
494 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
495 		}
496 	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
497 		sc->nvm_opaddr <<= 1;
498 		if (sc->eeprom_control & E1000_EECD_DI) {
499 			sc->nvm_opaddr |= 1;
500 		}
501 		if (sc->nvm_bits == 0) {
502 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
503 			switch (op) {
504 			case E82545_NVM_OPCODE_EWEN:
505 				DPRINTF("eeprom write enable: 0x%x\r\n",
506 					sc->nvm_opaddr);
507 				/* back to opcode mode */
508 				sc->nvm_opaddr = 0;
509 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
510 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
511 				break;
512 			case E82545_NVM_OPCODE_READ:
513 			{
514 				uint16_t addr = sc->nvm_opaddr &
515 					E82545_NVM_ADDR_MASK;
516 				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
517 				sc->nvm_bits = E82545_NVM_DATA_BITS;
518 				if (addr < E82545_NVM_EEPROM_SIZE) {
519 					sc->nvm_data = sc->eeprom_data[addr];
520 					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x\r\n",
521 						addr, sc->nvm_data);
522 				} else {
523 					DPRINTF("eeprom illegal read: 0x%x\r\n",
524 						sc->nvm_opaddr);
525 					sc->nvm_data = 0;
526 				}
527 				break;
528 			}
529 			case E82545_NVM_OPCODE_WRITE:
530 				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
531 				sc->nvm_bits = E82545_NVM_DATA_BITS;
532 				sc->nvm_data = 0;
533 				break;
534 			default:
535 				DPRINTF("eeprom unknown op: 0x%x\r\r",
536 					sc->nvm_opaddr);
537 				/* back to opcode mode */
538 				sc->nvm_opaddr = 0;
539 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
540 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
541 			}
542 		}
543 	} else {
544 		DPRINTF("eeprom state machine wrong state! "
545 			"0x%x 0x%x 0x%x 0x%x\r\n",
546 			sc->nvm_mode, sc->nvm_bits,
547 			sc->nvm_opaddr, sc->nvm_data);
548 	}
549 }
550 
551 static void
552 e82545_itr_callback(int fd, enum ev_type type, void *param)
553 {
554 	uint32_t new;
555 	struct e82545_softc *sc = param;
556 
557 	pthread_mutex_lock(&sc->esc_mtx);
558 	new = sc->esc_ICR & sc->esc_IMS;
559 	if (new && !sc->esc_irq_asserted) {
560 		DPRINTF("itr callback: lintr assert %x\r\n", new);
561 		sc->esc_irq_asserted = 1;
562 		pci_lintr_assert(sc->esc_pi);
563 	} else {
564 		mevent_delete(sc->esc_mevpitr);
565 		sc->esc_mevpitr = NULL;
566 	}
567 	pthread_mutex_unlock(&sc->esc_mtx);
568 }
569 
570 static void
571 e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
572 {
573 	uint32_t new;
574 
575 	DPRINTF("icr assert: 0x%x\r\n", bits);
576 
577 	/*
578 	 * An interrupt is only generated if bits are set that
579 	 * aren't already in the ICR, these bits are unmasked,
580 	 * and there isn't an interrupt already pending.
581 	 */
582 	new = bits & ~sc->esc_ICR & sc->esc_IMS;
583 	sc->esc_ICR |= bits;
584 
585 	if (new == 0) {
586 		DPRINTF("icr assert: masked %x, ims %x\r\n", new, sc->esc_IMS);
587 	} else if (sc->esc_mevpitr != NULL) {
588 		DPRINTF("icr assert: throttled %x, ims %x\r\n", new, sc->esc_IMS);
589 	} else if (!sc->esc_irq_asserted) {
590 		DPRINTF("icr assert: lintr assert %x\r\n", new);
591 		sc->esc_irq_asserted = 1;
592 		pci_lintr_assert(sc->esc_pi);
593 		if (sc->esc_ITR != 0) {
594 			sc->esc_mevpitr = mevent_add(
595 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
596 			    EVF_TIMER, e82545_itr_callback, sc);
597 		}
598 	}
599 }
600 
601 static void
602 e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
603 {
604 	uint32_t new;
605 
606 	/*
607 	 * Changing the mask may allow previously asserted
608 	 * but masked interrupt requests to generate an interrupt.
609 	 */
610 	new = bits & sc->esc_ICR & ~sc->esc_IMS;
611 	sc->esc_IMS |= bits;
612 
613 	if (new == 0) {
614 		DPRINTF("ims change: masked %x, ims %x\r\n", new, sc->esc_IMS);
615 	} else if (sc->esc_mevpitr != NULL) {
616 		DPRINTF("ims change: throttled %x, ims %x\r\n", new, sc->esc_IMS);
617 	} else if (!sc->esc_irq_asserted) {
618 		DPRINTF("ims change: lintr assert %x\n\r", new);
619 		sc->esc_irq_asserted = 1;
620 		pci_lintr_assert(sc->esc_pi);
621 		if (sc->esc_ITR != 0) {
622 			sc->esc_mevpitr = mevent_add(
623 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
624 			    EVF_TIMER, e82545_itr_callback, sc);
625 		}
626 	}
627 }
628 
629 static void
630 e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
631 {
632 
633 	DPRINTF("icr deassert: 0x%x\r\n", bits);
634 	sc->esc_ICR &= ~bits;
635 
636 	/*
637 	 * If there are no longer any interrupt sources and there
638 	 * was an asserted interrupt, clear it
639 	 */
640 	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
641 		DPRINTF("icr deassert: lintr deassert %x\r\n", bits);
642 		pci_lintr_deassert(sc->esc_pi);
643 		sc->esc_irq_asserted = 0;
644 	}
645 }
646 
647 static void
648 e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
649 {
650 
651 	DPRINTF("intr_write: off %x, val %x\n\r", offset, value);
652 
653 	switch (offset) {
654 	case E1000_ICR:
655 		e82545_icr_deassert(sc, value);
656 		break;
657 	case E1000_ITR:
658 		sc->esc_ITR = value;
659 		break;
660 	case E1000_ICS:
661 		sc->esc_ICS = value;	/* not used: store for debug */
662 		e82545_icr_assert(sc, value);
663 		break;
664 	case E1000_IMS:
665 		e82545_ims_change(sc, value);
666 		break;
667 	case E1000_IMC:
668 		sc->esc_IMC = value;	/* for debug */
669 		sc->esc_IMS &= ~value;
670 		// XXX clear interrupts if all ICR bits now masked
671 		// and interrupt was pending ?
672 		break;
673 	default:
674 		break;
675 	}
676 }
677 
678 static uint32_t
679 e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
680 {
681 	uint32_t retval;
682 
683 	retval = 0;
684 
685 	DPRINTF("intr_read: off %x\n\r", offset);
686 
687 	switch (offset) {
688 	case E1000_ICR:
689 		retval = sc->esc_ICR;
690 		sc->esc_ICR = 0;
691 		e82545_icr_deassert(sc, ~0);
692 		break;
693 	case E1000_ITR:
694 		retval = sc->esc_ITR;
695 		break;
696 	case E1000_ICS:
697 		/* write-only register */
698 		break;
699 	case E1000_IMS:
700 		retval = sc->esc_IMS;
701 		break;
702 	case E1000_IMC:
703 		/* write-only register */
704 		break;
705 	default:
706 		break;
707 	}
708 
709 	return (retval);
710 }
711 
712 static void
713 e82545_devctl(struct e82545_softc *sc, uint32_t val)
714 {
715 
716 	sc->esc_CTRL = val & ~E1000_CTRL_RST;
717 
718 	if (val & E1000_CTRL_RST) {
719 		DPRINTF("e1k: s/w reset, ctl %x\n", val);
720 		e82545_reset(sc, 1);
721 	}
722 	/* XXX check for phy reset ? */
723 }
724 
725 static void
726 e82545_rx_update_rdba(struct e82545_softc *sc)
727 {
728 
729 	/* XXX verify desc base/len within phys mem range */
730 	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
731 	    sc->esc_RDBAL;
732 
733 	/* Cache host mapping of guest descriptor array */
734 	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
735 	    sc->esc_rdba, sc->esc_RDLEN);
736 }
737 
738 static void
739 e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
740 {
741 	int on;
742 
743 	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
744 
745 	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
746 	sc->esc_RCTL = val & ~0xF9204c01;
747 
748 	DPRINTF("rx_ctl - %s RCTL %x, val %x\n",
749 		on ? "on" : "off", sc->esc_RCTL, val);
750 
751 	/* state change requested */
752 	if (on != sc->esc_rx_enabled) {
753 		if (on) {
754 			/* Catch disallowed/unimplemented settings */
755 			//assert(!(val & E1000_RCTL_LBM_TCVR));
756 
757 			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
758 				sc->esc_rx_loopback = 1;
759 			} else {
760 				sc->esc_rx_loopback = 0;
761 			}
762 
763 			e82545_rx_update_rdba(sc);
764 			e82545_rx_enable(sc);
765 		} else {
766 			e82545_rx_disable(sc);
767 			sc->esc_rx_loopback = 0;
768 			sc->esc_rdba = 0;
769 			sc->esc_rxdesc = NULL;
770 		}
771 	}
772 }
773 
774 static void
775 e82545_tx_update_tdba(struct e82545_softc *sc)
776 {
777 
778 	/* XXX verify desc base/len within phys mem range */
779 	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
780 
781 	/* Cache host mapping of guest descriptor array */
782 	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
783             sc->esc_TDLEN);
784 }
785 
786 static void
787 e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
788 {
789 	int on;
790 
791 	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
792 
793 	/* ignore TCTL_EN settings that don't change state */
794 	if (on == sc->esc_tx_enabled)
795 		return;
796 
797 	if (on) {
798 		e82545_tx_update_tdba(sc);
799 		e82545_tx_enable(sc);
800 	} else {
801 		e82545_tx_disable(sc);
802 		sc->esc_tdba = 0;
803 		sc->esc_txdesc = NULL;
804 	}
805 
806 	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
807 	sc->esc_TCTL = val & ~0xFE800005;
808 }
809 
810 int
811 e82545_bufsz(uint32_t rctl)
812 {
813 
814 	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
815 	case (E1000_RCTL_SZ_2048): return (2048);
816 	case (E1000_RCTL_SZ_1024): return (1024);
817 	case (E1000_RCTL_SZ_512): return (512);
818 	case (E1000_RCTL_SZ_256): return (256);
819 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
820 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
821 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
822 	}
823 	return (256);	/* Forbidden value. */
824 }
825 
826 static uint8_t dummybuf[2048];
827 
828 /* XXX one packet at a time until this is debugged */
829 static void
830 e82545_tap_callback(int fd, enum ev_type type, void *param)
831 {
832 	struct e82545_softc *sc = param;
833 	struct e1000_rx_desc *rxd;
834 	struct iovec vec[64];
835 	int left, len, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
836 	uint32_t cause = 0;
837 	uint16_t *tp, tag, head;
838 
839 	pthread_mutex_lock(&sc->esc_mtx);
840 	DPRINTF("rx_run: head %x, tail %x\r\n", sc->esc_RDH, sc->esc_RDT);
841 
842 	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
843 		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped\r\n",
844 		    sc->esc_rx_enabled, sc->esc_rx_loopback);
845 		while (read(sc->esc_tapfd, dummybuf, sizeof(dummybuf)) > 0) {
846 		}
847 		goto done1;
848 	}
849 	bufsz = e82545_bufsz(sc->esc_RCTL);
850 	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
851 	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
852 	size = sc->esc_RDLEN / 16;
853 	head = sc->esc_RDH;
854 	left = (size + sc->esc_RDT - head) % size;
855 	if (left < maxpktdesc) {
856 		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped\r\n",
857 		    left, maxpktdesc);
858 		while (read(sc->esc_tapfd, dummybuf, sizeof(dummybuf)) > 0) {
859 		}
860 		goto done1;
861 	}
862 
863 	sc->esc_rx_active = 1;
864 	pthread_mutex_unlock(&sc->esc_mtx);
865 
866 	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
867 
868 		/* Grab rx descriptor pointed to by the head pointer */
869 		for (i = 0; i < maxpktdesc; i++) {
870 			rxd = &sc->esc_rxdesc[(head + i) % size];
871 			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
872 			    rxd->buffer_addr, bufsz);
873 			vec[i].iov_len = bufsz;
874 		}
875 		len = readv(sc->esc_tapfd, vec, maxpktdesc);
876 		if (len <= 0) {
877 			DPRINTF("tap: readv() returned %d\n", len);
878 			goto done;
879 		}
880 
881 		/*
882 		 * Adjust the packet length based on whether the CRC needs
883 		 * to be stripped or if the packet is less than the minimum
884 		 * eth packet size.
885 		 */
886 		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
887 			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
888 		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
889 			len += ETHER_CRC_LEN;
890 		n = (len + bufsz - 1) / bufsz;
891 
892 		DPRINTF("packet read %d bytes, %d segs, head %d\r\n",
893 		    len, n, head);
894 
895 		/* Apply VLAN filter. */
896 		tp = (uint16_t *)vec[0].iov_base + 6;
897 		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
898 		    (ntohs(tp[0]) == sc->esc_VET)) {
899 			tag = ntohs(tp[1]) & 0x0fff;
900 			if ((sc->esc_fvlan[tag >> 5] &
901 			    (1 << (tag & 0x1f))) != 0) {
902 				DPRINTF("known VLAN %d\r\n", tag);
903 			} else {
904 				DPRINTF("unknown VLAN %d\r\n", tag);
905 				n = 0;
906 				continue;
907 			}
908 		}
909 
910 		/* Update all consumed descriptors. */
911 		for (i = 0; i < n - 1; i++) {
912 			rxd = &sc->esc_rxdesc[(head + i) % size];
913 			rxd->length = bufsz;
914 			rxd->csum = 0;
915 			rxd->errors = 0;
916 			rxd->special = 0;
917 			rxd->status = E1000_RXD_STAT_DD;
918 		}
919 		rxd = &sc->esc_rxdesc[(head + i) % size];
920 		rxd->length = len % bufsz;
921 		rxd->csum = 0;
922 		rxd->errors = 0;
923 		rxd->special = 0;
924 		/* XXX signal no checksum for now */
925 		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
926 		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
927 
928 		/* Schedule receive interrupts. */
929 		if (len <= sc->esc_RSRPD) {
930 			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
931 		} else {
932 			/* XXX: RDRT and RADV timers should be here. */
933 			cause |= E1000_ICR_RXT0;
934 		}
935 
936 		head = (head + n) % size;
937 		left -= n;
938 	}
939 
940 done:
941 	pthread_mutex_lock(&sc->esc_mtx);
942 	sc->esc_rx_active = 0;
943 	if (sc->esc_rx_enabled == 0)
944 		pthread_cond_signal(&sc->esc_rx_cond);
945 
946 	sc->esc_RDH = head;
947 	/* Respect E1000_RCTL_RDMTS */
948 	left = (size + sc->esc_RDT - head) % size;
949 	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
950 		cause |= E1000_ICR_RXDMT0;
951 	/* Assert all accumulated interrupts. */
952 	if (cause != 0)
953 		e82545_icr_assert(sc, cause);
954 done1:
955 	DPRINTF("rx_run done: head %x, tail %x\r\n", sc->esc_RDH, sc->esc_RDT);
956 	pthread_mutex_unlock(&sc->esc_mtx);
957 }
958 
959 static uint16_t
960 e82545_carry(uint32_t sum)
961 {
962 
963 	sum = (sum & 0xFFFF) + (sum >> 16);
964 	if (sum > 0xFFFF)
965 		sum -= 0xFFFF;
966 	return (sum);
967 }
968 
969 static uint16_t
970 e82545_buf_checksum(uint8_t *buf, int len)
971 {
972 	int i;
973 	uint32_t sum = 0;
974 
975 	/* Checksum all the pairs of bytes first... */
976 	for (i = 0; i < (len & ~1U); i += 2)
977 		sum += *((u_int16_t *)(buf + i));
978 
979 	/*
980 	 * If there's a single byte left over, checksum it, too.
981 	 * Network byte order is big-endian, so the remaining byte is
982 	 * the high byte.
983 	 */
984 	if (i < len)
985 		sum += htons(buf[i] << 8);
986 
987 	return (e82545_carry(sum));
988 }
989 
990 static uint16_t
991 e82545_iov_checksum(struct iovec *iov, int iovcnt, int off, int len)
992 {
993 	int now, odd;
994 	uint32_t sum = 0, s;
995 
996 	/* Skip completely unneeded vectors. */
997 	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
998 		off -= iov->iov_len;
999 		iov++;
1000 		iovcnt--;
1001 	}
1002 
1003 	/* Calculate checksum of requested range. */
1004 	odd = 0;
1005 	while (len > 0 && iovcnt > 0) {
1006 		now = MIN(len, iov->iov_len - off);
1007 		s = e82545_buf_checksum(iov->iov_base + off, now);
1008 		sum += odd ? (s << 8) : s;
1009 		odd ^= (now & 1);
1010 		len -= now;
1011 		off = 0;
1012 		iov++;
1013 		iovcnt--;
1014 	}
1015 
1016 	return (e82545_carry(sum));
1017 }
1018 
1019 /*
1020  * Return the transmit descriptor type.
1021  */
1022 int
1023 e82545_txdesc_type(uint32_t lower)
1024 {
1025 	int type;
1026 
1027 	type = 0;
1028 
1029 	if (lower & E1000_TXD_CMD_DEXT)
1030 		type = lower & E1000_TXD_MASK;
1031 
1032 	return (type);
1033 }
1034 
1035 static void
1036 e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1037 {
1038 	uint16_t cksum;
1039 	int cklen;
1040 
1041 	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d\r\n",
1042 	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1043 	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1 : INT_MAX;
1044 	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1045 	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1046 }
1047 
1048 static void
1049 e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1050 {
1051 
1052 	if (sc->esc_tapfd == -1)
1053 		return;
1054 
1055 	(void) writev(sc->esc_tapfd, iov, iovcnt);
1056 }
1057 
1058 static void
1059 e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1060     uint16_t dsize, int *tdwb)
1061 {
1062 	union e1000_tx_udesc *dsc;
1063 
1064 	for ( ; head != tail; head = (head + 1) % dsize) {
1065 		dsc = &sc->esc_txdesc[head];
1066 		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1067 			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1068 			*tdwb = 1;
1069 		}
1070 	}
1071 }
1072 
1073 static int
1074 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1075     uint16_t dsize, uint16_t *rhead, int *tdwb)
1076 {
1077 	uint8_t *hdr, *hdrp;
1078 	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1079 	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1080 	struct e1000_context_desc *cd;
1081 	struct ck_info ckinfo[2];
1082 	struct iovec *iov;
1083 	union  e1000_tx_udesc *dsc;
1084 	int desc, dtype, len, ntype, iovcnt, tlen, hdrlen, vlen, tcp, tso;
1085 	int mss, paylen, seg, tiovcnt, left, now, nleft, nnow, pv, pvoff;
1086 	uint32_t tcpsum, tcpseq;
1087 	uint16_t ipcs, tcpcs, ipid, ohead;
1088 
1089 	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1090 	iovcnt = 0;
1091 	tlen = 0;
1092 	ntype = 0;
1093 	tso = 0;
1094 	ohead = head;
1095 
1096 	/* iovb[0/1] may be used for writable copy of headers. */
1097 	iov = &iovb[2];
1098 
1099 	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1100 		if (head == tail) {
1101 			*rhead = head;
1102 			return (0);
1103 		}
1104 		dsc = &sc->esc_txdesc[head];
1105 		dtype = e82545_txdesc_type(dsc->td.lower.data);
1106 
1107 		if (desc == 0) {
1108 			switch (dtype) {
1109 			case E1000_TXD_TYP_C:
1110 				DPRINTF("tx ctxt desc idx %d: %016jx "
1111 				    "%08x%08x\r\n",
1112 				    head, dsc->td.buffer_addr,
1113 				    dsc->td.upper.data, dsc->td.lower.data);
1114 				/* Save context and return */
1115 				sc->esc_txctx = dsc->cd;
1116 				goto done;
1117 			case E1000_TXD_TYP_L:
1118 				DPRINTF("tx legacy desc idx %d: %08x%08x\r\n",
1119 				    head, dsc->td.upper.data, dsc->td.lower.data);
1120 				/*
1121 				 * legacy cksum start valid in first descriptor
1122 				 */
1123 				ntype = dtype;
1124 				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1125 				break;
1126 			case E1000_TXD_TYP_D:
1127 				DPRINTF("tx data desc idx %d: %08x%08x\r\n",
1128 				    head, dsc->td.upper.data, dsc->td.lower.data);
1129 				ntype = dtype;
1130 				break;
1131 			default:
1132 				break;
1133 			}
1134 		} else {
1135 			/* Descriptor type must be consistent */
1136 			assert(dtype == ntype);
1137 			DPRINTF("tx next desc idx %d: %08x%08x\r\n",
1138 			    head, dsc->td.upper.data, dsc->td.lower.data);
1139 		}
1140 
1141 		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1142 		    dsc->dd.lower.data & 0xFFFFF;
1143 
1144 		if (len > 0) {
1145 			/* Strip checksum supplied by guest. */
1146 			if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1147 			    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0)
1148 				len -= 2;
1149 			tlen += len;
1150 			if (iovcnt < I82545_MAX_TXSEGS) {
1151 				iov[iovcnt].iov_base = paddr_guest2host(
1152 				    sc->esc_ctx, dsc->td.buffer_addr, len);
1153 				iov[iovcnt].iov_len = len;
1154 			}
1155 			iovcnt++;
1156 		}
1157 
1158 		/*
1159 		 * Pull out info that is valid in the final descriptor
1160 		 * and exit descriptor loop.
1161 		 */
1162 		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1163 			if (dtype == E1000_TXD_TYP_L) {
1164 				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1165 					ckinfo[0].ck_valid = 1;
1166 					ckinfo[0].ck_off =
1167 					    dsc->td.lower.flags.cso;
1168 					ckinfo[0].ck_len = 0;
1169 				}
1170 			} else {
1171 				cd = &sc->esc_txctx;
1172 				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1173 					tso = 1;
1174 				if (dsc->dd.upper.fields.popts &
1175 				    E1000_TXD_POPTS_IXSM)
1176 					ckinfo[0].ck_valid = 1;
1177 				if (dsc->dd.upper.fields.popts &
1178 				    E1000_TXD_POPTS_IXSM || tso) {
1179 					ckinfo[0].ck_start =
1180 					    cd->lower_setup.ip_fields.ipcss;
1181 					ckinfo[0].ck_off =
1182 					    cd->lower_setup.ip_fields.ipcso;
1183 					ckinfo[0].ck_len =
1184 					    cd->lower_setup.ip_fields.ipcse;
1185 				}
1186 				if (dsc->dd.upper.fields.popts &
1187 				    E1000_TXD_POPTS_TXSM)
1188 					ckinfo[1].ck_valid = 1;
1189 				if (dsc->dd.upper.fields.popts &
1190 				    E1000_TXD_POPTS_TXSM || tso) {
1191 					ckinfo[1].ck_start =
1192 					    cd->upper_setup.tcp_fields.tucss;
1193 					ckinfo[1].ck_off =
1194 					    cd->upper_setup.tcp_fields.tucso;
1195 					ckinfo[1].ck_len =
1196 					    cd->upper_setup.tcp_fields.tucse;
1197 				}
1198 			}
1199 			break;
1200 		}
1201 	}
1202 
1203 	if (iovcnt > I82545_MAX_TXSEGS) {
1204 		WPRINTF("tx too many descriptors (%d > %d) -- dropped\r\n",
1205 		    iovcnt, I82545_MAX_TXSEGS);
1206 		goto done;
1207 	}
1208 
1209 	hdrlen = vlen = 0;
1210 	/* Estimate writable space for VLAN header insertion. */
1211 	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1212 	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1213 		hdrlen = ETHER_ADDR_LEN*2;
1214 		vlen = ETHER_VLAN_ENCAP_LEN;
1215 	}
1216 	if (!tso) {
1217 		/* Estimate required writable space for checksums. */
1218 		if (ckinfo[0].ck_valid)
1219 			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2);
1220 		if (ckinfo[1].ck_valid)
1221 			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2);
1222 		/* Round up writable space to the first vector. */
1223 		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1224 		    iov[0].iov_len < hdrlen + 100)
1225 			hdrlen = iov[0].iov_len;
1226 	} else {
1227 		/* In case of TSO header length provided by software. */
1228 		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1229 	}
1230 
1231 	/* Allocate, fill and prepend writable header vector. */
1232 	if (hdrlen != 0) {
1233 		hdr = __builtin_alloca(hdrlen + vlen);
1234 		hdr += vlen;
1235 		for (left = hdrlen, hdrp = hdr; left > 0;
1236 		    left -= now, hdrp += now) {
1237 			now = MIN(left, iov->iov_len);
1238 			memcpy(hdrp, iov->iov_base, now);
1239 			iov->iov_base += now;
1240 			iov->iov_len -= now;
1241 			if (iov->iov_len == 0) {
1242 				iov++;
1243 				iovcnt--;
1244 			}
1245 		}
1246 		iov--;
1247 		iovcnt++;
1248 		iov->iov_base = hdr;
1249 		iov->iov_len = hdrlen;
1250 	}
1251 
1252 	/* Insert VLAN tag. */
1253 	if (vlen != 0) {
1254 		hdr -= ETHER_VLAN_ENCAP_LEN;
1255 		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1256 		hdrlen += ETHER_VLAN_ENCAP_LEN;
1257 		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1258 		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1259 		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1260 		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1261 		iov->iov_base = hdr;
1262 		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1263 		/* Correct checksum offsets after VLAN tag insertion. */
1264 		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1265 		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1266 		if (ckinfo[0].ck_len != 0)
1267 			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1268 		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1269 		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1270 		if (ckinfo[1].ck_len != 0)
1271 			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1272 	}
1273 
1274 	/* Simple non-TSO case. */
1275 	if (!tso) {
1276 		/* Calculate checksums and transmit. */
1277 		if (ckinfo[0].ck_valid)
1278 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1279 		if (ckinfo[1].ck_valid)
1280 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1281 		e82545_transmit_backend(sc, iov, iovcnt);
1282 		goto done;
1283 	}
1284 
1285 	/* Doing TSO. */
1286 	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1287 	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1288 	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1289 	DPRINTF("tx %s segmentation offload %d+%d/%d bytes %d iovs\r\n",
1290 	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1291 	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1292 	tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1293 	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1294 	tcpcs = 0;
1295 	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1296 		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1297 	pv = 1;
1298 	pvoff = 0;
1299 	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1300 		now = MIN(left, mss);
1301 
1302 		/* Construct IOVs for the segment. */
1303 		/* Include whole original header. */
1304 		tiov[0].iov_base = hdr;
1305 		tiov[0].iov_len = hdrlen;
1306 		tiovcnt = 1;
1307 		/* Include respective part of payload IOV. */
1308 		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1309 			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1310 			tiov[tiovcnt].iov_base = iov[pv].iov_base + pvoff;
1311 			tiov[tiovcnt++].iov_len = nnow;
1312 			if (pvoff + nnow == iov[pv].iov_len) {
1313 				pv++;
1314 				pvoff = 0;
1315 			} else
1316 				pvoff += nnow;
1317 		}
1318 		DPRINTF("tx segment %d %d+%d bytes %d iovs\r\n",
1319 		    seg, hdrlen, now, tiovcnt);
1320 
1321 		/* Update IP header. */
1322 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1323 			/* IPv4 -- set length and ID */
1324 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1325 			    htons(hdrlen - ckinfo[0].ck_start + now);
1326 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1327 			    htons(ipid + seg);
1328 		} else {
1329 			/* IPv6 -- set length */
1330 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1331 			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1332 				  now);
1333 		}
1334 
1335 		/* Update pseudo-header checksum. */
1336 		tcpsum = tcpcs;
1337 		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1338 
1339 		/* Update TCP/UDP headers. */
1340 		if (tcp) {
1341 			/* Update sequence number and FIN/PUSH flags. */
1342 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1343 			    htonl(tcpseq + paylen - left);
1344 			if (now < left) {
1345 				hdr[ckinfo[1].ck_start + 13] &=
1346 				    ~(TH_FIN | TH_PUSH);
1347 			}
1348 		} else {
1349 			/* Update payload length. */
1350 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1351 			    hdrlen - ckinfo[1].ck_start + now;
1352 		}
1353 
1354 		/* Calculate checksums and transmit. */
1355 		if (ckinfo[0].ck_valid) {
1356 			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1357 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1358 		}
1359 		if (ckinfo[1].ck_valid) {
1360 			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1361 			    e82545_carry(tcpsum);
1362 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1363 		}
1364 		e82545_transmit_backend(sc, tiov, tiovcnt);
1365 	}
1366 
1367 done:
1368 	head = (head + 1) % dsize;
1369 	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1370 
1371 	*rhead = head;
1372 	return (desc + 1);
1373 }
1374 
1375 static void
1376 e82545_tx_run(struct e82545_softc *sc)
1377 {
1378 	uint32_t cause;
1379 	uint16_t head, rhead, tail, size;
1380 	int lim, tdwb, sent;
1381 
1382 	head = sc->esc_TDH;
1383 	tail = sc->esc_TDT;
1384 	size = sc->esc_TDLEN / 16;
1385 	DPRINTF("tx_run: head %x, rhead %x, tail %x\r\n",
1386 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1387 
1388 	pthread_mutex_unlock(&sc->esc_mtx);
1389 	rhead = head;
1390 	tdwb = 0;
1391 	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1392 		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1393 		if (sent == 0)
1394 			break;
1395 		head = rhead;
1396 	}
1397 	pthread_mutex_lock(&sc->esc_mtx);
1398 
1399 	sc->esc_TDH = head;
1400 	sc->esc_TDHr = rhead;
1401 	cause = 0;
1402 	if (tdwb)
1403 		cause |= E1000_ICR_TXDW;
1404 	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1405 		cause |= E1000_ICR_TXQE;
1406 	if (cause)
1407 		e82545_icr_assert(sc, cause);
1408 
1409 	DPRINTF("tx_run done: head %x, rhead %x, tail %x\r\n",
1410 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1411 }
1412 
1413 static _Noreturn void *
1414 e82545_tx_thread(void *param)
1415 {
1416 	struct e82545_softc *sc = param;
1417 
1418 	pthread_mutex_lock(&sc->esc_mtx);
1419 	for (;;) {
1420 		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1421 			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1422 				break;
1423 			sc->esc_tx_active = 0;
1424 			if (sc->esc_tx_enabled == 0)
1425 				pthread_cond_signal(&sc->esc_tx_cond);
1426 			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1427 		}
1428 		sc->esc_tx_active = 1;
1429 
1430 		/* Process some tx descriptors.  Lock dropped inside. */
1431 		e82545_tx_run(sc);
1432 	}
1433 }
1434 
1435 static void
1436 e82545_tx_start(struct e82545_softc *sc)
1437 {
1438 
1439 	if (sc->esc_tx_active == 0)
1440 		pthread_cond_signal(&sc->esc_tx_cond);
1441 }
1442 
1443 static void
1444 e82545_tx_enable(struct e82545_softc *sc)
1445 {
1446 
1447 	sc->esc_tx_enabled = 1;
1448 }
1449 
1450 static void
1451 e82545_tx_disable(struct e82545_softc *sc)
1452 {
1453 
1454 	sc->esc_tx_enabled = 0;
1455 	while (sc->esc_tx_active)
1456 		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1457 }
1458 
1459 static void
1460 e82545_rx_enable(struct e82545_softc *sc)
1461 {
1462 
1463 	sc->esc_rx_enabled = 1;
1464 }
1465 
1466 static void
1467 e82545_rx_disable(struct e82545_softc *sc)
1468 {
1469 
1470 	sc->esc_rx_enabled = 0;
1471 	while (sc->esc_rx_active)
1472 		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1473 }
1474 
1475 static void
1476 e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1477 {
1478 	struct eth_uni *eu;
1479 	int idx;
1480 
1481 	idx = reg >> 1;
1482 	assert(idx < 15);
1483 
1484 	eu = &sc->esc_uni[idx];
1485 
1486 	if (reg & 0x1) {
1487 		/* RAH */
1488 		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1489 		eu->eu_addrsel = (wval >> 16) & 0x3;
1490 		eu->eu_eth.octet[5] = wval >> 8;
1491 		eu->eu_eth.octet[4] = wval;
1492 	} else {
1493 		/* RAL */
1494 		eu->eu_eth.octet[3] = wval >> 24;
1495 		eu->eu_eth.octet[2] = wval >> 16;
1496 		eu->eu_eth.octet[1] = wval >> 8;
1497 		eu->eu_eth.octet[0] = wval;
1498 	}
1499 }
1500 
1501 static uint32_t
1502 e82545_read_ra(struct e82545_softc *sc, int reg)
1503 {
1504 	struct eth_uni *eu;
1505 	uint32_t retval;
1506 	int idx;
1507 
1508 	idx = reg >> 1;
1509 	assert(idx < 15);
1510 
1511 	eu = &sc->esc_uni[idx];
1512 
1513 	if (reg & 0x1) {
1514 		/* RAH */
1515 		retval = (eu->eu_valid << 31) |
1516 			 (eu->eu_addrsel << 16) |
1517 			 (eu->eu_eth.octet[5] << 8) |
1518 			 eu->eu_eth.octet[4];
1519 	} else {
1520 		/* RAL */
1521 		retval = (eu->eu_eth.octet[3] << 24) |
1522 			 (eu->eu_eth.octet[2] << 16) |
1523 			 (eu->eu_eth.octet[1] << 8) |
1524 			 eu->eu_eth.octet[0];
1525 	}
1526 
1527 	return (retval);
1528 }
1529 
1530 static void
1531 e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1532 {
1533 	int ridx;
1534 
1535 	if (offset & 0x3) {
1536 		DPRINTF("Unaligned register write offset:0x%x value:0x%x\r\n", offset, value);
1537 		return;
1538 	}
1539 	DPRINTF("Register write: 0x%x value: 0x%x\r\n", offset, value);
1540 
1541 	switch (offset) {
1542 	case E1000_CTRL:
1543 	case E1000_CTRL_DUP:
1544 		e82545_devctl(sc, value);
1545 		break;
1546 	case E1000_FCAL:
1547 		sc->esc_FCAL = value;
1548 		break;
1549 	case E1000_FCAH:
1550 		sc->esc_FCAH = value & ~0xFFFF0000;
1551 		break;
1552 	case E1000_FCT:
1553 		sc->esc_FCT = value & ~0xFFFF0000;
1554 		break;
1555 	case E1000_VET:
1556 		sc->esc_VET = value & ~0xFFFF0000;
1557 		break;
1558 	case E1000_FCTTV:
1559 		sc->esc_FCTTV = value & ~0xFFFF0000;
1560 		break;
1561 	case E1000_LEDCTL:
1562 		sc->esc_LEDCTL = value & ~0x30303000;
1563 		break;
1564 	case E1000_PBA:
1565 		sc->esc_PBA = value & 0x0000FF80;
1566 		break;
1567 	case E1000_ICR:
1568 	case E1000_ITR:
1569 	case E1000_ICS:
1570 	case E1000_IMS:
1571 	case E1000_IMC:
1572 		e82545_intr_write(sc, offset, value);
1573 		break;
1574 	case E1000_RCTL:
1575 		e82545_rx_ctl(sc, value);
1576 		break;
1577 	case E1000_FCRTL:
1578 		sc->esc_FCRTL = value & ~0xFFFF0007;
1579 		break;
1580 	case E1000_FCRTH:
1581 		sc->esc_FCRTH = value & ~0xFFFF0007;
1582 		break;
1583 	case E1000_RDBAL(0):
1584 		sc->esc_RDBAL = value & ~0xF;
1585 		if (sc->esc_rx_enabled) {
1586 			/* Apparently legal: update cached address */
1587 			e82545_rx_update_rdba(sc);
1588 		}
1589 		break;
1590 	case E1000_RDBAH(0):
1591 		assert(!sc->esc_rx_enabled);
1592 		sc->esc_RDBAH = value;
1593 		break;
1594 	case E1000_RDLEN(0):
1595 		assert(!sc->esc_rx_enabled);
1596 		sc->esc_RDLEN = value & ~0xFFF0007F;
1597 		break;
1598 	case E1000_RDH(0):
1599 		/* XXX should only ever be zero ? Range check ? */
1600 		sc->esc_RDH = value;
1601 		break;
1602 	case E1000_RDT(0):
1603 		/* XXX if this opens up the rx ring, do something ? */
1604 		sc->esc_RDT = value;
1605 		break;
1606 	case E1000_RDTR:
1607 		/* ignore FPD bit 31 */
1608 		sc->esc_RDTR = value & ~0xFFFF0000;
1609 		break;
1610 	case E1000_RXDCTL(0):
1611 		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1612 		break;
1613 	case E1000_RADV:
1614 		sc->esc_RADV = value & ~0xFFFF0000;
1615 		break;
1616 	case E1000_RSRPD:
1617 		sc->esc_RSRPD = value & ~0xFFFFF000;
1618 		break;
1619 	case E1000_RXCSUM:
1620 		sc->esc_RXCSUM = value & ~0xFFFFF800;
1621 		break;
1622 	case E1000_TXCW:
1623 		sc->esc_TXCW = value & ~0x3FFF0000;
1624 		break;
1625 	case E1000_TCTL:
1626 		e82545_tx_ctl(sc, value);
1627 		break;
1628 	case E1000_TIPG:
1629 		sc->esc_TIPG = value;
1630 		break;
1631 	case E1000_AIT:
1632 		sc->esc_AIT = value;
1633 		break;
1634 	case E1000_TDBAL(0):
1635 		sc->esc_TDBAL = value & ~0xF;
1636 		if (sc->esc_tx_enabled) {
1637 			/* Apparently legal */
1638 			e82545_tx_update_tdba(sc);
1639 		}
1640 		break;
1641 	case E1000_TDBAH(0):
1642 		//assert(!sc->esc_tx_enabled);
1643 		sc->esc_TDBAH = value;
1644 		break;
1645 	case E1000_TDLEN(0):
1646 		//assert(!sc->esc_tx_enabled);
1647 		sc->esc_TDLEN = value & ~0xFFF0007F;
1648 		break;
1649 	case E1000_TDH(0):
1650 		//assert(!sc->esc_tx_enabled);
1651 		/* XXX should only ever be zero ? Range check ? */
1652 		sc->esc_TDHr = sc->esc_TDH = value;
1653 		break;
1654 	case E1000_TDT(0):
1655 		/* XXX range check ? */
1656 		sc->esc_TDT = value;
1657 		if (sc->esc_tx_enabled)
1658 			e82545_tx_start(sc);
1659 		break;
1660 	case E1000_TIDV:
1661 		sc->esc_TIDV = value & ~0xFFFF0000;
1662 		break;
1663 	case E1000_TXDCTL(0):
1664 		//assert(!sc->esc_tx_enabled);
1665 		sc->esc_TXDCTL = value & ~0xC0C0C0;
1666 		break;
1667 	case E1000_TADV:
1668 		sc->esc_TADV = value & ~0xFFFF0000;
1669 		break;
1670 	case E1000_RAL(0) ... E1000_RAH(15):
1671 		/* convert to u32 offset */
1672 		ridx = (offset - E1000_RAL(0)) >> 2;
1673 		e82545_write_ra(sc, ridx, value);
1674 		break;
1675 	case E1000_MTA ... (E1000_MTA + (127*4)):
1676 		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1677 		break;
1678 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1679 		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1680 		break;
1681 	case E1000_EECD:
1682 	{
1683 		//DPRINTF("EECD write 0x%x -> 0x%x\r\n", sc->eeprom_control, value);
1684 		/* edge triggered low->high */
1685 		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1686 			0 : (value & E1000_EECD_SK));
1687 		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1688 					E1000_EECD_DI|E1000_EECD_REQ);
1689 		sc->eeprom_control &= ~eecd_mask;
1690 		sc->eeprom_control |= (value & eecd_mask);
1691 		/* grant/revoke immediately */
1692 		if (value & E1000_EECD_REQ) {
1693 			sc->eeprom_control |= E1000_EECD_GNT;
1694 		} else {
1695                         sc->eeprom_control &= ~E1000_EECD_GNT;
1696 		}
1697 		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1698 			e82545_eecd_strobe(sc);
1699 		}
1700 		return;
1701 	}
1702 	case E1000_MDIC:
1703 	{
1704 		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1705 						E1000_MDIC_REG_SHIFT);
1706 		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1707 						E1000_MDIC_PHY_SHIFT);
1708 		sc->mdi_control =
1709 			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1710 		if ((value & E1000_MDIC_READY) != 0) {
1711 			DPRINTF("Incorrect MDIC ready bit: 0x%x\r\n", value);
1712 			return;
1713 		}
1714 		switch (value & E82545_MDIC_OP_MASK) {
1715 		case E1000_MDIC_OP_READ:
1716 			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1717 			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1718 			break;
1719 		case E1000_MDIC_OP_WRITE:
1720 			e82545_write_mdi(sc, reg_addr, phy_addr,
1721 				value & E82545_MDIC_DATA_MASK);
1722 			break;
1723 		default:
1724 			DPRINTF("Unknown MDIC op: 0x%x\r\n", value);
1725 			return;
1726 		}
1727 		/* TODO: barrier? */
1728 		sc->mdi_control |= E1000_MDIC_READY;
1729 		if (value & E82545_MDIC_IE) {
1730 			// TODO: generate interrupt
1731 		}
1732 		return;
1733 	}
1734 	case E1000_MANC:
1735 	case E1000_STATUS:
1736 		return;
1737 	default:
1738 		DPRINTF("Unknown write register: 0x%x value:%x\r\n", offset, value);
1739 		return;
1740 	}
1741 }
1742 
1743 static uint32_t
1744 e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1745 {
1746 	uint32_t retval;
1747 	int ridx;
1748 
1749 	if (offset & 0x3) {
1750 		DPRINTF("Unaligned register read offset:0x%x\r\n", offset);
1751 		return 0;
1752 	}
1753 
1754 	DPRINTF("Register read: 0x%x\r\n", offset);
1755 
1756 	switch (offset) {
1757 	case E1000_CTRL:
1758 		retval = sc->esc_CTRL;
1759 		break;
1760 	case E1000_STATUS:
1761 		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1762 		    E1000_STATUS_SPEED_1000;
1763 		break;
1764 	case E1000_FCAL:
1765 		retval = sc->esc_FCAL;
1766 		break;
1767 	case E1000_FCAH:
1768 		retval = sc->esc_FCAH;
1769 		break;
1770 	case E1000_FCT:
1771 		retval = sc->esc_FCT;
1772 		break;
1773 	case E1000_VET:
1774 		retval = sc->esc_VET;
1775 		break;
1776 	case E1000_FCTTV:
1777 		retval = sc->esc_FCTTV;
1778 		break;
1779 	case E1000_LEDCTL:
1780 		retval = sc->esc_LEDCTL;
1781 		break;
1782 	case E1000_PBA:
1783 		retval = sc->esc_PBA;
1784 		break;
1785 	case E1000_ICR:
1786 	case E1000_ITR:
1787 	case E1000_ICS:
1788 	case E1000_IMS:
1789 	case E1000_IMC:
1790 		retval = e82545_intr_read(sc, offset);
1791 		break;
1792 	case E1000_RCTL:
1793 		retval = sc->esc_RCTL;
1794 		break;
1795 	case E1000_FCRTL:
1796 		retval = sc->esc_FCRTL;
1797 		break;
1798 	case E1000_FCRTH:
1799 		retval = sc->esc_FCRTH;
1800 		break;
1801 	case E1000_RDBAL(0):
1802 		retval = sc->esc_RDBAL;
1803 		break;
1804 	case E1000_RDBAH(0):
1805 		retval = sc->esc_RDBAH;
1806 		break;
1807 	case E1000_RDLEN(0):
1808 		retval = sc->esc_RDLEN;
1809 		break;
1810 	case E1000_RDH(0):
1811 		retval = sc->esc_RDH;
1812 		break;
1813 	case E1000_RDT(0):
1814 		retval = sc->esc_RDT;
1815 		break;
1816 	case E1000_RDTR:
1817 		retval = sc->esc_RDTR;
1818 		break;
1819 	case E1000_RXDCTL(0):
1820 		retval = sc->esc_RXDCTL;
1821 		break;
1822 	case E1000_RADV:
1823 		retval = sc->esc_RADV;
1824 		break;
1825 	case E1000_RSRPD:
1826 		retval = sc->esc_RSRPD;
1827 		break;
1828 	case E1000_RXCSUM:
1829 		retval = sc->esc_RXCSUM;
1830 		break;
1831 	case E1000_TXCW:
1832 		retval = sc->esc_TXCW;
1833 		break;
1834 	case E1000_TCTL:
1835 		retval = sc->esc_TCTL;
1836 		break;
1837 	case E1000_TIPG:
1838 		retval = sc->esc_TIPG;
1839 		break;
1840 	case E1000_AIT:
1841 		retval = sc->esc_AIT;
1842 		break;
1843 	case E1000_TDBAL(0):
1844 		retval = sc->esc_TDBAL;
1845 		break;
1846 	case E1000_TDBAH(0):
1847 		retval = sc->esc_TDBAH;
1848 		break;
1849 	case E1000_TDLEN(0):
1850 		retval = sc->esc_TDLEN;
1851 		break;
1852 	case E1000_TDH(0):
1853 		retval = sc->esc_TDH;
1854 		break;
1855 	case E1000_TDT(0):
1856 		retval = sc->esc_TDT;
1857 		break;
1858 	case E1000_TIDV:
1859 		retval = sc->esc_TIDV;
1860 		break;
1861 	case E1000_TXDCTL(0):
1862 		retval = sc->esc_TXDCTL;
1863 		break;
1864 	case E1000_TADV:
1865 		retval = sc->esc_TADV;
1866 		break;
1867 	case E1000_RAL(0) ... E1000_RAH(15):
1868 		/* convert to u32 offset */
1869 		ridx = (offset - E1000_RAL(0)) >> 2;
1870 		retval = e82545_read_ra(sc, ridx);
1871 		break;
1872 	case E1000_MTA ... (E1000_MTA + (127*4)):
1873 		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
1874 		break;
1875 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1876 		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
1877 		break;
1878 	case E1000_EECD:
1879 		//DPRINTF("EECD read %x\r\n", sc->eeprom_control);
1880 		retval = sc->eeprom_control;
1881 		break;
1882 	case E1000_MDIC:
1883 		retval = sc->mdi_control;
1884 		break;
1885 	case E1000_MANC:
1886 		retval = 0;
1887 		break;
1888 	/* stats that we emulate. */
1889 	case E1000_MPC:
1890 		retval = sc->missed_pkt_count;
1891 		break;
1892 	case E1000_PRC64:
1893 		retval = sc->pkt_rx_by_size[0];
1894 		break;
1895 	case E1000_PRC127:
1896 		retval = sc->pkt_rx_by_size[1];
1897 		break;
1898 	case E1000_PRC255:
1899 		retval = sc->pkt_rx_by_size[2];
1900 		break;
1901 	case E1000_PRC511:
1902 		retval = sc->pkt_rx_by_size[3];
1903 		break;
1904 	case E1000_PRC1023:
1905 		retval = sc->pkt_rx_by_size[4];
1906 		break;
1907 	case E1000_PRC1522:
1908 		retval = sc->pkt_rx_by_size[5];
1909 		break;
1910 	case E1000_GPRC:
1911 		retval = sc->good_pkt_rx_count;
1912 		break;
1913 	case E1000_BPRC:
1914 		retval = sc->bcast_pkt_rx_count;
1915 		break;
1916 	case E1000_MPRC:
1917 		retval = sc->mcast_pkt_rx_count;
1918 		break;
1919 	case E1000_GPTC:
1920 	case E1000_TPT:
1921 		retval = sc->good_pkt_tx_count;
1922 		break;
1923 	case E1000_GORCL:
1924 		retval = (uint32_t)sc->good_octets_rx;
1925 		break;
1926 	case E1000_GORCH:
1927 		retval = (uint32_t)(sc->good_octets_rx >> 32);
1928 		break;
1929 	case E1000_TOTL:
1930 	case E1000_GOTCL:
1931 		retval = (uint32_t)sc->good_octets_tx;
1932 		break;
1933 	case E1000_TOTH:
1934 	case E1000_GOTCH:
1935 		retval = (uint32_t)(sc->good_octets_tx >> 32);
1936 		break;
1937 	case E1000_ROC:
1938 		retval = sc->oversize_rx_count;
1939 		break;
1940 	case E1000_TORL:
1941 		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
1942 		break;
1943 	case E1000_TORH:
1944 		retval = (uint32_t)((sc->good_octets_rx +
1945 		    sc->missed_octets) >> 32);
1946 		break;
1947 	case E1000_TPR:
1948 		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
1949 		    sc->oversize_rx_count;
1950 		break;
1951 	case E1000_PTC64:
1952 		retval = sc->pkt_tx_by_size[0];
1953 		break;
1954 	case E1000_PTC127:
1955 		retval = sc->pkt_tx_by_size[1];
1956 		break;
1957 	case E1000_PTC255:
1958 		retval = sc->pkt_tx_by_size[2];
1959 		break;
1960 	case E1000_PTC511:
1961 		retval = sc->pkt_tx_by_size[3];
1962 		break;
1963 	case E1000_PTC1023:
1964 		retval = sc->pkt_tx_by_size[4];
1965 		break;
1966 	case E1000_PTC1522:
1967 		retval = sc->pkt_tx_by_size[5];
1968 		break;
1969 	case E1000_MPTC:
1970 		retval = sc->mcast_pkt_tx_count;
1971 		break;
1972 	case E1000_BPTC:
1973 		retval = sc->bcast_pkt_tx_count;
1974 		break;
1975 	case E1000_TSCTC:
1976 		retval = sc->tso_tx_count;
1977 		break;
1978 	/* stats that are always 0. */
1979 	case E1000_CRCERRS:
1980 	case E1000_ALGNERRC:
1981 	case E1000_SYMERRS:
1982 	case E1000_RXERRC:
1983 	case E1000_SCC:
1984 	case E1000_ECOL:
1985 	case E1000_MCC:
1986 	case E1000_LATECOL:
1987 	case E1000_COLC:
1988 	case E1000_DC:
1989 	case E1000_TNCRS:
1990 	case E1000_SEC:
1991 	case E1000_CEXTERR:
1992 	case E1000_RLEC:
1993 	case E1000_XONRXC:
1994 	case E1000_XONTXC:
1995 	case E1000_XOFFRXC:
1996 	case E1000_XOFFTXC:
1997 	case E1000_FCRUC:
1998 	case E1000_RNBC:
1999 	case E1000_RUC:
2000 	case E1000_RFC:
2001 	case E1000_RJC:
2002 	case E1000_MGTPRC:
2003 	case E1000_MGTPDC:
2004 	case E1000_MGTPTC:
2005 	case E1000_TSCTFC:
2006 		retval = 0;
2007 		break;
2008 	default:
2009 		DPRINTF("Unknown read register: 0x%x\r\n", offset);
2010 		retval = 0;
2011 		break;
2012 	}
2013 
2014 	return (retval);
2015 }
2016 
2017 static void
2018 e82545_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2019 	     uint64_t offset, int size, uint64_t value)
2020 {
2021 	struct e82545_softc *sc;
2022 
2023 	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d\r\n", baridx, offset, value, size);
2024 
2025 	sc = pi->pi_arg;
2026 
2027 	pthread_mutex_lock(&sc->esc_mtx);
2028 
2029 	switch (baridx) {
2030 	case E82545_BAR_IO:
2031 		switch (offset) {
2032 		case E82545_IOADDR:
2033 			if (size != 4) {
2034 				DPRINTF("Wrong io addr write sz:%d value:0x%lx\r\n", size, value);
2035 			} else
2036 				sc->io_addr = (uint32_t)value;
2037 			break;
2038 		case E82545_IODATA:
2039 			if (size != 4) {
2040 				DPRINTF("Wrong io data write size:%d value:0x%lx\r\n", size, value);
2041 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2042 				DPRINTF("Non-register io write addr:0x%x value:0x%lx\r\n", sc->io_addr, value);
2043 			} else
2044 				e82545_write_register(sc, sc->io_addr,
2045 						      (uint32_t)value);
2046 			break;
2047 		default:
2048 			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d\r\n", offset, value, size);
2049 			break;
2050 		}
2051 		break;
2052 	case E82545_BAR_REGISTER:
2053 		if (size != 4) {
2054 			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx\r\n", size, offset, value);
2055 		} else
2056 			e82545_write_register(sc, (uint32_t)offset,
2057 					      (uint32_t)value);
2058 		break;
2059 	default:
2060 		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d\r\n",
2061 			baridx, offset, value, size);
2062 	}
2063 
2064 	pthread_mutex_unlock(&sc->esc_mtx);
2065 }
2066 
2067 static uint64_t
2068 e82545_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2069 	    uint64_t offset, int size)
2070 {
2071 	struct e82545_softc *sc;
2072 	uint64_t retval;
2073 
2074 	//DPRINTF("Read  bar:%d offset:0x%lx size:%d\r\n", baridx, offset, size);
2075 	sc = pi->pi_arg;
2076 	retval = 0;
2077 
2078 	pthread_mutex_lock(&sc->esc_mtx);
2079 
2080 	switch (baridx) {
2081 	case E82545_BAR_IO:
2082 		switch (offset) {
2083 		case E82545_IOADDR:
2084 			if (size != 4) {
2085 				DPRINTF("Wrong io addr read sz:%d\r\n", size);
2086 			} else
2087 				retval = sc->io_addr;
2088 			break;
2089 		case E82545_IODATA:
2090 			if (size != 4) {
2091 				DPRINTF("Wrong io data read sz:%d\r\n", size);
2092 			}
2093 			if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2094 				DPRINTF("Non-register io read addr:0x%x\r\n",
2095 					sc->io_addr);
2096 			} else
2097 				retval = e82545_read_register(sc, sc->io_addr);
2098 			break;
2099 		default:
2100 			DPRINTF("Unknown io bar read offset:0x%lx size:%d\r\n",
2101 				offset, size);
2102 			break;
2103 		}
2104 		break;
2105 	case E82545_BAR_REGISTER:
2106 		if (size != 4) {
2107 			DPRINTF("Wrong register read size:%d offset:0x%lx\r\n",
2108 				size, offset);
2109 		} else
2110 			retval = e82545_read_register(sc, (uint32_t)offset);
2111 		break;
2112 	default:
2113 		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d\r\n",
2114 			baridx, offset, size);
2115 		break;
2116 	}
2117 
2118 	pthread_mutex_unlock(&sc->esc_mtx);
2119 
2120 	return (retval);
2121 }
2122 
2123 static void
2124 e82545_reset(struct e82545_softc *sc, int drvr)
2125 {
2126 	int i;
2127 
2128 	e82545_rx_disable(sc);
2129 	e82545_tx_disable(sc);
2130 
2131 	/* clear outstanding interrupts */
2132 	if (sc->esc_irq_asserted)
2133 		pci_lintr_deassert(sc->esc_pi);
2134 
2135 	/* misc */
2136 	if (!drvr) {
2137 		sc->esc_FCAL = 0;
2138 		sc->esc_FCAH = 0;
2139 		sc->esc_FCT = 0;
2140 		sc->esc_VET = 0;
2141 		sc->esc_FCTTV = 0;
2142 	}
2143 	sc->esc_LEDCTL = 0x07061302;
2144 	sc->esc_PBA = 0x00100030;
2145 
2146 	/* start nvm in opcode mode. */
2147 	sc->nvm_opaddr = 0;
2148 	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2149 	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2150 	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2151 	e82545_init_eeprom(sc);
2152 
2153 	/* interrupt */
2154 	sc->esc_ICR = 0;
2155 	sc->esc_ITR = 250;
2156 	sc->esc_ICS = 0;
2157 	sc->esc_IMS = 0;
2158 	sc->esc_IMC = 0;
2159 
2160 	/* L2 filters */
2161 	if (!drvr) {
2162 		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2163 		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2164 		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2165 
2166 		/* XXX not necessary on 82545 ?? */
2167 		sc->esc_uni[0].eu_valid = 1;
2168 		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2169 		    ETHER_ADDR_LEN);
2170 	} else {
2171 		/* Clear RAH valid bits */
2172 		for (i = 0; i < 16; i++)
2173 			sc->esc_uni[i].eu_valid = 0;
2174 	}
2175 
2176 	/* receive */
2177 	if (!drvr) {
2178 		sc->esc_RDBAL = 0;
2179 		sc->esc_RDBAH = 0;
2180 	}
2181 	sc->esc_RCTL = 0;
2182 	sc->esc_FCRTL = 0;
2183 	sc->esc_FCRTH = 0;
2184 	sc->esc_RDLEN = 0;
2185 	sc->esc_RDH = 0;
2186 	sc->esc_RDT = 0;
2187 	sc->esc_RDTR = 0;
2188 	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2189 	sc->esc_RADV = 0;
2190 	sc->esc_RXCSUM = 0;
2191 
2192 	/* transmit */
2193 	if (!drvr) {
2194 		sc->esc_TDBAL = 0;
2195 		sc->esc_TDBAH = 0;
2196 		sc->esc_TIPG = 0;
2197 		sc->esc_AIT = 0;
2198 		sc->esc_TIDV = 0;
2199 		sc->esc_TADV = 0;
2200 	}
2201 	sc->esc_tdba = 0;
2202 	sc->esc_txdesc = NULL;
2203 	sc->esc_TXCW = 0;
2204 	sc->esc_TCTL = 0;
2205 	sc->esc_TDLEN = 0;
2206 	sc->esc_TDT = 0;
2207 	sc->esc_TDHr = sc->esc_TDH = 0;
2208 	sc->esc_TXDCTL = 0;
2209 }
2210 
2211 static void
2212 e82545_open_tap(struct e82545_softc *sc, char *opts)
2213 {
2214 	char tbuf[80];
2215 #ifndef WITHOUT_CAPSICUM
2216 	cap_rights_t rights;
2217 #endif
2218 
2219 	if (opts == NULL) {
2220 		sc->esc_tapfd = -1;
2221 		return;
2222 	}
2223 
2224 	strcpy(tbuf, "/dev/");
2225 	strlcat(tbuf, opts, sizeof(tbuf));
2226 
2227 	sc->esc_tapfd = open(tbuf, O_RDWR);
2228 	if (sc->esc_tapfd == -1) {
2229 		DPRINTF("unable to open tap device %s\n", opts);
2230 		exit(4);
2231 	}
2232 
2233 	/*
2234 	 * Set non-blocking and register for read
2235 	 * notifications with the event loop
2236 	 */
2237 	int opt = 1;
2238 	if (ioctl(sc->esc_tapfd, FIONBIO, &opt) < 0) {
2239 		WPRINTF("tap device O_NONBLOCK failed: %d\n", errno);
2240 		close(sc->esc_tapfd);
2241 		sc->esc_tapfd = -1;
2242 	}
2243 
2244 #ifndef WITHOUT_CAPSICUM
2245 	cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
2246 	if (caph_rights_limit(sc->esc_tapfd, &rights) == -1)
2247 		errx(EX_OSERR, "Unable to apply rights for sandbox");
2248 #endif
2249 
2250 	sc->esc_mevp = mevent_add(sc->esc_tapfd,
2251 				  EVF_READ,
2252 				  e82545_tap_callback,
2253 				  sc);
2254 	if (sc->esc_mevp == NULL) {
2255 		DPRINTF("Could not register mevent %d\n", EVF_READ);
2256 		close(sc->esc_tapfd);
2257 		sc->esc_tapfd = -1;
2258 	}
2259 }
2260 
2261 static int
2262 e82545_parsemac(char *mac_str, uint8_t *mac_addr)
2263 {
2264 	struct ether_addr *ea;
2265 	char *tmpstr;
2266 	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2267 
2268 	tmpstr = strsep(&mac_str,"=");
2269 	if ((mac_str != NULL) && (!strcmp(tmpstr,"mac"))) {
2270 		ea = ether_aton(mac_str);
2271 		if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
2272 		    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
2273 			fprintf(stderr, "Invalid MAC %s\n", mac_str);
2274 			return (1);
2275 		} else
2276 			memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
2277 	}
2278 	return (0);
2279 }
2280 
2281 static int
2282 e82545_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2283 {
2284 	DPRINTF("Loading with options: %s\r\n", opts);
2285 
2286 	MD5_CTX mdctx;
2287 	unsigned char digest[16];
2288 	char nstr[80];
2289 	struct e82545_softc *sc;
2290 	char *devname;
2291 	char *vtopts;
2292 	int mac_provided;
2293 
2294 	/* Setup our softc */
2295 	sc = calloc(1, sizeof(*sc));
2296 
2297 	pi->pi_arg = sc;
2298 	sc->esc_pi = pi;
2299 	sc->esc_ctx = ctx;
2300 
2301 	pthread_mutex_init(&sc->esc_mtx, NULL);
2302 	pthread_cond_init(&sc->esc_rx_cond, NULL);
2303 	pthread_cond_init(&sc->esc_tx_cond, NULL);
2304 	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2305 	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2306 	    pi->pi_func);
2307         pthread_set_name_np(sc->esc_tx_tid, nstr);
2308 
2309 	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2310 	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2311 	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2312 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2313 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2314 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2315 
2316 	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2317 	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2318 
2319 	/* TODO: this card also supports msi, but the freebsd driver for it
2320 	 * does not, so I have not implemented it. */
2321 	pci_lintr_request(pi);
2322 
2323 	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2324 		E82545_BAR_REGISTER_LEN);
2325 	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2326 		E82545_BAR_FLASH_LEN);
2327 	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2328 		E82545_BAR_IO_LEN);
2329 
2330 	/*
2331 	 * Attempt to open the tap device and read the MAC address
2332 	 * if specified.  Copied from virtio-net, slightly modified.
2333 	 */
2334 	mac_provided = 0;
2335 	sc->esc_tapfd = -1;
2336 	if (opts != NULL) {
2337 		int err;
2338 
2339 		devname = vtopts = strdup(opts);
2340 		(void) strsep(&vtopts, ",");
2341 
2342 		if (vtopts != NULL) {
2343 			err = e82545_parsemac(vtopts, sc->esc_mac.octet);
2344 			if (err != 0) {
2345 				free(devname);
2346 				return (err);
2347 			}
2348 			mac_provided = 1;
2349 		}
2350 
2351 		if (strncmp(devname, "tap", 3) == 0 ||
2352 		    strncmp(devname, "vmnet", 5) == 0)
2353 			e82545_open_tap(sc, devname);
2354 
2355 		free(devname);
2356 	}
2357 
2358 	/*
2359 	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
2360 	 * followed by an MD5 of the PCI slot/func number and dev name
2361 	 */
2362 	if (!mac_provided) {
2363 		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
2364 		    pi->pi_func, vmname);
2365 
2366 		MD5Init(&mdctx);
2367 		MD5Update(&mdctx, nstr, strlen(nstr));
2368 		MD5Final(digest, &mdctx);
2369 
2370 		sc->esc_mac.octet[0] = 0x00;
2371 		sc->esc_mac.octet[1] = 0xa0;
2372 		sc->esc_mac.octet[2] = 0x98;
2373 		sc->esc_mac.octet[3] = digest[0];
2374 		sc->esc_mac.octet[4] = digest[1];
2375 		sc->esc_mac.octet[5] = digest[2];
2376 	}
2377 
2378 	/* H/w initiated reset */
2379 	e82545_reset(sc, 0);
2380 
2381 	return (0);
2382 }
2383 
2384 struct pci_devemu pci_de_e82545 = {
2385 	.pe_emu = 	"e1000",
2386 	.pe_init =	e82545_init,
2387 	.pe_barwrite =	e82545_write,
2388 	.pe_barread =	e82545_read
2389 };
2390 PCI_EMUL_SET(pci_de_e82545);
2391 
2392