/*
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/ioctl.h>
#include <sys/viona_io.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <signal.h>
#include <poll.h>
#include <libdladm.h>
#include <libdllink.h>
#include <libdlvnic.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "config.h"
#include "pci_emul.h"
#include "virtio.h"

#define	VIONA_RINGSZ	1024

/*
 * Register offsets of the device-specific config region within the I/O BAR
 * (MSI-X layout): CFG0-CFG5 hold the MAC address, CFG6-CFG7 the net status
 * word.
 */
#define	VIONA_R_CFG0	24
#define	VIONA_R_CFG1	25
#define	VIONA_R_CFG2	26
#define	VIONA_R_CFG3	27
#define	VIONA_R_CFG4	28
#define	VIONA_R_CFG5	29
#define	VIONA_R_CFG6	30
#define	VIONA_R_CFG7	31
#define	VIONA_R_MAX	31

#define	VIONA_REGSZ	(VIONA_R_MAX + 1)

/*
 * Queue definitions.
 */
#define	VIONA_RXQ	0
#define	VIONA_TXQ	1
#define	VIONA_CTLQ	2

#define	VIONA_MAXQ	3

/*
 * Debug printf
 */
static volatile int pci_viona_debug;
#define	DPRINTF(params) if (pci_viona_debug) printf params
#define	WPRINTF(params) printf params

/*
 * Per-device softc
 */
struct pci_viona_softc {
	struct pci_devinst *vsc_pi;
	pthread_mutex_t vsc_mtx;

	int		vsc_curq;
	int		vsc_status;
	int		vsc_isr;

	datalink_id_t	vsc_linkid;
	int		vsc_vnafd;

	/* Configurable parameters */
	char		vsc_linkname[MAXLINKNAMELEN];
	uint32_t	vsc_feature_mask;
	uint16_t	vsc_vq_size;

	uint32_t	vsc_features;
	uint8_t		vsc_macaddr[6];

	uint64_t	vsc_pfn[VIONA_MAXQ];
	uint16_t	vsc_msix_table_idx[VIONA_MAXQ];
	boolean_t	vsc_msix_active;
};

/*
 * Return the size of the I/O BAR that maps the virtio header and the
 * device-specific config region.  The size varies depending on whether
 * MSI-X is enabled.
 */
static uint64_t
pci_viona_iosize(struct pci_devinst *pi)
{
	if (pci_msix_enabled(pi)) {
		return (VIONA_REGSZ);
	} else {
		return (VIONA_REGSZ -
		    (VIRTIO_PCI_CONFIG_OFF(1) - VIRTIO_PCI_CONFIG_OFF(0)));
	}
}

static uint16_t
pci_viona_qsize(struct pci_viona_softc *sc, int qnum)
{
	/* XXX no ctl queue currently */
	if (qnum == VIONA_CTLQ) {
		return (0);
	}

	return (sc->vsc_vq_size);
}

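/*
 * Ask the in-kernel viona driver to reset a ring, retrying if the ioctl is
 * interrupted.  Only the RX and TX rings are backed by the kernel; the
 * (unimplemented) control queue is ignored.
 */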
static void
pci_viona_ring_reset(struct pci_viona_softc *sc, int ring)
{
	assert(ring < VIONA_MAXQ);

	switch (ring) {
	case VIONA_RXQ:
	case VIONA_TXQ:
		break;
	case VIONA_CTLQ:
	default:
		return;
	}

	for (;;) {
		int res;

		res = ioctl(sc->vsc_vnafd, VNA_IOC_RING_RESET, ring);
		if (res == 0) {
			break;
		} else if (errno != EINTR) {
			WPRINTF(("ioctl viona ring %d reset failed %d\n",
			    ring, errno));
			return;
		}
	}

	sc->vsc_pfn[ring] = 0;
}

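/*
 * Handle a write to the virtio status register.  A value of zero is a
 * device-reset request, so both kernel-backed rings are reset before the
 * new status is recorded.
 */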
static void
pci_viona_update_status(struct pci_viona_softc *sc, uint32_t value)
{
	if (value == 0) {
		DPRINTF(("viona: device reset requested!\n"));
		pci_viona_ring_reset(sc, VIONA_RXQ);
		pci_viona_ring_reset(sc, VIONA_TXQ);
	}

	sc->vsc_status = value;
}

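/*
 * Interrupt-delivery worker: block in poll() on the viona device, where
 * POLLRDBAND indicates pending ring interrupts.  For each ring flagged by
 * VNA_IOC_INTR_POLL, deliver an MSI-X message (or fall back to asserting
 * the legacy INTx line) and then clear the in-kernel interrupt state.
 */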
static void *
pci_viona_poll_thread(void *param)
{
	struct pci_viona_softc *sc = param;
	pollfd_t pollset;
	const int fd = sc->vsc_vnafd;

	pollset.fd = fd;
	pollset.events = POLLRDBAND;

	for (;;) {
		if (poll(&pollset, 1, -1) < 0) {
			if (errno == EINTR || errno == EAGAIN) {
				continue;
			} else {
				WPRINTF(("pci_viona_poll_thread poll() "
				    "error %d\n", errno));
				break;
			}
		}
		if (pollset.revents & POLLRDBAND) {
			vioc_intr_poll_t vip;
			uint_t i;
			int res;
			boolean_t assert_lintr = B_FALSE;
			const boolean_t do_msix = pci_msix_enabled(sc->vsc_pi);

			res = ioctl(fd, VNA_IOC_INTR_POLL, &vip);
			for (i = 0; res > 0 && i < VIONA_VQ_MAX; i++) {
				if (vip.vip_status[i] == 0) {
					continue;
				}
				if (do_msix) {
					pci_generate_msix(sc->vsc_pi,
					    sc->vsc_msix_table_idx[i]);
				} else {
					assert_lintr = B_TRUE;
				}
				res = ioctl(fd, VNA_IOC_RING_INTR_CLR, i);
				if (res != 0) {
					WPRINTF(("ioctl viona vq %d intr "
					    "clear failed %d\n", i, errno));
				}
			}
			if (assert_lintr) {
				pthread_mutex_lock(&sc->vsc_mtx);
				sc->vsc_isr |= VTCFG_ISR_QUEUES;
				pci_lintr_assert(sc->vsc_pi);
				pthread_mutex_unlock(&sc->vsc_mtx);
			}
		}
	}

	pthread_exit(NULL);
}

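/*
 * Program the currently selected queue into the kernel driver: translate
 * the guest-written PFN into a ring address and hand it, along with the
 * queue size, to VNA_IOC_RING_INIT.
 */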
static void
pci_viona_ring_init(struct pci_viona_softc *sc, uint64_t pfn)
{
	int			qnum = sc->vsc_curq;
	vioc_ring_init_t	vna_ri;
	int			error;

	assert(qnum < VIONA_MAXQ);

	if (qnum == VIONA_CTLQ) {
		return;
	}

	sc->vsc_pfn[qnum] = (pfn << VRING_PFN);

	vna_ri.ri_index = qnum;
	vna_ri.ri_qsize = pci_viona_qsize(sc, qnum);
	vna_ri.ri_qaddr = (pfn << VRING_PFN);
	error = ioctl(sc->vsc_vnafd, VNA_IOC_RING_INIT, &vna_ri);

	if (error != 0) {
		WPRINTF(("ioctl viona ring %u init failed %d\n", qnum, errno));
	}
}

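/*
 * Open a new viona instance and bind it to both the VNIC datalink and the
 * VM file descriptor via VNA_IOC_CREATE.
 */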
static int
pci_viona_viona_init(struct vmctx *ctx, struct pci_viona_softc *sc)
{
	vioc_create_t		vna_create;
	int			error;

	sc->vsc_vnafd = open("/dev/viona", O_RDWR | O_EXCL);
	if (sc->vsc_vnafd == -1) {
		WPRINTF(("open viona ctl failed: %d\n", errno));
		return (-1);
	}

	vna_create.c_linkid = sc->vsc_linkid;
	vna_create.c_vmfd = vm_get_device_fd(ctx);
	error = ioctl(sc->vsc_vnafd, VNA_IOC_CREATE, &vna_create);
	if (error != 0) {
		(void) close(sc->vsc_vnafd);
		WPRINTF(("ioctl viona create failed %d\n", errno));
		return (-1);
	}

	return (0);
}

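/*
 * Convert a legacy comma-separated option string (for example
 * "net0,feature_mask=0x1", with a hypothetical VNIC name) into config
 * nvlist nodes: "key=value" tokens are stored under their own names, while
 * a bare token is taken to be the VNIC name.
 */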
static int
pci_viona_legacy_config(nvlist_t *nvl, const char *opt)
{
	char *config, *name, *tofree, *value;

	if (opt == NULL)
		return (0);

	config = tofree = strdup(opt);
	while ((name = strsep(&config, ",")) != NULL) {
		value = strchr(name, '=');
		if (value != NULL) {
			*value++ = '\0';
			set_config_value_node(nvl, name, value);
		} else {
			set_config_value_node(nvl, "vnic", name);
		}
	}
	free(tofree);
	return (0);
}

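/*
 * Parse the device options from the config nvlist:
 *	vnic		name of the VNIC datalink to attach (required)
 *	vqsize		virtqueue size, a power of two between 4 and 32768
 *			(defaults to VIONA_RINGSZ)
 *	feature_mask	virtio feature bits to strip during negotiation
 */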
static int
pci_viona_parse_opts(struct pci_viona_softc *sc, nvlist_t *nvl)
{
	const char *value;
	int err = 0;

	sc->vsc_vq_size = VIONA_RINGSZ;
	sc->vsc_feature_mask = 0;
	sc->vsc_linkname[0] = '\0';

	value = get_config_value_node(nvl, "feature_mask");
	if (value != NULL) {
		long num;

		errno = 0;
		num = strtol(value, NULL, 0);
		if (errno != 0 || num < 0) {
			fprintf(stderr,
			    "viona: invalid mask '%s'\n", value);
		} else {
			sc->vsc_feature_mask = num;
		}
	}

	value = get_config_value_node(nvl, "vqsize");
	if (value != NULL) {
		long num;

		errno = 0;
		num = strtol(value, NULL, 0);
		if (errno != 0) {
			fprintf(stderr,
			    "viona: invalid vqsize '%s'\n", value);
			err = -1;
		} else if (num <= 2 || num > 32768) {
			fprintf(stderr,
			    "viona: vqsize %ld out of range\n", num);
			err = -1;
		} else if ((1 << (ffs(num) - 1)) != num) {
			fprintf(stderr,
			    "viona: vqsize %ld must be a power of 2\n", num);
			err = -1;
		} else {
			sc->vsc_vq_size = num;
		}
	}

	value = get_config_value_node(nvl, "vnic");
	if (value == NULL) {
		fprintf(stderr, "viona: vnic name required\n");
		err = -1;
	} else {
		(void) strlcpy(sc->vsc_linkname, value, MAXLINKNAMELEN);
	}

	DPRINTF(("viona=%p dev=%s vqsize=%x feature_mask=%x\n", sc,
	    sc->vsc_linkname, sc->vsc_vq_size, sc->vsc_feature_mask));
	return (err);
}

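/*
 * Device initialization: validate options, look up the VNIC (borrowing its
 * MAC address), create the in-kernel viona instance, start the interrupt
 * poll thread, and set up PCI config space, MSI-X, BAR 0, and the ioport
 * notification hook.
 */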
static int
pci_viona_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl)
{
	dladm_handle_t		handle;
	dladm_status_t		status;
	dladm_vnic_attr_t	attr;
	char			errmsg[DLADM_STRSIZE];
	int error, i;
	struct pci_viona_softc *sc;
	uint64_t ioport;
	const char *vnic;

	vnic = get_config_value_node(nvl, "vnic");
	if (vnic == NULL) {
		printf("virtio-viona: vnic required\n");
		return (1);
	}

	sc = malloc(sizeof (struct pci_viona_softc));
	memset(sc, 0, sizeof (struct pci_viona_softc));

	pi->pi_arg = sc;
	sc->vsc_pi = pi;

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	if (pci_viona_parse_opts(sc, nvl) != 0) {
		free(sc);
		return (1);
	}

	if ((status = dladm_open(&handle)) != DLADM_STATUS_OK) {
		WPRINTF(("could not open /dev/dld\n"));
		free(sc);
		return (1);
	}

	if ((status = dladm_name2info(handle, sc->vsc_linkname, &sc->vsc_linkid,
	    NULL, NULL, NULL)) != DLADM_STATUS_OK) {
		WPRINTF(("dladm_name2info() for %s failed: %s\n", vnic,
		    dladm_status2str(status, errmsg)));
		dladm_close(handle);
		free(sc);
		return (1);
	}

	if ((status = dladm_vnic_info(handle, sc->vsc_linkid, &attr,
	    DLADM_OPT_ACTIVE)) != DLADM_STATUS_OK) {
		WPRINTF(("dladm_vnic_info() for %s failed: %s\n", vnic,
		    dladm_status2str(status, errmsg)));
		dladm_close(handle);
		free(sc);
		return (1);
	}

	memcpy(sc->vsc_macaddr, attr.va_mac_addr, ETHERADDRL);

	dladm_close(handle);

	error = pci_viona_viona_init(ctx, sc);
	if (error != 0) {
		free(sc);
		return (1);
	}

	error = pthread_create(NULL, NULL, pci_viona_poll_thread, sc);
	assert(error == 0);

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* MSI-X support */
	for (i = 0; i < VIONA_MAXQ; i++)
		sc->vsc_msix_table_idx[i] = VIRTIO_MSI_NO_VECTOR;

	/* BAR 1 used to map MSI-X table and PBA */
	if (pci_emul_add_msixcap(pi, VIONA_MAXQ, 1)) {
		free(sc);
		return (1);
	}

	/* BAR 0 for legacy-style virtio register access. */
	error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VIONA_REGSZ);
	if (error != 0) {
		WPRINTF(("could not allocate virtio BAR\n"));
		free(sc);
		return (1);
	}

	/* Install ioport hook for virtqueue notification */
	ioport = pi->pi_bar[0].addr + VIRTIO_PCI_QUEUE_NOTIFY;
	error = ioctl(sc->vsc_vnafd, VNA_IOC_SET_NOTIFY_IOP, ioport);
	if (error != 0) {
		WPRINTF(("could not install ioport hook at %lx\n", ioport));
		free(sc);
		return (1);
	}

	/*
	 * Need a legacy interrupt for virtio compliance, even though MSI-X
	 * operation is _strongly_ suggested for adequate performance.
	 */
	pci_lintr_request(pi);

	return (0);
}

static uint64_t
viona_adjust_offset(struct pci_devinst *pi, uint64_t offset)
{
	/*
	 * Device-specific offsets used by the guest differ depending on
	 * whether the MSI-X capability is enabled.
	 */
	if (!pci_msix_enabled(pi)) {
		if (offset >= VIRTIO_PCI_CONFIG_OFF(0)) {
			return (offset + (VIRTIO_PCI_CONFIG_OFF(1) -
			    VIRTIO_PCI_CONFIG_OFF(0)));
		}
	}

	return (offset);
}

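/*
 * Push the MSI address/data pair currently programmed for a RX/TX ring down
 * to the kernel driver.  If the vector is unassigned, masked, or MSI-X is
 * not active, a zeroed address is passed instead.
 */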
static void
pci_viona_ring_set_msix(struct pci_devinst *pi, uint_t ring)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	struct msix_table_entry mte;
	uint16_t tab_index;
	vioc_ring_msi_t vrm;
	int res;

	assert(ring <= VIONA_VQ_TX);

	vrm.rm_index = ring;
	vrm.rm_addr = 0;
	vrm.rm_msg = 0;
	tab_index = sc->vsc_msix_table_idx[ring];

	if (tab_index != VIRTIO_MSI_NO_VECTOR && sc->vsc_msix_active) {
		mte = pi->pi_msix.table[tab_index];
		if ((mte.vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
			vrm.rm_addr = mte.addr;
			vrm.rm_msg = mte.msg_data;
		}
	}

	res = ioctl(sc->vsc_vnafd, VNA_IOC_RING_SET_MSI, &vrm);
	if (res != 0) {
		WPRINTF(("ioctl viona set_msi %d failed %d\n", ring, errno));
	}
}

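/*
 * React to a change in the device's interrupt configuration: if MSI-X has
 * been enabled or disabled (or function-masked), record the new state and
 * re-sync the per-ring MSI settings held by the kernel driver.
 */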
static void
pci_viona_lintrupdate(struct pci_devinst *pi)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	boolean_t msix_on = B_FALSE;

	pthread_mutex_lock(&sc->vsc_mtx);
	msix_on = pci_msix_enabled(pi) && (pi->pi_msix.function_mask == 0);
	if ((sc->vsc_msix_active && !msix_on) ||
	    (msix_on && !sc->vsc_msix_active)) {
		uint_t i;

		sc->vsc_msix_active = msix_on;
		/* Update in-kernel ring configs */
		for (i = 0; i <= VIONA_VQ_TX; i++) {
			pci_viona_ring_set_msix(pi, i);
		}
	}
	pthread_mutex_unlock(&sc->vsc_mtx);
}

static void
pci_viona_msix_update(struct pci_devinst *pi, uint64_t offset)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	uint_t tab_index, i;

	pthread_mutex_lock(&sc->vsc_mtx);
	if (!sc->vsc_msix_active) {
		pthread_mutex_unlock(&sc->vsc_mtx);
		return;
	}

	/*
	 * Rather than update every possible MSI-X vector, cheat and use the
	 * offset to calculate the entry within the table.  Since this should
	 * only be called when a write to the table succeeds, the index should
	 * be valid.
	 */
	tab_index = offset / MSIX_TABLE_ENTRY_SIZE;

	for (i = 0; i <= VIONA_VQ_TX; i++) {
		if (sc->vsc_msix_table_idx[i] != tab_index) {
			continue;
		}
		pci_viona_ring_set_msix(pi, i);
	}

	pthread_mutex_unlock(&sc->vsc_mtx);
}

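/*
 * Guest notification ("kick") of a queue.  RX and TX kicks are forwarded to
 * the kernel driver; the control queue is not implemented, so its kicks are
 * only logged.
 */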
static void
pci_viona_qnotify(struct pci_viona_softc *sc, int ring)
{
	int error;

	switch (ring) {
	case VIONA_TXQ:
	case VIONA_RXQ:
		error = ioctl(sc->vsc_vnafd, VNA_IOC_RING_KICK, ring);
		if (error != 0) {
			WPRINTF(("ioctl viona ring %d kick failed %d\n",
			    ring, errno));
		}
		break;
	case VIONA_CTLQ:
		DPRINTF(("viona: control qnotify!\n"));
		break;
	default:
		break;
	}
}

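/*
 * BAR write handler: MSI-X table/PBA writes are forwarded to the MSI-X
 * emulation (and may require re-syncing ring MSI state), while BAR 0 writes
 * are decoded as legacy virtio register accesses.
 */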
static void
pci_viona_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int baridx, uint64_t offset, int size, uint64_t value)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	void *ptr;
	int err = 0;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		if (pci_emul_msix_twrite(pi, offset, size, value) == 0) {
			pci_viona_msix_update(pi, offset);
		}
		return;
	}

	assert(baridx == 0);

	if (offset + size > pci_viona_iosize(pi)) {
		DPRINTF(("viona_write: 2big, offset %ld size %d\n",
		    offset, size));
		return;
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = viona_adjust_offset(pi, offset);

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		assert(size == 4);
		value &= ~(sc->vsc_feature_mask);
		err = ioctl(sc->vsc_vnafd, VNA_IOC_SET_FEATURES, &value);
		if (err != 0) {
			WPRINTF(("ioctl feature negotiation returned"
			    " err = %d\n", errno));
		} else {
			sc->vsc_features = value;
		}
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		assert(size == 4);
		pci_viona_ring_init(sc, value);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		assert(size == 2);
		assert(value < VIONA_MAXQ);
		sc->vsc_curq = value;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		assert(size == 2);
		assert(value < VIONA_MAXQ);
		pci_viona_qnotify(sc, value);
		break;
	case VIRTIO_PCI_STATUS:
		assert(size == 1);
		pci_viona_update_status(sc, value);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		assert(size == 2);
		sc->vsc_msix_table_idx[VIONA_CTLQ] = value;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		assert(size == 2);
		assert(sc->vsc_curq != VIONA_CTLQ);
		sc->vsc_msix_table_idx[sc->vsc_curq] = value;
		pci_viona_ring_set_msix(pi, sc->vsc_curq);
		break;
	case VIONA_R_CFG0:
	case VIONA_R_CFG1:
	case VIONA_R_CFG2:
	case VIONA_R_CFG3:
	case VIONA_R_CFG4:
	case VIONA_R_CFG5:
		assert((size + offset) <= (VIONA_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VIONA_R_CFG0];
		/*
		 * The driver is allowed to change the MAC address
		 */
		sc->vsc_macaddr[offset - VIONA_R_CFG0] = value;
		if (size == 1) {
			*(uint8_t *)ptr = value;
		} else if (size == 2) {
			*(uint16_t *)ptr = value;
		} else {
			*(uint32_t *)ptr = value;
		}
		break;
	case VIRTIO_PCI_HOST_FEATURES:
	case VIRTIO_PCI_QUEUE_NUM:
	case VIRTIO_PCI_ISR:
	case VIONA_R_CFG6:
	case VIONA_R_CFG7:
		DPRINTF(("viona: write to readonly reg %ld\n\r", offset));
		break;
	default:
		DPRINTF(("viona: unknown i/o write offset %ld\n\r", offset));
		value = 0;
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);
}

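/*
 * BAR read handler, the counterpart of pci_viona_write(): MSI-X table/PBA
 * reads go to the MSI-X emulation, while BAR 0 reads return the legacy
 * virtio registers, including the MAC address and link status bytes in the
 * device-specific config region.
 */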
static uint64_t
pci_viona_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int baridx, uint64_t offset, int size)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	void *ptr;
	uint64_t value = 0;
	int err = 0;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		return (pci_emul_msix_tread(pi, offset, size));
	}

	assert(baridx == 0);

	if (offset + size > pci_viona_iosize(pi)) {
		DPRINTF(("viona_read: 2big, offset %ld size %d\n",
		    offset, size));
		return (0);
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = viona_adjust_offset(pi, offset);

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		assert(size == 4);
		err = ioctl(sc->vsc_vnafd, VNA_IOC_GET_FEATURES, &value);
		if (err != 0) {
			WPRINTF(("ioctl get host features returned"
			    " err = %d\n", errno));
		}
		value &= ~sc->vsc_feature_mask;
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		assert(size == 4);
		value = sc->vsc_features; /* XXX never read ? */
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		assert(size == 4);
		value = sc->vsc_pfn[sc->vsc_curq] >> VRING_PFN;
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		assert(size == 2);
		value = pci_viona_qsize(sc, sc->vsc_curq);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		assert(size == 2);
		value = sc->vsc_curq;  /* XXX never read ? */
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		assert(size == 2);
		value = sc->vsc_curq;  /* XXX never read ? */
		break;
	case VIRTIO_PCI_STATUS:
		assert(size == 1);
		value = sc->vsc_status;
		break;
	case VIRTIO_PCI_ISR:
		assert(size == 1);
		value = sc->vsc_isr;
		sc->vsc_isr = 0;	/* a read clears this flag */
		if (value != 0) {
			pci_lintr_deassert(pi);
		}
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		assert(size == 2);
		value = sc->vsc_msix_table_idx[VIONA_CTLQ];
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		assert(size == 2);
		assert(sc->vsc_curq != VIONA_CTLQ);
		value = sc->vsc_msix_table_idx[sc->vsc_curq];
		break;
	case VIONA_R_CFG0:
	case VIONA_R_CFG1:
	case VIONA_R_CFG2:
	case VIONA_R_CFG3:
	case VIONA_R_CFG4:
	case VIONA_R_CFG5:
		assert((size + offset) <= (VIONA_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VIONA_R_CFG0];
		if (size == 1) {
			value = *(uint8_t *)ptr;
		} else if (size == 2) {
			value = *(uint16_t *)ptr;
		} else {
			value = *(uint32_t *)ptr;
		}
		break;
	case VIONA_R_CFG6:
		assert(size != 4);
		value = 0x01;	/* XXX link always up */
		break;
	case VIONA_R_CFG7:
		assert(size == 1);
		value = 0;	/* XXX link status in LSB */
		break;
	default:
		DPRINTF(("viona: unknown i/o read offset %ld\n\r", offset));
		value = 0;
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);

	return (value);
}

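/*
 * Register the emulation so it can be attached to a slot under the
 * "virtio-net-viona" device name.
 */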
struct pci_devemu pci_de_viona = {
	.pe_emu =	"virtio-net-viona",
	.pe_init =	pci_viona_init,
	.pe_legacy_config = pci_viona_legacy_config,
	.pe_barwrite =	pci_viona_write,
	.pe_barread =	pci_viona_read,
	.pe_lintrupdate = pci_viona_lintrupdate
};
PCI_EMUL_SET(pci_de_viona);