/*
 * Generic virtio library for MINIX 3
 *
 * Copyright (c) 2013, A. Welzel, <arne.welzel@gmail.com>
 *
 * This software is released under the BSD license. See the LICENSE file
 * included in the main directory of this source distribution for the
 * license terms and conditions.
 */

#define _SYSTEM 1

#include <assert.h>
#include <errno.h>				/* for OK... */
#include <string.h>				/* memset() */
#include <stdlib.h>				/* malloc() */

#include <machine/pci.h>			/* PCI_ILR, PCI_BAR... */
#include <machine/vmparam.h>			/* PAGE_SIZE */

#include <minix/syslib.h>			/* umap, vumap, alloc_..*/
#include <minix/sysutil.h>			/* panic(), at least */
#include <minix/virtio.h>			/* virtio system include */

#include "virtio_ring.h"			/* virtio types / helper */

/*
 * About indirect descriptors:
 *
 * For each possible thread, a single indirect descriptor table is allocated.
 * If using direct descriptors could leave another thread unable to add
 * descriptors to the ring, indirect descriptors are used instead.
 *
 * Indirect descriptor tables are pre-allocated, because each alloc_contig()
 * call involves a kernel call and would hurt performance on the I/O path.
 *
 * The size of indirect descriptor tables is chosen based on MAPVEC_NR. A
 * driver using this library should never add more than
 *
 *    MAPVEC_NR + MAPVEC_NR / 2
 *
 * descriptors to a queue, as this is the maximum size of an indirect
 * descriptor table.
 */
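
/*
 * Worked example (illustrative; the value of MAPVEC_NR is an assumption
 * here): if MAPVEC_NR were 64, each table would hold 64 + 64 / 2 = 96
 * descriptors, and with a 16-byte struct vring_desc,
 * init_indirect_desc_table() below would allocate 96 * 16 = 1536 bytes
 * per thread.
 */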

struct indirect_desc_table {
	int in_use;
	struct vring_desc *descs;
	phys_bytes paddr;
	size_t len;
};

struct virtio_queue {

	void *vaddr;				/* virtual addr of ring */
	phys_bytes paddr;			/* physical addr of ring */
	u32_t page;				/* physical guest page */

	u16_t num;				/* number of descriptors */
	u32_t ring_size;			/* size of ring in bytes */
	struct vring vring;

	u16_t free_num;				/* free descriptors */
	u16_t free_head;			/* next free descriptor */
	u16_t free_tail;			/* last free descriptor */
	u16_t last_used;			/* last index we saw in used */

	void **data;				/* points to pointers */
};

struct virtio_device {

	const char *name;			/* for debugging */

	u16_t  port;				/* io port */

	struct virtio_feature *features;	/* host / guest features */
	u8_t num_features;			/* max 32 */

	struct virtio_queue *queues;		/* our queues */
	u16_t num_queues;

	int irq;				/* interrupt line */
	int irq_hook;				/* hook id */
	int msi;				/* is MSI enabled? */

	int threads;				/* max number of threads */

	struct indirect_desc_table *indirect;	/* indirect descriptor tables */
	int num_indirect;
};

static int is_matching_device(u16_t expected_sdid, u16_t vid, u16_t sdid);
static int init_device(int devind, struct virtio_device *dev);
static int init_phys_queues(struct virtio_device *dev);
static int exchange_features(struct virtio_device *dev);
static int alloc_phys_queue(struct virtio_queue *q);
static void free_phys_queue(struct virtio_queue *q);
static void init_phys_queue(struct virtio_queue *q);
static int init_indirect_desc_table(struct indirect_desc_table *desc);
static int init_indirect_desc_tables(struct virtio_device *dev);
static void virtio_irq_register(struct virtio_device *dev);
static void virtio_irq_unregister(struct virtio_device *dev);
static int wants_kick(struct virtio_queue *q);
static void kick_queue(struct virtio_device *dev, int qidx);

struct virtio_device *
virtio_setup_device(u16_t subdevid, const char *name,
		struct virtio_feature *features, int num_features,
		int threads, int skip)
{
	int r, devind;
	u16_t vid, did, sdid;
	struct virtio_device *ret;

	/* bogus values? */
	if (skip < 0 || name == NULL || num_features < 0 || threads <= 0)
		return NULL;

	pci_init();

	r = pci_first_dev(&devind, &vid, &did);

	while (r > 0) {
		sdid = pci_attr_r16(devind, PCI_SUBDID);
		if (is_matching_device(subdevid, vid, sdid)) {

			/* this is the device we are looking for */
			if (skip == 0)
				break;

			skip--;
		}

		r = pci_next_dev(&devind, &vid, &did);
	}

	/* pci_[first|next_dev()] return 0 if no device was found */
	if (r == 0 || skip > 0)
		return NULL;

	/* allocate and set known info about the device */
	ret = malloc(sizeof(*ret));

	if (ret == NULL)
		return NULL;

	/* Prepare the virtio_device instance */
	memset(ret, 0, sizeof(*ret));
	ret->name = name;
	ret->features = features;
	ret->num_features = num_features;
	ret->threads = threads;
	/* see comment in the beginning of this file */
	ret->num_indirect = threads;

	if (init_device(devind, ret) != OK) {
		printf("%s: Could not initialize device\n", ret->name);
		goto err;
	}

	/* Ack the device */
	virtio_write8(ret, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_ACK);

	if (exchange_features(ret) != OK) {
		printf("%s: Could not exchange features\n", ret->name);
		goto err;
	}

	if (init_indirect_desc_tables(ret) != OK) {
		printf("%s: Could not initialize indirect tables\n", ret->name);
		goto err;
	}

	/* We know how to drive the device... */
	virtio_write8(ret, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_DRV);

	return ret;

/* Error path */
err:
	free(ret);
	return NULL;
}
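
/*
 * Illustrative bring-up sketch (not part of this library; the feature
 * table, subdevice ID and queue count below are hypothetical driver
 * choices):
 *
 *	static struct virtio_feature blk_features[] = { ... };
 *	struct virtio_device *dev;
 *
 *	dev = virtio_setup_device(0x0002, "virtio-blk", blk_features,
 *		__arraycount(blk_features), 1, 0);
 *	if (dev == NULL)
 *		panic("no matching virtio device found");
 *	if (virtio_alloc_queues(dev, 1) != OK)
 *		panic("could not allocate queues");
 *	virtio_device_ready(dev);
 */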

static int
init_device(int devind, struct virtio_device *dev)
{
	u32_t base, size;
	int iof, r;

	pci_reserve(devind);

	if ((r = pci_get_bar(devind, PCI_BAR, &base, &size, &iof)) != OK) {
		printf("%s: Could not get BAR (%d)", dev->name, r);
		return r;
	}

	if (!iof) {
		printf("%s: BAR is not I/O space?", dev->name);
		return EINVAL;
	}

	if (base & 0xFFFF0000) {
		printf("%s: IO port weird (%08x)", dev->name, base);
		return EINVAL;
	}

	/* store the I/O port */
	dev->port = base;

	/* Reset the device */
	virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, 0);

	/* Read IRQ line */
	dev->irq = pci_attr_r8(devind, PCI_ILR);

	return OK;
}

static int
exchange_features(struct virtio_device *dev)
{
	u32_t guest_features = 0, host_features = 0;
	struct virtio_feature *f;

	host_features = virtio_read32(dev, VIRTIO_HOST_F_OFF);

	for (int i = 0; i < dev->num_features; i++) {
		f = &dev->features[i];

		/* prepare the features the driver supports */
		guest_features |= (f->guest_support << f->bit);

		/* just load the host feature into the struct */
		f->host_support = ((host_features >> f->bit) & 1);
	}

	/* let the device know about our features */
	virtio_write32(dev, VIRTIO_GUEST_F_OFF, guest_features);

	return OK;
}
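
/*
 * Example feature table as a driver might define it (a sketch; the
 * feature names and bit constants are placeholders, not part of this
 * library). A driver sets guest_support to 1 for every feature it wants
 * to offer; exchange_features() above fills in host_support:
 *
 *	static struct virtio_feature features[] = {
 *		{ "barrier",	VIRTIO_BLK_F_BARRIER,	0,	0 },
 *		{ "sizemax",	VIRTIO_BLK_F_SIZE_MAX,	0,	0 },
 *	};
 */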

int
virtio_alloc_queues(struct virtio_device *dev, int num_queues)
{
	int r = OK;

	assert(dev != NULL);

	/* Assume there's no device with more than 256 queues */
	if (num_queues < 0 || num_queues > 256)
		return EINVAL;

	dev->num_queues = num_queues;
	/* allocate queue memory */
	dev->queues = malloc(num_queues * sizeof(dev->queues[0]));

	if (dev->queues == NULL)
		return ENOMEM;

	memset(dev->queues, 0, num_queues * sizeof(dev->queues[0]));

	if ((r = init_phys_queues(dev)) != OK) {
		printf("%s: Could not initialize queues (%d)\n", dev->name, r);
		free(dev->queues);
		dev->queues = NULL;
	}

	return r;
}

static int
init_phys_queues(struct virtio_device *dev)
{
	/* Initialize all queues */
	int i, j, r;
	struct virtio_queue *q;

	for (i = 0; i < dev->num_queues; i++) {
		q = &dev->queues[i];
		/* select the queue */
		virtio_write16(dev, VIRTIO_QSEL_OFF, i);
		q->num = virtio_read16(dev, VIRTIO_QSIZE_OFF);

		if (q->num & (q->num - 1)) {
			printf("%s: Queue %d num=%d not a power of 2",
			       dev->name, i, q->num);
			r = EINVAL;
			goto free_phys_queues;
		}

		if ((r = alloc_phys_queue(q)) != OK)
			goto free_phys_queues;

		init_phys_queue(q);

		/* Let the host know about the guest physical page */
		virtio_write32(dev, VIRTIO_QADDR_OFF, q->page);
	}

	return OK;

/* Error path: free only the queues initialized so far */
free_phys_queues:
	for (j = 0; j < i; j++)
		free_phys_queue(&dev->queues[j]);

	return r;
}

static int
alloc_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);

	/* How much memory do we need? */
	q->ring_size = vring_size(q->num, PAGE_SIZE);

	q->vaddr = alloc_contig(q->ring_size, AC_ALIGN4K, &q->paddr);

	if (q->vaddr == NULL)
		return ENOMEM;

	q->data = alloc_contig(sizeof(q->data[0]) * q->num, AC_ALIGN4K, NULL);

	if (q->data == NULL) {
		free_contig(q->vaddr, q->ring_size);
		q->vaddr = NULL;
		q->paddr = 0;
		return ENOMEM;
	}

	return OK;
}

void
virtio_device_ready(struct virtio_device *dev)
{
	assert(dev != NULL);

	/* Register IRQ line */
	virtio_irq_register(dev);

	/* Driver is ready to go! */
	virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_DRV_OK);
}

void
virtio_free_queues(struct virtio_device *dev)
{
	int i;
	assert(dev != NULL);
	assert(dev->queues != NULL);
	assert(dev->num_queues > 0);

	for (i = 0; i < dev->num_queues; i++)
		free_phys_queue(&dev->queues[i]);

	/* Also release the queue array itself, allocated in
	 * virtio_alloc_queues().
	 */
	free(dev->queues);
	dev->num_queues = 0;
	dev->queues = NULL;
}

static void
free_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);
	assert(q->vaddr != NULL);

	free_contig(q->vaddr, q->ring_size);
	q->vaddr = NULL;
	q->paddr = 0;
	/* Free the data array with the size it was allocated with,
	 * before q->num is cleared.
	 */
	free_contig(q->data, sizeof(q->data[0]) * q->num);
	q->data = NULL;
	q->num = 0;
}

static void
init_phys_queue(struct virtio_queue *q)
{
	memset(q->vaddr, 0, q->ring_size);
	memset(q->data, 0, sizeof(q->data[0]) * q->num);

	/* physical page in guest */
	q->page = q->paddr / PAGE_SIZE;

	/* Set pointers in q->vring according to size */
	vring_init(&q->vring, q->num, q->vaddr, PAGE_SIZE);

	/* Everything's free at this point */
	for (int i = 0; i < q->num; i++) {
		q->vring.desc[i].flags = VRING_DESC_F_NEXT;
		q->vring.desc[i].next = (i + 1) & (q->num - 1);
	}

	q->free_num = q->num;
	q->free_head = 0;
	q->free_tail = q->num - 1;
	q->last_used = 0;
}
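
/*
 * After init_phys_queue(), with e.g. num == 4, the descriptors form a
 * circular free list 0 -> 1 -> 2 -> 3 -> 0 (next = (i + 1) & (num - 1)),
 * with free_head == 0 and free_tail == 3. Descriptors are taken from the
 * head and returned at the tail as buffers complete.
 */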

void
virtio_free_device(struct virtio_device *dev)
{
	int i;
	struct indirect_desc_table *desc;

	assert(dev != NULL);

	assert(dev->num_indirect > 0);

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];
		free_contig(desc->descs, desc->len);
	}

	dev->num_indirect = 0;

	assert(dev->indirect != NULL);
	free(dev->indirect);
	dev->indirect = NULL;

	free(dev);
}

static int
init_indirect_desc_table(struct indirect_desc_table *desc)
{
	desc->in_use = 0;
	desc->len = (MAPVEC_NR + MAPVEC_NR / 2) * sizeof(struct vring_desc);

	desc->descs = alloc_contig(desc->len, AC_ALIGN4K, &desc->paddr);

	/* Check for allocation failure before touching the memory */
	if (desc->descs == NULL)
		return ENOMEM;

	memset(desc->descs, 0, desc->len);

	return OK;
}

static int
init_indirect_desc_tables(struct virtio_device *dev)
{
	int i, j, r;
	struct indirect_desc_table *desc;

	dev->indirect = malloc(dev->num_indirect * sizeof(dev->indirect[0]));

	if (dev->indirect == NULL) {
		printf("%s: Could not allocate indirect tables\n", dev->name);
		return ENOMEM;
	}

	memset(dev->indirect, 0, dev->num_indirect * sizeof(dev->indirect[0]));

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];
		if ((r = init_indirect_desc_table(desc)) != OK) {

			/* error path */
			for (j = 0; j < i; j++) {
				desc = &dev->indirect[j];
				free_contig(desc->descs, desc->len);
			}

			free(dev->indirect);

			return r;
		}
	}

	return OK;
}

static void
clear_indirect_table(struct virtio_device *dev, struct vring_desc *vd)
{
	int i;
	struct indirect_desc_table *desc;

	assert(vd->len > 0);
	assert(vd->flags & VRING_DESC_F_INDIRECT);
	vd->flags = vd->flags & ~VRING_DESC_F_INDIRECT;
	vd->len = 0;

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];

		if (desc->paddr == vd->addr) {
			assert(desc->in_use);
			desc->in_use = 0;
			break;
		}
	}

	if (i >= dev->num_indirect)
		panic("Could not clear indirect descriptor table");
}


inline static void
use_vring_desc(struct vring_desc *vd, struct vumap_phys *vp)
{
	/* Bit 0 of vp_addr carries the direction: if set, the host may
	 * write to this buffer. Mask it off to recover the address.
	 */
	vd->addr = vp->vp_addr & ~1UL;
	vd->len = vp->vp_size;
	vd->flags = VRING_DESC_F_NEXT;

	if (vp->vp_addr & 1)
		vd->flags |= VRING_DESC_F_WRITE;
}

static void
set_indirect_descriptors(struct virtio_device *dev, struct virtio_queue *q,
	struct vumap_phys *bufs, size_t num)
{
	/* Indirect descriptor tables are simply filled from left to right */
	int i;
	struct indirect_desc_table *desc;
	struct vring *vring = &q->vring;
	struct vring_desc *vd, *ivd = NULL;

	if (0 == num)
		return;

	/* Find the first unused indirect descriptor table */
	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];

		/* If an unused indirect descriptor table was found,
		 * mark it as being used and exit the loop.
		 */
		if (!desc->in_use) {
			desc->in_use = 1;
			break;
		}
	}

	/* Sanity check */
	if (i >= dev->num_indirect)
		panic("No indirect descriptor tables left");

	/* For indirect descriptor tables, only a single descriptor from
	 * the main ring is used.
	 */
	vd = &vring->desc[q->free_head];
	vd->flags = VRING_DESC_F_INDIRECT;
	vd->addr = desc->paddr;
	vd->len = num * sizeof(desc->descs[0]);

	/* Initialize the descriptors in the indirect descriptor table */
	for (i = 0; i < (int)num; i++) {
		ivd = &desc->descs[i];

		use_vring_desc(ivd, &bufs[i]);
		ivd->next = i + 1;
	}

	/* Unset the next bit of the last descriptor */
	if (NULL != ivd)
		ivd->flags = ivd->flags & ~VRING_DESC_F_NEXT;

	/* Update queue, only a single descriptor was used */
	q->free_num -= 1;
	q->free_head = vd->next;
}

static void
set_direct_descriptors(struct virtio_queue *q, struct vumap_phys *bufs,
	size_t num)
{
	u16_t i;
	size_t count;
	struct vring *vring = &q->vring;
	struct vring_desc *vd;

	if (0 == num)
		return;

	for (i = q->free_head, count = 0; count < num; count++) {

		/* The next free descriptor */
		vd = &vring->desc[i];

		/* The descriptor is linked in the free list, so
		 * it always has the next bit set.
		 */
		assert(vd->flags & VRING_DESC_F_NEXT);

		use_vring_desc(vd, &bufs[count]);
		i = vd->next;
	}

	/* Unset the next bit of the last descriptor */
	vd->flags = vd->flags & ~VRING_DESC_F_NEXT;

	/* Update queue */
	q->free_num -= num;
	q->free_head = i;
}

int
virtio_to_queue(struct virtio_device *dev, int qidx, struct vumap_phys *bufs,
	size_t num, void *data)
{
	u16_t free_first;
	int left;
	struct virtio_queue *q = &dev->queues[qidx];
	struct vring *vring = &q->vring;

	assert(0 <= qidx && qidx < dev->num_queues);

	if (!data)
		panic("%s: NULL data received for queue %d", dev->name, qidx);

	free_first = q->free_head;

	left = (int)q->free_num - (int)num;

	if (left < dev->threads)
		set_indirect_descriptors(dev, q, bufs, num);
	else
		set_direct_descriptors(q, bufs, num);

	/* Next index for host is old free_head */
	vring->avail->ring[vring->avail->idx % q->num] = free_first;

	/* Provided by the caller to identify this slot */
	q->data[free_first] = data;

	/* Make sure the host sees the new descriptors */
	__insn_barrier();

	/* advance last idx */
	vring->avail->idx += 1;

	/* Make sure the host sees the avail->idx */
	__insn_barrier();

	/* kick it! */
	kick_queue(dev, qidx);
	return 0;
}
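
/*
 * Illustrative submission sketch (hypothetical driver code, not part of
 * this library): a two-element chain with a device-readable header and a
 * device-writable status byte. Bit 0 of vp_addr marks a buffer as
 * write-only for the device; see use_vring_desc() above.
 *
 *	struct vumap_phys phys[2];
 *
 *	phys[0].vp_addr = hdr_paddr;		// device reads this
 *	phys[0].vp_size = sizeof(struct req_hdr);
 *	phys[1].vp_addr = status_paddr | 1;	// device writes this
 *	phys[1].vp_size = 1;
 *	virtio_to_queue(dev, 0, phys, 2, &req_state);
 */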

int
virtio_from_queue(struct virtio_device *dev, int qidx, void **data,
	size_t *len)
{
	struct virtio_queue *q;
	struct vring *vring;
	struct vring_used_elem *uel;
	struct vring_desc *vd;
	int count = 0;
	u16_t idx;
	u16_t used_idx;

	assert(0 <= qidx && qidx < dev->num_queues);

	q = &dev->queues[qidx];
	vring = &q->vring;

	/* Make sure we see changes done by the host */
	__insn_barrier();

	/* The index from the host */
	used_idx = vring->used->idx % q->num;

	/* We already saw this one, nothing to do here */
	if (q->last_used == used_idx)
		return -1;

	/* Get the vring_used element */
	uel = &q->vring.used->ring[q->last_used];

	/* Update the last used element */
	q->last_used = (q->last_used + 1) % q->num;

	/* index of the used element */
	idx = uel->id % q->num;

	assert(q->data[idx] != NULL);

	/* Get the descriptor */
	vd = &vring->desc[idx];

	/* Unconditionally set the tail->next to the first used one */
	assert(vring->desc[q->free_tail].flags & VRING_DESC_F_NEXT);
	vring->desc[q->free_tail].next = idx;

	/* Find the last index; eventually there has to be one
	 * without the next flag.
	 *
	 * FIXME: Protect from endless loop
	 */
	while (vd->flags & VRING_DESC_F_NEXT) {

		if (vd->flags & VRING_DESC_F_INDIRECT)
			clear_indirect_table(dev, vd);

		idx = vd->next;
		vd = &vring->desc[idx];
		count++;
	}

	/* Didn't count the last one */
	count++;

	if (vd->flags & VRING_DESC_F_INDIRECT)
		clear_indirect_table(dev, vd);

	/* idx points to the tail now, update the queue */
	q->free_tail = idx;
	assert(!(vd->flags & VRING_DESC_F_NEXT));

	/* We can always connect the tail with the head */
	vring->desc[q->free_tail].next = q->free_head;
	vring->desc[q->free_tail].flags = VRING_DESC_F_NEXT;

	q->free_num += count;

	assert(q->free_num <= q->num);

	*data = q->data[uel->id];
	q->data[uel->id] = NULL;

	if (len != NULL)
		*len = uel->len;

	return 0;
}
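
/*
 * Illustrative completion loop (hypothetical driver code): typically run
 * after virtio_had_irq() below reports a pending interrupt. The cookie
 * handed to virtio_to_queue() comes back in "data"; finish_request() is
 * a placeholder for the driver's own completion handling:
 *
 *	void *data;
 *	size_t len;
 *
 *	while (virtio_from_queue(dev, 0, &data, &len) == 0)
 *		finish_request(data, len);
 */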

int
virtio_had_irq(struct virtio_device *dev)
{
	return virtio_read8(dev, VIRTIO_ISR_STATUS_OFF) & 1;
}

void
virtio_reset_device(struct virtio_device *dev)
{
	virtio_irq_unregister(dev);
	virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, 0);
}


void
virtio_irq_enable(struct virtio_device *dev)
{
	int r;
	if ((r = sys_irqenable(&dev->irq_hook)) != OK)
		panic("%s: Unable to enable IRQ %d", dev->name, r);
}

void
virtio_irq_disable(struct virtio_device *dev)
{
	int r;
	if ((r = sys_irqdisable(&dev->irq_hook)) != OK)
		panic("%s: Unable to disable IRQ %d", dev->name, r);
}

static int
wants_kick(struct virtio_queue *q)
{
	assert(q != NULL);
	return !(q->vring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static void
kick_queue(struct virtio_device *dev, int qidx)
{
	assert(0 <= qidx && qidx < dev->num_queues);

	/* Notify the host, unless it suppressed notifications. (The
	 * constant's spelling follows <minix/virtio.h>.)
	 */
	if (wants_kick(&dev->queues[qidx]))
		virtio_write16(dev, VIRTIO_QNOTFIY_OFF, qidx);
}

static int
is_matching_device(u16_t expected_sdid, u16_t vid, u16_t sdid)
{
	return vid == VIRTIO_VENDOR_ID && sdid == expected_sdid;
}

static void
virtio_irq_register(struct virtio_device *dev)
{
	int r;
	if ((r = sys_irqsetpolicy(dev->irq, 0, &dev->irq_hook)) != OK)
		panic("%s: Unable to register IRQ %d", dev->name, r);
}

static void
virtio_irq_unregister(struct virtio_device *dev)
{
	int r;
	if ((r = sys_irqrmpolicy(&dev->irq_hook)) != OK)
		panic("%s: Unable to unregister IRQ %d", dev->name, r);
}

static int
_supports(struct virtio_device *dev, int bit, int host)
{
	for (int i = 0; i < dev->num_features; i++) {
		struct virtio_feature *f = &dev->features[i];

		if (f->bit == bit)
			return host ? f->host_support : f->guest_support;
	}

	panic("%s: Feature not found bit=%d", dev->name, bit);
}

int
virtio_host_supports(struct virtio_device *dev, int bit)
{
	return _supports(dev, bit, 1);
}

int
virtio_guest_supports(struct virtio_device *dev, int bit)
{
	return _supports(dev, bit, 0);
}


/* Just some wrappers around sys_read */
#define VIRTIO_READ_XX(xx, suff)					\
u##xx##_t								\
virtio_read##xx(struct virtio_device *dev, i32_t off)			\
{									\
	int r;								\
	u32_t ret;							\
	if ((r = sys_in##suff(dev->port + off, &ret)) != OK)		\
		panic("%s: Read failed %d %d r=%d", dev->name,		\
						    dev->port,		\
						    off,		\
						    r);			\
									\
	return ret;							\
}

VIRTIO_READ_XX(32, l)
VIRTIO_READ_XX(16, w)
VIRTIO_READ_XX(8, b)
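
/*
 * For reference, VIRTIO_READ_XX(8, b) expands (roughly) to:
 *
 *	u8_t
 *	virtio_read8(struct virtio_device *dev, i32_t off)
 *	{
 *		int r;
 *		u32_t ret;
 *		if ((r = sys_inb(dev->port + off, &ret)) != OK)
 *			panic("%s: Read failed %d %d r=%d", dev->name,
 *			      dev->port, off, r);
 *		return ret;
 *	}
 */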

/* Just some wrappers around sys_write */
#define VIRTIO_WRITE_XX(xx, suff)					\
void									\
virtio_write##xx(struct virtio_device *dev, i32_t off, u##xx##_t val)	\
{									\
	int r;								\
	if ((r = sys_out##suff(dev->port + off, val)) != OK)		\
		panic("%s: Write failed %d %d r=%d", dev->name,		\
						     dev->port,		\
						     off,		\
						     r);		\
}

VIRTIO_WRITE_XX(32, l)
VIRTIO_WRITE_XX(16, w)
VIRTIO_WRITE_XX(8, b)

/* Just some wrappers around sys_read */
#define VIRTIO_SREAD_XX(xx, suff)					\
u##xx##_t								\
virtio_sread##xx(struct virtio_device *dev, i32_t off)			\
{									\
	int r;								\
	u32_t ret;							\
	off += VIRTIO_DEV_SPECIFIC_OFF;					\
									\
	if (dev->msi)							\
		off += VIRTIO_MSI_ADD_OFF;				\
									\
	if ((r = sys_in##suff(dev->port + off, &ret)) != OK)		\
		panic("%s: Read failed %d %d r=%d", dev->name,		\
						    dev->port,		\
						    off,		\
						    r);			\
									\
	return ret;							\
}

VIRTIO_SREAD_XX(32, l)
VIRTIO_SREAD_XX(16, w)
VIRTIO_SREAD_XX(8, b)

/* Just some wrappers around sys_write */
#define VIRTIO_SWRITE_XX(xx, suff)					\
void									\
virtio_swrite##xx(struct virtio_device *dev, i32_t off, u##xx##_t val)	\
{									\
	int r;								\
	off += VIRTIO_DEV_SPECIFIC_OFF;					\
									\
	if (dev->msi)							\
		off += VIRTIO_MSI_ADD_OFF;				\
									\
	if ((r = sys_out##suff(dev->port + off, val)) != OK)		\
		panic("%s: Write failed %d %d r=%d", dev->name,		\
						     dev->port,		\
						     off,		\
						     r);		\
}

VIRTIO_SWRITE_XX(32, l)
VIRTIO_SWRITE_XX(16, w)
VIRTIO_SWRITE_XX(8, b)