/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END,	"full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *, int);
static void	virtqueue_free_indirect(struct virtqueue *);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;
	mask |= VIRTIO_F_VERSION_1;

	return (features & mask);
}
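
/*
 * Illustrative note (not from this file): the transport front end is
 * expected to apply this filter to the combined host/driver feature
 * bits during negotiation, so that only device-specific bits plus the
 * ring features this implementation understands survive.
 */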

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
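
/*
 * Usage sketch (illustrative, not part of this file): a driver normally
 * fills a vq_alloc_info with VQ_ALLOC_INFO_INIT() and lets the transport
 * invoke virtqueue_alloc() with the device's queue size and alignment:
 *
 *	struct vq_alloc_info vq_info;
 *
 *	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtfoo_max_nsegs,
 *	    vtfoo_vq_intr, sc, &sc->vtfoo_vq, "%s request",
 *	    device_get_nameunit(dev));
 *	error = virtio_alloc_virtqueues(dev, 0, 1, &vq_info);
 *
 * The "vtfoo" names are hypothetical driver state.
 */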

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors were requested by the driver but
		 * not negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	/* Chain each indirect descriptor to its successor. */
	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning, '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{

	return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}
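
/*
 * Illustrative enqueue/notify pairing (sketch, not from this file):
 * callers typically batch several enqueues and issue a single notify,
 * letting the avail ring carry the intermediate updates:
 *
 *	while (have_requests()) {
 *		if (virtqueue_enqueue(vq, cookie, sg, nread, nwrite) != 0)
 *			break;
 *	}
 *	virtqueue_notify(vq);
 *
 * "have_requests" and the cookie/sg setup are hypothetical.
 */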

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		/* Interrupt after a quarter of the pending entries. */
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		/* Interrupt after three quarters of the pending entries. */
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		/* Interrupt only once all pending entries are consumed. */
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}
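
/*
 * Worked example (illustrative): with 64 descriptors outstanding,
 * VQ_POSTPONE_SHORT re-arms the interrupt after roughly 16 more are
 * consumed, VQ_POSTPONE_LONG after roughly 48, and VQ_POSTPONE_EMPTIED
 * only once all 64 have been consumed.
 */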

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
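
/*
 * With EVENT_IDX there is no explicit disable flag; the code above
 * instead parks used_event at an index the host should not reach soon
 * (roughly a full ring behind the consumer), which suppresses most,
 * but not necessarily all, interrupts. Hence the "hint" caveat above.
 */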

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
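
/*
 * Usage sketch (illustrative): a request with a readable header and a
 * writable status byte might be enqueued as:
 *
 *	sglist_reset(sg);
 *	sglist_append(sg, &req->hdr, sizeof(req->hdr));
 *	sglist_append(sg, &req->status, sizeof(req->status));
 *	error = virtqueue_enqueue(vq, req, sg, 1, 1);
 *
 * "req" and its layout are hypothetical; the readable segments must
 * come first in the sglist, as assumed by vq_ring_enqueue_segments().
 */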

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
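
/*
 * Typical completion loop (sketch, not from this file): an interrupt
 * handler drains all completed buffers before re-enabling interrupts:
 *
 *	while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
 *		complete_request(cookie, len);
 *
 * "complete_request" is a hypothetical driver routine.
 */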

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
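
/*
 * Usage sketch (illustrative): during detach, a driver walks every
 * descriptor slot to reclaim outstanding buffers:
 *
 *	int last = 0;
 *	void *cookie;
 *
 *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
 *		free_request(cookie);
 *
 * "free_request" is a hypothetical cleanup routine.
 */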

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; "
	    "used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vring_used_event(&vq->vq_ring),
	    vq->vq_ring.avail->flags, vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
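
/*
 * Illustrative layout: for readable = 1 and writable = 2, the loop
 * above produces a chain of three descriptors where only the last
 * lacks VRING_DESC_F_NEXT:
 *
 *	desc[a]: addr=seg0, NEXT		(device reads)
 *	desc[b]: addr=seg1, NEXT|WRITE		(device writes)
 *	desc[c]: addr=seg2, WRITE		(device writes)
 *
 * The returned index is the free-list successor of the final
 * descriptor, which becomes the new vq_desc_head_idx.
 */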

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	/* A chain of one descriptor gains nothing from indirection. */
	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	/* The entire chain consumes a single ring descriptor. */
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}
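
/*
 * Typical caller pattern (sketch, not from this file): because of the
 * race noted above, handlers loop until enabling succeeds with no work
 * pending:
 *
 *	for (;;) {
 *		process_completions(vq);
 *		if (virtqueue_enable_intr(vq) == 0)
 *			break;
 *		virtqueue_disable_intr(vq);
 *	}
 *
 * "process_completions" is a hypothetical dequeue loop.
 */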

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
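
/*
 * Reference for the event-index test (as defined in virtio_ring.h):
 *
 *	vring_need_event(event, new, old) :=
 *	    (uint16_t)(new - event - 1) < (uint16_t)(new - old)
 *
 * i.e. notify only if the host's avail_event index falls within the
 * half-open window [old, new) of entries published since the last
 * notify.
 */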

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
862