/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */
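
/*
 * Typical driver flow, in outline (hypothetical driver code; queues are
 * normally created on the driver's behalf by the transport through
 * VIRTIO_BUS_ALLOC_VIRTQUEUES(), and error handling is omitted):
 *
 *	error = virtqueue_enqueue(vq, cookie, sg, nreadable, nwritable);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 *	...
 *	cookie = virtqueue_dequeue(vq, &len);
 */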

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sdt.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_MODERN	 0x0001
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0002
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0004

	int			 vq_max_indirect_size;
	bus_size_t		 vq_notify_offset;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	void			*vq_ring_mem;
	int			 vq_indirect_mem_size;
	int			 vq_alignment;
	int			 vq_ring_size;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];

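	/*
	 * Per-descriptor driver state; one entry per ring descriptor,
	 * allocated inline at the tail of this structure (see
	 * virtqueue_alloc()).
	 */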
	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END,	"full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

SDT_PROVIDER_DEFINE(virtqueue);
SDT_PROBE_DEFINE6(virtqueue, , enqueue_segments, entry, "struct virtqueue *",
    "struct vring_desc *", "uint16_t", "struct sglist *", "int", "int");
SDT_PROBE_DEFINE1(virtqueue, , enqueue_segments, return, "uint16_t");

#define vq_modern(_vq) 		(((_vq)->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0)
#define vq_htog16(_vq, _val) 	virtio_htog16(vq_modern(_vq), _val)
#define vq_htog32(_vq, _val) 	virtio_htog32(vq_modern(_vq), _val)
#define vq_htog64(_vq, _val) 	virtio_htog64(vq_modern(_vq), _val)
#define vq_gtoh16(_vq, _val) 	virtio_gtoh16(vq_modern(_vq), _val)
#define vq_gtoh32(_vq, _val) 	virtio_gtoh32(vq_modern(_vq), _val)
#define vq_gtoh64(_vq, _val) 	virtio_gtoh64(vq_modern(_vq), _val)

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
    bus_size_t notify_offset, int align, vm_paddr_t highaddr,
    struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_notify_offset = notify_offset;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_VERSION_1) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

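	/* On success (error == 0), fall through without freeing. */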
fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

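	/*
	 * Give every ring entry its own indirect descriptor table so that
	 * any enqueued chain can be made indirect.
	 */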
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = vq_gtoh16(vq, i + 1);
	indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{

	return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq_htog16(vq, vq->vq_ring.used->idx);

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
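		/*
		 * EVENT_IDX provides no disable flag; instead, request
		 * the next event at an index the used ring will not
		 * reach any time soon.
		 */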
		vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
		    vq->vq_used_cons_idx - vq->vq_nentries - 1);
		return;
	}

	vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

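	/* Read the used element only after seeing the host's index update. */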
	rmb();
	desc_idx = (uint16_t) vq_htog32(vq, uep->id);
	if (len != NULL)
		*len = vq_htog32(vq, uep->len);

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

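	/* Busy-wait for a used descriptor, polling the transport each pass. */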
	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
	    vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
	    vq_htog16(vq, vq->vq_ring.used->idx),
	    vq_htog16(vq, vring_used_event(&vq->vq_ring)),
	    vq_htog16(vq, vq->vq_ring.avail->flags),
	    vq_htog16(vq, vq->vq_ring.used->flags));
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = vq_gtoh16(vq, i + 1);
	vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx, avail_ring_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);

	wmb();
	vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	SDT_PROBE6(virtqueue, , enqueue_segments, entry, vq, desc, head_idx,
	    sg, readable, writable);

	needed = readable + writable;

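	/*
	 * Walk the free descriptor chain starting at head_idx, filling in
	 * one descriptor per segment. The device-readable segments come
	 * first in the scatter/gather list, followed by the writable ones.
	 */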
	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = vq_htog16(vq, dp->next), seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = vq_gtoh64(vq, seg->ss_paddr);
		dp->len = vq_gtoh32(vq, seg->ss_len);
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
		if (i >= readable)
			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
	}

	SDT_PROBE1(virtqueue, , enqueue_segments, return, idx);
	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

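	/*
	 * Indirect descriptors are worthwhile only for chains of at least
	 * two segments that fit within the preallocated indirect table.
	 */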
	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

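	/*
	 * The entire scatter/gather list occupies a single ring descriptor
	 * that points at the preallocated indirect descriptor table.
	 */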
	dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
	dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
	dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) =
		    vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
	} else {
		vq->vq_ring.avail->flags &=
		    vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
	}

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx, flags;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
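		/*
		 * With EVENT_IDX, notify only if the avail index has
		 * passed the host's requested event index since the
		 * last notification.
		 */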
		new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	flags = vq->vq_ring.used->flags;
	return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
	    vq->vq_notify_offset);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

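	/*
	 * An indirect chain occupies only one ring descriptor; a direct
	 * chain must be walked to its end via the NEXT links.
	 */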
	if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
		while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
			uint16_t next_idx = vq_htog16(vq, dp->next);
			VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
			dp = &vq->vq_ring.desc[next_idx];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * its head would be VQ_RING_DESC_CHAIN_END (asserted above).
	 */
	dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
	vq->vq_desc_head_idx = desc_idx;
}