/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/virtio/virtqueue.c,v 1.2 2012/04/14 05:48:04 grehan Exp $
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/atomic.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "virtio.h"
#include "virtqueue.h"
#include "virtio_ring.h"

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;

#define	VIRTQUEUE_FLAG_INDIRECT  0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;

	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t         indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END,	"full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

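/*
 * Mask out any advertised features the virtqueue code does not
 * understand: everything below VIRTIO_TRANSPORT_F_START, plus the
 * two ring features implemented here (indirect descriptors and the
 * used/avail event indices).
 */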
uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}

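/*
 * Allocate and initialize a virtqueue of `size' entries. The ring
 * itself is allocated page-aligned and physically contiguous below
 * `highaddr'. On success the new virtqueue is returned through `vqp'
 * with interrupts disabled.
 */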
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

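/*
 * Preallocate one indirect descriptor list per ring entry. If the
 * host did not negotiate VIRTIO_RING_F_INDIRECT_DESC, this quietly
 * succeeds without enabling VIRTQUEUE_FLAG_INDIRECT.
 */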
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = contigmalloc(size, M_DEVBUF, M_WAITOK,
		    0, BUS_SPACE_MAXADDR, 16, 0);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		contigfree(dxp->indirect, vq->vq_indirect_mem_size, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

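/*
 * Reset an allocated virtqueue to its freshly initialized state,
 * e.g. after a device reset. The size must match the original
 * allocation.
 */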
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning, '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

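/*
 * Release all memory held by the virtqueue. Outstanding cookies
 * should be recovered with virtqueue_drain() first; any that remain
 * are leaked, with a warning.
 */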
void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	kfree(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{
	return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

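/*
 * Notify the host of newly available buffers, unless notifications
 * are suppressed. The interlock, if given, is dropped around the bus
 * notify call, which is typically expensive (a VM exit).
 */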
void
virtqueue_notify(struct virtqueue *vq, lwkt_serialize_t interlock)
{
	/* Ensure updated avail->idx is visible to host. */
	cpu_mfence();

	if (vq_ring_must_notify_host(vq)) {
		if (interlock != NULL)
			lwkt_serialize_exit(interlock);
		vq_ring_notify_host(vq);
		if (interlock != NULL)
			lwkt_serialize_enter(interlock);
	}
	vq->vq_queued_cnt = 0;
}

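/*
 * Return the number of used entries the host has returned but we
 * have not yet consumed. The unsigned 16-bit subtraction is correct
 * across index wraparound because ring sizes never exceed 2^15.
 */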
int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;
	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr(struct virtqueue *vq)
{

	if (vq->vq_intrhand == NULL ||
	    vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	vq->vq_intrhand(vq->vq_intrhand_arg);

	return (1);
}

/*
 * Enable interrupts on a given virtqueue. Returns 1 if there are
 * additional entries to process on the virtqueue after we return.
 */
int
virtqueue_enable_intr(struct virtqueue *vq)
{
	/*
	 * Enable interrupts, making sure we get the latest
	 * index of what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	cpu_mfence();

	/*
	 * Additional items may have been consumed between when we
	 * last checked and when interrupts were enabled above. Let
	 * our caller know so it processes the new entries.
	 */
	if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
		return (1);

	return (0);
}

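/*
 * Postpone the interrupt until roughly half of the outstanding
 * descriptors have been consumed. Without event indices this simply
 * re-enables interrupts. Returns 1 if enough entries were consumed
 * in the meantime and the caller should process them itself.
 */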
int
virtqueue_postpone_intr(struct virtqueue *vq)
{
	uint16_t ndesc;

	/*
	 * Postpone until at least half of the available descriptors
	 * have been consumed.
	 *
	 * XXX Adaptive factor? (Linux uses 3/4)
	 */
	ndesc = (uint16_t)(vq->vq_ring.avail->idx - vq->vq_used_cons_idx) / 2;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	cpu_mfence();

	/*
	 * Enough items may have already been consumed to meet our
	 * threshold since we last checked. Let our caller know so
	 * it processes the new entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

void
virtqueue_disable_intr(struct virtqueue *vq)
{
	/*
	 * Note this is only considered a hint to the host.
	 */
	if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

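/*
 * Enqueue the buffer chain described by `sg', with `cookie' used to
 * identify it when dequeued. The first `readable' segments are read
 * by the host; the remaining `writable' segments are written by the
 * host, so readable segments must be appended to the sglist first.
 *
 * A minimal sketch of a driver submit path, assuming a hypothetical
 * request `req' with a device-readable header and a device-writable
 * status byte (error handling of the appends omitted):
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *	sglist_append(&sg, &req->status, sizeof(req->status));
 *	if (virtqueue_enqueue(vq, req, &sg, 1, 1) == 0)
 *		virtqueue_notify(vq, NULL);
 */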
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

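/*
 * Dequeue one completed buffer, returning its cookie and, if `len'
 * is not NULL, the number of bytes the host wrote into it. Returns
 * NULL when no completions are pending. A typical interrupt handler
 * drains completions in a loop, roughly:
 *
 *	while ((req = virtqueue_dequeue(vq, &len)) != NULL)
 *		vtfoo_complete(sc, req);
 *
 * where vtfoo_complete() stands in for the driver's completion path.
 */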
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	cpu_mfence();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	/* We only poll the virtqueue when dumping to virtio-blk */
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		;

	return (cookie);
}

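/*
 * Recover any cookies still held by the virtqueue, e.g. when a
 * device is being detached. `last' carries the scan position across
 * calls, so a caller starts at zero and iterates until NULL:
 *
 *	int last = 0;
 *
 *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
 *		kfree(cookie, M_DEVBUF);
 *
 * (Here kfree() stands in for whatever frees the driver's requests.)
 */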
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	kprintf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	cpu_mfence();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify() for debugging. */
	vq->vq_queued_cnt++;
}

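/*
 * Fill one descriptor per scatter/gather segment, walking the free
 * chain from `head_idx'. Returns the index following the last
 * descriptor used, which becomes the new free chain head.
 */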
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

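/*
 * Use an indirect descriptor for this transfer only if the feature
 * was negotiated, the chain fits in the preallocated indirect list,
 * and the transfer has at least two segments; for a single segment,
 * a direct descriptor is cheaper than a descriptor plus an indirect
 * list.
 */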
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

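/*
 * Enqueue the chain through this entry's preallocated indirect list,
 * consuming only one descriptor in the main ring.
 */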
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{
	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

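/*
 * Return the descriptor chain headed by `desc_idx' to the free list.
 * An indirect chain occupies only one descriptor in the main ring,
 * so in that case only the head needs to be freed.
 */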
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}
	VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * the free head will be VQ_RING_DESC_CHAIN_END (asserted above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}