/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/virtio/virtqueue.c,v 1.2 2012/04/14 05:48:04 grehan Exp $
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/atomic.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "virtio.h"
#include "virtqueue.h"
#include "virtio_ring.h"

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;

#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp), ("%s: %s - " _msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

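/*
 * Mask off any transport feature bits this file does not implement.
 * Device-specific bits (below VIRTIO_TRANSPORT_F_START) pass through
 * untouched; of the transport features, only indirect descriptors and
 * event indexes are understood here.
 */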
uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}

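/*
 * Allocate and initialize a virtqueue: validate the device-provided size,
 * set up the optional indirect descriptor tables, and carve the ring out
 * of physically contiguous memory below 'highaddr'.
 */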
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

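/*
 * Preallocate one indirect descriptor table per ring entry so that
 * enqueueing an indirect chain never has to allocate memory. This is
 * skipped, successfully, if the host did not negotiate the feature.
 */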
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = contigmalloc(size, M_DEVBUF, M_WAITOK,
		    0, BUS_SPACE_MAXADDR, 16, 0);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		contigfree(dxp->indirect, vq->vq_indirect_mem_size, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

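/*
 * Link every descriptor in an indirect table into a single free chain,
 * terminated the same way as the main descriptor table.
 */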
static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

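/*
 * Reset a virtqueue to its freshly allocated state, e.g. after the device
 * has been reset. The ring memory is reused, so the size must match the
 * original allocation.
 */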
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning, '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	kfree(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{
	return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

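/*
 * Notify the host of newly queued buffers, unless notifications are
 * currently suppressed. The interlock, if any, is dropped around the
 * notify since the resulting device access may be expensive.
 */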
void
virtqueue_notify(struct virtqueue *vq, lwkt_serialize_t interlock)
{
	/* Ensure updated avail->idx is visible to host. */
	cpu_mfence();

	if (vq_ring_must_notify_host(vq)) {
		if (interlock != NULL)
			lwkt_serialize_exit(interlock);
		vq_ring_notify_host(vq);
		if (interlock != NULL)
			lwkt_serialize_enter(interlock);
	}
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;
	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_pending(struct virtqueue *vq)
{
	return (vq->vq_used_cons_idx != vq->vq_ring.used->idx);
}

/*
 * Enable interrupts on a given virtqueue. Returns 1 if there are
 * additional entries to process on the virtqueue after we return.
 */
int
virtqueue_enable_intr(struct virtqueue *vq)
{
	/*
	 * Enable interrupts, making sure we get the latest
	 * index of what's already been consumed.
	 */
	vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx;

	cpu_mfence();

	/*
	 * Additional items may have been consumed between when we last
	 * checked and when interrupts were enabled above. Let our caller
	 * know so it processes the new entries.
	 */
	if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
		return (1);

	return (0);
}

int
virtqueue_postpone_intr(struct virtqueue *vq)
{
	uint16_t ndesc;

	/*
	 * Postpone until at least half of the available descriptors
	 * have been consumed.
	 *
	 * XXX Adaptive factor? (Linux uses 3/4)
	 */
	ndesc = (uint16_t)(vq->vq_ring.avail->idx - vq->vq_used_cons_idx) / 2;
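	/*
	 * For example, with avail->idx at 100 and vq_used_cons_idx at 60,
	 * 40 descriptors are outstanding, so ndesc is 20 and the host is
	 * asked to delay its interrupt until it has consumed entry 80.
	 */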

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	cpu_mfence();

	/*
	 * Enough items may have already been consumed to meet our
	 * threshold since we last checked. Let our caller know so
	 * it processes the new entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

void
virtqueue_disable_intr(struct virtqueue *vq)
{
	/*
	 * Note this is only considered a hint to the host.
	 */
	if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

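/*
 * Enqueue a buffer described by 'sg', with its device-readable segments
 * first, followed by its device-writable segments. 'cookie' is returned
 * by virtqueue_dequeue() once the host has finished with the chain.
 */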
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

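/*
 * Dequeue the cookie of the next completed chain, or NULL if the used
 * ring is empty. If 'len' is not NULL, it is set to the number of bytes
 * the device wrote into the buffer.
 */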
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	cpu_lfence();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	/* We only poll the virtqueue when dumping to virtio-blk. */
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		;

	return (cookie);
}

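/*
 * Drain any buffers still owned by the driver, one call per buffer.
 * 'last' carries the scan position between calls; start it at zero and
 * call until NULL is returned. Intended for detach paths.
 */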
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	kprintf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	cpu_sfence();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify() for debugging. */
	vq->vq_queued_cnt++;
}

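/*
 * Walk the free descriptor chain starting at head_idx, filling in one
 * descriptor per scatter/gather segment. Returns the index of the first
 * descriptor past the chain, which becomes the new free list head.
 */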
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

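/*
 * Indirect descriptors are only worthwhile for chains of at least two
 * segments: a single-segment request fits in one ring descriptor anyway,
 * and the chain must also fit within the preallocated indirect table.
 */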
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

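/*
 * Enqueue a chain through the entry's preallocated indirect table. Only
 * a single ring descriptor is consumed, regardless of the number of
 * segments; it references the table via VRING_DESC_F_INDIRECT.
 */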
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

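/*
 * With event indexes negotiated, the host requests a notification only
 * when the avail index passes avail_event: vring_need_event() checks
 * whether that point fell within the entries added since the last
 * notify, i.e. within (prev_idx, new_idx]. Otherwise the host's
 * VRING_USED_F_NO_NOTIFY flag alone decides.
 */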
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{
	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

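/*
 * Return the descriptor chain starting at desc_idx to the free list.
 * An indirect chain occupies only one ring descriptor, so dxp->ndescs
 * is 1 in that case and the next pointers are not walked.
 */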
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}
	VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * the head would be VQ_RING_DESC_CHAIN_END (asserted above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}