/*
 * Packed virtio ring manipulation routines
 *
 * Copyright 2019 Red Hat, Inc.
 *
 * Authors:
 *  Yuri Benditovich <ybendito@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "windows/virtio_ring_allocation.h"

#include <pshpack1.h>

struct vring_packed_desc_event {
    /* Descriptor Ring Change Event Offset/Wrap Counter. */
    __le16 off_wrap;
    /* Descriptor Ring Change Event Flags. */
    __le16 flags;
};

struct vring_packed_desc {
    /* Buffer Address. */
    __virtio64 addr;
    /* Buffer Length. */
    __le32 len;
    /* Buffer ID. */
    __le16 id;
    /* The flags depending on descriptor type. */
    __le16 flags;
};

#include <poppack.h>

#define BUG_ON(condition) do { if (condition) { KeBugCheck(0xE0E1E2E3); } } while (0)
#define BAD_RING(vq, fmt, ...)                                                           \
    do {                                                                                 \
        DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, (vq)->vq.index, __VA_ARGS__);     \
        BUG_ON(true);                                                                    \
    } while (0)

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/*
 * Mark a descriptor as available or used in packed ring.
 * Notice: they are defined as shifts instead of shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED 15
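/*
 * Illustrative note (summary, not an additional definition): while the
 * driver's avail_wrap_counter is 1, a descriptor is made available with
 * AVAIL=1/USED=0, and the device marks it used by setting both bits to 1.
 * After the driver index wraps, the polarity flips and "available" becomes
 * AVAIL=0/USED=1. This is why avail_used_flags below toggles both bits at
 * once whenever next_avail_idx wraps around the ring.
 */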

/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1

/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC 0x2
/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event?
 */
static inline bool vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new_idx respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
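/*
 * Worked example (illustrative): with old = 10 and new_idx = 12 the driver has
 * just added two buffers. If the device advertised event_idx 10 or 11, the left
 * side evaluates to 1 or 0, both below (__u16)(new_idx - old) = 2, so a
 * notification is needed; event_idx 12 yields 0xFFFF and suppresses it. The
 * unsigned 16-bit arithmetic keeps the comparison correct across index wrap.
 */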

struct vring_desc_state_packed {
    void *data; /* Data for callback. */
    u16 num;    /* Descriptor list length. */
    u16 next;   /* The next desc state in a list. */
    u16 last;   /* The last desc state in a list. */
};

struct virtqueue_packed {
    struct virtqueue vq;
    /* Number we've added since last sync. */
    unsigned int num_added;
    /* Head of free buffer list. */
    unsigned int free_head;
    /* Number of free descriptors */
    unsigned int num_free;
    /* Last used index we've seen. */
    u16 last_used_idx;
    /* Avail used flags. */
    u16 avail_used_flags;
    struct {
        /* Driver ring wrap counter. */
        bool avail_wrap_counter;
        /* Device ring wrap counter. */
        bool used_wrap_counter;
        /* Index of the next avail descriptor. */
        u16 next_avail_idx;
        /*
         * Last written value to driver->flags in
         * guest byte order.
         */
        u16 event_flags_shadow;
        struct {
            unsigned int num;
            struct vring_packed_desc *desc;
            struct vring_packed_desc_event *driver;
            struct vring_packed_desc_event *device;
        } vring;
        /* Per-descriptor state. */
        struct vring_desc_state_packed *desc_state;
    } packed;
    struct vring_desc_state_packed desc_states[];
};

#define packedvq(vq) ((struct virtqueue_packed *)vq)

unsigned int vring_control_block_size_packed(u16 qsize)
{
    return sizeof(struct virtqueue_packed) + sizeof(struct vring_desc_state_packed) * qsize;
}

unsigned long vring_size_packed(unsigned int num, unsigned long align)
{
    /* array of descriptors */
    unsigned long res = num * sizeof(struct vring_packed_desc);
    /* driver and device event */
    res += 2 * sizeof(struct vring_packed_desc_event);
    return res;
}
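/*
 * Size sketch (illustrative): the packed ring is a flat array of 16-byte
 * descriptors followed by the two 4-byte event suppression structures, so for
 * num = 256 this returns 256 * 16 + 2 * 4 = 4104 bytes. The align argument is
 * accepted for interface parity with the split ring and does not affect the
 * result.
 */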

static int virtqueue_add_buf_packed(
    struct virtqueue *_vq,   /* the queue */
    struct scatterlist sg[], /* sg array of length out + in */
    unsigned int out,        /* number of driver->device buffer descriptors in sg */
    unsigned int in,         /* number of device->driver buffer descriptors in sg */
    void *opaque,            /* later returned from virtqueue_get_buf */
    void *va_indirect,       /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int descs_used;
    struct vring_packed_desc *desc;
    u16 head, id, i;

    descs_used = out + in;
    head = vq->packed.next_avail_idx;
    id = (u16)vq->free_head;

    BUG_ON(descs_used == 0);
    BUG_ON(id >= vq->packed.vring.num);

    if (va_indirect && vq->num_free > 0) {
        desc = va_indirect;
        for (i = 0; i < descs_used; i++) {
            desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
        }
        vq->packed.vring.desc[head].addr = phys_indirect;
        vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc);
        vq->packed.vring.desc[head].id = id;

        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags;

        DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index);
        head++;
        if (head >= vq->packed.vring.num) {
            head = 0;
            vq->packed.avail_wrap_counter ^= 1;
            vq->avail_used_flags ^=
                1 << VRING_PACKED_DESC_F_AVAIL |
                1 << VRING_PACKED_DESC_F_USED;
        }
        vq->packed.next_avail_idx = head;
        /* We're using some buffers from the free list. */
        vq->num_free -= 1;
        vq->num_added += 1;

        vq->free_head = vq->packed.desc_state[id].next;

        /* Store token and indirect buffer state. */
        vq->packed.desc_state[id].num = 1;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = id;
    } else {
        unsigned int n;
        u16 curr, prev, head_flags;
        if (vq->num_free < descs_used) {
            DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index);
            return -ENOSPC;
        }
        desc = vq->packed.vring.desc;
        i = head;
        curr = id;
        for (n = 0; n < descs_used; n++) {
            u16 flags = vq->avail_used_flags;
            flags |= n < out ? 0 : VRING_DESC_F_WRITE;
            if (n != descs_used - 1) {
                flags |= VRING_DESC_F_NEXT;
            }
            desc[i].addr = sg[n].physAddr.QuadPart;
            desc[i].len = sg[n].length;
            desc[i].id = id;
            if (n == 0) {
                head_flags = flags;
            } else {
                desc[i].flags = flags;
            }

            prev = curr;
            curr = vq->packed.desc_state[curr].next;

            if (++i >= vq->packed.vring.num) {
                i = 0;
                vq->avail_used_flags ^=
                    1 << VRING_PACKED_DESC_F_AVAIL |
                    1 << VRING_PACKED_DESC_F_USED;
            }
        }

        if (i < head) {
            vq->packed.avail_wrap_counter ^= 1;
        }

        /* We're using some buffers from the free list. */
        vq->num_free -= descs_used;

        /* Update free pointer */
        vq->packed.next_avail_idx = i;
        vq->free_head = curr;

        /* Store token. */
        vq->packed.desc_state[id].num = (u16)descs_used;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = prev;

        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
         * the list are made available.
         */
        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;

        DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index);
    }

    return 0;
}
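/*
 * Usage sketch (illustrative, not part of this driver; header_pa, header_len,
 * payload_pa, payload_len and request_context are placeholders): a send path
 * would typically fill a scatterlist, add it through the add_buf callback
 * installed by vring_new_virtqueue_packed() below, and notify the device only
 * when kick_prepare asks for it. Locking and error handling are the caller's
 * responsibility.
 *
 *     struct scatterlist sg[2];
 *     sg[0].physAddr.QuadPart = header_pa;  sg[0].length = header_len;
 *     sg[1].physAddr.QuadPart = payload_pa; sg[1].length = payload_len;
 *     if (vq->add_buf(vq, sg, 2, 0, request_context, NULL, 0) == 0 &&
 *         vq->kick_prepare(vq)) {
 *         virtqueue_notify(vq);
 *     }
 */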

static void detach_buf_packed(struct virtqueue_packed *vq, unsigned int id)
{
    struct vring_desc_state_packed *state = &vq->packed.desc_state[id];

    /* Clear data ptr. */
    state->data = NULL;

    vq->packed.desc_state[state->last].next = (u16)vq->free_head;
    vq->free_head = id;
    vq->num_free += state->num;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int i;
    void *buf;

    for (i = 0; i < vq->packed.vring.num; i++) {
        if (!vq->packed.desc_state[i].data) {
            continue;
        }
        /* detach_buf clears data, so grab it now. */
        buf = vq->packed.desc_state[i].data;
        detach_buf_packed(vq, i);
        return buf;
    }
    /* That should have freed everything. */
    BUG_ON(vq->num_free != vq->packed.vring.num);

    return NULL;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);

    if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }
}

static inline bool is_used_desc_packed(const struct virtqueue_packed *vq,
                                       u16 idx, bool used_wrap_counter)
{
    bool avail, used;
    u16 flags;

    flags = vq->packed.vring.desc[idx].flags;
    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

    return avail == used && used == used_wrap_counter;
}
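/*
 * Example (illustrative) with used_wrap_counter == 1:
 *   AVAIL=1, USED=0 -> made available this lap, not yet used -> false
 *   AVAIL=1, USED=1 -> used by the device during this lap    -> true
 *   AVAIL=0, USED=0 -> used during the previous lap (stale)  -> false
 */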

static inline bool virtqueue_poll_packed(struct virtqueue_packed *vq, u16 off_wrap)
{
    bool wrap_counter;
    u16 used_idx;

    KeMemoryBarrier();

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

    return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static inline unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue_packed *vq)
{
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;

    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */
    if (event_suppression_enabled) {
        vq->packed.vring.driver->off_wrap = vq->last_used_idx |
            ((u16)vq->packed.used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    return vq->last_used_idx |
        ((u16)vq->packed.used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_enable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq);

    return !virtqueue_poll_packed(vq, (u16)last_used_idx);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    u16 used_idx, wrap_counter;
    u16 bufs;

    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */
    if (event_suppression_enabled) {
        /* TODO: tune this threshold */
        bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4;
        wrap_counter = vq->packed.used_wrap_counter;

        used_idx = vq->last_used_idx + bufs;
        if (used_idx >= vq->packed.vring.num) {
            used_idx -= (u16)vq->packed.vring.num;
            wrap_counter ^= 1;
        }

        vq->packed.vring.driver->off_wrap = used_idx |
            (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);

        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    /*
     * We need to update event suppression structure first
     * before re-checking for more used buffers.
     */
    KeMemoryBarrier();

    if (is_used_desc_packed(vq, vq->last_used_idx, vq->packed.used_wrap_counter)) {
        return false;
    }

    return true;
}

static BOOLEAN virtqueue_is_interrupt_enabled_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    /* Callbacks are enabled unless events have been explicitly disabled. */
    return vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE;
}

static void virtqueue_shutdown_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int num = vq->packed.vring.num;
    void *pages = vq->packed.vring.desc;
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;

    RtlZeroMemory(pages, vring_size_packed(num, vring_align));
    vring_new_virtqueue_packed(
        _vq->index,
        num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        _vq);
}

static inline bool more_used_packed(const struct virtqueue_packed *vq)
{
    return is_used_desc_packed(vq, vq->last_used_idx,
                               vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_packed(
    struct virtqueue *_vq, /* the queue */
    unsigned int *len)     /* number of bytes returned by the device */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 last_used, id;
    void *ret;

    if (!more_used_packed(vq)) {
        DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__);
        return NULL;
    }

    /* Only get used elements after they have been exposed by host. */
    KeMemoryBarrier();

    last_used = vq->last_used_idx;
    id = vq->packed.vring.desc[last_used].id;
    *len = vq->packed.vring.desc[last_used].len;

    if (id >= vq->packed.vring.num) {
        BAD_RING(vq, "id %u out of range\n", id);
        return NULL;
    }
    if (!vq->packed.desc_state[id].data) {
        BAD_RING(vq, "id %u is not a head!\n", id);
        return NULL;
    }

    /* detach_buf_packed clears data, so grab it now. */
    ret = vq->packed.desc_state[id].data;
    detach_buf_packed(vq, id);

    vq->last_used_idx += vq->packed.desc_state[id].num;
    if (vq->last_used_idx >= vq->packed.vring.num) {
        vq->last_used_idx -= (u16)vq->packed.vring.num;
        vq->packed.used_wrap_counter ^= 1;
    }

    /*
     * If we expect an interrupt for the next entry, tell host
     * by writing event index and flush out the write before
     * the read in the next get_buf call.
     */
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) {
        vq->packed.vring.driver->off_wrap = vq->last_used_idx |
            ((u16)vq->packed.used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
        KeMemoryBarrier();
    }

    return ret;
}
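/*
 * Completion sketch (illustrative; complete_request is a placeholder):
 * interrupt/DPC handlers typically drain the queue through the get_buf
 * callback until it returns NULL, then re-enable callbacks and re-check to
 * close the race window, since enable_cb returns false when more used
 * buffers arrived in the meantime:
 *
 *     unsigned int len;
 *     void *ctx;
 *     do {
 *         while ((ctx = vq->get_buf(vq, &len)) != NULL) {
 *             complete_request(ctx, len);
 *         }
 *     } while (!vq->enable_cb(vq));
 */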

static BOOLEAN virtqueue_has_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return more_used_packed(vq);
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 new, old, off_wrap, flags, wrap_counter, event_idx;
    bool needs_kick;
    union {
        struct {
            __le16 off_wrap;
            __le16 flags;
        };
        u32 value32;
    } snapshot;

    /*
     * We need to expose the new flags value before checking notification
     * suppressions.
     */
    KeMemoryBarrier();

    old = vq->packed.next_avail_idx - vq->num_added;
    new = vq->packed.next_avail_idx;
    vq->num_added = 0;

    snapshot.value32 = *(u32 *)vq->packed.vring.device;
    flags = snapshot.flags;

    if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
        needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
        goto out;
    }

    off_wrap = snapshot.off_wrap;

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    if (wrap_counter != vq->packed.avail_wrap_counter) {
        event_idx -= (u16)vq->packed.vring.num;
    }

    needs_kick = vring_need_event(event_idx, new, old);
out:
    return needs_kick;
}
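/*
 * Worked example (illustrative): new - old above always equals the number of
 * descriptors added since the last kick. If the driver has wrapped but the
 * device's advertised event index still refers to the previous lap (wrap
 * counters differ), subtracting num maps it onto the same 16-bit scale as
 * old/new; e.g. on a 256-entry ring, event index 254 becomes 254 - 256 =
 * 0xFFFE, so vring_need_event() still detects whether slot 254 was just
 * crossed.
 */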

static void virtqueue_kick_always_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    KeMemoryBarrier();
    vq->num_added = 0;
    virtqueue_notify(_vq);
}

/* Initializes a new virtqueue using already allocated memory */
struct virtqueue *vring_new_virtqueue_packed(
    unsigned int index,                 /* virtqueue index */
    unsigned int num,                   /* virtqueue size (always a power of 2) */
    unsigned int vring_align,           /* vring alignment requirement */
    VirtIODevice *vdev,                 /* the virtio device owning the queue */
    void *pages,                        /* vring memory */
    void (*notify)(struct virtqueue *), /* notification callback */
    void *control)                      /* virtqueue memory */
{
    struct virtqueue_packed *vq = packedvq(control);
    unsigned int i;

    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;

    vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc);
    vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event);

    /* initialize the ring */
    vq->packed.vring.num = num;
    vq->packed.vring.desc = pages;
    vq->packed.vring.driver = vq->vq.avail_va;
    vq->packed.vring.device = vq->vq.used_va;

    vq->num_free = num;
    vq->free_head = 0;
    vq->num_added = 0;
    vq->packed.avail_wrap_counter = 1;
    vq->packed.used_wrap_counter = 1;
    vq->last_used_idx = 0;
    vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
    vq->packed.next_avail_idx = 0;
    vq->packed.event_flags_shadow = 0;
    vq->packed.desc_state = vq->desc_states;

    RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state));
    for (i = 0; i < num - 1; i++) {
        vq->packed.desc_state[i].next = i + 1;
    }

    vq->vq.add_buf = virtqueue_add_buf_packed;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed;
    vq->vq.disable_cb = virtqueue_disable_cb_packed;
    vq->vq.enable_cb = virtqueue_enable_cb_packed;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed;
    vq->vq.get_buf = virtqueue_get_buf_packed;
    vq->vq.has_buf = virtqueue_has_buf_packed;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed;
    vq->vq.kick_always = virtqueue_kick_always_packed;
    vq->vq.kick_prepare = virtqueue_kick_prepare_packed;
    vq->vq.shutdown = virtqueue_shutdown_packed;
    return &vq->vq;
}
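/*
 * Setup sketch (illustrative; the real allocation lives in the PCI layer, and
 * alloc_nonpaged, alloc_contiguous_zeroed and notify_cb are placeholders): the
 * control block and the device-visible ring memory are sized with the helpers
 * above and then handed to vring_new_virtqueue_packed():
 *
 *     unsigned int ctrl_size = vring_control_block_size_packed((u16)num);
 *     unsigned long ring_bytes = vring_size_packed(num, PAGE_SIZE);
 *     void *control = alloc_nonpaged(ctrl_size);
 *     void *ring = alloc_contiguous_zeroed(ring_bytes);
 *     struct virtqueue *vq = vring_new_virtqueue_packed(index, num, PAGE_SIZE,
 *                                                       vdev, ring, notify_cb,
 *                                                       control);
 */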