1 #ifndef _VIRTIO_RING_H_
2 # define _VIRTIO_RING_H_
3
4 #include <ipxe/virtio-pci.h>
5
6 /* Status byte for guest to report progress, and synchronize features. */
7 /* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
8 #define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
9 /* We have found a driver for the device. */
10 #define VIRTIO_CONFIG_S_DRIVER 2
11 /* Driver has used its parts of the config, and is happy */
12 #define VIRTIO_CONFIG_S_DRIVER_OK 4
13 /* Driver has finished configuring features */
14 #define VIRTIO_CONFIG_S_FEATURES_OK 8
15 /* We've given up on this device. */
16 #define VIRTIO_CONFIG_S_FAILED 0x80
17
/* Virtio feature flags used to negotiate device and driver features. */
/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32
/* Device access to memory is mediated by a platform IOMMU. */
#define VIRTIO_F_IOMMU_PLATFORM 33
24
/* Maximum number of ring entries this driver supports per virtqueue. */
#define MAX_QUEUE_NUM (256)

/* Descriptor flags: buffer continues via the "next" field. */
#define VRING_DESC_F_NEXT 1
/* Descriptor flags: buffer is device write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2

/* Avail ring flag: driver asks the device not to interrupt on used buffers. */
#define VRING_AVAIL_F_NO_INTERRUPT 1

/* Used ring flag: device asks the driver not to kick after adding buffers. */
#define VRING_USED_F_NO_NOTIFY 1
33
/* Virtqueue descriptor: describes one guest buffer.  Layout is fixed by
 * the virtio specification and shared with the device; do not reorder. */
struct vring_desc
{
	/* Guest-physical address of the buffer */
	u64 addr;
	/* Length of the buffer in bytes */
	u32 len;
	/* VRING_DESC_F_NEXT / VRING_DESC_F_WRITE */
	u16 flags;
	/* Index of the next descriptor when F_NEXT is set */
	u16 next;
};
41
42 struct vring_avail
43 {
44 u16 flags;
45 u16 idx;
46 u16 ring[0];
47 };
48
/* One entry of the used ring: a completed descriptor chain. */
struct vring_used_elem
{
	/* Head index of the completed descriptor chain (u32 per spec) */
	u32 id;
	/* Total number of bytes the device wrote into the chain */
	u32 len;
};
54
/* Used ring (device -> driver).  The device returns completed buffers
 * here and advances idx.  Layout is fixed by the virtio specification. */
struct vring_used
{
	/* VRING_USED_F_NO_NOTIFY */
	u16 flags;
	/* Free-running index of the next ring[] slot the device will fill */
	u16 idx;
	/* Completed chains; vring.num entries follow this header */
	struct vring_used_elem ring[];
};
61
/* Driver-side view of one virtqueue's three shared-memory regions.
 * The pointers are set up by vring_init() from a single buffer. */
struct vring {
	/* Number of entries in each of the three rings */
	unsigned int num;
	/* Descriptor table (page-aligned) */
	struct vring_desc *desc;
	/* Available ring, immediately after the descriptor table */
	struct vring_avail *avail;
	/* Used ring, on the next page boundary after the avail ring */
	struct vring_used *used;
};
68
/* Total bytes required for a vring of NUM entries: the descriptor table
 * plus the available ring, padded up to a page boundary, followed by the
 * used ring (matching the layout established by vring_init()).
 *
 * Fix: the macro argument is now parenthesized everywhere, so an
 * expression such as vring_size(a + b) expands correctly. */
#define vring_size(num) \
	(((((sizeof(struct vring_desc) * (num)) + \
	    (sizeof(struct vring_avail) + sizeof(u16) * (num))) \
	   + PAGE_MASK) & ~PAGE_MASK) + \
	 (sizeof(struct vring_used) + sizeof(struct vring_used_elem) * (num)))
74
/* Per-queue driver state wrapping a vring. */
struct vring_virtqueue {
	/* Backing buffer holding all three rings (see vring_init()) */
	unsigned char *queue;
	/* Pointers into the queue buffer */
	struct vring vring;
	/* Head of the free descriptor chain */
	u16 free_head;
	/* Last used->idx value we have consumed */
	u16 last_used_idx;
	/* Per-descriptor driver cookies (set via vring_add_buf()) */
	void **vdata;
	/* PCI */
	/* Queue index within the device */
	int queue_index;
	/* Modern-PCI notification region used to kick the device */
	struct virtio_pci_region notification;
};
85
/* One element of a scatter-gather list passed to vring_add_buf(). */
struct vring_list {
	/* Virtual address of the buffer */
	char *addr;
	/* Buffer length in bytes */
	unsigned int length;
};
90
vring_init(struct vring * vr,unsigned int num,unsigned char * queue)91 static inline void vring_init(struct vring *vr,
92 unsigned int num, unsigned char *queue)
93 {
94 unsigned int i;
95 unsigned long pa;
96
97 vr->num = num;
98
99 /* physical address of desc must be page aligned */
100
101 pa = virt_to_phys(queue);
102 pa = (pa + PAGE_MASK) & ~PAGE_MASK;
103 vr->desc = phys_to_virt(pa);
104
105 vr->avail = (struct vring_avail *)&vr->desc[num];
106
107 /* physical address of used must be page aligned */
108
109 pa = virt_to_phys(&vr->avail->ring[num]);
110 pa = (pa + PAGE_MASK) & ~PAGE_MASK;
111 vr->used = phys_to_virt(pa);
112
113 for (i = 0; i < num - 1; i++)
114 vr->desc[i].next = i + 1;
115 vr->desc[i].next = 0;
116 }
117
vring_enable_cb(struct vring_virtqueue * vq)118 static inline void vring_enable_cb(struct vring_virtqueue *vq)
119 {
120 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
121 }
122
vring_disable_cb(struct vring_virtqueue * vq)123 static inline void vring_disable_cb(struct vring_virtqueue *vq)
124 {
125 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
126 }
127
128
129 /*
130 * vring_more_used
131 *
132 * is there some used buffers ?
133 *
134 */
135
vring_more_used(struct vring_virtqueue * vq)136 static inline int vring_more_used(struct vring_virtqueue *vq)
137 {
138 wmb();
139 return vq->last_used_idx != vq->vring.used->idx;
140 }
141
/* Return the descriptor chain starting at head to the free list
 * (implemented in vring.c). */
void vring_detach(struct vring_virtqueue *vq, unsigned int head);
/* Fetch the next completed buffer's cookie from the used ring; the
 * number of bytes written is presumably stored via len — see vring.c. */
void *vring_get_buf(struct vring_virtqueue *vq, unsigned int *len);
/* Queue a scatter-gather list ("out" device-readable entries followed
 * by "in" device-writable entries) with driver cookie "index". */
void vring_add_buf(struct vring_virtqueue *vq, struct vring_list list[],
		   unsigned int out, unsigned int in,
		   void *index, int num_added);
/* Publish num_added buffers and notify the device. */
void vring_kick(struct virtio_pci_modern_device *vdev, unsigned int ioaddr,
		struct vring_virtqueue *vq, int num_added);
149
150 #endif /* _VIRTIO_RING_H_ */
151