/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky.  Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers.  Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (e.g. other heterogeneous
 * CPUs) we do need real barriers.  In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();
}

static inline void virtio_store_mb(bool weak_barriers,
				   __virtio16 *p, __virtio16 v)
{
	if (weak_barriers) {
		virt_store_mb(*p, v);
	} else {
		WRITE_ONCE(*p, v);
		mb();
	}
}
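
/*
 * Illustrative sketch only (not part of this header): a split-ring
 * implementation along the lines of drivers/virtio/virtio_ring.c
 * publishes a new available entry with the write barrier between the
 * ring entry and the index update; vr, avail_idx, head and
 * weak_barriers below are placeholder names:
 *
 *	vr.avail->ring[avail_idx & (vr.num - 1)] =
 *		cpu_to_virtio16(vdev, head);
 *	virtio_wmb(weak_barriers);
 *	vr.avail->idx = cpu_to_virtio16(vdev, ++avail_idx);
 *
 * virtio_store_mb() covers the store-then-full-barrier case, e.g.
 * publishing a used_event value before re-reading used->idx.
 */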

struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
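
/*
 * Hedged usage sketch, not a prescription: a transport such as
 * virtio_mmio creates the ring with may_reduce_num set so the core may
 * fall back to a smaller allocation, then queries the size it actually
 * got; index, num, ctx, vm_notify, callback and name below are
 * placeholders supplied by the (hypothetical) caller:
 *
 *	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev,
 *				    true, true, ctx,
 *				    vm_notify, callback, name);
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 *	num = virtqueue_get_vring_size(vq);
 */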

/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);
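
/*
 * Illustrative sketch only: a caller that has already laid out ring
 * memory itself can describe it with vring_init() from
 * uapi/linux/virtio_ring.h and then wrap it; queue_mem, num,
 * vring_align and the callbacks are placeholders here:
 *
 *	struct vring vring;
 *
 *	vring_init(&vring, num, queue_mem, vring_align);
 *	vq = __vring_new_virtqueue(index, vring, vdev, true, ctx,
 *				   notify, callback, name);
 */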

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
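
/*
 * Illustrative sketch only, loosely modelled on remoteproc: the caller
 * owns the ring pages and talks to a separate physical processor, so
 * weak_barriers is false and real barriers are used; addr, num and the
 * callbacks are placeholders:
 *
 *	vq = vring_new_virtqueue(index, num, vring_align, vdev,
 *				 false, ctx, addr,
 *				 notify, callback, name);
 */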

/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* _LINUX_VIRTIO_RING_H */