/*
 * vhost-backend
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "standard-headers/linux/vhost_types.h"

#include "hw/virtio/vhost-vdpa.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>

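/*
 * For the kernel backend, dev->opaque holds the file descriptor of the
 * vhost character device (e.g. /dev/vhost-net); every request below is an
 * ioctl on that fd, with failures reported as negative errno values.
 */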
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

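/*
 * init/cleanup simply adopt and later close the already-open vhost fd that
 * the caller passes in as the backend-specific opaque pointer.
 */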
static int vhost_kernel_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd) < 0 ? -errno : 0;
}

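/*
 * The number of memory slots the kernel backend accepts is exposed through
 * the vhost module parameter max_mem_regions; fall back to a default of 64
 * when the sysfs file is missing or holds an invalid value.
 */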
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    char *s;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val = g_ascii_strtoull(s, NULL, 10);
        if (val < INT_MAX && val > 0) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
    }
    g_free(s);
    return limit;
}

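/*
 * The bulk of the backend ops are thin one-ioctl wrappers: each one just
 * forwards the caller's argument to the matching VHOST_* request.
 */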
static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

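/*
 * Only the log base address goes to the kernel; the vhost_log argument is
 * unused by this backend.
 */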
static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

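/*
 * Negotiate backend capability bits with the kernel (currently only
 * VHOST_BACKEND_F_IOTLB_MSG_V2 is requested).  Failures from either ioctl
 * are ignored here, so older kernels without
 * VHOST_GET/SET_BACKEND_FEATURES keep working without the extra
 * capabilities.
 */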
static int vhost_kernel_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
    int r;

    if (vhost_kernel_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_kernel_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}

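/*
 * Each kernel vhost device covers dev->nvqs queues starting at
 * dev->vq_index, so a device-wide virtqueue index is translated into an
 * offset relative to that base.
 */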
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}

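/*
 * Read handler for the vhost fd: drain pending IOTLB messages from the
 * kernel and hand them to the generic vhost code.  The message layout
 * depends on whether IOTLB message v2 was negotiated above.
 */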
static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    ssize_t len;

    if (dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG_V2) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    } else {
        struct vhost_msg msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    }
}

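/*
 * IOTLB updates and invalidations flow the other way: wrap the generic
 * vhost_iotlb_msg in the v1 or v2 container expected by the kernel and
 * write it to the vhost fd.
 */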
static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
                                              struct vhost_iotlb_msg *imsg)
{
    if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg = {};

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Failed to update device iotlb");
            return -EFAULT;
        }
    } else {
        struct vhost_msg msg = {};

        msg.type = VHOST_IOTLB_MSG;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Failed to update device iotlb");
            return -EFAULT;
        }
    }

    return 0;
}

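/*
 * Install or remove the main-loop read handler that pulls IOTLB requests
 * out of the vhost fd.
 */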
static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                            int enabled)
{
    if (enabled) {
        qemu_set_fd_handler((uintptr_t)dev->opaque,
                            vhost_kernel_iotlb_read, NULL, dev);
    } else {
        qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
    }
}

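/*
 * Dispatch table used by the generic vhost layer when the device runs on
 * the in-kernel vhost backend.
 */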
const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_reset_device = vhost_kernel_reset_device,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
};
#endif /* CONFIG_VHOST_KERNEL */

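/*
 * Generic helpers shared by all backends: translate QEMU's IOMMU view into
 * vhost_iotlb_msg requests and route them through the backend's
 * vhost_send_device_iotlb_msg op.
 */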
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                      uint64_t iova, uint64_t uaddr,
                                      uint64_t len,
                                      IOMMUAccessFlags perm)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.uaddr = uaddr;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_UPDATE;

    switch (perm) {
    case IOMMU_RO:
        imsg.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        imsg.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        imsg.perm = VHOST_ACCESS_RW;
        break;
    default:
        return -EINVAL;
    }

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg) {
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
    }

    return -ENODEV;
}

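/*
 * Ask the backend to drop any IOTLB entries covering [iova, iova + len).
 */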
int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
                                          uint64_t iova, uint64_t len)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_INVALIDATE;

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg) {
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
    }

    return -ENODEV;
}

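/*
 * Entry point for IOTLB messages coming from the backend; only MISS
 * requests are serviced, everything else is rejected.
 */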
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *imsg)
{
    int ret = 0;

    if (unlikely(!dev->vdev)) {
        error_report("Unexpected IOTLB message when virtio device is stopped");
        return -EINVAL;
    }

    switch (imsg->type) {
    case VHOST_IOTLB_MISS:
        ret = vhost_device_iotlb_miss(dev, imsg->iova,
                                      imsg->perm != VHOST_ACCESS_RO);
        break;
    case VHOST_IOTLB_ACCESS_FAIL:
        /* FIXME: report device iotlb error */
        error_report("Access failure IOTLB message type not supported");
        ret = -ENOTSUP;
        break;
    case VHOST_IOTLB_UPDATE:
    case VHOST_IOTLB_INVALIDATE:
    default:
        error_report("Unexpected IOTLB message type");
        ret = -EINVAL;
        break;
    }

    return ret;
}