xref: /qemu/hw/9pfs/xen-9p-backend.c (revision e7b3af81)
/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */

#include "qemu/osdep.h"

#include "hw/hw.h"
#include "hw/9pfs/9p.h"
#include "hw/xen/xen_backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "fsdev/qemu-fsdev.h"

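/*
 * Limits advertised to the frontend via xenstore (see xen_9pfs_alloc):
 * protocol version "1", at most 8 rings per device, and a data area of
 * at most 2^8 pages per ring (1 MiB with 4 KiB pages, split evenly
 * between the in and out directions).
 */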
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8

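/*
 * Per-ring state: the frontend's grant reference and event channel, the
 * mapped interface page (intf) and data area (data), pointers to the
 * in/out halves of the data area (ring), a scratch iovec array (sg),
 * and local copies of the ring indices for the request in flight.
 */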
typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle   *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;

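/* Per-device state: one shared V9fsState served by num_rings rings. */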
typedef struct Xen9pfsDev {
    struct XenDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;

static void xen_9pfs_disconnect(struct XenDevice *xendev);

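/*
 * Gather the free space of the "in" (backend-to-frontend) ring into at
 * most two iovecs.  Ring indices are free-running; xen_9pfs_mask()
 * reduces them modulo the power-of-two ring size, so, assuming 4 KiB
 * pages, with ring_order 1 (ring size 4096) index 4100 masks to offset
 * 4.  If the masked producer sits below the masked consumer the free
 * area is contiguous; otherwise it wraps and is split at the end of the
 * buffer.
 */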
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}

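/*
 * Gather the current request's data on the "out" (frontend-to-backend)
 * ring into at most two iovecs, starting at the masked consumer index.
 * ring->out_size was taken from the 9p header by xen_9pfs_receive().
 */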
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}

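/*
 * Transport hook: marshal a reply into the "in" ring.  A negative
 * return typically means the reply did not fit in the available space,
 * in which case the device is switched to XenbusStateClosing and
 * disconnected rather than writing past the ring.
 */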
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}

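/*
 * Transport hook: unmarshal a request from the "out" ring; decode
 * failures tear the device down just like marshalling failures above.
 */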
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}

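/*
 * Transport hook: build the iovec array for a request's payload.
 * ring->sg is reused for each request and freed in
 * xen_9pfs_push_and_notify(); a single scratch array per ring suffices
 * because only one request per ring is handled at a time.
 */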
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}

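/*
 * Transport hook: build the iovec array for a reply's payload and
 * verify that the ring's free space can actually hold "size" bytes,
 * disconnecting the device if it cannot.
 */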
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
                buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }

    *piov = ring->sg;
    *pniov = num;
}

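/*
 * Transport hook: complete a request.  Publish the consumer index for
 * the request just consumed and the producer index for the reply, wake
 * the frontend through the event channel, and reschedule the bottom
 * half in case another request is already queued.
 */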
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}

static const V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};

static int xen_9pfs_init(struct XenDevice *xendev)
{
    return 0;
}

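/*
 * Pull one request off the "out" ring.  inprogress enforces the
 * one-request-per-ring invariant: it is set here once a complete header
 * is available and cleared in xen_9pfs_push_and_notify() after the
 * reply has been pushed.
 */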
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}

static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}

static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}

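/*
 * Unbind the event channels only; grant mappings and bottom halves stay
 * around until xen_9pfs_free().
 */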
static void xen_9pfs_disconnect(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                    NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}

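/*
 * Release all per-device resources.  rings[0].evtchndev doubles as a
 * "still connected" flag, so disconnect first if it is still set.
 */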
static int xen_9pfs_free(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].data,
                                    (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].intf,
                                    1);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}

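/*
 * Connect to the frontend: read the number of rings from xenstore, map
 * each ring's interface page and data area from the frontend's grants,
 * bind an event channel per ring, then synthesise a "local" fsdev from
 * the backend's xenstore keys and realize the common 9p device on top
 * of it.
 */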
static int xen_9pfs_connect(struct XenDevice *xendev)
{
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }

    xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing));
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf =
            xen_be_map_grant_ref(&xen_9pdev->xendev,
                                 xen_9pdev->rings[i].ref,
                                 PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data =
            xen_be_map_grant_refs(&xen_9pdev->xendev,
                                  xen_9pdev->rings[i].intf->ref,
                                  (1 << ring_order),
                                  PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n",
                      xen_9pdev->rings[i].local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }

    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
            s->fsconf.tag,
            1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev);
    v9fs_device_realize_common(s, &xen_9p_transport, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}

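/*
 * Advertise the backend's limits in xenstore before the frontend
 * initialises.
 */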
static void xen_9pfs_alloc(struct XenDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}

struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};
481