1 /*
2  * GlusterFS backend for QEMU
3  *
4  * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10 #include "qemu/osdep.h"
11 #include <glusterfs/api/glfs.h>
12 #include "block/block_int.h"
13 #include "qapi/error.h"
14 #include "qapi/qmp/qerror.h"
15 #include "qemu/uri.h"
16 #include "qemu/error-report.h"
17 
18 #define GLUSTER_OPT_FILENAME        "filename"
19 #define GLUSTER_OPT_VOLUME          "volume"
20 #define GLUSTER_OPT_PATH            "path"
21 #define GLUSTER_OPT_TYPE            "type"
22 #define GLUSTER_OPT_SERVER_PATTERN  "server."
23 #define GLUSTER_OPT_HOST            "host"
24 #define GLUSTER_OPT_PORT            "port"
25 #define GLUSTER_OPT_TO              "to"
26 #define GLUSTER_OPT_IPV4            "ipv4"
27 #define GLUSTER_OPT_IPV6            "ipv6"
28 #define GLUSTER_OPT_SOCKET          "socket"
29 #define GLUSTER_OPT_DEBUG           "debug"
30 #define GLUSTER_DEFAULT_PORT        24007
31 #define GLUSTER_DEBUG_DEFAULT       4
32 #define GLUSTER_DEBUG_MAX           9
33 
34 #define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
35 
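/*
 * Per-request completion state shared between the gfapi callback and the
 * request coroutine: the callback runs in a GlusterFS thread, stores the
 * result in @ret and schedules a bottom half in @aio_context, which then
 * re-enters @coroutine from the QEMU event loop.
 */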
36 typedef struct GlusterAIOCB {
37     int64_t size;
38     int ret;
39     QEMUBH *bh;
40     Coroutine *coroutine;
41     AioContext *aio_context;
42 } GlusterAIOCB;
43 
44 typedef struct BDRVGlusterState {
45     struct glfs *glfs;
46     struct glfs_fd *fd;
47     bool supports_seek_data;
48     int debug_level;
49 } BDRVGlusterState;
50 
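/*
 * Temporary state for the reopen sequence: the new connection and fd are
 * created in reopen_prepare and either swapped into BDRVGlusterState by
 * reopen_commit or torn down again by reopen_abort.
 */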
51 typedef struct BDRVGlusterReopenState {
52     struct glfs *glfs;
53     struct glfs_fd *fd;
54 } BDRVGlusterReopenState;
55 
56 
57 static QemuOptsList qemu_gluster_create_opts = {
58     .name = "qemu-gluster-create-opts",
59     .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
60     .desc = {
61         {
62             .name = BLOCK_OPT_SIZE,
63             .type = QEMU_OPT_SIZE,
64             .help = "Virtual disk size"
65         },
66         {
67             .name = BLOCK_OPT_PREALLOC,
68             .type = QEMU_OPT_STRING,
69             .help = "Preallocation mode (allowed values: off, full)"
70         },
71         {
72             .name = GLUSTER_OPT_DEBUG,
73             .type = QEMU_OPT_NUMBER,
74             .help = "Gluster log level, valid range is 0-9",
75         },
76         { /* end of list */ }
77     }
78 };
79 
80 static QemuOptsList runtime_opts = {
81     .name = "gluster",
82     .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
83     .desc = {
84         {
85             .name = GLUSTER_OPT_FILENAME,
86             .type = QEMU_OPT_STRING,
87             .help = "URL to the gluster image",
88         },
89         {
90             .name = GLUSTER_OPT_DEBUG,
91             .type = QEMU_OPT_NUMBER,
92             .help = "Gluster log level, valid range is 0-9",
93         },
94         { /* end of list */ }
95     },
96 };
97 
98 static QemuOptsList runtime_json_opts = {
99     .name = "gluster_json",
100     .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
101     .desc = {
102         {
103             .name = GLUSTER_OPT_VOLUME,
104             .type = QEMU_OPT_STRING,
105             .help = "name of the gluster volume where the VM image resides",
106         },
107         {
108             .name = GLUSTER_OPT_PATH,
109             .type = QEMU_OPT_STRING,
110             .help = "absolute path to the image file in the gluster volume",
111         },
112         {
113             .name = GLUSTER_OPT_DEBUG,
114             .type = QEMU_OPT_NUMBER,
115             .help = "Gluster log level, valid range is 0-9",
116         },
117         { /* end of list */ }
118     },
119 };
120 
121 static QemuOptsList runtime_type_opts = {
122     .name = "gluster_type",
123     .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head),
124     .desc = {
125         {
126             .name = GLUSTER_OPT_TYPE,
127             .type = QEMU_OPT_STRING,
128             .help = "tcp|unix",
129         },
130         { /* end of list */ }
131     },
132 };
133 
134 static QemuOptsList runtime_unix_opts = {
135     .name = "gluster_unix",
136     .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head),
137     .desc = {
138         {
139             .name = GLUSTER_OPT_SOCKET,
140             .type = QEMU_OPT_STRING,
141             .help = "socket file path",
142         },
143         { /* end of list */ }
144     },
145 };
146 
147 static QemuOptsList runtime_tcp_opts = {
148     .name = "gluster_tcp",
149     .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
150     .desc = {
151         {
152             .name = GLUSTER_OPT_TYPE,
153             .type = QEMU_OPT_STRING,
154             .help = "tcp|unix",
155         },
156         {
157             .name = GLUSTER_OPT_HOST,
158             .type = QEMU_OPT_STRING,
159             .help = "host address (hostname/ipv4/ipv6 addresses)",
160         },
161         {
162             .name = GLUSTER_OPT_PORT,
163             .type = QEMU_OPT_NUMBER,
164             .help = "port number on which glusterd is listening (default 24007)",
165         },
166         {
167             .name = "to",
168             .type = QEMU_OPT_NUMBER,
169             .help = "max port number, not supported by gluster",
170         },
171         {
172             .name = "ipv4",
173             .type = QEMU_OPT_BOOL,
174             .help = "ipv4 bool value, not supported by gluster",
175         },
176         {
177             .name = "ipv6",
178             .type = QEMU_OPT_BOOL,
179             .help = "ipv6 bool value, not supported by gluster",
180         },
181         { /* end of list */ }
182     },
183 };
184 
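/*
 * Split the path component of a gluster URI into the volume name and the
 * image path, e.g. "/testvol/dir/a.img" yields volume "testvol" and path
 * "dir/a.img".
 */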
185 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
186 {
187     char *p, *q;
188 
189     if (!path) {
190         return -EINVAL;
191     }
192 
193     /* volume */
194     p = q = path + strspn(path, "/");
195     p += strcspn(p, "/");
196     if (*p == '\0') {
197         return -EINVAL;
198     }
199     gconf->volume = g_strndup(q, p - q);
200 
201     /* path */
202     p += strspn(p, "/");
203     if (*p == '\0') {
204         return -EINVAL;
205     }
206     gconf->path = g_strdup(p);
207     return 0;
208 }
209 
210 /*
211  * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
212  *
213  * 'gluster' is the protocol.
214  *
215  * 'transport' specifies the transport type used to connect to the gluster
216  * management daemon (glusterd). Valid transport types are
217  * tcp or unix. If a transport type isn't specified, then tcp is assumed.
218  *
219  * 'host' specifies the host where the volume file specification for
220  * the given volume resides. This can be either a hostname or an ipv4 address.
221  * If the transport type is 'unix', then the 'host' field should not be
222  * specified. Instead, the 'socket' field needs to be populated with the
223  * path to the unix domain socket.
224  *
225  * 'port' is the port number on which glusterd is listening. This is optional
226  * and if not specified, QEMU will use the default port (24007). If the
227  * transport type is unix, then 'port' should not be
228  * specified.
229  *
230  * 'volume' is the name of the gluster volume which contains the VM image.
231  *
232  * 'path' is the path to the actual VM image that resides on the gluster volume.
233  *
234  * Examples:
235  *
236  * file=gluster://1.2.3.4/testvol/a.img
237  * file=gluster+tcp://1.2.3.4/testvol/a.img
238  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
239  * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
240  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
241  */
242 static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
243                                   const char *filename)
244 {
245     GlusterServer *gsconf;
246     URI *uri;
247     QueryParams *qp = NULL;
248     bool is_unix = false;
249     int ret = 0;
250 
251     uri = uri_parse(filename);
252     if (!uri) {
253         return -EINVAL;
254     }
255 
256     gconf->server = g_new0(GlusterServerList, 1);
257     gconf->server->value = gsconf = g_new0(GlusterServer, 1);
258 
259     /* transport */
260     if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
261         gsconf->type = GLUSTER_TRANSPORT_TCP;
262     } else if (!strcmp(uri->scheme, "gluster+tcp")) {
263         gsconf->type = GLUSTER_TRANSPORT_TCP;
264     } else if (!strcmp(uri->scheme, "gluster+unix")) {
265         gsconf->type = GLUSTER_TRANSPORT_UNIX;
266         is_unix = true;
267     } else if (!strcmp(uri->scheme, "gluster+rdma")) {
268         gsconf->type = GLUSTER_TRANSPORT_TCP;
269         error_report("Warning: rdma feature is not supported, falling "
270                      "back to tcp");
271     } else {
272         ret = -EINVAL;
273         goto out;
274     }
275 
276     ret = parse_volume_options(gconf, uri->path);
277     if (ret < 0) {
278         goto out;
279     }
280 
281     qp = query_params_parse(uri->query);
282     if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
283         ret = -EINVAL;
284         goto out;
285     }
286 
287     if (is_unix) {
288         if (uri->server || uri->port) {
289             ret = -EINVAL;
290             goto out;
291         }
292         if (strcmp(qp->p[0].name, "socket")) {
293             ret = -EINVAL;
294             goto out;
295         }
296         gsconf->u.q_unix.path = g_strdup(qp->p[0].value);
297     } else {
298         gsconf->u.tcp.host = g_strdup(uri->server ? uri->server : "localhost");
299         if (uri->port) {
300             gsconf->u.tcp.port = g_strdup_printf("%d", uri->port);
301         } else {
302             gsconf->u.tcp.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT);
303         }
304     }
305 
306 out:
307     if (qp) {
308         query_params_free(qp);
309     }
310     uri_free(uri);
311     return ret;
312 }
313 
314 static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
315                                            Error **errp)
316 {
317     struct glfs *glfs;
318     int ret;
319     int old_errno;
320     GlusterServerList *server;
321 
322     glfs = glfs_new(gconf->volume);
323     if (!glfs) {
324         goto out;
325     }
326 
327     for (server = gconf->server; server; server = server->next) {
328         if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
329             ret = glfs_set_volfile_server(glfs,
330                                    GlusterTransport_lookup[server->value->type],
331                                    server->value->u.q_unix.path, 0);
332         } else {
333             ret = glfs_set_volfile_server(glfs,
334                                    GlusterTransport_lookup[server->value->type],
335                                    server->value->u.tcp.host,
336                                    atoi(server->value->u.tcp.port));
337         }
338 
339         if (ret < 0) {
340             goto out;
341         }
342     }
343 
344     ret = glfs_set_logging(glfs, "-", gconf->debug_level);
345     if (ret < 0) {
346         goto out;
347     }
348 
349     ret = glfs_init(glfs);
350     if (ret) {
351         error_setg(errp, "Connection to Gluster volume %s, path %s failed",
352                    gconf->volume, gconf->path);
353         for (server = gconf->server; server; server = server->next) {
354             if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
355                 error_append_hint(errp, "hint: failed on socket %s ",
356                                   server->value->u.q_unix.path);
357             } else {
358                 error_append_hint(errp, "hint: failed on host %s and port %s ",
359                                   server->value->u.tcp.host,
360                                   server->value->u.tcp.port);
361             }
362         }
363 
364         error_append_hint(errp, "Please refer to gluster logs for more info\n");
365 
366         /* glfs_init sometimes doesn't set errno although docs suggest that */
367         if (errno == 0) {
368             errno = EINVAL;
369         }
370 
371         goto out;
372     }
373     return glfs;
374 
375 out:
376     if (glfs) {
377         old_errno = errno;
378         glfs_fini(glfs);
379         errno = old_errno;
380     }
381     return NULL;
382 }
383 
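/*
 * Map a transport name ("tcp" or "unix") to the GlusterTransport enum.
 * Returns GLUSTER_TRANSPORT__MAX if @opt is NULL or not a known transport.
 */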
384 static int qapi_enum_parse(const char *opt)
385 {
386     int i;
387 
388     if (!opt) {
389         return GLUSTER_TRANSPORT__MAX;
390     }
391 
392     for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) {
393         if (!strcmp(opt, GlusterTransport_lookup[i])) {
394             return i;
395         }
396     }
397 
398     return i;
399 }
400 
401 /*
402  * Convert the JSON-formatted command line options into QAPI.
403  */
404 static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
405                                   QDict *options, Error **errp)
406 {
407     QemuOpts *opts;
408     GlusterServer *gsconf;
409     GlusterServerList *curr = NULL;
410     QDict *backing_options = NULL;
411     Error *local_err = NULL;
412     char *str = NULL;
413     const char *ptr;
414     int num_servers;
415     int i;
416 
417     /* create opts info from runtime_json_opts list */
418     opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
419     qemu_opts_absorb_qdict(opts, options, &local_err);
420     if (local_err) {
421         goto out;
422     }
423 
424     num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN);
425     if (num_servers < 1) {
426         error_setg(&local_err, QERR_MISSING_PARAMETER, "server");
427         goto out;
428     }
429 
430     ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
431     if (!ptr) {
432         error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME);
433         goto out;
434     }
435     gconf->volume = g_strdup(ptr);
436 
437     ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
438     if (!ptr) {
439         error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH);
440         goto out;
441     }
442     gconf->path = g_strdup(ptr);
443     qemu_opts_del(opts);
444 
445     for (i = 0; i < num_servers; i++) {
446         str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
447         qdict_extract_subqdict(options, &backing_options, str);
448 
449         /* create opts info from runtime_type_opts list */
450         opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort);
451         qemu_opts_absorb_qdict(opts, backing_options, &local_err);
452         if (local_err) {
453             goto out;
454         }
455 
456         ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE);
457         gsconf = g_new0(GlusterServer, 1);
458         gsconf->type = qapi_enum_parse(ptr);
459         if (!ptr) {
460             error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
461             error_append_hint(&local_err, GERR_INDEX_HINT, i);
462             goto out;
463 
464         }
465         if (gsconf->type == GLUSTER_TRANSPORT__MAX) {
466             error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE,
467                        GLUSTER_OPT_TYPE, "tcp or unix");
468             error_append_hint(&local_err, GERR_INDEX_HINT, i);
469             goto out;
470         }
471         qemu_opts_del(opts);
472 
473         if (gsconf->type == GLUSTER_TRANSPORT_TCP) {
474             /* create opts info from runtime_tcp_opts list */
475             opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort);
476             qemu_opts_absorb_qdict(opts, backing_options, &local_err);
477             if (local_err) {
478                 goto out;
479             }
480 
481             ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST);
482             if (!ptr) {
483                 error_setg(&local_err, QERR_MISSING_PARAMETER,
484                            GLUSTER_OPT_HOST);
485                 error_append_hint(&local_err, GERR_INDEX_HINT, i);
486                 goto out;
487             }
488             gsconf->u.tcp.host = g_strdup(ptr);
489             ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT);
490             if (!ptr) {
491                 error_setg(&local_err, QERR_MISSING_PARAMETER,
492                            GLUSTER_OPT_PORT);
493                 error_append_hint(&local_err, GERR_INDEX_HINT, i);
494                 goto out;
495             }
496             gsconf->u.tcp.port = g_strdup(ptr);
497 
498             /* defend against unsupported fields in InetSocketAddress,
499              * i.e. @ipv4, @ipv6 and @to
500              */
501             ptr = qemu_opt_get(opts, GLUSTER_OPT_TO);
502             if (ptr) {
503                 gsconf->u.tcp.has_to = true;
504             }
505             ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4);
506             if (ptr) {
507                 gsconf->u.tcp.has_ipv4 = true;
508             }
509             ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6);
510             if (ptr) {
511                 gsconf->u.tcp.has_ipv6 = true;
512             }
513             if (gsconf->u.tcp.has_to) {
514                 error_setg(&local_err, "Parameter 'to' not supported");
515                 goto out;
516             }
517             if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) {
518                 error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported");
519                 goto out;
520             }
521             qemu_opts_del(opts);
522         } else {
523             /* create opts info from runtime_unix_opts list */
524             opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort);
525             qemu_opts_absorb_qdict(opts, backing_options, &local_err);
526             if (local_err) {
527                 goto out;
528             }
529 
530             ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET);
531             if (!ptr) {
532                 error_setg(&local_err, QERR_MISSING_PARAMETER,
533                            GLUSTER_OPT_SOCKET);
534                 error_append_hint(&local_err, GERR_INDEX_HINT, i);
535                 goto out;
536             }
537             gsconf->u.q_unix.path = g_strdup(ptr);
538             qemu_opts_del(opts);
539         }
540 
541         if (gconf->server == NULL) {
542             gconf->server = g_new0(GlusterServerList, 1);
543             gconf->server->value = gsconf;
544             curr = gconf->server;
545         } else {
546             curr->next = g_new0(GlusterServerList, 1);
547             curr->next->value = gsconf;
548             curr = curr->next;
549         }
550 
551         qdict_del(backing_options, str);
552         g_free(str);
553         str = NULL;
554     }
555 
556     return 0;
557 
558 out:
559     error_propagate(errp, local_err);
560     qemu_opts_del(opts);
561     if (str) {
562         qdict_del(backing_options, str);
563         g_free(str);
564     }
565     errno = EINVAL;
566     return -errno;
567 }
568 
569 static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
570                                       const char *filename,
571                                       QDict *options, Error **errp)
572 {
573     int ret;
574     if (filename) {
575         ret = qemu_gluster_parse_uri(gconf, filename);
576         if (ret < 0) {
577             error_setg(errp, "invalid URI");
578             error_append_hint(errp, "Usage: file=gluster[+transport]://"
579                                     "[host[:port]]/volume/path[?socket=...]\n");
580             errno = -ret;
581             return NULL;
582         }
583     } else {
584         ret = qemu_gluster_parse_json(gconf, options, errp);
585         if (ret < 0) {
586             error_append_hint(errp, "Usage: "
587                              "-drive driver=qcow2,file.driver=gluster,"
588                              "file.volume=testvol,file.path=/path/a.qcow2"
589                              "[,file.debug=9],file.server.0.type=tcp,"
590                              "file.server.0.host=1.2.3.4,"
591                              "file.server.0.port=24007,"
592                              "file.server.1.type=unix,"
593                              "file.server.1.socket=/var/run/glusterd.socket ..."
594                              "\n");
595             errno = -ret;
596             return NULL;
597         }
598 
599     }
600 
601     return qemu_gluster_glfs_init(gconf, errp);
602 }
603 
604 static void qemu_gluster_complete_aio(void *opaque)
605 {
606     GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
607 
608     qemu_bh_delete(acb->bh);
609     acb->bh = NULL;
610     qemu_coroutine_enter(acb->coroutine);
611 }
612 
613 /*
614  * AIO callback routine called from GlusterFS thread.
615  */
616 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
617 {
618     GlusterAIOCB *acb = (GlusterAIOCB *)arg;
619 
620     if (!ret || ret == acb->size) {
621         acb->ret = 0; /* Success */
622     } else if (ret < 0) {
623         acb->ret = -errno; /* Read/Write failed */
624     } else {
625         acb->ret = -EIO; /* Partial read/write - fail it */
626     }
627 
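    /* This callback runs in a GlusterFS thread, not in the AioContext of the
     * block device, so do not re-enter the coroutine directly; hand completion
     * off to a bottom half in the right context instead. */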
628     acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
629     qemu_bh_schedule(acb->bh);
630 }
631 
632 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
633 {
634     assert(open_flags != NULL);
635 
636     *open_flags |= O_BINARY;
637 
638     if (bdrv_flags & BDRV_O_RDWR) {
639         *open_flags |= O_RDWR;
640     } else {
641         *open_flags |= O_RDONLY;
642     }
643 
644     if ((bdrv_flags & BDRV_O_NOCACHE)) {
645         *open_flags |= O_DIRECT;
646     }
647 }
648 
649 /*
650  * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of
651  * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used.
652  * - Corrected versions return -1 and set errno to EINVAL.
653  * - Versions that support SEEK_DATA/HOLE correctly will return -1 and set
654  *   errno to ENXIO when SEEK_DATA is called with a position of EOF.
655  */
656 static bool qemu_gluster_test_seek(struct glfs_fd *fd)
657 {
658     off_t ret, eof;
659 
660     eof = glfs_lseek(fd, 0, SEEK_END);
661     if (eof < 0) {
662         /* this should never occur */
663         return false;
664     }
665 
666     /* this should always fail with ENXIO if SEEK_DATA is supported */
667     ret = glfs_lseek(fd, eof, SEEK_DATA);
668     return (ret < 0) && (errno == ENXIO);
669 }
670 
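/*
 * Open an image on a gluster volume: parse either the URI filename or the
 * blockdev-style options, establish the glfs connection, open the file and
 * probe whether SEEK_DATA/SEEK_HOLE can be used for block-status queries.
 */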
671 static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
672                              int bdrv_flags, Error **errp)
673 {
674     BDRVGlusterState *s = bs->opaque;
675     int open_flags = 0;
676     int ret = 0;
677     BlockdevOptionsGluster *gconf = NULL;
678     QemuOpts *opts;
679     Error *local_err = NULL;
680     const char *filename;
681 
682     opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
683     qemu_opts_absorb_qdict(opts, options, &local_err);
684     if (local_err) {
685         error_propagate(errp, local_err);
686         ret = -EINVAL;
687         goto out;
688     }
689 
690     filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
691 
692     s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
693                                          GLUSTER_DEBUG_DEFAULT);
694     if (s->debug_level < 0) {
695         s->debug_level = 0;
696     } else if (s->debug_level > GLUSTER_DEBUG_MAX) {
697         s->debug_level = GLUSTER_DEBUG_MAX;
698     }
699 
700     gconf = g_new0(BlockdevOptionsGluster, 1);
701     gconf->debug_level = s->debug_level;
702     gconf->has_debug_level = true;
703     s->glfs = qemu_gluster_init(gconf, filename, options, errp);
704     if (!s->glfs) {
705         ret = -errno;
706         goto out;
707     }
708 
709 #ifdef CONFIG_GLUSTERFS_XLATOR_OPT
710     /* Without this, if fsync fails for a recoverable reason (for instance,
711      * ENOSPC), gluster will dump its cache, preventing retries.  This means
712      * almost certain data loss.  Not all gluster versions support the
713      * 'resync-failed-syncs-after-fsync' key value, but there is no way to
714      * discover at runtime whether it is supported (this API returns success
715      * for unknown key/value pairs). */
716     ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
717                                           "resync-failed-syncs-after-fsync",
718                                           "on");
719     if (ret < 0) {
720         error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
721         ret = -errno;
722         goto out;
723     }
724 #endif
725 
726     qemu_gluster_parse_flags(bdrv_flags, &open_flags);
727 
728     s->fd = glfs_open(s->glfs, gconf->path, open_flags);
729     if (!s->fd) {
730         ret = -errno;
        goto out;
731     }
732 
733     s->supports_seek_data = qemu_gluster_test_seek(s->fd);
734 
735 out:
736     qemu_opts_del(opts);
737     qapi_free_BlockdevOptionsGluster(gconf);
738     if (!ret) {
739         return ret;
740     }
741     if (s->fd) {
742         glfs_close(s->fd);
743     }
744     if (s->glfs) {
745         glfs_fini(s->glfs);
746     }
747     return ret;
748 }
749 
750 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
751                                        BlockReopenQueue *queue, Error **errp)
752 {
753     int ret = 0;
754     BDRVGlusterState *s;
755     BDRVGlusterReopenState *reop_s;
756     BlockdevOptionsGluster *gconf;
757     int open_flags = 0;
758 
759     assert(state != NULL);
760     assert(state->bs != NULL);
761 
762     s = state->bs->opaque;
763 
764     state->opaque = g_new0(BDRVGlusterReopenState, 1);
765     reop_s = state->opaque;
766 
767     qemu_gluster_parse_flags(state->flags, &open_flags);
768 
769     gconf = g_new0(BlockdevOptionsGluster, 1);
770     gconf->debug_level = s->debug_level;
771     gconf->has_debug_level = true;
772     reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
773     if (reop_s->glfs == NULL) {
774         ret = -errno;
775         goto exit;
776     }
777 
778 #ifdef CONFIG_GLUSTERFS_XLATOR_OPT
779     ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
780                                  "resync-failed-syncs-after-fsync", "on");
781     if (ret < 0) {
782         error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
783         ret = -errno;
784         goto exit;
785     }
786 #endif
787 
788     reop_s->fd = glfs_open(reop_s->glfs, gconf->path, open_flags);
789     if (reop_s->fd == NULL) {
790         /* reop_s->glfs will be cleaned up in _abort */
791         ret = -errno;
792         goto exit;
793     }
794 
795 exit:
796     /* state->opaque will be freed in either the _abort or _commit */
797     qapi_free_BlockdevOptionsGluster(gconf);
798     return ret;
799 }
800 
801 static void qemu_gluster_reopen_commit(BDRVReopenState *state)
802 {
803     BDRVGlusterReopenState *reop_s = state->opaque;
804     BDRVGlusterState *s = state->bs->opaque;
805 
806 
807     /* close the old */
808     if (s->fd) {
809         glfs_close(s->fd);
810     }
811     if (s->glfs) {
812         glfs_fini(s->glfs);
813     }
814 
815     /* use the newly opened image / connection */
816     s->fd         = reop_s->fd;
817     s->glfs       = reop_s->glfs;
818 
819     g_free(state->opaque);
820     state->opaque = NULL;
821 
822     return;
823 }
824 
825 
826 static void qemu_gluster_reopen_abort(BDRVReopenState *state)
827 {
828     BDRVGlusterReopenState *reop_s = state->opaque;
829 
830     if (reop_s == NULL) {
831         return;
832     }
833 
834     if (reop_s->fd) {
835         glfs_close(reop_s->fd);
836     }
837 
838     if (reop_s->glfs) {
839         glfs_fini(reop_s->glfs);
840     }
841 
842     g_free(state->opaque);
843     state->opaque = NULL;
844 
845     return;
846 }
847 
848 #ifdef CONFIG_GLUSTERFS_ZEROFILL
849 static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
850                                                       int64_t offset,
851                                                       int size,
852                                                       BdrvRequestFlags flags)
853 {
854     int ret;
855     GlusterAIOCB acb;
856     BDRVGlusterState *s = bs->opaque;
857 
858     acb.size = size;
859     acb.ret = 0;
860     acb.coroutine = qemu_coroutine_self();
861     acb.aio_context = bdrv_get_aio_context(bs);
862 
863     ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
864     if (ret < 0) {
865         return -errno;
866     }
867 
868     qemu_coroutine_yield();
869     return acb.ret;
870 }
871 
872 static inline bool gluster_supports_zerofill(void)
873 {
874     return true;
875 }
876 
877 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
878                                         int64_t size)
879 {
880     return glfs_zerofill(fd, offset, size);
881 }
882 
883 #else
884 static inline bool gluster_supports_zerofill(void)
885 {
886     return false;
887 }
888 
889 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
890                                         int64_t size)
891 {
892     return 0;
893 }
894 #endif
895 
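/*
 * Image creation: connect with a temporary glfs instance, create the file,
 * truncate it to the requested size and, when prealloc=full is requested and
 * the zerofill API is available, preallocate it with glfs_zerofill().
 */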
896 static int qemu_gluster_create(const char *filename,
897                                QemuOpts *opts, Error **errp)
898 {
899     BlockdevOptionsGluster *gconf;
900     struct glfs *glfs;
901     struct glfs_fd *fd;
902     int ret = 0;
903     int prealloc = 0;
904     int64_t total_size = 0;
905     char *tmp = NULL;
906 
907     gconf = g_new0(BlockdevOptionsGluster, 1);
908     gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
909                                                  GLUSTER_DEBUG_DEFAULT);
910     if (gconf->debug_level < 0) {
911         gconf->debug_level = 0;
912     } else if (gconf->debug_level > GLUSTER_DEBUG_MAX) {
913         gconf->debug_level = GLUSTER_DEBUG_MAX;
914     }
915     gconf->has_debug_level = true;
916 
917     glfs = qemu_gluster_init(gconf, filename, NULL, errp);
918     if (!glfs) {
919         ret = -errno;
920         goto out;
921     }
922 
923     total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
924                           BDRV_SECTOR_SIZE);
925 
926     tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
927     if (!tmp || !strcmp(tmp, "off")) {
928         prealloc = 0;
929     } else if (!strcmp(tmp, "full") && gluster_supports_zerofill()) {
930         prealloc = 1;
931     } else {
932         error_setg(errp, "Invalid preallocation mode: '%s'"
933                          " or GlusterFS doesn't support zerofill API", tmp);
934         ret = -EINVAL;
935         goto out;
936     }
937 
938     fd = glfs_creat(glfs, gconf->path,
939                     O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
940     if (!fd) {
941         ret = -errno;
942     } else {
943         if (!glfs_ftruncate(fd, total_size)) {
944             if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
945                 ret = -errno;
946             }
947         } else {
948             ret = -errno;
949         }
950 
951         if (glfs_close(fd) != 0) {
952             ret = -errno;
953         }
954     }
955 out:
956     g_free(tmp);
957     qapi_free_BlockdevOptionsGluster(gconf);
958     if (glfs) {
959         glfs_fini(glfs);
960     }
961     return ret;
962 }
963 
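/*
 * Common read/write path: convert the sector-based request into a byte
 * offset, issue the asynchronous gfapi call and yield until
 * gluster_finish_aiocb() wakes the coroutine with the result.
 */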
964 static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
965                                            int64_t sector_num, int nb_sectors,
966                                            QEMUIOVector *qiov, int write)
967 {
968     int ret;
969     GlusterAIOCB acb;
970     BDRVGlusterState *s = bs->opaque;
971     size_t size = nb_sectors * BDRV_SECTOR_SIZE;
972     off_t offset = sector_num * BDRV_SECTOR_SIZE;
973 
974     acb.size = size;
975     acb.ret = 0;
976     acb.coroutine = qemu_coroutine_self();
977     acb.aio_context = bdrv_get_aio_context(bs);
978 
979     if (write) {
980         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
981                                  gluster_finish_aiocb, &acb);
982     } else {
983         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
984                                 gluster_finish_aiocb, &acb);
985     }
986 
987     if (ret < 0) {
988         return -errno;
989     }
990 
991     qemu_coroutine_yield();
992     return acb.ret;
993 }
994 
995 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
996 {
997     int ret;
998     BDRVGlusterState *s = bs->opaque;
999 
1000     ret = glfs_ftruncate(s->fd, offset);
1001     if (ret < 0) {
1002         return -errno;
1003     }
1004 
1005     return 0;
1006 }
1007 
1008 static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
1009                                               int64_t sector_num,
1010                                               int nb_sectors,
1011                                               QEMUIOVector *qiov)
1012 {
1013     return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
1014 }
1015 
1016 static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
1017                                                int64_t sector_num,
1018                                                int nb_sectors,
1019                                                QEMUIOVector *qiov)
1020 {
1021     return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
1022 }
1023 
1024 static void qemu_gluster_close(BlockDriverState *bs)
1025 {
1026     BDRVGlusterState *s = bs->opaque;
1027 
1028     if (s->fd) {
1029         glfs_close(s->fd);
1030         s->fd = NULL;
1031     }
1032     glfs_fini(s->glfs);
1033 }
1034 
1035 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
1036 {
1037     int ret;
1038     GlusterAIOCB acb;
1039     BDRVGlusterState *s = bs->opaque;
1040 
1041     acb.size = 0;
1042     acb.ret = 0;
1043     acb.coroutine = qemu_coroutine_self();
1044     acb.aio_context = bdrv_get_aio_context(bs);
1045 
1046     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
1047     if (ret < 0) {
1048         ret = -errno;
1049         goto error;
1050     }
1051 
1052     qemu_coroutine_yield();
1053     if (acb.ret < 0) {
1054         ret = acb.ret;
1055         goto error;
1056     }
1057 
1058     return acb.ret;
1059 
1060 error:
1061     /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain their cache
1062      * after an fsync failure, so we have no way of allowing the guest to safely
1063      * continue.  Gluster versions prior to 3.5.6 don't retain the cache
1064      * either, but will invalidate the fd on error, so this is again our only
1065      * option.
1066      *
1067      * The 'resync-failed-syncs-after-fsync' xlator option for the
1068      * write-behind cache will cause later gluster versions to retain their
1069      * cache after error, so long as the fd remains open.  However, we
1070      * currently have no way of knowing if this option is supported.
1071      *
1072      * TODO: Once gluster provides a way for us to determine if the option
1073      * is supported, bypass the closure and setting drv to NULL.  */
1074     qemu_gluster_close(bs);
1075     bs->drv = NULL;
1076     return ret;
1077 }
1078 
1079 #ifdef CONFIG_GLUSTERFS_DISCARD
1080 static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
1081                                                  int64_t offset, int size)
1082 {
1083     int ret;
1084     GlusterAIOCB acb;
1085     BDRVGlusterState *s = bs->opaque;
1086 
1087     acb.size = 0;
1088     acb.ret = 0;
1089     acb.coroutine = qemu_coroutine_self();
1090     acb.aio_context = bdrv_get_aio_context(bs);
1091 
1092     ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
1093     if (ret < 0) {
1094         return -errno;
1095     }
1096 
1097     qemu_coroutine_yield();
1098     return acb.ret;
1099 }
1100 #endif
1101 
1102 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
1103 {
1104     BDRVGlusterState *s = bs->opaque;
1105     int64_t ret;
1106 
1107     ret = glfs_lseek(s->fd, 0, SEEK_END);
1108     if (ret < 0) {
1109         return -errno;
1110     } else {
1111         return ret;
1112     }
1113 }
1114 
1115 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
1116 {
1117     BDRVGlusterState *s = bs->opaque;
1118     struct stat st;
1119     int ret;
1120 
1121     ret = glfs_fstat(s->fd, &st);
1122     if (ret < 0) {
1123         return -errno;
1124     } else {
1125         return st.st_blocks * 512;
1126     }
1127 }
1128 
1129 static int qemu_gluster_has_zero_init(BlockDriverState *bs)
1130 {
1131     /* GlusterFS volume could be backed by a block device */
1132     return 0;
1133 }
1134 
1135 /*
1136  * Find allocation range in @bs around offset @start.
1137  * May change underlying file descriptor's file offset.
1138  * If @start is not in a hole, store @start in @data, and the
1139  * beginning of the next hole in @hole, and return 0.
1140  * If @start is in a non-trailing hole, store @start in @hole and the
1141  * beginning of the next non-hole in @data, and return 0.
1142  * If @start is in a trailing hole or beyond EOF, return -ENXIO.
1143  * If we can't find out, return a negative errno other than -ENXIO.
1144  *
1145  * (Shamefully copied from raw-posix.c, with only minuscule adaptations.)
1146  */
1147 static int find_allocation(BlockDriverState *bs, off_t start,
1148                            off_t *data, off_t *hole)
1149 {
1150     BDRVGlusterState *s = bs->opaque;
1151     off_t offs;
1152 
1153     if (!s->supports_seek_data) {
1154         return -ENOTSUP;
1155     }
1156 
1157     /*
1158      * SEEK_DATA cases:
1159      * D1. offs == start: start is in data
1160      * D2. offs > start: start is in a hole, next data at offs
1161      * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
1162      *                              or start is beyond EOF
1163      *     If the latter happens, the file has been truncated behind
1164      *     our back since we opened it.  All bets are off then.
1165      *     Treating like a trailing hole is simplest.
1166      * D4. offs < 0, errno != ENXIO: we learned nothing
1167      */
1168     offs = glfs_lseek(s->fd, start, SEEK_DATA);
1169     if (offs < 0) {
1170         return -errno;          /* D3 or D4 */
1171     }
1172     assert(offs >= start);
1173 
1174     if (offs > start) {
1175         /* D2: in hole, next data at offs */
1176         *hole = start;
1177         *data = offs;
1178         return 0;
1179     }
1180 
1181     /* D1: in data, end not yet known */
1182 
1183     /*
1184      * SEEK_HOLE cases:
1185      * H1. offs == start: start is in a hole
1186      *     If this happens here, a hole has been dug behind our back
1187      *     since the previous lseek().
1188      * H2. offs > start: either start is in data, next hole at offs,
1189      *                   or start is in trailing hole, EOF at offs
1190      *     Linux treats trailing holes like any other hole: offs ==
1191      *     start.  Solaris seeks to EOF instead: offs > start (blech).
1192      *     If that happens here, a hole has been dug behind our back
1193      *     since the previous lseek().
1194      * H3. offs < 0, errno = ENXIO: start is beyond EOF
1195      *     If this happens, the file has been truncated behind our
1196      *     back since we opened it.  Treat it like a trailing hole.
1197      * H4. offs < 0, errno != ENXIO: we learned nothing
1198      *     Pretend we know nothing at all, i.e. "forget" about D1.
1199      */
1200     offs = glfs_lseek(s->fd, start, SEEK_HOLE);
1201     if (offs < 0) {
1202         return -errno;          /* D1 and (H3 or H4) */
1203     }
1204     assert(offs >= start);
1205 
1206     if (offs > start) {
1207         /*
1208          * D1 and H2: either in data, next hole at offs, or it was in
1209          * data but is now in a trailing hole.  In the latter case,
1210          * all bets are off.  Treating it as if it there was data all
1211          * the way to EOF is safe, so simply do that.
1212          */
1213         *data = start;
1214         *hole = offs;
1215         return 0;
1216     }
1217 
1218     /* D1 and H1 */
1219     return -EBUSY;
1220 }
1221 
1222 /*
1223  * Returns the allocation status of the specified sectors.
1224  *
1225  * If 'sector_num' is beyond the end of the disk image the return value is 0
1226  * and 'pnum' is set to 0.
1227  *
1228  * 'pnum' is set to the number of sectors (including and immediately following
1229  * the specified sector) that are known to be in the same
1230  * allocated/unallocated state.
1231  *
1232  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1233  * beyond the end of the disk image it will be clamped.
1234  *
1235  * (Based on raw_co_get_block_status() from raw-posix.c.)
1236  */
1237 static int64_t coroutine_fn qemu_gluster_co_get_block_status(
1238         BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
1239         BlockDriverState **file)
1240 {
1241     BDRVGlusterState *s = bs->opaque;
1242     off_t start, data = 0, hole = 0;
1243     int64_t total_size;
1244     int ret = -EINVAL;
1245 
1246     if (!s->fd) {
1247         return ret;
1248     }
1249 
1250     start = sector_num * BDRV_SECTOR_SIZE;
1251     total_size = bdrv_getlength(bs);
1252     if (total_size < 0) {
1253         return total_size;
1254     } else if (start >= total_size) {
1255         *pnum = 0;
1256         return 0;
1257     } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
1258         nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
1259     }
1260 
1261     ret = find_allocation(bs, start, &data, &hole);
1262     if (ret == -ENXIO) {
1263         /* Trailing hole */
1264         *pnum = nb_sectors;
1265         ret = BDRV_BLOCK_ZERO;
1266     } else if (ret < 0) {
1267         /* No info available, so pretend there are no holes */
1268         *pnum = nb_sectors;
1269         ret = BDRV_BLOCK_DATA;
1270     } else if (data == start) {
1271         /* On a data extent, compute sectors to the end of the extent,
1272          * possibly including a partial sector at EOF. */
1273         *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE));
1274         ret = BDRV_BLOCK_DATA;
1275     } else {
1276         /* On a hole, compute sectors to the beginning of the next extent.  */
1277         assert(hole == start);
1278         *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
1279         ret = BDRV_BLOCK_ZERO;
1280     }
1281 
1282     *file = bs;
1283 
1284     return ret | BDRV_BLOCK_OFFSET_VALID | start;
1285 }
1286 
1287 
1288 static BlockDriver bdrv_gluster = {
1289     .format_name                  = "gluster",
1290     .protocol_name                = "gluster",
1291     .instance_size                = sizeof(BDRVGlusterState),
1292     .bdrv_needs_filename          = false,
1293     .bdrv_file_open               = qemu_gluster_open,
1294     .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1295     .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1296     .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1297     .bdrv_close                   = qemu_gluster_close,
1298     .bdrv_create                  = qemu_gluster_create,
1299     .bdrv_getlength               = qemu_gluster_getlength,
1300     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1301     .bdrv_truncate                = qemu_gluster_truncate,
1302     .bdrv_co_readv                = qemu_gluster_co_readv,
1303     .bdrv_co_writev               = qemu_gluster_co_writev,
1304     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1305     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1306 #ifdef CONFIG_GLUSTERFS_DISCARD
1307     .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1308 #endif
1309 #ifdef CONFIG_GLUSTERFS_ZEROFILL
1310     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1311 #endif
1312     .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1313     .create_opts                  = &qemu_gluster_create_opts,
1314 };
1315 
1316 static BlockDriver bdrv_gluster_tcp = {
1317     .format_name                  = "gluster",
1318     .protocol_name                = "gluster+tcp",
1319     .instance_size                = sizeof(BDRVGlusterState),
1320     .bdrv_needs_filename          = false,
1321     .bdrv_file_open               = qemu_gluster_open,
1322     .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1323     .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1324     .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1325     .bdrv_close                   = qemu_gluster_close,
1326     .bdrv_create                  = qemu_gluster_create,
1327     .bdrv_getlength               = qemu_gluster_getlength,
1328     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1329     .bdrv_truncate                = qemu_gluster_truncate,
1330     .bdrv_co_readv                = qemu_gluster_co_readv,
1331     .bdrv_co_writev               = qemu_gluster_co_writev,
1332     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1333     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1334 #ifdef CONFIG_GLUSTERFS_DISCARD
1335     .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1336 #endif
1337 #ifdef CONFIG_GLUSTERFS_ZEROFILL
1338     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1339 #endif
1340     .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1341     .create_opts                  = &qemu_gluster_create_opts,
1342 };
1343 
1344 static BlockDriver bdrv_gluster_unix = {
1345     .format_name                  = "gluster",
1346     .protocol_name                = "gluster+unix",
1347     .instance_size                = sizeof(BDRVGlusterState),
1348     .bdrv_needs_filename          = true,
1349     .bdrv_file_open               = qemu_gluster_open,
1350     .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1351     .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1352     .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1353     .bdrv_close                   = qemu_gluster_close,
1354     .bdrv_create                  = qemu_gluster_create,
1355     .bdrv_getlength               = qemu_gluster_getlength,
1356     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1357     .bdrv_truncate                = qemu_gluster_truncate,
1358     .bdrv_co_readv                = qemu_gluster_co_readv,
1359     .bdrv_co_writev               = qemu_gluster_co_writev,
1360     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1361     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1362 #ifdef CONFIG_GLUSTERFS_DISCARD
1363     .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1364 #endif
1365 #ifdef CONFIG_GLUSTERFS_ZEROFILL
1366     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1367 #endif
1368     .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1369     .create_opts                  = &qemu_gluster_create_opts,
1370 };
1371 
1372 /* rdma is deprecated (actually never supported for volfile fetch).
1373  * Let's maintain it for protocol compatibility, to make sure things
1374  * won't break immediately. For now, gluster+rdma will fall back to gluster+tcp
1375  * protocol with a warning.
1376  * TODO: remove gluster+rdma interface support
1377  */
1378 static BlockDriver bdrv_gluster_rdma = {
1379     .format_name                  = "gluster",
1380     .protocol_name                = "gluster+rdma",
1381     .instance_size                = sizeof(BDRVGlusterState),
1382     .bdrv_needs_filename          = true,
1383     .bdrv_file_open               = qemu_gluster_open,
1384     .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1385     .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1386     .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1387     .bdrv_close                   = qemu_gluster_close,
1388     .bdrv_create                  = qemu_gluster_create,
1389     .bdrv_getlength               = qemu_gluster_getlength,
1390     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1391     .bdrv_truncate                = qemu_gluster_truncate,
1392     .bdrv_co_readv                = qemu_gluster_co_readv,
1393     .bdrv_co_writev               = qemu_gluster_co_writev,
1394     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1395     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1396 #ifdef CONFIG_GLUSTERFS_DISCARD
1397     .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1398 #endif
1399 #ifdef CONFIG_GLUSTERFS_ZEROFILL
1400     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1401 #endif
1402     .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1403     .create_opts                  = &qemu_gluster_create_opts,
1404 };
1405 
1406 static void bdrv_gluster_init(void)
1407 {
1408     bdrv_register(&bdrv_gluster_rdma);
1409     bdrv_register(&bdrv_gluster_unix);
1410     bdrv_register(&bdrv_gluster_tcp);
1411     bdrv_register(&bdrv_gluster);
1412 }
1413 
1414 block_init(bdrv_gluster_init);
1415