xref: /qemu/blockdev.c (revision 226419d6)
1 /*
2  * QEMU host block devices
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or
7  * later.  See the COPYING file in the top-level directory.
8  *
9  * This file incorporates work covered by the following copyright and
10  * permission notice:
11  *
12  * Copyright (c) 2003-2008 Fabrice Bellard
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this software and associated documentation files (the "Software"), to deal
16  * in the Software without restriction, including without limitation the rights
17  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18  * copies of the Software, and to permit persons to whom the Software is
19  * furnished to do so, subject to the following conditions:
20  *
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
30  * THE SOFTWARE.
31  */
32 
33 #include "qemu/osdep.h"
34 #include "sysemu/block-backend.h"
35 #include "sysemu/blockdev.h"
36 #include "hw/block/block.h"
37 #include "block/blockjob.h"
38 #include "block/throttle-groups.h"
39 #include "monitor/monitor.h"
40 #include "qemu/error-report.h"
41 #include "qemu/option.h"
42 #include "qemu/config-file.h"
43 #include "qapi/qmp/types.h"
44 #include "qapi-visit.h"
45 #include "qapi/qmp/qerror.h"
46 #include "qapi/qmp-output-visitor.h"
47 #include "qapi/util.h"
48 #include "sysemu/sysemu.h"
49 #include "block/block_int.h"
50 #include "qmp-commands.h"
51 #include "trace.h"
52 #include "sysemu/arch_init.h"
53 
54 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
55     QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
56 
57 static const char *const if_name[IF_COUNT] = {
58     [IF_NONE] = "none",
59     [IF_IDE] = "ide",
60     [IF_SCSI] = "scsi",
61     [IF_FLOPPY] = "floppy",
62     [IF_PFLASH] = "pflash",
63     [IF_MTD] = "mtd",
64     [IF_SD] = "sd",
65     [IF_VIRTIO] = "virtio",
66     [IF_XEN] = "xen",
67 };
68 
69 static int if_max_devs[IF_COUNT] = {
70     /*
71      * Do not change these numbers!  They govern how drive option
72      * index maps to unit and bus.  That mapping is ABI.
73      *
74      * All controllers used to implement if=T drives need to support
75      * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
76      * Otherwise, some index values map to "impossible" bus, unit
77      * values.
78      *
79      * For instance, if you change [IF_SCSI] to 255, -drive
80      * if=scsi,index=12 no longer means bus=1,unit=5, but
81      * bus=0,unit=12.  With an lsi53c895a controller (7 units max),
82      * the drive can't be set up.  Regression.
83      */
84     [IF_IDE] = 2,
85     [IF_SCSI] = 7,
86 };
87 
88 /**
89  * Boards may call this to offer board-by-board overrides
90  * of the default, global values.
91  */
92 void override_max_devs(BlockInterfaceType type, int max_devs)
93 {
94     BlockBackend *blk;
95     DriveInfo *dinfo;
96 
97     if (max_devs <= 0) {
98         return;
99     }
100 
101     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
102         dinfo = blk_legacy_dinfo(blk);
103         if (dinfo->type == type) {
104             fprintf(stderr, "Cannot override units-per-bus property of"
105                     " the %s interface, because a drive of that type has"
106                     " already been added.\n", if_name[type]);
107             g_assert_not_reached();
108         }
109     }
110 
111     if_max_devs[type] = max_devs;
112 }
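
/*
 * Illustrative usage (hypothetical board code, not part of this file): a
 * machine whose IDE controller supports four units per bus could call
 *
 *     override_max_devs(IF_IDE, 4);
 *
 * from its machine init code, before any if=ide drive has been created.
 */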
113 
114 /*
115  * We automatically delete the drive when a device using it gets
116  * unplugged.  Questionable feature, but we can't just drop it.
117  * Device models call blockdev_mark_auto_del() to schedule the
118  * automatic deletion, and generic qdev code calls blockdev_auto_del()
119  * when deletion is actually safe.
120  */
121 void blockdev_mark_auto_del(BlockBackend *blk)
122 {
123     DriveInfo *dinfo = blk_legacy_dinfo(blk);
124     BlockDriverState *bs = blk_bs(blk);
125     AioContext *aio_context;
126 
127     if (!dinfo) {
128         return;
129     }
130 
131     if (bs) {
132         aio_context = bdrv_get_aio_context(bs);
133         aio_context_acquire(aio_context);
134 
135         if (bs->job) {
136             block_job_cancel(bs->job);
137         }
138 
139         aio_context_release(aio_context);
140     }
141 
142     dinfo->auto_del = 1;
143 }
144 
145 void blockdev_auto_del(BlockBackend *blk)
146 {
147     DriveInfo *dinfo = blk_legacy_dinfo(blk);
148 
149     if (dinfo && dinfo->auto_del) {
150         blk_unref(blk);
151     }
152 }
153 
154 /**
155  * Returns the current mapping of how many units per bus
156  * a particular interface can support.
157  *
158  *  A positive integer indicates n units per bus.
159  *  0 implies the mapping has not been established.
160  * -1 indicates an invalid BlockInterfaceType was given.
161  */
162 int drive_get_max_devs(BlockInterfaceType type)
163 {
164     if (type >= IF_IDE && type < IF_COUNT) {
165         return if_max_devs[type];
166     }
167 
168     return -1;
169 }
170 
171 static int drive_index_to_bus_id(BlockInterfaceType type, int index)
172 {
173     int max_devs = if_max_devs[type];
174     return max_devs ? index / max_devs : 0;
175 }
176 
177 static int drive_index_to_unit_id(BlockInterfaceType type, int index)
178 {
179     int max_devs = if_max_devs[type];
180     return max_devs ? index % max_devs : index;
181 }
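
/*
 * Worked example of the mapping above: with if_max_devs[IF_IDE] == 2,
 * index 5 maps to bus 2, unit 1; with if_max_devs[IF_SCSI] == 7, index 12
 * maps to bus 1, unit 5.  When if_max_devs[type] is 0, the bus is always 0
 * and the unit equals the index.
 */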
182 
183 QemuOpts *drive_def(const char *optstr)
184 {
185     return qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
186 }
187 
188 QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
189                     const char *optstr)
190 {
191     QemuOpts *opts;
192 
193     opts = drive_def(optstr);
194     if (!opts) {
195         return NULL;
196     }
197     if (type != IF_DEFAULT) {
198         qemu_opt_set(opts, "if", if_name[type], &error_abort);
199     }
200     if (index >= 0) {
201         qemu_opt_set_number(opts, "index", index, &error_abort);
202     }
203     if (file) {
204         qemu_opt_set(opts, "file", file, &error_abort);
    }
205     return opts;
206 }
207 
208 DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
209 {
210     BlockBackend *blk;
211     DriveInfo *dinfo;
212 
213     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
214         dinfo = blk_legacy_dinfo(blk);
215         if (dinfo && dinfo->type == type
216             && dinfo->bus == bus && dinfo->unit == unit) {
217             return dinfo;
218         }
219     }
220 
221     return NULL;
222 }
223 
224 bool drive_check_orphaned(void)
225 {
226     BlockBackend *blk;
227     DriveInfo *dinfo;
228     bool rs = false;
229 
230     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
231         dinfo = blk_legacy_dinfo(blk);
232         /* A backend with no attached device may be an oversight, */
233         /* unless it is a default drive or was created with if=none. */
234         if (!blk_get_attached_dev(blk) && !dinfo->is_default &&
235             dinfo->type != IF_NONE) {
236             fprintf(stderr, "Warning: Orphaned drive without device: "
237                     "id=%s,file=%s,if=%s,bus=%d,unit=%d\n",
238                     blk_name(blk), blk_bs(blk) ? blk_bs(blk)->filename : "",
239                     if_name[dinfo->type], dinfo->bus, dinfo->unit);
240             rs = true;
241         }
242     }
243 
244     return rs;
245 }
246 
247 DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
248 {
249     return drive_get(type,
250                      drive_index_to_bus_id(type, index),
251                      drive_index_to_unit_id(type, index));
252 }
253 
254 int drive_get_max_bus(BlockInterfaceType type)
255 {
256     int max_bus;
257     BlockBackend *blk;
258     DriveInfo *dinfo;
259 
260     max_bus = -1;
261     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
262         dinfo = blk_legacy_dinfo(blk);
263         if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
264             max_bus = dinfo->bus;
265         }
266     }
267     return max_bus;
268 }
269 
270 /* Get a block device.  This should only be used for single-drive devices
271    (e.g. SD/Floppy/MTD).  Multi-disk devices (scsi/ide) should use the
272    appropriate bus.  */
273 DriveInfo *drive_get_next(BlockInterfaceType type)
274 {
275     static int next_block_unit[IF_COUNT];
276 
277     return drive_get(type, 0, next_block_unit[type]++);
278 }
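
/*
 * For example, a board that instantiates several SD slots may call
 * drive_get_next(IF_SD) once per slot: successive calls return the drives
 * at bus 0, unit 0, 1, 2, ... of that interface, or NULL once no further
 * drive is defined.
 */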
279 
280 static void bdrv_format_print(void *opaque, const char *name)
281 {
282     error_printf(" %s", name);
283 }
284 
285 typedef struct {
286     QEMUBH *bh;
287     BlockDriverState *bs;
288 } BDRVPutRefBH;
289 
290 static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
291 {
292     if (!strcmp(buf, "ignore")) {
293         return BLOCKDEV_ON_ERROR_IGNORE;
294     } else if (!is_read && !strcmp(buf, "enospc")) {
295         return BLOCKDEV_ON_ERROR_ENOSPC;
296     } else if (!strcmp(buf, "stop")) {
297         return BLOCKDEV_ON_ERROR_STOP;
298     } else if (!strcmp(buf, "report")) {
299         return BLOCKDEV_ON_ERROR_REPORT;
300     } else {
301         error_setg(errp, "'%s' invalid %s error action",
302                    buf, is_read ? "read" : "write");
303         return -1;
304     }
305 }
306 
307 static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
308                                   Error **errp)
309 {
310     const QListEntry *entry;
311     for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
312         switch (qobject_type(entry->value)) {
313 
314         case QTYPE_QSTRING: {
315             unsigned long long length;
316             const char *str = qstring_get_str(qobject_to_qstring(entry->value));
317             if (parse_uint_full(str, &length, 10) == 0 &&
318                 length > 0 && length <= UINT_MAX) {
319                 block_acct_add_interval(stats, (unsigned) length);
320             } else {
321                 error_setg(errp, "Invalid interval length: %s", str);
322                 return false;
323             }
324             break;
325         }
326 
327         case QTYPE_QINT: {
328             int64_t length = qint_get_int(qobject_to_qint(entry->value));
329             if (length > 0 && length <= UINT_MAX) {
330                 block_acct_add_interval(stats, (unsigned) length);
331             } else {
332                 error_setg(errp, "Invalid interval length: %" PRId64, length);
333                 return false;
334             }
335             break;
336         }
337 
338         default:
339             error_setg(errp, "The specification of stats-intervals is invalid");
340             return false;
341         }
342     }
343     return true;
344 }
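
/*
 * Usage sketch (assuming the command-line spelling of the option):
 * "-drive ...,stats-intervals.0=60,stats-intervals.1=3600" is split into a
 * two-entry QList by blockdev_init() below, and this function then enables
 * 60-second and 3600-second accounting intervals on the backend's
 * BlockAcctStats.
 */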
345 
346 typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;
347 
348 /* All parameters but @opts are optional and may be set to NULL. */
349 static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
350     const char **throttling_group, ThrottleConfig *throttle_cfg,
351     BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
352 {
353     const char *discard;
354     Error *local_error = NULL;
355     const char *aio;
356 
357     if (bdrv_flags) {
358         if (!qemu_opt_get_bool(opts, "read-only", false)) {
359             *bdrv_flags |= BDRV_O_RDWR;
360         }
361         if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
362             *bdrv_flags |= BDRV_O_COPY_ON_READ;
363         }
364 
365         if ((discard = qemu_opt_get(opts, "discard")) != NULL) {
366             if (bdrv_parse_discard_flags(discard, bdrv_flags) != 0) {
367                 error_setg(errp, "Invalid discard option");
368                 return;
369             }
370         }
371 
372         if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
373             if (!strcmp(aio, "native")) {
374                 *bdrv_flags |= BDRV_O_NATIVE_AIO;
375             } else if (!strcmp(aio, "threads")) {
376                 /* this is the default */
377             } else {
378                error_setg(errp, "invalid aio option");
379                return;
380             }
381         }
382     }
383 
384     /* disk I/O throttling */
385     if (throttling_group) {
386         *throttling_group = qemu_opt_get(opts, "throttling.group");
387     }
388 
389     if (throttle_cfg) {
390         throttle_config_init(throttle_cfg);
391         throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
392             qemu_opt_get_number(opts, "throttling.bps-total", 0);
393         throttle_cfg->buckets[THROTTLE_BPS_READ].avg  =
394             qemu_opt_get_number(opts, "throttling.bps-read", 0);
395         throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
396             qemu_opt_get_number(opts, "throttling.bps-write", 0);
397         throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
398             qemu_opt_get_number(opts, "throttling.iops-total", 0);
399         throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
400             qemu_opt_get_number(opts, "throttling.iops-read", 0);
401         throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
402             qemu_opt_get_number(opts, "throttling.iops-write", 0);
403 
404         throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
405             qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
406         throttle_cfg->buckets[THROTTLE_BPS_READ].max  =
407             qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
408         throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
409             qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
410         throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
411             qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
412         throttle_cfg->buckets[THROTTLE_OPS_READ].max =
413             qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
414         throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
415             qemu_opt_get_number(opts, "throttling.iops-write-max", 0);
416 
417         throttle_cfg->buckets[THROTTLE_BPS_TOTAL].burst_length =
418             qemu_opt_get_number(opts, "throttling.bps-total-max-length", 1);
419         throttle_cfg->buckets[THROTTLE_BPS_READ].burst_length  =
420             qemu_opt_get_number(opts, "throttling.bps-read-max-length", 1);
421         throttle_cfg->buckets[THROTTLE_BPS_WRITE].burst_length =
422             qemu_opt_get_number(opts, "throttling.bps-write-max-length", 1);
423         throttle_cfg->buckets[THROTTLE_OPS_TOTAL].burst_length =
424             qemu_opt_get_number(opts, "throttling.iops-total-max-length", 1);
425         throttle_cfg->buckets[THROTTLE_OPS_READ].burst_length =
426             qemu_opt_get_number(opts, "throttling.iops-read-max-length", 1);
427         throttle_cfg->buckets[THROTTLE_OPS_WRITE].burst_length =
428             qemu_opt_get_number(opts, "throttling.iops-write-max-length", 1);
429 
430         throttle_cfg->op_size =
431             qemu_opt_get_number(opts, "throttling.iops-size", 0);
432 
433         if (!throttle_is_valid(throttle_cfg, errp)) {
434             return;
435         }
436     }
437 
438     if (detect_zeroes) {
439         *detect_zeroes =
440             qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
441                             qemu_opt_get(opts, "detect-zeroes"),
442                             BLOCKDEV_DETECT_ZEROES_OPTIONS__MAX,
443                             BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
444                             &local_error);
445         if (local_error) {
446             error_propagate(errp, local_error);
447             return;
448         }
449 
450         if (bdrv_flags &&
451             *detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
452             !(*bdrv_flags & BDRV_O_UNMAP))
453         {
454             error_setg(errp, "setting detect-zeroes to unmap is not allowed "
455                              "without setting discard operation to unmap");
456             return;
457         }
458     }
459 }
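
/*
 * Illustrative example: "-drive ...,throttling.iops-total=1000,
 * throttling.iops-total-max=2000,throttling.iops-total-max-length=60"
 * fills the THROTTLE_OPS_TOTAL bucket above with avg=1000, max=2000 and
 * burst_length=60, subject to the throttle_is_valid() check.
 */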
460 
461 /* Takes the ownership of bs_opts */
462 static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
463                                    Error **errp)
464 {
465     const char *buf;
466     int bdrv_flags = 0;
467     int on_read_error, on_write_error;
468     bool account_invalid, account_failed;
469     BlockBackend *blk;
470     BlockDriverState *bs;
471     ThrottleConfig cfg;
472     int snapshot = 0;
473     Error *error = NULL;
474     QemuOpts *opts;
475     QDict *interval_dict = NULL;
476     QList *interval_list = NULL;
477     const char *id;
478     BlockdevDetectZeroesOptions detect_zeroes =
479         BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
480     const char *throttling_group = NULL;
481 
482     /* Check common options by copying from bs_opts to opts; all other options
483      * stay in bs_opts for processing by bdrv_open(). */
484     id = qdict_get_try_str(bs_opts, "id");
485     opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error);
486     if (error) {
487         error_propagate(errp, error);
488         goto err_no_opts;
489     }
490 
491     qemu_opts_absorb_qdict(opts, bs_opts, &error);
492     if (error) {
493         error_propagate(errp, error);
494         goto early_err;
495     }
496 
497     if (id) {
498         qdict_del(bs_opts, "id");
499     }
500 
501     /* extract parameters */
502     snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
503 
504     account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true);
505     account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true);
506 
507     qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
508     qdict_array_split(interval_dict, &interval_list);
509 
510     if (qdict_size(interval_dict) != 0) {
511         error_setg(errp, "Invalid option stats-intervals.%s",
512                    qdict_first(interval_dict)->key);
513         goto early_err;
514     }
515 
516     extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
517                                     &detect_zeroes, &error);
518     if (error) {
519         error_propagate(errp, error);
520         goto early_err;
521     }
522 
523     if ((buf = qemu_opt_get(opts, "format")) != NULL) {
524         if (is_help_option(buf)) {
525             error_printf("Supported formats:");
526             bdrv_iterate_format(bdrv_format_print, NULL);
527             error_printf("\n");
528             goto early_err;
529         }
530 
531         if (qdict_haskey(bs_opts, "driver")) {
532             error_setg(errp, "Cannot specify both 'driver' and 'format'");
533             goto early_err;
534         }
535         qdict_put(bs_opts, "driver", qstring_from_str(buf));
536     }
537 
538     on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
539     if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
540         on_write_error = parse_block_error_action(buf, false, &error);
541         if (error) {
542             error_propagate(errp, error);
543             goto early_err;
544         }
545     }
546 
547     on_read_error = BLOCKDEV_ON_ERROR_REPORT;
548     if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
549         on_read_error = parse_block_error_action(buf, true, &error);
550         if (error) {
551             error_propagate(errp, error);
552             goto early_err;
553         }
554     }
555 
556     if (snapshot) {
557         bdrv_flags |= BDRV_O_SNAPSHOT;
558     }
559 
560     /* init */
561     if ((!file || !*file) && !qdict_size(bs_opts)) {
562         BlockBackendRootState *blk_rs;
563 
564         blk = blk_new(qemu_opts_id(opts), errp);
565         if (!blk) {
566             goto early_err;
567         }
568 
569         blk_rs = blk_get_root_state(blk);
570         blk_rs->open_flags    = bdrv_flags;
571         blk_rs->read_only     = !(bdrv_flags & BDRV_O_RDWR);
572         blk_rs->detect_zeroes = detect_zeroes;
573 
574         if (throttle_enabled(&cfg)) {
575             if (!throttling_group) {
576                 throttling_group = blk_name(blk);
577             }
578             blk_rs->throttle_group = g_strdup(throttling_group);
579             blk_rs->throttle_state = throttle_group_incref(throttling_group);
580             blk_rs->throttle_state->cfg = cfg;
581         }
582 
583         QDECREF(bs_opts);
584     } else {
585         if (file && !*file) {
586             file = NULL;
587         }
588 
589         /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
590          * with other callers) rather than what we want as the real defaults.
591          * Apply the defaults here instead. */
592         qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_WB, "on");
593         qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
594         qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
595 
596         if (snapshot) {
597             /* always use cache=unsafe with snapshot */
598             qdict_put(bs_opts, BDRV_OPT_CACHE_WB, qstring_from_str("on"));
599             qdict_put(bs_opts, BDRV_OPT_CACHE_DIRECT, qstring_from_str("off"));
600             qdict_put(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, qstring_from_str("on"));
601         }
602 
603         if (runstate_check(RUN_STATE_INMIGRATE)) {
604             bdrv_flags |= BDRV_O_INACTIVE;
605         }
606 
607         blk = blk_new_open(qemu_opts_id(opts), file, NULL, bs_opts, bdrv_flags,
608                            errp);
609         if (!blk) {
610             goto err_no_bs_opts;
611         }
612         bs = blk_bs(blk);
613 
614         bs->detect_zeroes = detect_zeroes;
615 
616         /* disk I/O throttling */
617         if (throttle_enabled(&cfg)) {
618             if (!throttling_group) {
619                 throttling_group = blk_name(blk);
620             }
621             bdrv_io_limits_enable(bs, throttling_group);
622             bdrv_set_io_limits(bs, &cfg);
623         }
624 
625         if (bdrv_key_required(bs)) {
626             autostart = 0;
627         }
628 
629         block_acct_init(blk_get_stats(blk), account_invalid, account_failed);
630 
631         if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
632             blk_unref(blk);
633             blk = NULL;
634             goto err_no_bs_opts;
635         }
636     }
637 
638     blk_set_on_error(blk, on_read_error, on_write_error);
639 
640 err_no_bs_opts:
641     qemu_opts_del(opts);
642     QDECREF(interval_dict);
643     QDECREF(interval_list);
644     return blk;
645 
646 early_err:
647     qemu_opts_del(opts);
648     QDECREF(interval_dict);
649     QDECREF(interval_list);
650 err_no_opts:
651     QDECREF(bs_opts);
652     return NULL;
653 }
654 
655 static QemuOptsList qemu_root_bds_opts;
656 
657 /* Takes the ownership of bs_opts */
658 static BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
659 {
660     BlockDriverState *bs;
661     QemuOpts *opts;
662     Error *local_error = NULL;
663     BlockdevDetectZeroesOptions detect_zeroes;
664     int ret;
665     int bdrv_flags = 0;
666 
667     opts = qemu_opts_create(&qemu_root_bds_opts, NULL, 1, errp);
668     if (!opts) {
669         goto fail;
670     }
671 
672     qemu_opts_absorb_qdict(opts, bs_opts, &local_error);
673     if (local_error) {
674         error_propagate(errp, local_error);
675         goto fail;
676     }
677 
678     extract_common_blockdev_options(opts, &bdrv_flags, NULL, NULL,
679                                     &detect_zeroes, &local_error);
680     if (local_error) {
681         error_propagate(errp, local_error);
682         goto fail;
683     }
684 
685     if (runstate_check(RUN_STATE_INMIGRATE)) {
686         bdrv_flags |= BDRV_O_INACTIVE;
687     }
688 
689     bs = NULL;
690     ret = bdrv_open(&bs, NULL, NULL, bs_opts, bdrv_flags, errp);
691     if (ret < 0) {
692         goto fail_no_bs_opts;
693     }
694 
695     bs->detect_zeroes = detect_zeroes;
696 
697 fail_no_bs_opts:
698     qemu_opts_del(opts);
699     return bs;
700 
701 fail:
702     qemu_opts_del(opts);
703     QDECREF(bs_opts);
704     return NULL;
705 }
706 
707 void blockdev_close_all_bdrv_states(void)
708 {
709     BlockDriverState *bs, *next_bs;
710 
711     QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
712         AioContext *ctx = bdrv_get_aio_context(bs);
713 
714         aio_context_acquire(ctx);
715         bdrv_unref(bs);
716         aio_context_release(ctx);
717     }
718 }
719 
720 static void qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
721                             Error **errp)
722 {
723     const char *value;
724 
725     value = qemu_opt_get(opts, from);
726     if (value) {
727         if (qemu_opt_find(opts, to)) {
728             error_setg(errp, "'%s' and its alias '%s' can't be used at the "
729                        "same time", to, from);
730             return;
731         }
732     }
733 
734     /* rename all items in opts */
735     while ((value = qemu_opt_get(opts, from))) {
736         qemu_opt_set(opts, to, value, &error_abort);
737         qemu_opt_unset(opts, from);
738     }
739 }
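
/*
 * For example, given legacy options "iops=100,readonly=on", the calls
 * qemu_opt_rename(opts, "iops", "throttling.iops-total", ...) and
 * qemu_opt_rename(opts, "readonly", "read-only", ...) leave the options as
 * "throttling.iops-total=100,read-only=on" (see the opt_renames table in
 * drive_new() below).
 */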
740 
741 QemuOptsList qemu_legacy_drive_opts = {
742     .name = "drive",
743     .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
744     .desc = {
745         {
746             .name = "bus",
747             .type = QEMU_OPT_NUMBER,
748             .help = "bus number",
749         },{
750             .name = "unit",
751             .type = QEMU_OPT_NUMBER,
752             .help = "unit number (i.e. lun for scsi)",
753         },{
754             .name = "index",
755             .type = QEMU_OPT_NUMBER,
756             .help = "index number",
757         },{
758             .name = "media",
759             .type = QEMU_OPT_STRING,
760             .help = "media type (disk, cdrom)",
761         },{
762             .name = "if",
763             .type = QEMU_OPT_STRING,
764             .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
765         },{
766             .name = "cyls",
767             .type = QEMU_OPT_NUMBER,
768             .help = "number of cylinders (ide disk geometry)",
769         },{
770             .name = "heads",
771             .type = QEMU_OPT_NUMBER,
772             .help = "number of heads (ide disk geometry)",
773         },{
774             .name = "secs",
775             .type = QEMU_OPT_NUMBER,
776             .help = "number of sectors (ide disk geometry)",
777         },{
778             .name = "trans",
779             .type = QEMU_OPT_STRING,
780             .help = "chs translation (auto, lba, none)",
781         },{
782             .name = "boot",
783             .type = QEMU_OPT_BOOL,
784             .help = "(deprecated, ignored)",
785         },{
786             .name = "addr",
787             .type = QEMU_OPT_STRING,
788             .help = "pci address (virtio only)",
789         },{
790             .name = "serial",
791             .type = QEMU_OPT_STRING,
792             .help = "disk serial number",
793         },{
794             .name = "file",
795             .type = QEMU_OPT_STRING,
796             .help = "file name",
797         },
798 
799         /* Options that are passed on, but have special semantics with -drive */
800         {
801             .name = "read-only",
802             .type = QEMU_OPT_BOOL,
803             .help = "open drive file as read-only",
804         },{
805             .name = "rerror",
806             .type = QEMU_OPT_STRING,
807             .help = "read error action",
808         },{
809             .name = "werror",
810             .type = QEMU_OPT_STRING,
811             .help = "write error action",
812         },{
813             .name = "copy-on-read",
814             .type = QEMU_OPT_BOOL,
815             .help = "copy read data from backing file into image file",
816         },
817 
818         { /* end of list */ }
819     },
820 };
821 
822 DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
823 {
824     const char *value;
825     BlockBackend *blk;
826     DriveInfo *dinfo = NULL;
827     QDict *bs_opts;
828     QemuOpts *legacy_opts;
829     DriveMediaType media = MEDIA_DISK;
830     BlockInterfaceType type;
831     int cyls, heads, secs, translation;
832     int max_devs, bus_id, unit_id, index;
833     const char *devaddr;
834     const char *werror, *rerror;
835     bool read_only = false;
836     bool copy_on_read;
837     const char *serial;
838     const char *filename;
839     Error *local_err = NULL;
840     int i;
841 
842     /* Change legacy command line options into QMP ones */
843     static const struct {
844         const char *from;
845         const char *to;
846     } opt_renames[] = {
847         { "iops",           "throttling.iops-total" },
848         { "iops_rd",        "throttling.iops-read" },
849         { "iops_wr",        "throttling.iops-write" },
850 
851         { "bps",            "throttling.bps-total" },
852         { "bps_rd",         "throttling.bps-read" },
853         { "bps_wr",         "throttling.bps-write" },
854 
855         { "iops_max",       "throttling.iops-total-max" },
856         { "iops_rd_max",    "throttling.iops-read-max" },
857         { "iops_wr_max",    "throttling.iops-write-max" },
858 
859         { "bps_max",        "throttling.bps-total-max" },
860         { "bps_rd_max",     "throttling.bps-read-max" },
861         { "bps_wr_max",     "throttling.bps-write-max" },
862 
863         { "iops_size",      "throttling.iops-size" },
864 
865         { "group",          "throttling.group" },
866 
867         { "readonly",       "read-only" },
868     };
869 
870     for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
871         qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to,
872                         &local_err);
873         if (local_err) {
874             error_report_err(local_err);
875             return NULL;
876         }
877     }
878 
879     value = qemu_opt_get(all_opts, "cache");
880     if (value) {
881         int flags = 0;
882 
883         if (bdrv_parse_cache_flags(value, &flags) != 0) {
884             error_report("invalid cache option");
885             return NULL;
886         }
887 
888         /* Specific options take precedence */
889         if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
890             qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
891                               !!(flags & BDRV_O_CACHE_WB), &error_abort);
892         }
893         if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
894             qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
895                               !!(flags & BDRV_O_NOCACHE), &error_abort);
896         }
897         if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
898             qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
899                               !!(flags & BDRV_O_NO_FLUSH), &error_abort);
900         }
901         qemu_opt_unset(all_opts, "cache");
902     }
903 
904     /* Get a QDict for processing the options */
905     bs_opts = qdict_new();
906     qemu_opts_to_qdict(all_opts, bs_opts);
907 
908     legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
909                                    &error_abort);
910     qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err);
911     if (local_err) {
912         error_report_err(local_err);
913         goto fail;
914     }
915 
916     /* Deprecated option boot=[on|off] */
917     if (qemu_opt_get(legacy_opts, "boot") != NULL) {
918         fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
919                 "ignored. Future versions will reject this parameter. Please "
920                 "update your scripts.\n");
921     }
922 
923     /* Media type */
924     value = qemu_opt_get(legacy_opts, "media");
925     if (value) {
926         if (!strcmp(value, "disk")) {
927             media = MEDIA_DISK;
928         } else if (!strcmp(value, "cdrom")) {
929             media = MEDIA_CDROM;
930             read_only = true;
931         } else {
932             error_report("'%s' invalid media", value);
933             goto fail;
934         }
935     }
936 
937     /* copy-on-read is disabled with a warning for read-only devices */
938     read_only |= qemu_opt_get_bool(legacy_opts, "read-only", false);
939     copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);
940 
941     if (read_only && copy_on_read) {
942         error_report("warning: disabling copy-on-read on read-only drive");
943         copy_on_read = false;
944     }
945 
946     qdict_put(bs_opts, "read-only",
947               qstring_from_str(read_only ? "on" : "off"));
948     qdict_put(bs_opts, "copy-on-read",
949               qstring_from_str(copy_on_read ? "on" : "off"));
950 
951     /* Controller type */
952     value = qemu_opt_get(legacy_opts, "if");
953     if (value) {
954         for (type = 0;
955              type < IF_COUNT && strcmp(value, if_name[type]);
956              type++) {
957         }
958         if (type == IF_COUNT) {
959             error_report("unsupported bus type '%s'", value);
960             goto fail;
961         }
962     } else {
963         type = block_default_type;
964     }
965 
966     /* Geometry */
967     cyls  = qemu_opt_get_number(legacy_opts, "cyls", 0);
968     heads = qemu_opt_get_number(legacy_opts, "heads", 0);
969     secs  = qemu_opt_get_number(legacy_opts, "secs", 0);
970 
971     if (cyls || heads || secs) {
972         if (cyls < 1) {
973             error_report("invalid physical cyls number");
974             goto fail;
975         }
976         if (heads < 1) {
977             error_report("invalid physical heads number");
978             goto fail;
979         }
980         if (secs < 1) {
981             error_report("invalid physical secs number");
982             goto fail;
983         }
984     }
985 
986     translation = BIOS_ATA_TRANSLATION_AUTO;
987     value = qemu_opt_get(legacy_opts, "trans");
988     if (value != NULL) {
989         if (!cyls) {
990             error_report("'%s' trans must be used with cyls, heads and secs",
991                          value);
992             goto fail;
993         }
994         if (!strcmp(value, "none")) {
995             translation = BIOS_ATA_TRANSLATION_NONE;
996         } else if (!strcmp(value, "lba")) {
997             translation = BIOS_ATA_TRANSLATION_LBA;
998         } else if (!strcmp(value, "large")) {
999             translation = BIOS_ATA_TRANSLATION_LARGE;
1000         } else if (!strcmp(value, "rechs")) {
1001             translation = BIOS_ATA_TRANSLATION_RECHS;
1002         } else if (!strcmp(value, "auto")) {
1003             translation = BIOS_ATA_TRANSLATION_AUTO;
1004         } else {
1005             error_report("'%s' invalid translation type", value);
1006             goto fail;
1007         }
1008     }
1009 
1010     if (media == MEDIA_CDROM) {
1011         if (cyls || secs || heads) {
1012             error_report("CHS can't be set with media=cdrom");
1013             goto fail;
1014         }
1015     }
1016 
1017     /* Device address specified by bus/unit or index.
1018      * If none was specified, try to find the first free one. */
1019     bus_id  = qemu_opt_get_number(legacy_opts, "bus", 0);
1020     unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
1021     index   = qemu_opt_get_number(legacy_opts, "index", -1);
1022 
1023     max_devs = if_max_devs[type];
1024 
1025     if (index != -1) {
1026         if (bus_id != 0 || unit_id != -1) {
1027             error_report("index cannot be used with bus and unit");
1028             goto fail;
1029         }
1030         bus_id = drive_index_to_bus_id(type, index);
1031         unit_id = drive_index_to_unit_id(type, index);
1032     }
1033 
1034     if (unit_id == -1) {
1035         unit_id = 0;
1036         while (drive_get(type, bus_id, unit_id) != NULL) {
1037             unit_id++;
1038             if (max_devs && unit_id >= max_devs) {
1039                 unit_id -= max_devs;
1040                 bus_id++;
1041             }
1042         }
1043     }
1044 
1045     if (max_devs && unit_id >= max_devs) {
1046         error_report("unit %d too big (max is %d)", unit_id, max_devs - 1);
1047         goto fail;
1048     }
1049 
1050     if (drive_get(type, bus_id, unit_id) != NULL) {
1051         error_report("drive with bus=%d, unit=%d (index=%d) exists",
1052                      bus_id, unit_id, index);
1053         goto fail;
1054     }
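
    /*
     * Worked example: for if=ide (two units per bus), index=3 resolves to
     * bus=1, unit=1.  When neither index nor unit is given, the loop above
     * picks the first free unit on the requested bus (default 0) and moves
     * on to the next bus once unit_id reaches max_devs.
     */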
1055 
1056     /* Serial number */
1057     serial = qemu_opt_get(legacy_opts, "serial");
1058 
1059     /* no id supplied -> create one */
1060     if (qemu_opts_id(all_opts) == NULL) {
1061         char *new_id;
1062         const char *mediastr = "";
1063         if (type == IF_IDE || type == IF_SCSI) {
1064             mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
1065         }
1066         if (max_devs) {
1067             new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
1068                                      mediastr, unit_id);
1069         } else {
1070             new_id = g_strdup_printf("%s%s%i", if_name[type],
1071                                      mediastr, unit_id);
1072         }
1073         qdict_put(bs_opts, "id", qstring_from_str(new_id));
1074         g_free(new_id);
1075     }
1076 
1077     /* Add virtio block device */
1078     devaddr = qemu_opt_get(legacy_opts, "addr");
1079     if (devaddr && type != IF_VIRTIO) {
1080         error_report("addr is not supported by this bus type");
1081         goto fail;
1082     }
1083 
1084     if (type == IF_VIRTIO) {
1085         QemuOpts *devopts;
1086         devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
1087                                    &error_abort);
1088         if (arch_type == QEMU_ARCH_S390X) {
1089             qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort);
1090         } else {
1091             qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort);
1092         }
1093         qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
1094                      &error_abort);
1095         if (devaddr) {
1096             qemu_opt_set(devopts, "addr", devaddr, &error_abort);
1097         }
1098     }
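
    /*
     * E.g. "-drive if=virtio,file=disk.img" implicitly creates the
     * equivalent of "-device virtio-blk-pci,drive=<id of this drive>"
     * here (virtio-blk-ccw on s390x), forwarding "addr" when it was given.
     */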
1099 
1100     filename = qemu_opt_get(legacy_opts, "file");
1101 
1102     /* Check werror/rerror compatibility with if=... */
1103     werror = qemu_opt_get(legacy_opts, "werror");
1104     if (werror != NULL) {
1105         if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
1106             type != IF_NONE) {
1107             error_report("werror is not supported by this bus type");
1108             goto fail;
1109         }
1110         qdict_put(bs_opts, "werror", qstring_from_str(werror));
1111     }
1112 
1113     rerror = qemu_opt_get(legacy_opts, "rerror");
1114     if (rerror != NULL) {
1115         if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
1116             type != IF_NONE) {
1117             error_report("rerror is not supported by this bus type");
1118             goto fail;
1119         }
1120         qdict_put(bs_opts, "rerror", qstring_from_str(rerror));
1121     }
1122 
1123     /* Actual block device init: Functionality shared with blockdev-add */
1124     blk = blockdev_init(filename, bs_opts, &local_err);
1125     bs_opts = NULL;
1126     if (!blk) {
1127         if (local_err) {
1128             error_report_err(local_err);
1129         }
1130         goto fail;
1131     } else {
1132         assert(!local_err);
1133     }
1134 
1135     /* Create legacy DriveInfo */
1136     dinfo = g_malloc0(sizeof(*dinfo));
1137     dinfo->opts = all_opts;
1138 
1139     dinfo->cyls = cyls;
1140     dinfo->heads = heads;
1141     dinfo->secs = secs;
1142     dinfo->trans = translation;
1143 
1144     dinfo->type = type;
1145     dinfo->bus = bus_id;
1146     dinfo->unit = unit_id;
1147     dinfo->devaddr = devaddr;
1148     dinfo->serial = g_strdup(serial);
1149 
1150     blk_set_legacy_dinfo(blk, dinfo);
1151 
1152     switch (type) {
1153     case IF_IDE:
1154     case IF_SCSI:
1155     case IF_XEN:
1156     case IF_NONE:
1157         dinfo->media_cd = media == MEDIA_CDROM;
1158         break;
1159     default:
1160         break;
1161     }
1162 
1163 fail:
1164     qemu_opts_del(legacy_opts);
1165     QDECREF(bs_opts);
1166     return dinfo;
1167 }
1168 
1169 void hmp_commit(Monitor *mon, const QDict *qdict)
1170 {
1171     const char *device = qdict_get_str(qdict, "device");
1172     BlockBackend *blk;
1173     int ret;
1174 
1175     if (!strcmp(device, "all")) {
1176         ret = bdrv_commit_all();
1177     } else {
1178         BlockDriverState *bs;
1179         AioContext *aio_context;
1180 
1181         blk = blk_by_name(device);
1182         if (!blk) {
1183             monitor_printf(mon, "Device '%s' not found\n", device);
1184             return;
1185         }
1186         if (!blk_is_available(blk)) {
1187             monitor_printf(mon, "Device '%s' has no medium\n", device);
1188             return;
1189         }
1190 
1191         bs = blk_bs(blk);
1192         aio_context = bdrv_get_aio_context(bs);
1193         aio_context_acquire(aio_context);
1194 
1195         ret = bdrv_commit(bs);
1196 
1197         aio_context_release(aio_context);
1198     }
1199     if (ret < 0) {
1200         monitor_printf(mon, "'commit' error for '%s': %s\n", device,
1201                        strerror(-ret));
1202     }
1203 }
1204 
1205 static void blockdev_do_action(TransactionAction *action, Error **errp)
1206 {
1207     TransactionActionList list;
1208 
1209     list.value = action;
1210     list.next = NULL;
1211     qmp_transaction(&list, false, NULL, errp);
1212 }
1213 
1214 void qmp_blockdev_snapshot_sync(bool has_device, const char *device,
1215                                 bool has_node_name, const char *node_name,
1216                                 const char *snapshot_file,
1217                                 bool has_snapshot_node_name,
1218                                 const char *snapshot_node_name,
1219                                 bool has_format, const char *format,
1220                                 bool has_mode, NewImageMode mode, Error **errp)
1221 {
1222     BlockdevSnapshotSync snapshot = {
1223         .has_device = has_device,
1224         .device = (char *) device,
1225         .has_node_name = has_node_name,
1226         .node_name = (char *) node_name,
1227         .snapshot_file = (char *) snapshot_file,
1228         .has_snapshot_node_name = has_snapshot_node_name,
1229         .snapshot_node_name = (char *) snapshot_node_name,
1230         .has_format = has_format,
1231         .format = (char *) format,
1232         .has_mode = has_mode,
1233         .mode = mode,
1234     };
1235     TransactionAction action = {
1236         .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
1237         .u.blockdev_snapshot_sync = &snapshot,
1238     };
1239     blockdev_do_action(&action, errp);
1240 }
1241 
1242 void qmp_blockdev_snapshot(const char *node, const char *overlay,
1243                            Error **errp)
1244 {
1245     BlockdevSnapshot snapshot_data = {
1246         .node = (char *) node,
1247         .overlay = (char *) overlay
1248     };
1249     TransactionAction action = {
1250         .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
1251         .u.blockdev_snapshot = &snapshot_data,
1252     };
1253     blockdev_do_action(&action, errp);
1254 }
1255 
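/*
 * Usage sketch (QMP, hypothetical values):
 *   { "execute": "blockdev-snapshot-internal-sync",
 *     "arguments": { "device": "drive0", "name": "snap1" } }
 * The request is routed through qmp_transaction() as a single-action
 * transaction by blockdev_do_action() above.
 */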
1256 void qmp_blockdev_snapshot_internal_sync(const char *device,
1257                                          const char *name,
1258                                          Error **errp)
1259 {
1260     BlockdevSnapshotInternal snapshot = {
1261         .device = (char *) device,
1262         .name = (char *) name
1263     };
1264     TransactionAction action = {
1265         .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
1266         .u.blockdev_snapshot_internal_sync = &snapshot,
1267     };
1268     blockdev_do_action(&action, errp);
1269 }
1270 
1271 SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
1272                                                          bool has_id,
1273                                                          const char *id,
1274                                                          bool has_name,
1275                                                          const char *name,
1276                                                          Error **errp)
1277 {
1278     BlockDriverState *bs;
1279     BlockBackend *blk;
1280     AioContext *aio_context;
1281     QEMUSnapshotInfo sn;
1282     Error *local_err = NULL;
1283     SnapshotInfo *info = NULL;
1284     int ret;
1285 
1286     blk = blk_by_name(device);
1287     if (!blk) {
1288         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1289                   "Device '%s' not found", device);
1290         return NULL;
1291     }
1292 
1293     aio_context = blk_get_aio_context(blk);
1294     aio_context_acquire(aio_context);
1295 
1296     if (!has_id) {
1297         id = NULL;
1298     }
1299 
1300     if (!has_name) {
1301         name = NULL;
1302     }
1303 
1304     if (!id && !name) {
1305         error_setg(errp, "Name or id must be provided");
1306         goto out_aio_context;
1307     }
1308 
1309     if (!blk_is_available(blk)) {
1310         error_setg(errp, "Device '%s' has no medium", device);
1311         goto out_aio_context;
1312     }
1313     bs = blk_bs(blk);
1314 
1315     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
1316         goto out_aio_context;
1317     }
1318 
1319     ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
1320     if (local_err) {
1321         error_propagate(errp, local_err);
1322         goto out_aio_context;
1323     }
1324     if (!ret) {
1325         error_setg(errp,
1326                    "Snapshot with id '%s' and name '%s' does not exist on "
1327                    "device '%s'",
1328                    STR_OR_NULL(id), STR_OR_NULL(name), device);
1329         goto out_aio_context;
1330     }
1331 
1332     bdrv_snapshot_delete(bs, id, name, &local_err);
1333     if (local_err) {
1334         error_propagate(errp, local_err);
1335         goto out_aio_context;
1336     }
1337 
1338     aio_context_release(aio_context);
1339 
1340     info = g_new0(SnapshotInfo, 1);
1341     info->id = g_strdup(sn.id_str);
1342     info->name = g_strdup(sn.name);
1343     info->date_nsec = sn.date_nsec;
1344     info->date_sec = sn.date_sec;
1345     info->vm_state_size = sn.vm_state_size;
1346     info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
1347     info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;
1348 
1349     return info;
1350 
1351 out_aio_context:
1352     aio_context_release(aio_context);
1353     return NULL;
1354 }
1355 
1356 /**
1357  * block_dirty_bitmap_lookup:
1358  * Return a dirty bitmap (if present), after validating
1359  * the node reference and bitmap names.
1360  *
1361  * @node: The name of the BDS node to search for bitmaps
1362  * @name: The name of the bitmap to search for
1363  * @pbs: Output pointer for BDS lookup, if desired. Can be NULL.
1364  * @paio: Output pointer for aio_context acquisition, if desired. Can be NULL.
1365  * @errp: Output pointer for error information. Can be NULL.
1366  *
1367  * @return: A bitmap object on success, or NULL on failure.
1368  */
1369 static BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
1370                                                   const char *name,
1371                                                   BlockDriverState **pbs,
1372                                                   AioContext **paio,
1373                                                   Error **errp)
1374 {
1375     BlockDriverState *bs;
1376     BdrvDirtyBitmap *bitmap;
1377     AioContext *aio_context;
1378 
1379     if (!node) {
1380         error_setg(errp, "Node cannot be NULL");
1381         return NULL;
1382     }
1383     if (!name) {
1384         error_setg(errp, "Bitmap name cannot be NULL");
1385         return NULL;
1386     }
1387     bs = bdrv_lookup_bs(node, node, NULL);
1388     if (!bs) {
1389         error_setg(errp, "Node '%s' not found", node);
1390         return NULL;
1391     }
1392 
1393     aio_context = bdrv_get_aio_context(bs);
1394     aio_context_acquire(aio_context);
1395 
1396     bitmap = bdrv_find_dirty_bitmap(bs, name);
1397     if (!bitmap) {
1398         error_setg(errp, "Dirty bitmap '%s' not found", name);
1399         goto fail;
1400     }
1401 
1402     if (pbs) {
1403         *pbs = bs;
1404     }
1405     if (paio) {
1406         *paio = aio_context;
1407     } else {
1408         aio_context_release(aio_context);
1409     }
1410 
1411     return bitmap;
1412 
1413  fail:
1414     aio_context_release(aio_context);
1415     return NULL;
1416 }
1417 
1418 /* New and old BlockDriverState structs for atomic group operations */
1419 
1420 typedef struct BlkActionState BlkActionState;
1421 
1422 /**
1423  * BlkActionOps:
1424  * Table of operations that define an Action.
1425  *
1426  * @instance_size: Size of state struct, in bytes.
1427  * @prepare: Prepare the work, must NOT be NULL.
1428  * @commit: Commit the changes, can be NULL.
1429  * @abort: Abort the changes on fail, can be NULL.
1430  * @clean: Clean up resources after all transaction actions have called
1431  *         commit() or abort(). Can be NULL.
1432  *
1433  * Only prepare() may fail. In a single transaction, only one of commit() or
1434  * abort() will be called. clean() will always be called if it is present.
1435  */
1436 typedef struct BlkActionOps {
1437     size_t instance_size;
1438     void (*prepare)(BlkActionState *common, Error **errp);
1439     void (*commit)(BlkActionState *common);
1440     void (*abort)(BlkActionState *common);
1441     void (*clean)(BlkActionState *common);
1442 } BlkActionOps;
1443 
1444 /**
1445  * BlkActionState:
1446  * Describes one Action's state within a Transaction.
1447  *
1448  * @action: QAPI-defined enum identifying which Action to perform.
1449  * @ops: Table of ActionOps this Action can perform.
1450  * @block_job_txn: Transaction which this action belongs to.
1451  * @entry: List membership for all Actions in this Transaction.
1452  *
1453  * This structure must be arranged as the first member of a subclassed type,
1454  * so that the compiler places it at offset 0 and a pointer to the subclass
1455  * can be converted to a pointer to the base class (see DO_UPCAST()).
1456  */
1457 struct BlkActionState {
1458     TransactionAction *action;
1459     const BlkActionOps *ops;
1460     BlockJobTxn *block_job_txn;
1461     TransactionProperties *txn_props;
1462     QSIMPLEQ_ENTRY(BlkActionState) entry;
1463 };
1464 
1465 /* internal snapshot private data */
1466 typedef struct InternalSnapshotState {
1467     BlkActionState common;
1468     BlockDriverState *bs;
1469     AioContext *aio_context;
1470     QEMUSnapshotInfo sn;
1471     bool created;
1472 } InternalSnapshotState;
1473 
1474 
1475 static int action_check_completion_mode(BlkActionState *s, Error **errp)
1476 {
1477     if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
1478         error_setg(errp,
1479                    "Action '%s' does not support Transaction property "
1480                    "completion-mode = %s",
1481                    TransactionActionKind_lookup[s->action->type],
1482                    ActionCompletionMode_lookup[s->txn_props->completion_mode]);
1483         return -1;
1484     }
1485     return 0;
1486 }
1487 
1488 static void internal_snapshot_prepare(BlkActionState *common,
1489                                       Error **errp)
1490 {
1491     Error *local_err = NULL;
1492     const char *device;
1493     const char *name;
1494     BlockBackend *blk;
1495     BlockDriverState *bs;
1496     QEMUSnapshotInfo old_sn, *sn;
1497     bool ret;
1498     qemu_timeval tv;
1499     BlockdevSnapshotInternal *internal;
1500     InternalSnapshotState *state;
1501     int ret1;
1502 
1503     g_assert(common->action->type ==
1504              TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC);
1505     internal = common->action->u.blockdev_snapshot_internal_sync;
1506     state = DO_UPCAST(InternalSnapshotState, common, common);
1507 
1508     /* 1. parse input */
1509     device = internal->device;
1510     name = internal->name;
1511 
1512     /* 2. validate the input */
1513     if (action_check_completion_mode(common, errp) < 0) {
1514         return;
1515     }
1516 
1517     blk = blk_by_name(device);
1518     if (!blk) {
1519         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1520                   "Device '%s' not found", device);
1521         return;
1522     }
1523 
1524     /* AioContext is released in .clean() */
1525     state->aio_context = blk_get_aio_context(blk);
1526     aio_context_acquire(state->aio_context);
1527 
1528     if (!blk_is_available(blk)) {
1529         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
1530         return;
1531     }
1532     bs = blk_bs(blk);
1533 
1534     state->bs = bs;
1535     bdrv_drained_begin(bs);
1536 
1537     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
1538         return;
1539     }
1540 
1541     if (bdrv_is_read_only(bs)) {
1542         error_setg(errp, "Device '%s' is read only", device);
1543         return;
1544     }
1545 
1546     if (!bdrv_can_snapshot(bs)) {
1547         error_setg(errp, "Block format '%s' used by device '%s' "
1548                    "does not support internal snapshots",
1549                    bs->drv->format_name, device);
1550         return;
1551     }
1552 
1553     if (!strlen(name)) {
1554         error_setg(errp, "Name is empty");
1555         return;
1556     }
1557 
1558     /* check whether a snapshot with that name already exists */
1559     ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
1560                                             &local_err);
1561     if (local_err) {
1562         error_propagate(errp, local_err);
1563         return;
1564     } else if (ret) {
1565         error_setg(errp,
1566                    "Snapshot with name '%s' already exists on device '%s'",
1567                    name, device);
1568         return;
1569     }
1570 
1571     /* 3. take the snapshot */
1572     sn = &state->sn;
1573     pstrcpy(sn->name, sizeof(sn->name), name);
1574     qemu_gettimeofday(&tv);
1575     sn->date_sec = tv.tv_sec;
1576     sn->date_nsec = tv.tv_usec * 1000;
1577     sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1578 
1579     ret1 = bdrv_snapshot_create(bs, sn);
1580     if (ret1 < 0) {
1581         error_setg_errno(errp, -ret1,
1582                          "Failed to create snapshot '%s' on device '%s'",
1583                          name, device);
1584         return;
1585     }
1586 
1587     /* 4. success: remember that a snapshot was created */
1588     state->created = true;
1589 }
1590 
1591 static void internal_snapshot_abort(BlkActionState *common)
1592 {
1593     InternalSnapshotState *state =
1594                              DO_UPCAST(InternalSnapshotState, common, common);
1595     BlockDriverState *bs = state->bs;
1596     QEMUSnapshotInfo *sn = &state->sn;
1597     Error *local_error = NULL;
1598 
1599     if (!state->created) {
1600         return;
1601     }
1602 
1603     if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
1604         error_reportf_err(local_error,
1605                           "Failed to delete snapshot with id '%s' and "
1606                           "name '%s' on device '%s' in abort: ",
1607                           sn->id_str, sn->name,
1608                           bdrv_get_device_name(bs));
1609     }
1610 }
1611 
1612 static void internal_snapshot_clean(BlkActionState *common)
1613 {
1614     InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
1615                                              common, common);
1616 
1617     if (state->aio_context) {
1618         if (state->bs) {
1619             bdrv_drained_end(state->bs);
1620         }
1621         aio_context_release(state->aio_context);
1622     }
1623 }
1624 
1625 /* external snapshot private data */
1626 typedef struct ExternalSnapshotState {
1627     BlkActionState common;
1628     BlockDriverState *old_bs;
1629     BlockDriverState *new_bs;
1630     AioContext *aio_context;
1631 } ExternalSnapshotState;
1632 
1633 static void external_snapshot_prepare(BlkActionState *common,
1634                                       Error **errp)
1635 {
1636     int flags = 0, ret;
1637     QDict *options = NULL;
1638     Error *local_err = NULL;
1639     /* Device and node name of the image to generate the snapshot from */
1640     const char *device;
1641     const char *node_name;
1642     /* Reference to the new image (for 'blockdev-snapshot') */
1643     const char *snapshot_ref;
1644     /* File name of the new image (for 'blockdev-snapshot-sync') */
1645     const char *new_image_file;
1646     ExternalSnapshotState *state =
1647                              DO_UPCAST(ExternalSnapshotState, common, common);
1648     TransactionAction *action = common->action;
1649 
1650     /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
1651      * purpose but a different set of parameters */
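         /* As a hedged illustration only (wire member names assumed from the
          * QAPI schema, matching the fields referenced below; values are
          * placeholders), the two actions look roughly like this in QMP:
          *   { "type": "blockdev-snapshot",
          *     "data": { "node": "node0", "overlay": "overlay0" } }
          *   { "type": "blockdev-snapshot-sync",
          *     "data": { "device": "drive0",
          *               "snapshot-file": "/tmp/snap.qcow2" } }
          */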
1652     switch (action->type) {
1653     case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
1654         {
1655             BlockdevSnapshot *s = action->u.blockdev_snapshot;
1656             device = s->node;
1657             node_name = s->node;
1658             new_image_file = NULL;
1659             snapshot_ref = s->overlay;
1660         }
1661         break;
1662     case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
1663         {
1664             BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync;
1665             device = s->has_device ? s->device : NULL;
1666             node_name = s->has_node_name ? s->node_name : NULL;
1667             new_image_file = s->snapshot_file;
1668             snapshot_ref = NULL;
1669         }
1670         break;
1671     default:
1672         g_assert_not_reached();
1673     }
1674 
1675     /* start processing */
1676     if (action_check_completion_mode(common, errp) < 0) {
1677         return;
1678     }
1679 
1680     state->old_bs = bdrv_lookup_bs(device, node_name, errp);
1681     if (!state->old_bs) {
1682         return;
1683     }
1684 
1685     /* Acquire AioContext now so any threads operating on old_bs stop */
1686     state->aio_context = bdrv_get_aio_context(state->old_bs);
1687     aio_context_acquire(state->aio_context);
1688     bdrv_drained_begin(state->old_bs);
1689 
1690     if (!bdrv_is_inserted(state->old_bs)) {
1691         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
1692         return;
1693     }
1694 
1695     if (bdrv_op_is_blocked(state->old_bs,
1696                            BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
1697         return;
1698     }
1699 
1700     if (!bdrv_is_read_only(state->old_bs)) {
1701         if (bdrv_flush(state->old_bs)) {
1702             error_setg(errp, QERR_IO_ERROR);
1703             return;
1704         }
1705     }
1706 
1707     if (!bdrv_is_first_non_filter(state->old_bs)) {
1708         error_setg(errp, QERR_FEATURE_DISABLED, "snapshot");
1709         return;
1710     }
1711 
1712     if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
1713         BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync;
1714         const char *format = s->has_format ? s->format : "qcow2";
1715         enum NewImageMode mode;
1716         const char *snapshot_node_name =
1717             s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
1718 
1719         if (node_name && !snapshot_node_name) {
1720             error_setg(errp, "New snapshot node name missing");
1721             return;
1722         }
1723 
1724         if (snapshot_node_name &&
1725             bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
1726             error_setg(errp, "New snapshot node name already in use");
1727             return;
1728         }
1729 
1730         flags = state->old_bs->open_flags;
1731 
1732         /* create the new image with the current image as its backing file */
1733         mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
1734         if (mode != NEW_IMAGE_MODE_EXISTING) {
1735             bdrv_img_create(new_image_file, format,
1736                             state->old_bs->filename,
1737                             state->old_bs->drv->format_name,
1738                             NULL, -1, flags, &local_err, false);
1739             if (local_err) {
1740                 error_propagate(errp, local_err);
1741                 return;
1742             }
1743         }
1744 
1745         options = qdict_new();
1746         if (s->has_snapshot_node_name) {
1747             qdict_put(options, "node-name",
1748                       qstring_from_str(snapshot_node_name));
1749         }
1750         qdict_put(options, "driver", qstring_from_str(format));
1751 
1752         flags |= BDRV_O_NO_BACKING;
1753     }
1754 
1755     assert(state->new_bs == NULL);
1756     ret = bdrv_open(&state->new_bs, new_image_file, snapshot_ref, options,
1757                     flags, errp);
1758     /* We will manually add the backing_hd field to the bs later */
1759     if (ret != 0) {
1760         return;
1761     }
1762 
1763     if (state->new_bs->blk != NULL) {
1764         error_setg(errp, "The snapshot is already in use by %s",
1765                    blk_name(state->new_bs->blk));
1766         return;
1767     }
1768 
1769     if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
1770                            errp)) {
1771         return;
1772     }
1773 
1774     if (state->new_bs->backing != NULL) {
1775         error_setg(errp, "The snapshot already has a backing image");
1776         return;
1777     }
1778 
1779     if (!state->new_bs->drv->supports_backing) {
1780         error_setg(errp, "The snapshot does not support backing images");
1781     }
1782 }
1783 
1784 static void external_snapshot_commit(BlkActionState *common)
1785 {
1786     ExternalSnapshotState *state =
1787                              DO_UPCAST(ExternalSnapshotState, common, common);
1788 
1789     bdrv_set_aio_context(state->new_bs, state->aio_context);
1790 
1791     /* This removes our old bs and adds the new bs */
1792     bdrv_append(state->new_bs, state->old_bs);
1793     /* We don't need (or want) to use the transactional
1794      * bdrv_reopen_multiple() across all the entries at once, because we
1795      * don't want to abort all of them if one of them fails the reopen */
1796     bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR,
1797                 NULL);
1798 }
1799 
1800 static void external_snapshot_abort(BlkActionState *common)
1801 {
1802     ExternalSnapshotState *state =
1803                              DO_UPCAST(ExternalSnapshotState, common, common);
1804     if (state->new_bs) {
1805         bdrv_unref(state->new_bs);
1806     }
1807 }
1808 
1809 static void external_snapshot_clean(BlkActionState *common)
1810 {
1811     ExternalSnapshotState *state =
1812                              DO_UPCAST(ExternalSnapshotState, common, common);
1813     if (state->aio_context) {
1814         bdrv_drained_end(state->old_bs);
1815         aio_context_release(state->aio_context);
1816     }
1817 }
1818 
1819 typedef struct DriveBackupState {
1820     BlkActionState common;
1821     BlockDriverState *bs;
1822     AioContext *aio_context;
1823     BlockJob *job;
1824 } DriveBackupState;
1825 
1826 static void do_drive_backup(const char *device, const char *target,
1827                             bool has_format, const char *format,
1828                             enum MirrorSyncMode sync,
1829                             bool has_mode, enum NewImageMode mode,
1830                             bool has_speed, int64_t speed,
1831                             bool has_bitmap, const char *bitmap,
1832                             bool has_on_source_error,
1833                             BlockdevOnError on_source_error,
1834                             bool has_on_target_error,
1835                             BlockdevOnError on_target_error,
1836                             BlockJobTxn *txn, Error **errp);
1837 
1838 static void drive_backup_prepare(BlkActionState *common, Error **errp)
1839 {
1840     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1841     BlockBackend *blk;
1842     DriveBackup *backup;
1843     Error *local_err = NULL;
1844 
1845     assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
1846     backup = common->action->u.drive_backup;
1847 
1848     blk = blk_by_name(backup->device);
1849     if (!blk) {
1850         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1851                   "Device '%s' not found", backup->device);
1852         return;
1853     }
1854 
1855     if (!blk_is_available(blk)) {
1856         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
1857         return;
1858     }
1859 
1860     /* AioContext is released in .clean() */
1861     state->aio_context = blk_get_aio_context(blk);
1862     aio_context_acquire(state->aio_context);
1863     bdrv_drained_begin(blk_bs(blk));
1864     state->bs = blk_bs(blk);
1865 
1866     do_drive_backup(backup->device, backup->target,
1867                     backup->has_format, backup->format,
1868                     backup->sync,
1869                     backup->has_mode, backup->mode,
1870                     backup->has_speed, backup->speed,
1871                     backup->has_bitmap, backup->bitmap,
1872                     backup->has_on_source_error, backup->on_source_error,
1873                     backup->has_on_target_error, backup->on_target_error,
1874                     common->block_job_txn, &local_err);
1875     if (local_err) {
1876         error_propagate(errp, local_err);
1877         return;
1878     }
1879 
1880     state->job = state->bs->job;
1881 }
1882 
1883 static void drive_backup_abort(BlkActionState *common)
1884 {
1885     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1886     BlockDriverState *bs = state->bs;
1887 
1888     /* Only cancel if it's the job we started */
1889     if (bs && bs->job && bs->job == state->job) {
1890         block_job_cancel_sync(bs->job);
1891     }
1892 }
1893 
1894 static void drive_backup_clean(BlkActionState *common)
1895 {
1896     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1897 
1898     if (state->aio_context) {
1899         bdrv_drained_end(state->bs);
1900         aio_context_release(state->aio_context);
1901     }
1902 }
1903 
1904 typedef struct BlockdevBackupState {
1905     BlkActionState common;
1906     BlockDriverState *bs;
1907     BlockJob *job;
1908     AioContext *aio_context;
1909 } BlockdevBackupState;
1910 
1911 static void do_blockdev_backup(const char *device, const char *target,
1912                                enum MirrorSyncMode sync,
1913                                bool has_speed, int64_t speed,
1914                                bool has_on_source_error,
1915                                BlockdevOnError on_source_error,
1916                                bool has_on_target_error,
1917                                BlockdevOnError on_target_error,
1918                                BlockJobTxn *txn, Error **errp);
1919 
1920 static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
1921 {
1922     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1923     BlockdevBackup *backup;
1924     BlockBackend *blk, *target;
1925     Error *local_err = NULL;
1926 
1927     assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
1928     backup = common->action->u.blockdev_backup;
1929 
1930     blk = blk_by_name(backup->device);
1931     if (!blk) {
1932         error_setg(errp, "Device '%s' not found", backup->device);
1933         return;
1934     }
1935 
1936     if (!blk_is_available(blk)) {
1937         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
1938         return;
1939     }
1940 
1941     target = blk_by_name(backup->target);
1942     if (!target) {
1943         error_setg(errp, "Device '%s' not found", backup->target);
1944         return;
1945     }
1946 
1947     /* AioContext is released in .clean() */
1948     state->aio_context = blk_get_aio_context(blk);
1949     if (state->aio_context != blk_get_aio_context(target)) {
1950         state->aio_context = NULL;
1951         error_setg(errp, "Backup between two IO threads is not implemented");
1952         return;
1953     }
1954     aio_context_acquire(state->aio_context);
1955     state->bs = blk_bs(blk);
1956     bdrv_drained_begin(state->bs);
1957 
1958     do_blockdev_backup(backup->device, backup->target,
1959                        backup->sync,
1960                        backup->has_speed, backup->speed,
1961                        backup->has_on_source_error, backup->on_source_error,
1962                        backup->has_on_target_error, backup->on_target_error,
1963                        common->block_job_txn, &local_err);
1964     if (local_err) {
1965         error_propagate(errp, local_err);
1966         return;
1967     }
1968 
1969     state->job = state->bs->job;
1970 }
1971 
1972 static void blockdev_backup_abort(BlkActionState *common)
1973 {
1974     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1975     BlockDriverState *bs = state->bs;
1976 
1977     /* Only cancel if it's the job we started */
1978     if (bs && bs->job && bs->job == state->job) {
1979         block_job_cancel_sync(bs->job);
1980     }
1981 }
1982 
1983 static void blockdev_backup_clean(BlkActionState *common)
1984 {
1985     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1986 
1987     if (state->aio_context) {
1988         bdrv_drained_end(state->bs);
1989         aio_context_release(state->aio_context);
1990     }
1991 }
1992 
1993 typedef struct BlockDirtyBitmapState {
1994     BlkActionState common;
1995     BdrvDirtyBitmap *bitmap;
1996     BlockDriverState *bs;
1997     AioContext *aio_context;
1998     HBitmap *backup;
1999     bool prepared;
2000 } BlockDirtyBitmapState;
2001 
2002 static void block_dirty_bitmap_add_prepare(BlkActionState *common,
2003                                            Error **errp)
2004 {
2005     Error *local_err = NULL;
2006     BlockDirtyBitmapAdd *action;
2007     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2008                                              common, common);
2009 
2010     if (action_check_completion_mode(common, errp) < 0) {
2011         return;
2012     }
2013 
2014     action = common->action->u.block_dirty_bitmap_add;
2015     /* AIO context taken and released within qmp_block_dirty_bitmap_add */
2016     qmp_block_dirty_bitmap_add(action->node, action->name,
2017                                action->has_granularity, action->granularity,
2018                                &local_err);
2019 
2020     if (!local_err) {
2021         state->prepared = true;
2022     } else {
2023         error_propagate(errp, local_err);
2024     }
2025 }
2026 
2027 static void block_dirty_bitmap_add_abort(BlkActionState *common)
2028 {
2029     BlockDirtyBitmapAdd *action;
2030     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2031                                              common, common);
2032 
2033     action = common->action->u.block_dirty_bitmap_add;
2034     /* Should not be able to fail: IF the bitmap was added via .prepare(),
2035      * then the node reference and bitmap name must have been valid.
2036      */
2037     if (state->prepared) {
2038         qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort);
2039     }
2040 }
2041 
2042 static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
2043                                              Error **errp)
2044 {
2045     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2046                                              common, common);
2047     BlockDirtyBitmap *action;
2048 
2049     if (action_check_completion_mode(common, errp) < 0) {
2050         return;
2051     }
2052 
2053     action = common->action->u.block_dirty_bitmap_clear;
2054     state->bitmap = block_dirty_bitmap_lookup(action->node,
2055                                               action->name,
2056                                               &state->bs,
2057                                               &state->aio_context,
2058                                               errp);
2059     if (!state->bitmap) {
2060         return;
2061     }
2062 
2063     if (bdrv_dirty_bitmap_frozen(state->bitmap)) {
2064         error_setg(errp, "Cannot modify a frozen bitmap");
2065         return;
2066     } else if (!bdrv_dirty_bitmap_enabled(state->bitmap)) {
2067         error_setg(errp, "Cannot clear a disabled bitmap");
2068         return;
2069     }
2070 
2071     bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
2072     /* AioContext is released in .clean() */
2073 }
2074 
2075 static void block_dirty_bitmap_clear_abort(BlkActionState *common)
2076 {
2077     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2078                                              common, common);
2079 
2080     bdrv_undo_clear_dirty_bitmap(state->bitmap, state->backup);
2081 }
2082 
2083 static void block_dirty_bitmap_clear_commit(BlkActionState *common)
2084 {
2085     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2086                                              common, common);
2087 
2088     hbitmap_free(state->backup);
2089 }
2090 
2091 static void block_dirty_bitmap_clear_clean(BlkActionState *common)
2092 {
2093     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2094                                              common, common);
2095 
2096     if (state->aio_context) {
2097         aio_context_release(state->aio_context);
2098     }
2099 }
2100 
2101 static void abort_prepare(BlkActionState *common, Error **errp)
2102 {
2103     error_setg(errp, "Transaction aborted using Abort action");
2104 }
2105 
2106 static void abort_commit(BlkActionState *common)
2107 {
2108     g_assert_not_reached(); /* this action never succeeds */
2109 }
2110 
2111 static const BlkActionOps actions[] = {
2112     [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = {
2113         .instance_size = sizeof(ExternalSnapshotState),
2114         .prepare  = external_snapshot_prepare,
2115         .commit   = external_snapshot_commit,
2116         .abort = external_snapshot_abort,
2117         .clean = external_snapshot_clean,
2118     },
2119     [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = {
2120         .instance_size = sizeof(ExternalSnapshotState),
2121         .prepare  = external_snapshot_prepare,
2122         .commit   = external_snapshot_commit,
2123         .abort = external_snapshot_abort,
2124         .clean = external_snapshot_clean,
2125     },
2126     [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = {
2127         .instance_size = sizeof(DriveBackupState),
2128         .prepare = drive_backup_prepare,
2129         .abort = drive_backup_abort,
2130         .clean = drive_backup_clean,
2131     },
2132     [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = {
2133         .instance_size = sizeof(BlockdevBackupState),
2134         .prepare = blockdev_backup_prepare,
2135         .abort = blockdev_backup_abort,
2136         .clean = blockdev_backup_clean,
2137     },
2138     [TRANSACTION_ACTION_KIND_ABORT] = {
2139         .instance_size = sizeof(BlkActionState),
2140         .prepare = abort_prepare,
2141         .commit = abort_commit,
2142     },
2143     [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = {
2144         .instance_size = sizeof(InternalSnapshotState),
2145         .prepare  = internal_snapshot_prepare,
2146         .abort = internal_snapshot_abort,
2147         .clean = internal_snapshot_clean,
2148     },
2149     [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = {
2150         .instance_size = sizeof(BlockDirtyBitmapState),
2151         .prepare = block_dirty_bitmap_add_prepare,
2152         .abort = block_dirty_bitmap_add_abort,
2153     },
2154     [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = {
2155         .instance_size = sizeof(BlockDirtyBitmapState),
2156         .prepare = block_dirty_bitmap_clear_prepare,
2157         .commit = block_dirty_bitmap_clear_commit,
2158         .abort = block_dirty_bitmap_clear_abort,
2159         .clean = block_dirty_bitmap_clear_clean,
2160     }
2161 };
2162 
2163 /**
2164  * Allocate a TransactionProperties structure if necessary, and fill
2165  * that structure with desired defaults if they are unset.
2166  */
2167 static TransactionProperties *get_transaction_properties(
2168     TransactionProperties *props)
2169 {
2170     if (!props) {
2171         props = g_new0(TransactionProperties, 1);
2172     }
2173 
2174     if (!props->has_completion_mode) {
2175         props->has_completion_mode = true;
2176         props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL;
2177     }
2178 
2179     return props;
2180 }
2181 
2182 /*
2183  * 'Atomic' group operations.  The operations are performed as a set, and if
2184  * any fail then we roll back all operations in the group.
2185  */
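     /*
      * A hedged QMP sketch (placeholder device names and filenames; the action
      * payloads are the TransactionAction union members dispatched through the
      * actions[] table above):
      *
      *   { "execute": "transaction",
      *     "arguments": {
      *       "actions": [
      *         { "type": "drive-backup",
      *           "data": { "device": "drive0", "target": "/tmp/drive0.bak",
      *                     "sync": "full" } },
      *         { "type": "blockdev-snapshot-sync",
      *           "data": { "device": "drive1",
      *                     "snapshot-file": "/tmp/drive1-snap.qcow2" } }
      *       ]
      *     }
      *   }
      */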
2186 void qmp_transaction(TransactionActionList *dev_list,
2187                      bool has_props,
2188                      struct TransactionProperties *props,
2189                      Error **errp)
2190 {
2191     TransactionActionList *dev_entry = dev_list;
2192     BlockJobTxn *block_job_txn = NULL;
2193     BlkActionState *state, *next;
2194     Error *local_err = NULL;
2195 
2196     QSIMPLEQ_HEAD(snap_bdrv_states, BlkActionState) snap_bdrv_states;
2197     QSIMPLEQ_INIT(&snap_bdrv_states);
2198 
2199     /* Does this transaction get canceled as a group on failure?
2200      * If not, we don't really need to make a BlockJobTxn.
2201      */
2202     props = get_transaction_properties(props);
2203     if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
2204         block_job_txn = block_job_txn_new();
2205     }
2206 
2207     /* drain all i/o before any operations */
2208     bdrv_drain_all();
2209 
2210     /* We don't do anything in this loop that commits us to the operations */
2211     while (NULL != dev_entry) {
2212         TransactionAction *dev_info = NULL;
2213         const BlkActionOps *ops;
2214 
2215         dev_info = dev_entry->value;
2216         dev_entry = dev_entry->next;
2217 
2218         assert(dev_info->type < ARRAY_SIZE(actions));
2219 
2220         ops = &actions[dev_info->type];
2221         assert(ops->instance_size > 0);
2222 
2223         state = g_malloc0(ops->instance_size);
2224         state->ops = ops;
2225         state->action = dev_info;
2226         state->block_job_txn = block_job_txn;
2227         state->txn_props = props;
2228         QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry);
2229 
2230         state->ops->prepare(state, &local_err);
2231         if (local_err) {
2232             error_propagate(errp, local_err);
2233             goto delete_and_fail;
2234         }
2235     }
2236 
2237     QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
2238         if (state->ops->commit) {
2239             state->ops->commit(state);
2240         }
2241     }
2242 
2243     /* success */
2244     goto exit;
2245 
2246 delete_and_fail:
2247     /* failure, and it is all-or-none; roll back all operations */
2248     QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
2249         if (state->ops->abort) {
2250             state->ops->abort(state);
2251         }
2252     }
2253 exit:
2254     QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) {
2255         if (state->ops->clean) {
2256             state->ops->clean(state);
2257         }
2258         g_free(state);
2259     }
2260     if (!has_props) {
2261         qapi_free_TransactionProperties(props);
2262     }
2263     block_job_txn_unref(block_job_txn);
2264 }
2265 
2266 void qmp_eject(const char *device, bool has_force, bool force, Error **errp)
2267 {
2268     Error *local_err = NULL;
2269 
2270     qmp_blockdev_open_tray(device, has_force, force, &local_err);
2271     if (local_err) {
2272         error_propagate(errp, local_err);
2273         return;
2274     }
2275 
2276     qmp_x_blockdev_remove_medium(device, errp);
2277 }
2278 
2279 void qmp_block_passwd(bool has_device, const char *device,
2280                       bool has_node_name, const char *node_name,
2281                       const char *password, Error **errp)
2282 {
2283     Error *local_err = NULL;
2284     BlockDriverState *bs;
2285     AioContext *aio_context;
2286 
2287     bs = bdrv_lookup_bs(has_device ? device : NULL,
2288                         has_node_name ? node_name : NULL,
2289                         &local_err);
2290     if (local_err) {
2291         error_propagate(errp, local_err);
2292         return;
2293     }
2294 
2295     aio_context = bdrv_get_aio_context(bs);
2296     aio_context_acquire(aio_context);
2297 
2298     bdrv_add_key(bs, password, errp);
2299 
2300     aio_context_release(aio_context);
2301 }
2302 
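     /*
      * Open the tray of a removable device.  This is a no-op for tray-less
      * devices and for trays that are already open.  If the guest has locked
      * the medium, only an eject request is sent to the guest; the tray is
      * forced open regardless only when @force is true.
      */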
2303 void qmp_blockdev_open_tray(const char *device, bool has_force, bool force,
2304                             Error **errp)
2305 {
2306     BlockBackend *blk;
2307     bool locked;
2308 
2309     if (!has_force) {
2310         force = false;
2311     }
2312 
2313     blk = blk_by_name(device);
2314     if (!blk) {
2315         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2316                   "Device '%s' not found", device);
2317         return;
2318     }
2319 
2320     if (!blk_dev_has_removable_media(blk)) {
2321         error_setg(errp, "Device '%s' is not removable", device);
2322         return;
2323     }
2324 
2325     if (!blk_dev_has_tray(blk)) {
2326         /* Ignore this command on tray-less devices */
2327         return;
2328     }
2329 
2330     if (blk_dev_is_tray_open(blk)) {
2331         return;
2332     }
2333 
2334     locked = blk_dev_is_medium_locked(blk);
2335     if (locked) {
2336         blk_dev_eject_request(blk, force);
2337     }
2338 
2339     if (!locked || force) {
2340         blk_dev_change_media_cb(blk, false);
2341     }
2342 }
2343 
2344 void qmp_blockdev_close_tray(const char *device, Error **errp)
2345 {
2346     BlockBackend *blk;
2347 
2348     blk = blk_by_name(device);
2349     if (!blk) {
2350         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2351                   "Device '%s' not found", device);
2352         return;
2353     }
2354 
2355     if (!blk_dev_has_removable_media(blk)) {
2356         error_setg(errp, "Device '%s' is not removable", device);
2357         return;
2358     }
2359 
2360     if (!blk_dev_has_tray(blk)) {
2361         /* Ignore this command on tray-less devices */
2362         return;
2363     }
2364 
2365     if (!blk_dev_is_tray_open(blk)) {
2366         return;
2367     }
2368 
2369     blk_dev_change_media_cb(blk, true);
2370 }
2371 
2372 void qmp_x_blockdev_remove_medium(const char *device, Error **errp)
2373 {
2374     BlockBackend *blk;
2375     BlockDriverState *bs;
2376     AioContext *aio_context;
2377     bool has_device;
2378 
2379     blk = blk_by_name(device);
2380     if (!blk) {
2381         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2382                   "Device '%s' not found", device);
2383         return;
2384     }
2385 
2386     /* For BBs without a device, we can exchange the BDS tree at will */
2387     has_device = blk_get_attached_dev(blk);
2388 
2389     if (has_device && !blk_dev_has_removable_media(blk)) {
2390         error_setg(errp, "Device '%s' is not removable", device);
2391         return;
2392     }
2393 
2394     if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
2395         error_setg(errp, "Tray of device '%s' is not open", device);
2396         return;
2397     }
2398 
2399     bs = blk_bs(blk);
2400     if (!bs) {
2401         return;
2402     }
2403 
2404     aio_context = bdrv_get_aio_context(bs);
2405     aio_context_acquire(aio_context);
2406 
2407     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
2408         goto out;
2409     }
2410 
2411     /* This follows the convention established by bdrv_make_anon() */
2412     if (bs->device_list.tqe_prev) {
2413         bdrv_device_remove(bs);
2414     }
2415 
2416     blk_remove_bs(blk);
2417 
2418     if (!blk_dev_has_tray(blk)) {
2419         /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
2420          * called at all); therefore, the medium needs to be ejected here.
2421          * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
2422          * value passed here (i.e. false). */
2423         blk_dev_change_media_cb(blk, false);
2424     }
2425 
2426 out:
2427     aio_context_release(aio_context);
2428 }
2429 
2430 static void qmp_blockdev_insert_anon_medium(const char *device,
2431                                             BlockDriverState *bs, Error **errp)
2432 {
2433     BlockBackend *blk;
2434     bool has_device;
2435 
2436     blk = blk_by_name(device);
2437     if (!blk) {
2438         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2439                   "Device '%s' not found", device);
2440         return;
2441     }
2442 
2443     /* For BBs without a device, we can exchange the BDS tree at will */
2444     has_device = blk_get_attached_dev(blk);
2445 
2446     if (has_device && !blk_dev_has_removable_media(blk)) {
2447         error_setg(errp, "Device '%s' is not removable", device);
2448         return;
2449     }
2450 
2451     if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
2452         error_setg(errp, "Tray of device '%s' is not open", device);
2453         return;
2454     }
2455 
2456     if (blk_bs(blk)) {
2457         error_setg(errp, "There already is a medium in device '%s'", device);
2458         return;
2459     }
2460 
2461     blk_insert_bs(blk, bs);
2462 
2463     QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
2464 
2465     if (!blk_dev_has_tray(blk)) {
2466         /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
2467          * called at all); therefore, the medium needs to be pushed into the
2468          * slot here.
2469          * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
2470          * value passed here (i.e. true). */
2471         blk_dev_change_media_cb(blk, true);
2472     }
2473 }
2474 
2475 void qmp_x_blockdev_insert_medium(const char *device, const char *node_name,
2476                                   Error **errp)
2477 {
2478     BlockDriverState *bs;
2479 
2480     bs = bdrv_find_node(node_name);
2481     if (!bs) {
2482         error_setg(errp, "Node '%s' not found", node_name);
2483         return;
2484     }
2485 
2486     if (bs->blk) {
2487         error_setg(errp, "Node '%s' is already in use by '%s'", node_name,
2488                    blk_name(bs->blk));
2489         return;
2490     }
2491 
2492     qmp_blockdev_insert_anon_medium(device, bs, errp);
2493 }
2494 
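     /*
      * Change the medium of a removable device: open the new image, then go
      * through the same steps as the individual commands would (open the tray,
      * remove the current medium, insert the new one, close the tray), so that
      * all of their checks apply here as well.
      */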
2495 void qmp_blockdev_change_medium(const char *device, const char *filename,
2496                                 bool has_format, const char *format,
2497                                 bool has_read_only,
2498                                 BlockdevChangeReadOnlyMode read_only,
2499                                 Error **errp)
2500 {
2501     BlockBackend *blk;
2502     BlockDriverState *medium_bs = NULL;
2503     int bdrv_flags, ret;
2504     QDict *options = NULL;
2505     Error *err = NULL;
2506 
2507     blk = blk_by_name(device);
2508     if (!blk) {
2509         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2510                   "Device '%s' not found", device);
2511         goto fail;
2512     }
2513 
2514     if (blk_bs(blk)) {
2515         blk_update_root_state(blk);
2516     }
2517 
2518     bdrv_flags = blk_get_open_flags_from_root_state(blk);
2519     bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING |
2520         BDRV_O_PROTOCOL);
2521 
2522     if (!has_read_only) {
2523         read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
2524     }
2525 
2526     switch (read_only) {
2527     case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
2528         break;
2529 
2530     case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
2531         bdrv_flags &= ~BDRV_O_RDWR;
2532         break;
2533 
2534     case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
2535         bdrv_flags |= BDRV_O_RDWR;
2536         break;
2537 
2538     default:
2539         abort();
2540     }
2541 
2542     if (has_format) {
2543         options = qdict_new();
2544         qdict_put(options, "driver", qstring_from_str(format));
2545     }
2546 
2547     assert(!medium_bs);
2548     ret = bdrv_open(&medium_bs, filename, NULL, options, bdrv_flags, errp);
2549     if (ret < 0) {
2550         goto fail;
2551     }
2552 
2553     blk_apply_root_state(blk, medium_bs);
2554 
2555     bdrv_add_key(medium_bs, NULL, &err);
2556     if (err) {
2557         error_propagate(errp, err);
2558         goto fail;
2559     }
2560 
2561     qmp_blockdev_open_tray(device, false, false, &err);
2562     if (err) {
2563         error_propagate(errp, err);
2564         goto fail;
2565     }
2566 
2567     qmp_x_blockdev_remove_medium(device, &err);
2568     if (err) {
2569         error_propagate(errp, err);
2570         goto fail;
2571     }
2572 
2573     qmp_blockdev_insert_anon_medium(device, medium_bs, &err);
2574     if (err) {
2575         error_propagate(errp, err);
2576         goto fail;
2577     }
2578 
2579     qmp_blockdev_close_tray(device, errp);
2580 
2581 fail:
2582     /* If the medium has been inserted, the device has its own reference, so
2583      * ours must be relinquished; and if it has not been inserted successfully,
2584      * the reference must be relinquished anyway */
2585     bdrv_unref(medium_bs);
2586 }
2587 
2588 /* throttling disk I/O limits */
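     /*
      * A hedged usage sketch (QMP, placeholder values); the optional arguments
      * configure bursts and are left at their defaults when omitted:
      *
      *   { "execute": "block_set_io_throttle",
      *     "arguments": { "device": "drive0",
      *                    "bps": 1000000, "bps_rd": 0, "bps_wr": 0,
      *                    "iops": 0, "iops_rd": 0, "iops_wr": 0 } }
      */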
2589 void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
2590                                int64_t bps_wr,
2591                                int64_t iops,
2592                                int64_t iops_rd,
2593                                int64_t iops_wr,
2594                                bool has_bps_max,
2595                                int64_t bps_max,
2596                                bool has_bps_rd_max,
2597                                int64_t bps_rd_max,
2598                                bool has_bps_wr_max,
2599                                int64_t bps_wr_max,
2600                                bool has_iops_max,
2601                                int64_t iops_max,
2602                                bool has_iops_rd_max,
2603                                int64_t iops_rd_max,
2604                                bool has_iops_wr_max,
2605                                int64_t iops_wr_max,
2606                                bool has_bps_max_length,
2607                                int64_t bps_max_length,
2608                                bool has_bps_rd_max_length,
2609                                int64_t bps_rd_max_length,
2610                                bool has_bps_wr_max_length,
2611                                int64_t bps_wr_max_length,
2612                                bool has_iops_max_length,
2613                                int64_t iops_max_length,
2614                                bool has_iops_rd_max_length,
2615                                int64_t iops_rd_max_length,
2616                                bool has_iops_wr_max_length,
2617                                int64_t iops_wr_max_length,
2618                                bool has_iops_size,
2619                                int64_t iops_size,
2620                                bool has_group,
2621                                const char *group, Error **errp)
2622 {
2623     ThrottleConfig cfg;
2624     BlockDriverState *bs;
2625     BlockBackend *blk;
2626     AioContext *aio_context;
2627 
2628     blk = blk_by_name(device);
2629     if (!blk) {
2630         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2631                   "Device '%s' not found", device);
2632         return;
2633     }
2634 
2635     aio_context = blk_get_aio_context(blk);
2636     aio_context_acquire(aio_context);
2637 
2638     bs = blk_bs(blk);
2639     if (!bs) {
2640         error_setg(errp, "Device '%s' has no medium", device);
2641         goto out;
2642     }
2643 
2644     throttle_config_init(&cfg);
2645     cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps;
2646     cfg.buckets[THROTTLE_BPS_READ].avg  = bps_rd;
2647     cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr;
2648 
2649     cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops;
2650     cfg.buckets[THROTTLE_OPS_READ].avg  = iops_rd;
2651     cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr;
2652 
2653     if (has_bps_max) {
2654         cfg.buckets[THROTTLE_BPS_TOTAL].max = bps_max;
2655     }
2656     if (has_bps_rd_max) {
2657         cfg.buckets[THROTTLE_BPS_READ].max = bps_rd_max;
2658     }
2659     if (has_bps_wr_max) {
2660         cfg.buckets[THROTTLE_BPS_WRITE].max = bps_wr_max;
2661     }
2662     if (has_iops_max) {
2663         cfg.buckets[THROTTLE_OPS_TOTAL].max = iops_max;
2664     }
2665     if (has_iops_rd_max) {
2666         cfg.buckets[THROTTLE_OPS_READ].max = iops_rd_max;
2667     }
2668     if (has_iops_wr_max) {
2669         cfg.buckets[THROTTLE_OPS_WRITE].max = iops_wr_max;
2670     }
2671 
2672     if (has_bps_max_length) {
2673         cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = bps_max_length;
2674     }
2675     if (has_bps_rd_max_length) {
2676         cfg.buckets[THROTTLE_BPS_READ].burst_length = bps_rd_max_length;
2677     }
2678     if (has_bps_wr_max_length) {
2679         cfg.buckets[THROTTLE_BPS_WRITE].burst_length = bps_wr_max_length;
2680     }
2681     if (has_iops_max_length) {
2682         cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = iops_max_length;
2683     }
2684     if (has_iops_rd_max_length) {
2685         cfg.buckets[THROTTLE_OPS_READ].burst_length = iops_rd_max_length;
2686     }
2687     if (has_iops_wr_max_length) {
2688         cfg.buckets[THROTTLE_OPS_WRITE].burst_length = iops_wr_max_length;
2689     }
2690 
2691     if (has_iops_size) {
2692         cfg.op_size = iops_size;
2693     }
2694 
2695     if (!throttle_is_valid(&cfg, errp)) {
2696         goto out;
2697     }
2698 
2699     if (throttle_enabled(&cfg)) {
2700         /* Enable I/O limits if they're not enabled yet, otherwise
2701          * just update the throttling group. */
2702         if (!bs->throttle_state) {
2703             bdrv_io_limits_enable(bs, has_group ? group : device);
2704         } else if (has_group) {
2705             bdrv_io_limits_update_group(bs, group);
2706         }
2707         /* Set the new throttling configuration */
2708         bdrv_set_io_limits(bs, &cfg);
2709     } else if (bs->throttle_state) {
2710         /* If all throttling settings are set to 0, disable I/O limits */
2711         bdrv_io_limits_disable(bs);
2712     }
2713 
2714 out:
2715     aio_context_release(aio_context);
2716 }
2717 
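     /*
      * Create a named dirty bitmap on a node.  The granularity, if given, must
      * be a power of two and at least 512 bytes; when omitted, the block
      * driver's default bitmap granularity (typically the cluster size) is
      * used.
      */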
2718 void qmp_block_dirty_bitmap_add(const char *node, const char *name,
2719                                 bool has_granularity, uint32_t granularity,
2720                                 Error **errp)
2721 {
2722     AioContext *aio_context;
2723     BlockDriverState *bs;
2724 
2725     if (!name || name[0] == '\0') {
2726         error_setg(errp, "Bitmap name cannot be empty");
2727         return;
2728     }
2729 
2730     bs = bdrv_lookup_bs(node, node, errp);
2731     if (!bs) {
2732         return;
2733     }
2734 
2735     aio_context = bdrv_get_aio_context(bs);
2736     aio_context_acquire(aio_context);
2737 
2738     if (has_granularity) {
2739         if (granularity < 512 || !is_power_of_2(granularity)) {
2740             error_setg(errp, "Granularity must be power of 2 "
2741                              "and at least 512");
2742             goto out;
2743         }
2744     } else {
2745         /* Default to cluster size, if available: */
2746         granularity = bdrv_get_default_bitmap_granularity(bs);
2747     }
2748 
2749     bdrv_create_dirty_bitmap(bs, granularity, name, errp);
2750 
2751  out:
2752     aio_context_release(aio_context);
2753 }
2754 
2755 void qmp_block_dirty_bitmap_remove(const char *node, const char *name,
2756                                    Error **errp)
2757 {
2758     AioContext *aio_context;
2759     BlockDriverState *bs;
2760     BdrvDirtyBitmap *bitmap;
2761 
2762     bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
2763     if (!bitmap || !bs) {
2764         return;
2765     }
2766 
2767     if (bdrv_dirty_bitmap_frozen(bitmap)) {
2768         error_setg(errp,
2769                    "Bitmap '%s' is currently frozen and cannot be removed",
2770                    name);
2771         goto out;
2772     }
2773     bdrv_dirty_bitmap_make_anon(bitmap);
2774     bdrv_release_dirty_bitmap(bs, bitmap);
2775 
2776  out:
2777     aio_context_release(aio_context);
2778 }
2779 
2780 /**
2781  * Completely clear a bitmap, for the purposes of synchronizing a bitmap
2782  * immediately after a full backup operation.
2783  */
2784 void qmp_block_dirty_bitmap_clear(const char *node, const char *name,
2785                                   Error **errp)
2786 {
2787     AioContext *aio_context;
2788     BdrvDirtyBitmap *bitmap;
2789     BlockDriverState *bs;
2790 
2791     bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
2792     if (!bitmap || !bs) {
2793         return;
2794     }
2795 
2796     if (bdrv_dirty_bitmap_frozen(bitmap)) {
2797         error_setg(errp,
2798                    "Bitmap '%s' is currently frozen and cannot be modified",
2799                    name);
2800         goto out;
2801     } else if (!bdrv_dirty_bitmap_enabled(bitmap)) {
2802         error_setg(errp,
2803                    "Bitmap '%s' is currently disabled and cannot be cleared",
2804                    name);
2805         goto out;
2806     }
2807 
2808     bdrv_clear_dirty_bitmap(bitmap, NULL);
2809 
2810  out:
2811     aio_context_release(aio_context);
2812 }
2813 
2814 void hmp_drive_del(Monitor *mon, const QDict *qdict)
2815 {
2816     const char *id = qdict_get_str(qdict, "id");
2817     BlockBackend *blk;
2818     BlockDriverState *bs;
2819     AioContext *aio_context;
2820     Error *local_err = NULL;
2821 
2822     blk = blk_by_name(id);
2823     if (!blk) {
2824         error_report("Device '%s' not found", id);
2825         return;
2826     }
2827 
2828     if (!blk_legacy_dinfo(blk)) {
2829         error_report("Deleting device added with blockdev-add"
2830                      " is not supported");
2831         return;
2832     }
2833 
2834     aio_context = blk_get_aio_context(blk);
2835     aio_context_acquire(aio_context);
2836 
2837     bs = blk_bs(blk);
2838     if (bs) {
2839         if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) {
2840             error_report_err(local_err);
2841             aio_context_release(aio_context);
2842             return;
2843         }
2844 
2845         blk_remove_bs(blk);
2846     }
2847 
2848     /* if we have a device attached to this BlockDriverState
2849      * then we need to make the drive anonymous until the device
2850      * can be removed.  If this is a drive with no device backing
2851      * then we can just get rid of the block driver state right here.
2852      */
2853     if (blk_get_attached_dev(blk)) {
2854         blk_hide_on_behalf_of_hmp_drive_del(blk);
2855         /* Further I/O must not pause the guest */
2856         blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT,
2857                          BLOCKDEV_ON_ERROR_REPORT);
2858     } else {
2859         blk_unref(blk);
2860     }
2861 
2862     aio_context_release(aio_context);
2863 }
2864 
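     /*
      * Truncate a block device to @size bytes.  All in-flight I/O is drained
      * first, and errors returned by bdrv_truncate() are mapped onto the
      * corresponding QMP error messages below.
      */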
2865 void qmp_block_resize(bool has_device, const char *device,
2866                       bool has_node_name, const char *node_name,
2867                       int64_t size, Error **errp)
2868 {
2869     Error *local_err = NULL;
2870     BlockDriverState *bs;
2871     AioContext *aio_context;
2872     int ret;
2873 
2874     bs = bdrv_lookup_bs(has_device ? device : NULL,
2875                         has_node_name ? node_name : NULL,
2876                         &local_err);
2877     if (local_err) {
2878         error_propagate(errp, local_err);
2879         return;
2880     }
2881 
2882     aio_context = bdrv_get_aio_context(bs);
2883     aio_context_acquire(aio_context);
2884 
2885     if (!bdrv_is_first_non_filter(bs)) {
2886         error_setg(errp, QERR_FEATURE_DISABLED, "resize");
2887         goto out;
2888     }
2889 
2890     if (size < 0) {
2891         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
2892         goto out;
2893     }
2894 
2895     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
2896         error_setg(errp, QERR_DEVICE_IN_USE, device);
2897         goto out;
2898     }
2899 
2900     /* complete all in-flight operations before resizing the device */
2901     bdrv_drain_all();
2902 
2903     ret = bdrv_truncate(bs, size);
2904     switch (ret) {
2905     case 0:
2906         break;
2907     case -ENOMEDIUM:
2908         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
2909         break;
2910     case -ENOTSUP:
2911         error_setg(errp, QERR_UNSUPPORTED);
2912         break;
2913     case -EACCES:
2914         error_setg(errp, "Device '%s' is read only", device);
2915         break;
2916     case -EBUSY:
2917         error_setg(errp, QERR_DEVICE_IN_USE, device);
2918         break;
2919     default:
2920         error_setg_errno(errp, -ret, "Could not resize");
2921         break;
2922     }
2923 
2924 out:
2925     aio_context_release(aio_context);
2926 }
2927 
2928 static void block_job_cb(void *opaque, int ret)
2929 {
2930     /* Note that this function may be executed from another AioContext besides
2931      * the QEMU main loop.  If you need to access anything that assumes the
2932      * QEMU global mutex, use a BH or introduce a mutex.
2933      */
2934 
2935     BlockDriverState *bs = opaque;
2936     const char *msg = NULL;
2937 
2938     trace_block_job_cb(bs, bs->job, ret);
2939 
2940     assert(bs->job);
2941 
2942     if (ret < 0) {
2943         msg = strerror(-ret);
2944     }
2945 
2946     if (block_job_is_cancelled(bs->job)) {
2947         block_job_event_cancelled(bs->job);
2948     } else {
2949         block_job_event_completed(bs->job, msg);
2950     }
2951 }
2952 
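     /*
      * Start a background 'stream' job that copies data from the backing chain
      * into the active image, so that on completion the chain below the
      * optional base image is no longer needed.  A hedged QMP sketch with
      * placeholder values:
      *
      *   { "execute": "block-stream",
      *     "arguments": { "device": "drive0", "base": "/path/to/base.qcow2" } }
      */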
2953 void qmp_block_stream(const char *device,
2954                       bool has_base, const char *base,
2955                       bool has_backing_file, const char *backing_file,
2956                       bool has_speed, int64_t speed,
2957                       bool has_on_error, BlockdevOnError on_error,
2958                       Error **errp)
2959 {
2960     BlockBackend *blk;
2961     BlockDriverState *bs;
2962     BlockDriverState *base_bs = NULL;
2963     AioContext *aio_context;
2964     Error *local_err = NULL;
2965     const char *base_name = NULL;
2966 
2967     if (!has_on_error) {
2968         on_error = BLOCKDEV_ON_ERROR_REPORT;
2969     }
2970 
2971     blk = blk_by_name(device);
2972     if (!blk) {
2973         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2974                   "Device '%s' not found", device);
2975         return;
2976     }
2977 
2978     aio_context = blk_get_aio_context(blk);
2979     aio_context_acquire(aio_context);
2980 
2981     if (!blk_is_available(blk)) {
2982         error_setg(errp, "Device '%s' has no medium", device);
2983         goto out;
2984     }
2985     bs = blk_bs(blk);
2986 
2987     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_STREAM, errp)) {
2988         goto out;
2989     }
2990 
2991     if (has_base) {
2992         base_bs = bdrv_find_backing_image(bs, base);
2993         if (base_bs == NULL) {
2994             error_setg(errp, QERR_BASE_NOT_FOUND, base);
2995             goto out;
2996         }
2997         assert(bdrv_get_aio_context(base_bs) == aio_context);
2998         base_name = base;
2999     }
3000 
3001     /* if we are streaming the entire chain, the result will have no backing
3002      * file, and specifying one is therefore an error */
3003     if (base_bs == NULL && has_backing_file) {
3004         error_setg(errp, "backing file specified, but streaming the "
3005                          "entire chain");
3006         goto out;
3007     }
3008 
3009     /* backing_file string overrides base bs filename */
3010     base_name = has_backing_file ? backing_file : base_name;
3011 
3012     stream_start(bs, base_bs, base_name, has_speed ? speed : 0,
3013                  on_error, block_job_cb, bs, &local_err);
3014     if (local_err) {
3015         error_propagate(errp, local_err);
3016         goto out;
3017     }
3018 
3019     trace_qmp_block_stream(bs, bs->job);
3020 
3021 out:
3022     aio_context_release(aio_context);
3023 }
3024 
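     /*
      * Start a background 'commit' job that merges the data of 'top' (the
      * active layer by default) down into 'base'.  A hedged QMP sketch with
      * placeholder filenames:
      *
      *   { "execute": "block-commit",
      *     "arguments": { "device": "drive0", "top": "/path/to/snap1.qcow2" } }
      */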
3025 void qmp_block_commit(const char *device,
3026                       bool has_base, const char *base,
3027                       bool has_top, const char *top,
3028                       bool has_backing_file, const char *backing_file,
3029                       bool has_speed, int64_t speed,
3030                       Error **errp)
3031 {
3032     BlockBackend *blk;
3033     BlockDriverState *bs;
3034     BlockDriverState *base_bs, *top_bs;
3035     AioContext *aio_context;
3036     Error *local_err = NULL;
3037     /* This will be part of the QMP command, if/when the
3038      * BlockdevOnError change for blkmirror makes it in
3039      */
3040     BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
3041 
3042     if (!has_speed) {
3043         speed = 0;
3044     }
3045 
3046     /* Important Note:
3047      *  libvirt relies on the DeviceNotFound error class in order to probe for
3048      *  live commit feature versions; for this to work, we must make sure to
3049      *  perform the device lookup before any generic errors that may occur in a
3050      *  scenario in which all optional arguments are omitted. */
3051     blk = blk_by_name(device);
3052     if (!blk) {
3053         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3054                   "Device '%s' not found", device);
3055         return;
3056     }
3057 
3058     aio_context = blk_get_aio_context(blk);
3059     aio_context_acquire(aio_context);
3060 
3061     if (!blk_is_available(blk)) {
3062         error_setg(errp, "Device '%s' has no medium", device);
3063         goto out;
3064     }
3065     bs = blk_bs(blk);
3066 
3067     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) {
3068         goto out;
3069     }
3070 
3071     /* default top_bs is the active layer */
3072     top_bs = bs;
3073 
3074     if (has_top && top) {
3075         if (strcmp(bs->filename, top) != 0) {
3076             top_bs = bdrv_find_backing_image(bs, top);
3077         }
3078     }
3079 
3080     if (top_bs == NULL) {
3081         error_setg(errp, "Top image file %s not found", top ? top : "NULL");
3082         goto out;
3083     }
3084 
3085     assert(bdrv_get_aio_context(top_bs) == aio_context);
3086 
3087     if (has_base && base) {
3088         base_bs = bdrv_find_backing_image(top_bs, base);
3089     } else {
3090         base_bs = bdrv_find_base(top_bs);
3091     }
3092 
3093     if (base_bs == NULL) {
3094         error_setg(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL");
3095         goto out;
3096     }
3097 
3098     assert(bdrv_get_aio_context(base_bs) == aio_context);
3099 
3100     if (bdrv_op_is_blocked(base_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
3101         goto out;
3102     }
3103 
3104     /* Do not allow attempts to commit an image into itself */
3105     if (top_bs == base_bs) {
3106         error_setg(errp, "cannot commit an image into itself");
3107         goto out;
3108     }
3109 
3110     if (top_bs == bs) {
3111         if (has_backing_file) {
3112             error_setg(errp, "'backing-file' specified,"
3113                              " but 'top' is the active layer");
3114             goto out;
3115         }
3116         commit_active_start(bs, base_bs, speed, on_error, block_job_cb,
3117                             bs, &local_err);
3118     } else {
3119         commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs,
3120                      has_backing_file ? backing_file : NULL, &local_err);
3121     }
3122     if (local_err != NULL) {
3123         error_propagate(errp, local_err);
3124         goto out;
3125     }
3126 
3127 out:
3128     aio_context_release(aio_context);
3129 }
3130 
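     /*
      * Common implementation of drive-backup, used both for the stand-alone
      * QMP command and for the transaction action.  With sync mode 'top' the
      * target image is created on top of the device's current backing file
      * (falling back to 'full' if there is none); with sync mode 'none' it is
      * created on top of the device itself.  Unless mode is 'existing', the
      * target image is created before the backup job is started.
      */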
3131 static void do_drive_backup(const char *device, const char *target,
3132                             bool has_format, const char *format,
3133                             enum MirrorSyncMode sync,
3134                             bool has_mode, enum NewImageMode mode,
3135                             bool has_speed, int64_t speed,
3136                             bool has_bitmap, const char *bitmap,
3137                             bool has_on_source_error,
3138                             BlockdevOnError on_source_error,
3139                             bool has_on_target_error,
3140                             BlockdevOnError on_target_error,
3141                             BlockJobTxn *txn, Error **errp)
3142 {
3143     BlockBackend *blk;
3144     BlockDriverState *bs;
3145     BlockDriverState *target_bs;
3146     BlockDriverState *source = NULL;
3147     BdrvDirtyBitmap *bmap = NULL;
3148     AioContext *aio_context;
3149     QDict *options = NULL;
3150     Error *local_err = NULL;
3151     int flags;
3152     int64_t size;
3153     int ret;
3154 
3155     if (!has_speed) {
3156         speed = 0;
3157     }
3158     if (!has_on_source_error) {
3159         on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3160     }
3161     if (!has_on_target_error) {
3162         on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3163     }
3164     if (!has_mode) {
3165         mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
3166     }
3167 
3168     blk = blk_by_name(device);
3169     if (!blk) {
3170         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3171                   "Device '%s' not found", device);
3172         return;
3173     }
3174 
3175     aio_context = blk_get_aio_context(blk);
3176     aio_context_acquire(aio_context);
3177 
3178     /* Although backup_run performs this check as well, we need to use bs->drv
3179      * below, so do the check early here even though it is redundant. */
3180     if (!blk_is_available(blk)) {
3181         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
3182         goto out;
3183     }
3184     bs = blk_bs(blk);
3185 
3186     if (!has_format) {
3187         format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name;
3188     }
3189 
3190     /* Early check to avoid creating target */
3191     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
3192         goto out;
3193     }
3194 
3195     flags = bs->open_flags | BDRV_O_RDWR;
3196 
3197     /* See if we have a backing HD we can use to create our new image
3198      * on top of. */
3199     if (sync == MIRROR_SYNC_MODE_TOP) {
3200         source = backing_bs(bs);
3201         if (!source) {
3202             sync = MIRROR_SYNC_MODE_FULL;
3203         }
3204     }
3205     if (sync == MIRROR_SYNC_MODE_NONE) {
3206         source = bs;
3207     }
3208 
3209     size = bdrv_getlength(bs);
3210     if (size < 0) {
3211         error_setg_errno(errp, -size, "bdrv_getlength failed");
3212         goto out;
3213     }
3214 
3215     if (mode != NEW_IMAGE_MODE_EXISTING) {
3216         assert(format);
3217         if (source) {
3218             bdrv_img_create(target, format, source->filename,
3219                             source->drv->format_name, NULL,
3220                             size, flags, &local_err, false);
3221         } else {
3222             bdrv_img_create(target, format, NULL, NULL, NULL,
3223                             size, flags, &local_err, false);
3224         }
3225     }
3226 
3227     if (local_err) {
3228         error_propagate(errp, local_err);
3229         goto out;
3230     }
3231 
3232     if (format) {
3233         options = qdict_new();
3234         qdict_put(options, "driver", qstring_from_str(format));
3235     }
3236 
3237     target_bs = NULL;
3238     ret = bdrv_open(&target_bs, target, NULL, options, flags, &local_err);
3239     if (ret < 0) {
3240         error_propagate(errp, local_err);
3241         goto out;
3242     }
3243 
3244     bdrv_set_aio_context(target_bs, aio_context);
3245 
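         /* Look up the dirty bitmap by name if the caller supplied one and hand it
          * on to backup_start(). */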
3246     if (has_bitmap) {
3247         bmap = bdrv_find_dirty_bitmap(bs, bitmap);
3248         if (!bmap) {
3249             error_setg(errp, "Bitmap '%s' could not be found", bitmap);
3250             bdrv_unref(target_bs);
3251             goto out;
3252         }
3253     }
3254 
3255     backup_start(bs, target_bs, speed, sync, bmap,
3256                  on_source_error, on_target_error,
3257                  block_job_cb, bs, txn, &local_err);
3258     if (local_err != NULL) {
3259         bdrv_unref(target_bs);
3260         error_propagate(errp, local_err);
3261         goto out;
3262     }
3263 
3264 out:
3265     aio_context_release(aio_context);
3266 }
3267 
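     /*
      * QMP entry point for drive-backup.  An illustrative invocation (device name
      * and target path are examples only):
      *   { "execute": "drive-backup",
      *     "arguments": { "device": "drive0", "target": "/tmp/backup.qcow2",
      *                    "format": "qcow2", "sync": "full" } }
      */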
3268 void qmp_drive_backup(const char *device, const char *target,
3269                       bool has_format, const char *format,
3270                       enum MirrorSyncMode sync,
3271                       bool has_mode, enum NewImageMode mode,
3272                       bool has_speed, int64_t speed,
3273                       bool has_bitmap, const char *bitmap,
3274                       bool has_on_source_error, BlockdevOnError on_source_error,
3275                       bool has_on_target_error, BlockdevOnError on_target_error,
3276                       Error **errp)
3277 {
3278     return do_drive_backup(device, target, has_format, format, sync,
3279                            has_mode, mode, has_speed, speed,
3280                            has_bitmap, bitmap,
3281                            has_on_source_error, on_source_error,
3282                            has_on_target_error, on_target_error,
3283                            NULL, errp);
3284 }
3285 
3286 BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
3287 {
3288     return bdrv_named_nodes_list(errp);
3289 }
3290 
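     /*
      * Like do_drive_backup(), but the backup target is an existing, named block
      * backend rather than an image file to be created.
      */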
3291 void do_blockdev_backup(const char *device, const char *target,
3292                          enum MirrorSyncMode sync,
3293                          bool has_speed, int64_t speed,
3294                          bool has_on_source_error,
3295                          BlockdevOnError on_source_error,
3296                          bool has_on_target_error,
3297                          BlockdevOnError on_target_error,
3298                          BlockJobTxn *txn, Error **errp)
3299 {
3300     BlockBackend *blk, *target_blk;
3301     BlockDriverState *bs;
3302     BlockDriverState *target_bs;
3303     Error *local_err = NULL;
3304     AioContext *aio_context;
3305 
3306     if (!has_speed) {
3307         speed = 0;
3308     }
3309     if (!has_on_source_error) {
3310         on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3311     }
3312     if (!has_on_target_error) {
3313         on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3314     }
3315 
3316     blk = blk_by_name(device);
3317     if (!blk) {
3318         error_setg(errp, "Device '%s' not found", device);
3319         return;
3320     }
3321 
3322     aio_context = blk_get_aio_context(blk);
3323     aio_context_acquire(aio_context);
3324 
3325     if (!blk_is_available(blk)) {
3326         error_setg(errp, "Device '%s' has no medium", device);
3327         goto out;
3328     }
3329     bs = blk_bs(blk);
3330 
3331     target_blk = blk_by_name(target);
3332     if (!target_blk) {
3333         error_setg(errp, "Device '%s' not found", target);
3334         goto out;
3335     }
3336 
3337     if (!blk_is_available(target_blk)) {
3338         error_setg(errp, "Device '%s' has no medium", target);
3339         goto out;
3340     }
3341     target_bs = blk_bs(target_blk);
3342 
3343     bdrv_ref(target_bs);
3344     bdrv_set_aio_context(target_bs, aio_context);
3345     backup_start(bs, target_bs, speed, sync, NULL, on_source_error,
3346                  on_target_error, block_job_cb, bs, txn, &local_err);
3347     if (local_err != NULL) {
3348         bdrv_unref(target_bs);
3349         error_propagate(errp, local_err);
3350     }
3351 out:
3352     aio_context_release(aio_context);
3353 }
3354 
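     /*
      * Illustrative QMP invocation (backend names are examples only):
      *   { "execute": "blockdev-backup",
      *     "arguments": { "device": "drive0", "target": "backup0", "sync": "full" } }
      */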
3355 void qmp_blockdev_backup(const char *device, const char *target,
3356                          enum MirrorSyncMode sync,
3357                          bool has_speed, int64_t speed,
3358                          bool has_on_source_error,
3359                          BlockdevOnError on_source_error,
3360                          bool has_on_target_error,
3361                          BlockdevOnError on_target_error,
3362                          Error **errp)
3363 {
3364     do_blockdev_backup(device, target, sync, has_speed, speed,
3365                        has_on_source_error, on_source_error,
3366                        has_on_target_error, on_target_error,
3367                        NULL, errp);
3368 }
3369 
3370 /* Parameter checking and block job creation for drive mirroring.
3371  * The caller must hold the AioContext of both @device and @target (which must
3372  * be the same). */
3373 static void blockdev_mirror_common(BlockDriverState *bs,
3374                                    BlockDriverState *target,
3375                                    bool has_replaces, const char *replaces,
3376                                    enum MirrorSyncMode sync,
3377                                    bool has_speed, int64_t speed,
3378                                    bool has_granularity, uint32_t granularity,
3379                                    bool has_buf_size, int64_t buf_size,
3380                                    bool has_on_source_error,
3381                                    BlockdevOnError on_source_error,
3382                                    bool has_on_target_error,
3383                                    BlockdevOnError on_target_error,
3384                                    bool has_unmap, bool unmap,
3385                                    Error **errp)
3386 {
3387 
3388     if (!has_speed) {
3389         speed = 0;
3390     }
3391     if (!has_on_source_error) {
3392         on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3393     }
3394     if (!has_on_target_error) {
3395         on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3396     }
3397     if (!has_granularity) {
3398         granularity = 0;
3399     }
3400     if (!has_buf_size) {
3401         buf_size = 0;
3402     }
3403     if (!has_unmap) {
3404         unmap = true;
3405     }
3406 
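         /* An explicit granularity must be a power of two between 512 bytes and
          * 64 MiB; zero leaves the choice to mirror_start(). */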
3407     if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
3408         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
3409                    "a value in range [512B, 64MB]");
3410         return;
3411     }
3412     if (granularity & (granularity - 1)) {
3413         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
3414                    "power of 2");
3415         return;
3416     }
3417 
3418     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
3419         return;
3420     }
3421     if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) {
3422         return;
3423     }
3424     if (target->blk) {
3425         error_setg(errp, "Cannot mirror to an attached block device");
3426         return;
3427     }
3428 
3429     if (!bs->backing && sync == MIRROR_SYNC_MODE_TOP) {
3430         sync = MIRROR_SYNC_MODE_FULL;
3431     }
3432 
3433     /* Pass the node name to replace to mirror_start: the coupling is loose, and
3434      * this allows checking whether the node still exists at mirror completion.
3435      */
3436     mirror_start(bs, target,
3437                  has_replaces ? replaces : NULL,
3438                  speed, granularity, buf_size, sync,
3439                  on_source_error, on_target_error, unmap,
3440                  block_job_cb, bs, errp);
3441 }
3442 
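     /*
      * Illustrative QMP invocation (device name and target path are examples only):
      *   { "execute": "drive-mirror",
      *     "arguments": { "device": "drive0", "target": "/tmp/mirror.qcow2",
      *                    "format": "qcow2", "sync": "full" } }
      */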
3443 void qmp_drive_mirror(const char *device, const char *target,
3444                       bool has_format, const char *format,
3445                       bool has_node_name, const char *node_name,
3446                       bool has_replaces, const char *replaces,
3447                       enum MirrorSyncMode sync,
3448                       bool has_mode, enum NewImageMode mode,
3449                       bool has_speed, int64_t speed,
3450                       bool has_granularity, uint32_t granularity,
3451                       bool has_buf_size, int64_t buf_size,
3452                       bool has_on_source_error, BlockdevOnError on_source_error,
3453                       bool has_on_target_error, BlockdevOnError on_target_error,
3454                       bool has_unmap, bool unmap,
3455                       Error **errp)
3456 {
3457     BlockDriverState *bs;
3458     BlockBackend *blk;
3459     BlockDriverState *source, *target_bs;
3460     AioContext *aio_context;
3461     Error *local_err = NULL;
3462     QDict *options = NULL;
3463     int flags;
3464     int64_t size;
3465     int ret;
3466 
3467     blk = blk_by_name(device);
3468     if (!blk) {
3469         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3470                   "Device '%s' not found", device);
3471         return;
3472     }
3473 
3474     aio_context = blk_get_aio_context(blk);
3475     aio_context_acquire(aio_context);
3476 
3477     if (!blk_is_available(blk)) {
3478         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
3479         goto out;
3480     }
3481     bs = blk_bs(blk);
3482     if (!has_mode) {
3483         mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
3484     }
3485 
3486     if (!has_format) {
3487         format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name;
3488     }
3489 
3490     flags = bs->open_flags | BDRV_O_RDWR;
3491     source = backing_bs(bs);
3492     if (!source && sync == MIRROR_SYNC_MODE_TOP) {
3493         sync = MIRROR_SYNC_MODE_FULL;
3494     }
3495     if (sync == MIRROR_SYNC_MODE_NONE) {
3496         source = bs;
3497     }
3498 
3499     size = bdrv_getlength(bs);
3500     if (size < 0) {
3501         error_setg_errno(errp, -size, "bdrv_getlength failed");
3502         goto out;
3503     }
3504 
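         /* When the mirror is meant to replace a named node, that node must exist
          * and its size must match the source exactly. */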
3505     if (has_replaces) {
3506         BlockDriverState *to_replace_bs;
3507         AioContext *replace_aio_context;
3508         int64_t replace_size;
3509 
3510         if (!has_node_name) {
3511             error_setg(errp, "a node-name must be provided when replacing a"
3512                              " named node of the graph");
3513             goto out;
3514         }
3515 
3516         to_replace_bs = check_to_replace_node(bs, replaces, &local_err);
3517 
3518         if (!to_replace_bs) {
3519             error_propagate(errp, local_err);
3520             goto out;
3521         }
3522 
3523         replace_aio_context = bdrv_get_aio_context(to_replace_bs);
3524         aio_context_acquire(replace_aio_context);
3525         replace_size = bdrv_getlength(to_replace_bs);
3526         aio_context_release(replace_aio_context);
3527 
3528         if (size != replace_size) {
3529             error_setg(errp, "cannot replace image with a mirror image of "
3530                              "different size");
3531             goto out;
3532         }
3533     }
3534 
3535     if ((sync == MIRROR_SYNC_MODE_FULL || !source)
3536         && mode != NEW_IMAGE_MODE_EXISTING)
3537     {
3538         /* create new image w/o backing file */
3539         assert(format);
3540         bdrv_img_create(target, format,
3541                         NULL, NULL, NULL, size, flags, &local_err, false);
3542     } else {
3543         switch (mode) {
3544         case NEW_IMAGE_MODE_EXISTING:
3545             break;
3546         case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
3547             /* create new image with backing file */
3548             bdrv_img_create(target, format,
3549                             source->filename,
3550                             source->drv->format_name,
3551                             NULL, size, flags, &local_err, false);
3552             break;
3553         default:
3554             abort();
3555         }
3556     }
3557 
3558     if (local_err) {
3559         error_propagate(errp, local_err);
3560         goto out;
3561     }
3562 
3563     options = qdict_new();
3564     if (has_node_name) {
3565         qdict_put(options, "node-name", qstring_from_str(node_name));
3566     }
3567     if (format) {
3568         qdict_put(options, "driver", qstring_from_str(format));
3569     }
3570 
3571     /* Mirroring takes care of copy-on-write using the source's backing
3572      * file.
3573      */
3574     target_bs = NULL;
3575     ret = bdrv_open(&target_bs, target, NULL, options,
3576                     flags | BDRV_O_NO_BACKING, &local_err);
3577     if (ret < 0) {
3578         error_propagate(errp, local_err);
3579         goto out;
3580     }
3581 
3582     bdrv_set_aio_context(target_bs, aio_context);
3583 
3584     blockdev_mirror_common(bs, target_bs,
3585                            has_replaces, replaces, sync,
3586                            has_speed, speed,
3587                            has_granularity, granularity,
3588                            has_buf_size, buf_size,
3589                            has_on_source_error, on_source_error,
3590                            has_on_target_error, on_target_error,
3591                            has_unmap, unmap,
3592                            &local_err);
3593     if (local_err) {
3594         error_propagate(errp, local_err);
3595         bdrv_unref(target_bs);
3596     }
3597 out:
3598     aio_context_release(aio_context);
3599 }
3600 
3601 void qmp_blockdev_mirror(const char *device, const char *target,
3602                          bool has_replaces, const char *replaces,
3603                          MirrorSyncMode sync,
3604                          bool has_speed, int64_t speed,
3605                          bool has_granularity, uint32_t granularity,
3606                          bool has_buf_size, int64_t buf_size,
3607                          bool has_on_source_error,
3608                          BlockdevOnError on_source_error,
3609                          bool has_on_target_error,
3610                          BlockdevOnError on_target_error,
3611                          Error **errp)
3612 {
3613     BlockDriverState *bs;
3614     BlockBackend *blk;
3615     BlockDriverState *target_bs;
3616     AioContext *aio_context;
3617     Error *local_err = NULL;
3618 
3619     blk = blk_by_name(device);
3620     if (!blk) {
3621         error_setg(errp, "Device '%s' not found", device);
3622         return;
3623     }
3624     bs = blk_bs(blk);
3625 
3626     if (!bs) {
3627         error_setg(errp, "Device '%s' has no medium", device);
3628         return;
3629     }
3630 
3631     target_bs = bdrv_lookup_bs(target, target, errp);
3632     if (!target_bs) {
3633         return;
3634     }
3635 
3636     aio_context = bdrv_get_aio_context(bs);
3637     aio_context_acquire(aio_context);
3638 
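         /* Grab a reference to the target and move it into the source's AioContext
          * before handing both nodes to the common mirror setup code. */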
3639     bdrv_ref(target_bs);
3640     bdrv_set_aio_context(target_bs, aio_context);
3641 
3642     blockdev_mirror_common(bs, target_bs,
3643                            has_replaces, replaces, sync,
3644                            has_speed, speed,
3645                            has_granularity, granularity,
3646                            has_buf_size, buf_size,
3647                            has_on_source_error, on_source_error,
3648                            has_on_target_error, on_target_error,
3649                            true, true,
3650                            &local_err);
3651     if (local_err) {
3652         error_propagate(errp, local_err);
3653         bdrv_unref(target_bs);
3654     }
3655 
3656     aio_context_release(aio_context);
3657 }
3658 
3659 /* Get the block job for a given device name and acquire its AioContext */
3660 static BlockJob *find_block_job(const char *device, AioContext **aio_context,
3661                                 Error **errp)
3662 {
3663     BlockBackend *blk;
3664     BlockDriverState *bs;
3665 
3666     *aio_context = NULL;
3667 
3668     blk = blk_by_name(device);
3669     if (!blk) {
3670         goto notfound;
3671     }
3672 
3673     *aio_context = blk_get_aio_context(blk);
3674     aio_context_acquire(*aio_context);
3675 
3676     if (!blk_is_available(blk)) {
3677         goto notfound;
3678     }
3679     bs = blk_bs(blk);
3680 
3681     if (!bs->job) {
3682         goto notfound;
3683     }
3684 
3685     return bs->job;
3686 
3687 notfound:
3688     error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
3689               "No active block job on device '%s'", device);
3690     if (*aio_context) {
3691         aio_context_release(*aio_context);
3692         *aio_context = NULL;
3693     }
3694     return NULL;
3695 }
3696 
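     /* The block-job-* QMP handlers below resolve the job with find_block_job(),
      * which also acquires the job's AioContext; each handler must release it
      * again before returning. */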
3697 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
3698 {
3699     AioContext *aio_context;
3700     BlockJob *job = find_block_job(device, &aio_context, errp);
3701 
3702     if (!job) {
3703         return;
3704     }
3705 
3706     block_job_set_speed(job, speed, errp);
3707     aio_context_release(aio_context);
3708 }
3709 
3710 void qmp_block_job_cancel(const char *device,
3711                           bool has_force, bool force, Error **errp)
3712 {
3713     AioContext *aio_context;
3714     BlockJob *job = find_block_job(device, &aio_context, errp);
3715 
3716     if (!job) {
3717         return;
3718     }
3719 
3720     if (!has_force) {
3721         force = false;
3722     }
3723 
3724     if (job->user_paused && !force) {
3725         error_setg(errp, "The block job for device '%s' is currently paused",
3726                    device);
3727         goto out;
3728     }
3729 
3730     trace_qmp_block_job_cancel(job);
3731     block_job_cancel(job);
3732 out:
3733     aio_context_release(aio_context);
3734 }
3735 
3736 void qmp_block_job_pause(const char *device, Error **errp)
3737 {
3738     AioContext *aio_context;
3739     BlockJob *job = find_block_job(device, &aio_context, errp);
3740 
3741     if (!job) {
3742         return;
3743     }
         if (job->user_paused) {
             aio_context_release(aio_context);
             return;
         }
3744 
3745     job->user_paused = true;
3746     trace_qmp_block_job_pause(job);
3747     block_job_pause(job);
3748     aio_context_release(aio_context);
3749 }
3750 
3751 void qmp_block_job_resume(const char *device, Error **errp)
3752 {
3753     AioContext *aio_context;
3754     BlockJob *job = find_block_job(device, &aio_context, errp);
3755 
3756     if (!job) {
3757         return;
3758     }
         if (!job->user_paused) {
             aio_context_release(aio_context);
             return;
         }
3759 
3760     job->user_paused = false;
3761     trace_qmp_block_job_resume(job);
3762     block_job_resume(job);
3763     aio_context_release(aio_context);
3764 }
3765 
3766 void qmp_block_job_complete(const char *device, Error **errp)
3767 {
3768     AioContext *aio_context;
3769     BlockJob *job = find_block_job(device, &aio_context, errp);
3770 
3771     if (!job) {
3772         return;
3773     }
3774 
3775     trace_qmp_block_job_complete(job);
3776     block_job_complete(job, errp);
3777     aio_context_release(aio_context);
3778 }
3779 
3780 void qmp_change_backing_file(const char *device,
3781                              const char *image_node_name,
3782                              const char *backing_file,
3783                              Error **errp)
3784 {
3785     BlockBackend *blk;
3786     BlockDriverState *bs = NULL;
3787     AioContext *aio_context;
3788     BlockDriverState *image_bs = NULL;
3789     Error *local_err = NULL;
3790     bool ro;
3791     int open_flags;
3792     int ret;
3793 
3794     blk = blk_by_name(device);
3795     if (!blk) {
3796         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3797                   "Device '%s' not found", device);
3798         return;
3799     }
3800 
3801     aio_context = blk_get_aio_context(blk);
3802     aio_context_acquire(aio_context);
3803 
3804     if (!blk_is_available(blk)) {
3805         error_setg(errp, "Device '%s' has no medium", device);
3806         goto out;
3807     }
3808     bs = blk_bs(blk);
3809 
3810     image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
3811     if (local_err) {
3812         error_propagate(errp, local_err);
3813         goto out;
3814     }
3815 
3816     if (!image_bs) {
3817         error_setg(errp, "image file not found");
3818         goto out;
3819     }
3820 
3821     if (bdrv_find_base(image_bs) == image_bs) {
3822         error_setg(errp, "not allowing backing file change on an image "
3823                          "without a backing file");
3824         goto out;
3825     }
3826 
3827     /* even though we are not necessarily operating on bs, we need it to
3828      * determine if block ops are currently prohibited on the chain */
3829     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
3830         goto out;
3831     }
3832 
3833     /* final sanity check */
3834     if (!bdrv_chain_contains(bs, image_bs)) {
3835         error_setg(errp, "'%s' and image file are not in the same chain",
3836                    device);
3837         goto out;
3838     }
3839 
3840     /* if not r/w, reopen to make r/w */
3841     open_flags = image_bs->open_flags;
3842     ro = bdrv_is_read_only(image_bs);
3843 
3844     if (ro) {
3845         bdrv_reopen(image_bs, open_flags | BDRV_O_RDWR, &local_err);
3846         if (local_err) {
3847             error_propagate(errp, local_err);
3848             goto out;
3849         }
3850     }
3851 
3852     ret = bdrv_change_backing_file(image_bs, backing_file,
3853                                image_bs->drv ? image_bs->drv->format_name : "");
3854 
3855     if (ret < 0) {
3856         error_setg_errno(errp, -ret, "Could not change backing file to '%s'",
3857                          backing_file);
3858         /* don't exit here, so we can try to restore open flags if
3859          * appropriate */
3860     }
3861 
3862     if (ro) {
3863         bdrv_reopen(image_bs, open_flags, &local_err);
3864         if (local_err) {
3865             error_propagate(errp, local_err); /* will preserve prior errp */
3866         }
3867     }
3868 
3869 out:
3870     aio_context_release(aio_context);
3871 }
3872 
3873 void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
3874 {
3875     QmpOutputVisitor *ov = qmp_output_visitor_new();
3876     BlockDriverState *bs;
3877     BlockBackend *blk = NULL;
3878     QObject *obj;
3879     QDict *qdict;
3880     Error *local_err = NULL;
3881 
3882     /* TODO Sort it out in raw-posix and drive_new(): Reject aio=native with
3883      * cache.direct=false instead of silently switching to aio=threads, except
3884      * when called from drive_new().
3885      *
3886      * For now, simply forbidding the combination for all drivers will do. */
3887     if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) {
3888         bool direct = options->has_cache &&
3889                       options->cache->has_direct &&
3890                       options->cache->direct;
3891         if (!direct) {
3892             error_setg(errp, "aio=native requires cache.direct=true");
3893             goto fail;
3894         }
3895     }
3896 
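         /* Run the QAPI output visitor over the options and flatten the result
          * into the dotted-key QDict form that the block layer expects. */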
3897     visit_type_BlockdevOptions(qmp_output_get_visitor(ov), NULL, &options,
3898                                &local_err);
3899     if (local_err) {
3900         error_propagate(errp, local_err);
3901         goto fail;
3902     }
3903 
3904     obj = qmp_output_get_qobject(ov);
3905     qdict = qobject_to_qdict(obj);
3906 
3907     qdict_flatten(qdict);
3908 
3909     if (options->has_id) {
3910         blk = blockdev_init(NULL, qdict, &local_err);
3911         if (local_err) {
3912             error_propagate(errp, local_err);
3913             goto fail;
3914         }
3915 
3916         bs = blk_bs(blk);
3917     } else {
3918         if (!qdict_get_try_str(qdict, "node-name")) {
3919             error_setg(errp, "'id' and/or 'node-name' need to be specified for "
3920                        "the root node");
3921             goto fail;
3922         }
3923 
3924         bs = bds_tree_init(qdict, errp);
3925         if (!bs) {
3926             goto fail;
3927         }
3928 
3929         QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
3930     }
3931 
3932     if (bs && bdrv_key_required(bs)) {
3933         if (blk) {
3934             blk_unref(blk);
3935         } else {
3936             QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
3937             bdrv_unref(bs);
3938         }
3939         error_setg(errp, "blockdev-add doesn't support encrypted devices");
3940         goto fail;
3941     }
3942 
3943 fail:
3944     qmp_output_visitor_cleanup(ov);
3945 }
3946 
3947 void qmp_x_blockdev_del(bool has_id, const char *id,
3948                         bool has_node_name, const char *node_name, Error **errp)
3949 {
3950     AioContext *aio_context;
3951     BlockBackend *blk;
3952     BlockDriverState *bs;
3953 
3954     if (has_id && has_node_name) {
3955         error_setg(errp, "Only one of id and node-name may be specified");
3956         return;
3957     } else if (!has_id && !has_node_name) {
3958         error_setg(errp, "No block device specified");
3959         return;
3960     }
3961 
3962     if (has_id) {
3963         blk = blk_by_name(id);
3964         if (!blk) {
3965             error_setg(errp, "Cannot find block backend %s", id);
3966             return;
3967         }
3968         if (blk_get_refcnt(blk) > 1) {
3969             error_setg(errp, "Block backend %s is in use", id);
3970             return;
3971         }
3972         bs = blk_bs(blk);
3973         aio_context = blk_get_aio_context(blk);
3974     } else {
3975         bs = bdrv_find_node(node_name);
3976         if (!bs) {
3977             error_setg(errp, "Cannot find node %s", node_name);
3978             return;
3979         }
3980         blk = bs->blk;
3981         if (blk) {
3982             error_setg(errp, "Node %s is in use by %s",
3983                        node_name, blk_name(blk));
3984             return;
3985         }
3986         aio_context = bdrv_get_aio_context(bs);
3987     }
3988 
3989     aio_context_acquire(aio_context);
3990 
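         /* Refuse the deletion if an op blocker forbids it, if a backend-less node
          * is not owned by the monitor, or if other references remain. */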
3991     if (bs) {
3992         if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) {
3993             goto out;
3994         }
3995 
3996         if (!blk && !bs->monitor_list.tqe_prev) {
3997             error_setg(errp, "Node %s is not owned by the monitor",
3998                        bs->node_name);
3999             goto out;
4000         }
4001 
4002         if (bs->refcnt > 1) {
4003             error_setg(errp, "Block device %s is in use",
4004                        bdrv_get_device_or_node_name(bs));
4005             goto out;
4006         }
4007     }
4008 
4009     if (blk) {
4010         blk_unref(blk);
4011     } else {
4012         QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
4013         bdrv_unref(bs);
4014     }
4015 
4016 out:
4017     aio_context_release(aio_context);
4018 }
4019 
4020 BlockJobInfoList *qmp_query_block_jobs(Error **errp)
4021 {
4022     BlockJobInfoList *head = NULL, **p_next = &head;
4023     BlockDriverState *bs;
4024 
4025     for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
4026         AioContext *aio_context = bdrv_get_aio_context(bs);
4027 
4028         aio_context_acquire(aio_context);
4029 
4030         if (bs->job) {
4031             BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1);
4032             elem->value = block_job_query(bs->job);
4033             *p_next = elem;
4034             p_next = &elem->next;
4035         }
4036 
4037         aio_context_release(aio_context);
4038     }
4039 
4040     return head;
4041 }
4042 
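     /*
      * Illustrative -drive command line exercising a few of these options (file
      * name and limit values are examples only):
      *   -drive file=disk.qcow2,format=qcow2,throttling.iops-total=200
      */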
4043 QemuOptsList qemu_common_drive_opts = {
4044     .name = "drive",
4045     .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
4046     .desc = {
4047         {
4048             .name = "snapshot",
4049             .type = QEMU_OPT_BOOL,
4050             .help = "enable/disable snapshot mode",
4051         },{
4052             .name = "discard",
4053             .type = QEMU_OPT_STRING,
4054             .help = "discard operation (ignore/off, unmap/on)",
4055         },{
4056             .name = "aio",
4057             .type = QEMU_OPT_STRING,
4058             .help = "host AIO implementation (threads, native)",
4059         },{
4060             .name = "format",
4061             .type = QEMU_OPT_STRING,
4062             .help = "disk format (raw, qcow2, ...)",
4063         },{
4064             .name = "rerror",
4065             .type = QEMU_OPT_STRING,
4066             .help = "read error action",
4067         },{
4068             .name = "werror",
4069             .type = QEMU_OPT_STRING,
4070             .help = "write error action",
4071         },{
4072             .name = "read-only",
4073             .type = QEMU_OPT_BOOL,
4074             .help = "open drive file as read-only",
4075         },{
4076             .name = "throttling.iops-total",
4077             .type = QEMU_OPT_NUMBER,
4078             .help = "limit total I/O operations per second",
4079         },{
4080             .name = "throttling.iops-read",
4081             .type = QEMU_OPT_NUMBER,
4082             .help = "limit read operations per second",
4083         },{
4084             .name = "throttling.iops-write",
4085             .type = QEMU_OPT_NUMBER,
4086             .help = "limit write operations per second",
4087         },{
4088             .name = "throttling.bps-total",
4089             .type = QEMU_OPT_NUMBER,
4090             .help = "limit total bytes per second",
4091         },{
4092             .name = "throttling.bps-read",
4093             .type = QEMU_OPT_NUMBER,
4094             .help = "limit read bytes per second",
4095         },{
4096             .name = "throttling.bps-write",
4097             .type = QEMU_OPT_NUMBER,
4098             .help = "limit write bytes per second",
4099         },{
4100             .name = "throttling.iops-total-max",
4101             .type = QEMU_OPT_NUMBER,
4102             .help = "I/O operations burst",
4103         },{
4104             .name = "throttling.iops-read-max",
4105             .type = QEMU_OPT_NUMBER,
4106             .help = "I/O operations read burst",
4107         },{
4108             .name = "throttling.iops-write-max",
4109             .type = QEMU_OPT_NUMBER,
4110             .help = "I/O operations write burst",
4111         },{
4112             .name = "throttling.bps-total-max",
4113             .type = QEMU_OPT_NUMBER,
4114             .help = "total bytes burst",
4115         },{
4116             .name = "throttling.bps-read-max",
4117             .type = QEMU_OPT_NUMBER,
4118             .help = "total bytes read burst",
4119         },{
4120             .name = "throttling.bps-write-max",
4121             .type = QEMU_OPT_NUMBER,
4122             .help = "total bytes write burst",
4123         },{
4124             .name = "throttling.iops-total-max-length",
4125             .type = QEMU_OPT_NUMBER,
4126             .help = "length of the iops-total-max burst period, in seconds",
4127         },{
4128             .name = "throttling.iops-read-max-length",
4129             .type = QEMU_OPT_NUMBER,
4130             .help = "length of the iops-read-max burst period, in seconds",
4131         },{
4132             .name = "throttling.iops-write-max-length",
4133             .type = QEMU_OPT_NUMBER,
4134             .help = "length of the iops-write-max burst period, in seconds",
4135         },{
4136             .name = "throttling.bps-total-max-length",
4137             .type = QEMU_OPT_NUMBER,
4138             .help = "length of the bps-total-max burst period, in seconds",
4139         },{
4140             .name = "throttling.bps-read-max-length",
4141             .type = QEMU_OPT_NUMBER,
4142             .help = "length of the bps-read-max burst period, in seconds",
4143         },{
4144             .name = "throttling.bps-write-max-length",
4145             .type = QEMU_OPT_NUMBER,
4146             .help = "length of the bps-write-max burst period, in seconds",
4147         },{
4148             .name = "throttling.iops-size",
4149             .type = QEMU_OPT_NUMBER,
4150             .help = "when limiting by iops max size of an I/O in bytes",
4151         },{
4152             .name = "throttling.group",
4153             .type = QEMU_OPT_STRING,
4154             .help = "name of the block throttling group",
4155         },{
4156             .name = "copy-on-read",
4157             .type = QEMU_OPT_BOOL,
4158             .help = "copy read data from backing file into image file",
4159         },{
4160             .name = "detect-zeroes",
4161             .type = QEMU_OPT_STRING,
4162             .help = "try to optimize zero writes (off, on, unmap)",
4163         },{
4164             .name = "stats-account-invalid",
4165             .type = QEMU_OPT_BOOL,
4166             .help = "whether to account for invalid I/O operations "
4167                     "in the statistics",
4168         },{
4169             .name = "stats-account-failed",
4170             .type = QEMU_OPT_BOOL,
4171             .help = "whether to account for failed I/O operations "
4172                     "in the statistics",
4173         },
4174         { /* end of list */ }
4175     },
4176 };
4177 
4178 static QemuOptsList qemu_root_bds_opts = {
4179     .name = "root-bds",
4180     .head = QTAILQ_HEAD_INITIALIZER(qemu_root_bds_opts.head),
4181     .desc = {
4182         {
4183             .name = "discard",
4184             .type = QEMU_OPT_STRING,
4185             .help = "discard operation (ignore/off, unmap/on)",
4186         },{
4187             .name = "aio",
4188             .type = QEMU_OPT_STRING,
4189             .help = "host AIO implementation (threads, native)",
4190         },{
4191             .name = "read-only",
4192             .type = QEMU_OPT_BOOL,
4193             .help = "open drive file as read-only",
4194         },{
4195             .name = "copy-on-read",
4196             .type = QEMU_OPT_BOOL,
4197             .help = "copy read data from backing file into image file",
4198         },{
4199             .name = "detect-zeroes",
4200             .type = QEMU_OPT_STRING,
4201             .help = "try to optimize zero writes (off, on, unmap)",
4202         },
4203         { /* end of list */ }
4204     },
4205 };
4206 
4207 QemuOptsList qemu_drive_opts = {
4208     .name = "drive",
4209     .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head),
4210     .desc = {
4211         /*
4212          * no elements => accept any params
4213          * validation will happen later
4214          */
4215         { /* end of list */ }
4216     },
4217 };
4218