xref: /qemu/block/qapi-sysemu.c (revision d4eb5038)
1 /*
2  * QMP command handlers specific to the system emulators
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or
7  * later.  See the COPYING file in the top-level directory.
8  *
9  * This file incorporates work covered by the following copyright and
10  * permission notice:
11  *
12  * Copyright (c) 2003-2008 Fabrice Bellard
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this software and associated documentation files (the "Software"), to deal
16  * in the Software without restriction, including without limitation the rights
17  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18  * copies of the Software, and to permit persons to whom the Software is
19  * furnished to do so, subject to the following conditions:
20  *
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
30  * THE SOFTWARE.
31  */
32 
33 #include "qemu/osdep.h"
34 
35 #include "block/block_int.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-commands-block.h"
38 #include "qapi/qmp/qdict.h"
39 #include "sysemu/block-backend.h"
40 #include "sysemu/blockdev.h"
41 
42 static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id,
43                                  Error **errp)
44 {
45     BlockBackend *blk;
46 
47     if (!blk_name == !qdev_id) {
48         error_setg(errp, "Need exactly one of 'device' and 'id'");
49         return NULL;
50     }
51 
52     if (qdev_id) {
53         blk = blk_by_qdev_id(qdev_id, errp);
54     } else {
55         blk = blk_by_name(blk_name);
56         if (blk == NULL) {
57             error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
58                       "Device '%s' not found", blk_name);
59         }
60     }
61 
62     return blk;
63 }
64 
65 /*
66  * Attempt to open the tray of @device.
67  * If @force, ignore its tray lock.
68  * Else, if the tray is locked, don't open it, but ask the guest to open it.
69  * On error, store an error through @errp and return -errno.
70  * If @device does not exist, return -ENODEV.
71  * If it has no removable media, return -ENOTSUP.
72  * If it has no tray, return -ENOSYS.
73  * If the guest was asked to open the tray, return -EINPROGRESS.
74  * Else, return 0.
75  */
76 static int do_open_tray(const char *blk_name, const char *qdev_id,
77                         bool force, Error **errp)
78 {
79     BlockBackend *blk;
80     const char *device = qdev_id ?: blk_name;
81     bool locked;
82 
83     blk = qmp_get_blk(blk_name, qdev_id, errp);
84     if (!blk) {
85         return -ENODEV;
86     }
87 
88     if (!blk_dev_has_removable_media(blk)) {
89         error_setg(errp, "Device '%s' is not removable", device);
90         return -ENOTSUP;
91     }
92 
93     if (!blk_dev_has_tray(blk)) {
94         error_setg(errp, "Device '%s' does not have a tray", device);
95         return -ENOSYS;
96     }
97 
98     if (blk_dev_is_tray_open(blk)) {
99         return 0;
100     }
101 
102     locked = blk_dev_is_medium_locked(blk);
103     if (locked) {
104         blk_dev_eject_request(blk, force);
105     }
106 
107     if (!locked || force) {
108         blk_dev_change_media_cb(blk, false, &error_abort);
109     }
110 
111     if (locked && !force) {
112         error_setg(errp, "Device '%s' is locked and force was not specified, "
113                    "wait for tray to open and try again", device);
114         return -EINPROGRESS;
115     }
116 
117     return 0;
118 }
119 
120 void qmp_blockdev_open_tray(const char *device,
121                             const char *id,
122                             bool has_force, bool force,
123                             Error **errp)
124 {
125     Error *local_err = NULL;
126     int rc;
127 
128     if (!has_force) {
129         force = false;
130     }
131     rc = do_open_tray(device, id, force, &local_err);
132     if (rc && rc != -ENOSYS && rc != -EINPROGRESS) {
133         error_propagate(errp, local_err);
134         return;
135     }
136     error_free(local_err);
137 }
138 
139 void qmp_blockdev_close_tray(const char *device,
140                              const char *id,
141                              Error **errp)
142 {
143     BlockBackend *blk;
144     Error *local_err = NULL;
145 
146     blk = qmp_get_blk(device, id, errp);
147     if (!blk) {
148         return;
149     }
150 
151     if (!blk_dev_has_removable_media(blk)) {
152         error_setg(errp, "Device '%s' is not removable", device ?: id);
153         return;
154     }
155 
156     if (!blk_dev_has_tray(blk)) {
157         /* Ignore this command on tray-less devices */
158         return;
159     }
160 
161     if (!blk_dev_is_tray_open(blk)) {
162         return;
163     }
164 
165     blk_dev_change_media_cb(blk, true, &local_err);
166     if (local_err) {
167         error_propagate(errp, local_err);
168         return;
169     }
170 }
171 
/*
 * Detach the medium (BDS tree) from the backend named by @device or @id.
 *
 * With a guest device attached, the device must be removable and, if it
 * has a tray, that tray must be open.  For tray-less devices the medium
 * is additionally ejected via the change-media callback.  Succeeds as a
 * no-op if no medium is inserted.
 */
static void GRAPH_UNLOCKED
blockdev_remove_medium(const char *device, const char *id, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    AioContext *aio_context;
    bool has_attached_device;

    GLOBAL_STATE_CODE();

    blk = qmp_get_blk(device, id, errp);
    if (!blk) {
        return;
    }

    /* For BBs without a device, we can exchange the BDS tree at will */
    has_attached_device = blk_get_attached_dev(blk);

    if (has_attached_device && !blk_dev_has_removable_media(blk)) {
        error_setg(errp, "Device '%s' is not removable", device ?: id);
        return;
    }

    if (has_attached_device && blk_dev_has_tray(blk) &&
        !blk_dev_is_tray_open(blk))
    {
        error_setg(errp, "Tray of device '%s' is not open", device ?: id);
        return;
    }

    bs = blk_bs(blk);
    if (!bs) {
        /* No medium inserted: nothing to remove */
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    /* Check eject blockers under the main-loop graph reader lock */
    bdrv_graph_rdlock_main_loop();
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
        bdrv_graph_rdunlock_main_loop();
        goto out;
    }
    bdrv_graph_rdunlock_main_loop();

    blk_remove_bs(blk);

    if (!blk_dev_has_tray(blk)) {
        /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
         * called at all); therefore, the medium needs to be ejected here.
         * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
         * value passed here (i.e. false). */
        blk_dev_change_media_cb(blk, false, &error_abort);
    }

out:
    aio_context_release(aio_context);
}
230 
/* QMP handler for blockdev-remove-medium; @id selects the backend by qdev ID */
void qmp_blockdev_remove_medium(const char *id, Error **errp)
{
    blockdev_remove_medium(NULL, id, errp);
}
235 
236 static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
237                                             BlockDriverState *bs, Error **errp)
238 {
239     Error *local_err = NULL;
240     bool has_device;
241     int ret;
242 
243     /* For BBs without a device, we can exchange the BDS tree at will */
244     has_device = blk_get_attached_dev(blk);
245 
246     if (has_device && !blk_dev_has_removable_media(blk)) {
247         error_setg(errp, "Device is not removable");
248         return;
249     }
250 
251     if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
252         error_setg(errp, "Tray of the device is not open");
253         return;
254     }
255 
256     if (blk_bs(blk)) {
257         error_setg(errp, "There already is a medium in the device");
258         return;
259     }
260 
261     ret = blk_insert_bs(blk, bs, errp);
262     if (ret < 0) {
263         return;
264     }
265 
266     if (!blk_dev_has_tray(blk)) {
267         /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
268          * called at all); therefore, the medium needs to be pushed into the
269          * slot here.
270          * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
271          * value passed here (i.e. true). */
272         blk_dev_change_media_cb(blk, true, &local_err);
273         if (local_err) {
274             error_propagate(errp, local_err);
275             blk_remove_bs(blk);
276             return;
277         }
278     }
279 }
280 
281 static void blockdev_insert_medium(const char *device, const char *id,
282                                    const char *node_name, Error **errp)
283 {
284     BlockBackend *blk;
285     BlockDriverState *bs;
286 
287     GRAPH_RDLOCK_GUARD_MAINLOOP();
288 
289     blk = qmp_get_blk(device, id, errp);
290     if (!blk) {
291         return;
292     }
293 
294     bs = bdrv_find_node(node_name);
295     if (!bs) {
296         error_setg(errp, "Node '%s' not found", node_name);
297         return;
298     }
299 
300     if (bdrv_has_blk(bs)) {
301         error_setg(errp, "Node '%s' is already in use", node_name);
302         return;
303     }
304 
305     qmp_blockdev_insert_anon_medium(blk, bs, errp);
306 }
307 
/* QMP handler for blockdev-insert-medium; @id selects the backend by qdev ID */
void qmp_blockdev_insert_medium(const char *id, const char *node_name,
                                Error **errp)
{
    blockdev_insert_medium(NULL, id, node_name, errp);
}
313 
/*
 * QMP handler for blockdev-change-medium: open @filename (optionally with
 * driver @format), then perform open-tray, remove-medium, insert-medium
 * and close-tray as one sequence.  @read_only overrides the read-only
 * mode inherited from the backend's root state; @force ignores a guest
 * tray lock when opening the tray.
 */
void qmp_blockdev_change_medium(const char *device,
                                const char *id,
                                const char *filename,
                                const char *format,
                                bool has_force, bool force,
                                bool has_read_only,
                                BlockdevChangeReadOnlyMode read_only,
                                Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *medium_bs = NULL;
    int bdrv_flags;
    bool detect_zeroes;
    int rc;
    QDict *options = NULL;
    Error *err = NULL;

    blk = qmp_get_blk(device, id, errp);
    if (!blk) {
        goto fail;
    }

    if (blk_bs(blk)) {
        /* Capture current options so the new medium inherits them */
        blk_update_root_state(blk);
    }

    /* Strip per-open flags that must not carry over to the new medium */
    bdrv_flags = blk_get_open_flags_from_root_state(blk);
    bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING |
        BDRV_O_PROTOCOL | BDRV_O_AUTO_RDONLY);

    if (!has_read_only) {
        read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
    }

    switch (read_only) {
    case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
        break;

    case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
        bdrv_flags &= ~BDRV_O_RDWR;
        break;

    case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
        bdrv_flags |= BDRV_O_RDWR;
        break;

    default:
        abort();
    }

    options = qdict_new();
    detect_zeroes = blk_get_detect_zeroes_from_root_state(blk);
    qdict_put_str(options, "detect-zeroes", detect_zeroes ? "on" : "off");

    if (format) {
        qdict_put_str(options, "driver", format);
    }

    /* bdrv_open() consumes @options */
    aio_context_acquire(qemu_get_aio_context());
    medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
    aio_context_release(qemu_get_aio_context());

    if (!medium_bs) {
        goto fail;
    }

    /* -ENOSYS (tray-less device) is tolerated; the remove/insert below
     * still makes sense for such devices */
    rc = do_open_tray(device, id, force, &err);
    if (rc && rc != -ENOSYS) {
        error_propagate(errp, err);
        goto fail;
    }
    error_free(err);
    err = NULL;

    blockdev_remove_medium(device, id, &err);
    if (err) {
        error_propagate(errp, err);
        goto fail;
    }

    qmp_blockdev_insert_anon_medium(blk, medium_bs, &err);
    if (err) {
        error_propagate(errp, err);
        goto fail;
    }

    qmp_blockdev_close_tray(device, id, errp);

fail:
    /* If the medium has been inserted, the device has its own reference, so
     * ours must be relinquished; and if it has not been inserted successfully,
     * the reference must be relinquished anyway */
    bdrv_unref(medium_bs);
}
408 
409 void qmp_eject(const char *device, const char *id,
410                bool has_force, bool force, Error **errp)
411 {
412     Error *local_err = NULL;
413     int rc;
414 
415     if (!has_force) {
416         force = false;
417     }
418 
419     rc = do_open_tray(device, id, force, &local_err);
420     if (rc && rc != -ENOSYS) {
421         error_propagate(errp, local_err);
422         return;
423     }
424     error_free(local_err);
425 
426     blockdev_remove_medium(device, id, errp);
427 }
428 
/* throttling disk I/O limits */
/*
 * QMP handler for block_set_io_throttle: build a ThrottleConfig from the
 * QAPI arguments and apply it to the backend.  An all-zero configuration
 * disables throttling; otherwise limits are enabled (joining throttle
 * group @arg->group, defaulting to the device/id name) and updated.
 */
void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
{
    ThrottleConfig cfg;
    BlockDriverState *bs;
    BlockBackend *blk;
    AioContext *aio_context;

    blk = qmp_get_blk(arg->device, arg->id, errp);
    if (!blk) {
        return;
    }

    aio_context = blk_get_aio_context(blk);
    aio_context_acquire(aio_context);

    bs = blk_bs(blk);
    if (!bs) {
        error_setg(errp, "Device has no medium");
        goto out;
    }

    /* Mandatory average rates */
    throttle_config_init(&cfg);
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps;
    cfg.buckets[THROTTLE_BPS_READ].avg  = arg->bps_rd;
    cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr;

    cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops;
    cfg.buckets[THROTTLE_OPS_READ].avg  = arg->iops_rd;
    cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr;

    /* Optional burst maxima */
    if (arg->has_bps_max) {
        cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max;
    }
    if (arg->has_bps_rd_max) {
        cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max;
    }
    if (arg->has_bps_wr_max) {
        cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max;
    }
    if (arg->has_iops_max) {
        cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max;
    }
    if (arg->has_iops_rd_max) {
        cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max;
    }
    if (arg->has_iops_wr_max) {
        cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max;
    }

    /* Optional burst durations */
    if (arg->has_bps_max_length) {
        cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length;
    }
    if (arg->has_bps_rd_max_length) {
        cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length;
    }
    if (arg->has_bps_wr_max_length) {
        cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length;
    }
    if (arg->has_iops_max_length) {
        cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length;
    }
    if (arg->has_iops_rd_max_length) {
        cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length;
    }
    if (arg->has_iops_wr_max_length) {
        cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length;
    }

    if (arg->has_iops_size) {
        cfg.op_size = arg->iops_size;
    }

    if (!throttle_is_valid(&cfg, errp)) {
        goto out;
    }

    if (throttle_enabled(&cfg)) {
        /* Enable I/O limits if they're not enabled yet, otherwise
         * just update the throttling group. */
        if (!blk_get_public(blk)->throttle_group_member.throttle_state) {
            blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id);
        } else if (arg->group) {
            blk_io_limits_update_group(blk, arg->group);
        }
        /* Set the new throttling configuration */
        blk_set_io_limits(blk, &cfg);
    } else if (blk_get_public(blk)->throttle_group_member.throttle_state) {
        /* If all throttling settings are set to 0, disable I/O limits */
        blk_io_limits_disable(blk);
    }

out:
    aio_context_release(aio_context);
}
524 
525 void qmp_block_latency_histogram_set(
526     const char *id,
527     bool has_boundaries, uint64List *boundaries,
528     bool has_boundaries_read, uint64List *boundaries_read,
529     bool has_boundaries_write, uint64List *boundaries_write,
530     bool has_boundaries_append, uint64List *boundaries_append,
531     bool has_boundaries_flush, uint64List *boundaries_flush,
532     Error **errp)
533 {
534     BlockBackend *blk = qmp_get_blk(NULL, id, errp);
535     BlockAcctStats *stats;
536     int ret;
537 
538     if (!blk) {
539         return;
540     }
541 
542     stats = blk_get_stats(blk);
543 
544     if (!has_boundaries && !has_boundaries_read && !has_boundaries_write &&
545         !has_boundaries_flush)
546     {
547         block_latency_histograms_clear(stats);
548         return;
549     }
550 
551     if (has_boundaries || has_boundaries_read) {
552         ret = block_latency_histogram_set(
553             stats, BLOCK_ACCT_READ,
554             has_boundaries_read ? boundaries_read : boundaries);
555         if (ret) {
556             error_setg(errp, "Device '%s' set read boundaries fail", id);
557             return;
558         }
559     }
560 
561     if (has_boundaries || has_boundaries_write) {
562         ret = block_latency_histogram_set(
563             stats, BLOCK_ACCT_WRITE,
564             has_boundaries_write ? boundaries_write : boundaries);
565         if (ret) {
566             error_setg(errp, "Device '%s' set write boundaries fail", id);
567             return;
568         }
569     }
570 
571     if (has_boundaries || has_boundaries_append) {
572         ret = block_latency_histogram_set(
573                 stats, BLOCK_ACCT_ZONE_APPEND,
574                 has_boundaries_append ? boundaries_append : boundaries);
575         if (ret) {
576             error_setg(errp, "Device '%s' set append write boundaries fail", id);
577             return;
578         }
579     }
580 
581     if (has_boundaries || has_boundaries_flush) {
582         ret = block_latency_histogram_set(
583             stats, BLOCK_ACCT_FLUSH,
584             has_boundaries_flush ? boundaries_flush : boundaries);
585         if (ret) {
586             error_setg(errp, "Device '%s' set flush boundaries fail", id);
587             return;
588         }
589     }
590 }
591