1 /* 2 * QEMU Block backends 3 * 4 * Copyright (C) 2014-2016 Red Hat, Inc. 5 * 6 * Authors: 7 * Markus Armbruster <armbru@redhat.com>, 8 * 9 * This work is licensed under the terms of the GNU LGPL, version 2.1 10 * or later. See the COPYING.LIB file in the top-level directory. 11 */ 12 13 #include "qemu/osdep.h" 14 #include "sysemu/block-backend.h" 15 #include "block/block_int.h" 16 #include "block/blockjob.h" 17 #include "block/throttle-groups.h" 18 #include "sysemu/blockdev.h" 19 #include "sysemu/sysemu.h" 20 #include "qapi-event.h" 21 #include "qemu/id.h" 22 #include "trace.h" 23 #include "migration/misc.h" 24 25 /* Number of coroutines to reserve per attached device model */ 26 #define COROUTINE_POOL_RESERVATION 64 27 28 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 29 30 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb); 31 32 struct BlockBackend { 33 char *name; 34 int refcnt; 35 BdrvChild *root; 36 DriveInfo *legacy_dinfo; /* null unless created by drive_new() */ 37 QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */ 38 QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */ 39 BlockBackendPublic public; 40 41 void *dev; /* attached device model, if any */ 42 bool legacy_dev; /* true if dev is not a DeviceState */ 43 /* TODO change to DeviceState when all users are qdevified */ 44 const BlockDevOps *dev_ops; 45 void *dev_opaque; 46 47 /* the block size for which the guest device expects atomicity */ 48 int guest_block_size; 49 50 /* If the BDS tree is removed, some of its options are stored here (which 51 * can be used to restore those options in the new BDS on insert) */ 52 BlockBackendRootState root_state; 53 54 bool enable_write_cache; 55 56 /* I/O stats (display with "info blockstats"). */ 57 BlockAcctStats stats; 58 59 BlockdevOnError on_read_error, on_write_error; 60 bool iostatus_enabled; 61 BlockDeviceIoStatus iostatus; 62 63 uint64_t perm; 64 uint64_t shared_perm; 65 bool disable_perm; 66 67 bool allow_write_beyond_eof; 68 69 NotifierList remove_bs_notifiers, insert_bs_notifiers; 70 71 int quiesce_counter; 72 VMChangeStateEntry *vmsh; 73 bool force_allow_inactivate; 74 }; 75 76 typedef struct BlockBackendAIOCB { 77 BlockAIOCB common; 78 BlockBackend *blk; 79 int ret; 80 } BlockBackendAIOCB; 81 82 static const AIOCBInfo block_backend_aiocb_info = { 83 .get_aio_context = blk_aiocb_get_aio_context, 84 .aiocb_size = sizeof(BlockBackendAIOCB), 85 }; 86 87 static void drive_info_del(DriveInfo *dinfo); 88 static BlockBackend *bdrv_first_blk(BlockDriverState *bs); 89 90 /* All BlockBackends */ 91 static QTAILQ_HEAD(, BlockBackend) block_backends = 92 QTAILQ_HEAD_INITIALIZER(block_backends); 93 94 /* All BlockBackends referenced by the monitor and which are iterated through by 95 * blk_next() */ 96 static QTAILQ_HEAD(, BlockBackend) monitor_block_backends = 97 QTAILQ_HEAD_INITIALIZER(monitor_block_backends); 98 99 static void blk_root_inherit_options(int *child_flags, QDict *child_options, 100 int parent_flags, QDict *parent_options) 101 { 102 /* We're not supposed to call this function for root nodes */ 103 abort(); 104 } 105 static void blk_root_drained_begin(BdrvChild *child); 106 static void blk_root_drained_end(BdrvChild *child); 107 108 static void blk_root_change_media(BdrvChild *child, bool load); 109 static void blk_root_resize(BdrvChild *child); 110 111 static char *blk_root_get_parent_desc(BdrvChild *child) 112 { 113 BlockBackend *blk = child->opaque; 114 char *dev_id; 115 116 if (blk->name) { 117 return g_strdup(blk->name); 118 } 119 120 dev_id = blk_get_attached_dev_id(blk); 121 if (*dev_id) { 122 return dev_id; 123 } else { 124 /* TODO Callback into the BB owner for something more detailed */ 125 g_free(dev_id); 126 return g_strdup("a block device"); 127 } 128 } 129 130 static const char *blk_root_get_name(BdrvChild *child) 131 { 132 return blk_name(child->opaque); 133 } 134 135 static void blk_vm_state_changed(void *opaque, int running, RunState state) 136 { 137 Error *local_err = NULL; 138 BlockBackend *blk = opaque; 139 140 if (state == RUN_STATE_INMIGRATE) { 141 return; 142 } 143 144 qemu_del_vm_change_state_handler(blk->vmsh); 145 blk->vmsh = NULL; 146 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); 147 if (local_err) { 148 error_report_err(local_err); 149 } 150 } 151 152 /* 153 * Notifies the user of the BlockBackend that migration has completed. qdev 154 * devices can tighten their permissions in response (specifically revoke 155 * shared write permissions that we needed for storage migration). 156 * 157 * If an error is returned, the VM cannot be allowed to be resumed. 158 */ 159 static void blk_root_activate(BdrvChild *child, Error **errp) 160 { 161 BlockBackend *blk = child->opaque; 162 Error *local_err = NULL; 163 164 if (!blk->disable_perm) { 165 return; 166 } 167 168 blk->disable_perm = false; 169 170 blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err); 171 if (local_err) { 172 error_propagate(errp, local_err); 173 blk->disable_perm = true; 174 return; 175 } 176 177 if (runstate_check(RUN_STATE_INMIGRATE)) { 178 /* Activation can happen when migration process is still active, for 179 * example when nbd_server_add is called during non-shared storage 180 * migration. Defer the shared_perm update to migration completion. */ 181 if (!blk->vmsh) { 182 blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed, 183 blk); 184 } 185 return; 186 } 187 188 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); 189 if (local_err) { 190 error_propagate(errp, local_err); 191 blk->disable_perm = true; 192 return; 193 } 194 } 195 196 void blk_set_force_allow_inactivate(BlockBackend *blk) 197 { 198 blk->force_allow_inactivate = true; 199 } 200 201 static bool blk_can_inactivate(BlockBackend *blk) 202 { 203 /* If it is a guest device, inactivate is ok. */ 204 if (blk->dev || blk_name(blk)[0]) { 205 return true; 206 } 207 208 /* Inactivating means no more writes to the image can be done, 209 * even if those writes would be changes invisible to the 210 * guest. For block job BBs that satisfy this, we can just allow 211 * it. This is the case for mirror job source, which is required 212 * by libvirt non-shared block migration. */ 213 if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) { 214 return true; 215 } 216 217 return blk->force_allow_inactivate; 218 } 219 220 static int blk_root_inactivate(BdrvChild *child) 221 { 222 BlockBackend *blk = child->opaque; 223 224 if (blk->disable_perm) { 225 return 0; 226 } 227 228 if (!blk_can_inactivate(blk)) { 229 return -EPERM; 230 } 231 232 blk->disable_perm = true; 233 if (blk->root) { 234 bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort); 235 } 236 237 return 0; 238 } 239 240 static const BdrvChildRole child_root = { 241 .inherit_options = blk_root_inherit_options, 242 243 .change_media = blk_root_change_media, 244 .resize = blk_root_resize, 245 .get_name = blk_root_get_name, 246 .get_parent_desc = blk_root_get_parent_desc, 247 248 .drained_begin = blk_root_drained_begin, 249 .drained_end = blk_root_drained_end, 250 251 .activate = blk_root_activate, 252 .inactivate = blk_root_inactivate, 253 }; 254 255 /* 256 * Create a new BlockBackend with a reference count of one. 257 * 258 * @perm is a bitmasks of BLK_PERM_* constants which describes the permissions 259 * to request for a block driver node that is attached to this BlockBackend. 260 * @shared_perm is a bitmask which describes which permissions may be granted 261 * to other users of the attached node. 262 * Both sets of permissions can be changed later using blk_set_perm(). 263 * 264 * Return the new BlockBackend on success, null on failure. 265 */ 266 BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm) 267 { 268 BlockBackend *blk; 269 270 blk = g_new0(BlockBackend, 1); 271 blk->refcnt = 1; 272 blk->perm = perm; 273 blk->shared_perm = shared_perm; 274 blk_set_enable_write_cache(blk, true); 275 276 block_acct_init(&blk->stats); 277 278 notifier_list_init(&blk->remove_bs_notifiers); 279 notifier_list_init(&blk->insert_bs_notifiers); 280 281 QTAILQ_INSERT_TAIL(&block_backends, blk, link); 282 return blk; 283 } 284 285 /* 286 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both. 287 * 288 * Just as with bdrv_open(), after having called this function the reference to 289 * @options belongs to the block layer (even on failure). 290 * 291 * TODO: Remove @filename and @flags; it should be possible to specify a whole 292 * BDS tree just by specifying the @options QDict (or @reference, 293 * alternatively). At the time of adding this function, this is not possible, 294 * though, so callers of this function have to be able to specify @filename and 295 * @flags. 296 */ 297 BlockBackend *blk_new_open(const char *filename, const char *reference, 298 QDict *options, int flags, Error **errp) 299 { 300 BlockBackend *blk; 301 BlockDriverState *bs; 302 uint64_t perm; 303 304 /* blk_new_open() is mainly used in .bdrv_create implementations and the 305 * tools where sharing isn't a concern because the BDS stays private, so we 306 * just request permission according to the flags. 307 * 308 * The exceptions are xen_disk and blockdev_init(); in these cases, the 309 * caller of blk_new_open() doesn't make use of the permissions, but they 310 * shouldn't hurt either. We can still share everything here because the 311 * guest devices will add their own blockers if they can't share. */ 312 perm = BLK_PERM_CONSISTENT_READ; 313 if (flags & BDRV_O_RDWR) { 314 perm |= BLK_PERM_WRITE; 315 } 316 if (flags & BDRV_O_RESIZE) { 317 perm |= BLK_PERM_RESIZE; 318 } 319 320 blk = blk_new(perm, BLK_PERM_ALL); 321 bs = bdrv_open(filename, reference, options, flags, errp); 322 if (!bs) { 323 blk_unref(blk); 324 return NULL; 325 } 326 327 blk->root = bdrv_root_attach_child(bs, "root", &child_root, 328 perm, BLK_PERM_ALL, blk, errp); 329 if (!blk->root) { 330 bdrv_unref(bs); 331 blk_unref(blk); 332 return NULL; 333 } 334 335 return blk; 336 } 337 338 static void blk_delete(BlockBackend *blk) 339 { 340 assert(!blk->refcnt); 341 assert(!blk->name); 342 assert(!blk->dev); 343 if (blk->public.throttle_group_member.throttle_state) { 344 blk_io_limits_disable(blk); 345 } 346 if (blk->root) { 347 blk_remove_bs(blk); 348 } 349 if (blk->vmsh) { 350 qemu_del_vm_change_state_handler(blk->vmsh); 351 blk->vmsh = NULL; 352 } 353 assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); 354 assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); 355 QTAILQ_REMOVE(&block_backends, blk, link); 356 drive_info_del(blk->legacy_dinfo); 357 block_acct_cleanup(&blk->stats); 358 g_free(blk); 359 } 360 361 static void drive_info_del(DriveInfo *dinfo) 362 { 363 if (!dinfo) { 364 return; 365 } 366 qemu_opts_del(dinfo->opts); 367 g_free(dinfo->serial); 368 g_free(dinfo); 369 } 370 371 int blk_get_refcnt(BlockBackend *blk) 372 { 373 return blk ? blk->refcnt : 0; 374 } 375 376 /* 377 * Increment @blk's reference count. 378 * @blk must not be null. 379 */ 380 void blk_ref(BlockBackend *blk) 381 { 382 blk->refcnt++; 383 } 384 385 /* 386 * Decrement @blk's reference count. 387 * If this drops it to zero, destroy @blk. 388 * For convenience, do nothing if @blk is null. 389 */ 390 void blk_unref(BlockBackend *blk) 391 { 392 if (blk) { 393 assert(blk->refcnt > 0); 394 if (!--blk->refcnt) { 395 blk_delete(blk); 396 } 397 } 398 } 399 400 /* 401 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the 402 * ones which are hidden (i.e. are not referenced by the monitor). 403 */ 404 BlockBackend *blk_all_next(BlockBackend *blk) 405 { 406 return blk ? QTAILQ_NEXT(blk, link) 407 : QTAILQ_FIRST(&block_backends); 408 } 409 410 void blk_remove_all_bs(void) 411 { 412 BlockBackend *blk = NULL; 413 414 while ((blk = blk_all_next(blk)) != NULL) { 415 AioContext *ctx = blk_get_aio_context(blk); 416 417 aio_context_acquire(ctx); 418 if (blk->root) { 419 blk_remove_bs(blk); 420 } 421 aio_context_release(ctx); 422 } 423 } 424 425 /* 426 * Return the monitor-owned BlockBackend after @blk. 427 * If @blk is null, return the first one. 428 * Else, return @blk's next sibling, which may be null. 429 * 430 * To iterate over all BlockBackends, do 431 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { 432 * ... 433 * } 434 */ 435 BlockBackend *blk_next(BlockBackend *blk) 436 { 437 return blk ? QTAILQ_NEXT(blk, monitor_link) 438 : QTAILQ_FIRST(&monitor_block_backends); 439 } 440 441 /* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by 442 * the monitor or attached to a BlockBackend */ 443 BlockDriverState *bdrv_next(BdrvNextIterator *it) 444 { 445 BlockDriverState *bs; 446 447 /* First, return all root nodes of BlockBackends. In order to avoid 448 * returning a BDS twice when multiple BBs refer to it, we only return it 449 * if the BB is the first one in the parent list of the BDS. */ 450 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { 451 do { 452 it->blk = blk_all_next(it->blk); 453 bs = it->blk ? blk_bs(it->blk) : NULL; 454 } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk)); 455 456 if (bs) { 457 return bs; 458 } 459 it->phase = BDRV_NEXT_MONITOR_OWNED; 460 } 461 462 /* Then return the monitor-owned BDSes without a BB attached. Ignore all 463 * BDSes that are attached to a BlockBackend here; they have been handled 464 * by the above block already */ 465 do { 466 it->bs = bdrv_next_monitor_owned(it->bs); 467 bs = it->bs; 468 } while (bs && bdrv_has_blk(bs)); 469 470 return bs; 471 } 472 473 BlockDriverState *bdrv_first(BdrvNextIterator *it) 474 { 475 *it = (BdrvNextIterator) { 476 .phase = BDRV_NEXT_BACKEND_ROOTS, 477 }; 478 479 return bdrv_next(it); 480 } 481 482 /* 483 * Add a BlockBackend into the list of backends referenced by the monitor, with 484 * the given @name acting as the handle for the monitor. 485 * Strictly for use by blockdev.c. 486 * 487 * @name must not be null or empty. 488 * 489 * Returns true on success and false on failure. In the latter case, an Error 490 * object is returned through @errp. 491 */ 492 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp) 493 { 494 assert(!blk->name); 495 assert(name && name[0]); 496 497 if (!id_wellformed(name)) { 498 error_setg(errp, "Invalid device name"); 499 return false; 500 } 501 if (blk_by_name(name)) { 502 error_setg(errp, "Device with id '%s' already exists", name); 503 return false; 504 } 505 if (bdrv_find_node(name)) { 506 error_setg(errp, 507 "Device name '%s' conflicts with an existing node name", 508 name); 509 return false; 510 } 511 512 blk->name = g_strdup(name); 513 QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link); 514 return true; 515 } 516 517 /* 518 * Remove a BlockBackend from the list of backends referenced by the monitor. 519 * Strictly for use by blockdev.c. 520 */ 521 void monitor_remove_blk(BlockBackend *blk) 522 { 523 if (!blk->name) { 524 return; 525 } 526 527 QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link); 528 g_free(blk->name); 529 blk->name = NULL; 530 } 531 532 /* 533 * Return @blk's name, a non-null string. 534 * Returns an empty string iff @blk is not referenced by the monitor. 535 */ 536 const char *blk_name(const BlockBackend *blk) 537 { 538 return blk->name ?: ""; 539 } 540 541 /* 542 * Return the BlockBackend with name @name if it exists, else null. 543 * @name must not be null. 544 */ 545 BlockBackend *blk_by_name(const char *name) 546 { 547 BlockBackend *blk = NULL; 548 549 assert(name); 550 while ((blk = blk_next(blk)) != NULL) { 551 if (!strcmp(name, blk->name)) { 552 return blk; 553 } 554 } 555 return NULL; 556 } 557 558 /* 559 * Return the BlockDriverState attached to @blk if any, else null. 560 */ 561 BlockDriverState *blk_bs(BlockBackend *blk) 562 { 563 return blk->root ? blk->root->bs : NULL; 564 } 565 566 static BlockBackend *bdrv_first_blk(BlockDriverState *bs) 567 { 568 BdrvChild *child; 569 QLIST_FOREACH(child, &bs->parents, next_parent) { 570 if (child->role == &child_root) { 571 return child->opaque; 572 } 573 } 574 575 return NULL; 576 } 577 578 /* 579 * Returns true if @bs has an associated BlockBackend. 580 */ 581 bool bdrv_has_blk(BlockDriverState *bs) 582 { 583 return bdrv_first_blk(bs) != NULL; 584 } 585 586 /* 587 * Returns true if @bs has only BlockBackends as parents. 588 */ 589 bool bdrv_is_root_node(BlockDriverState *bs) 590 { 591 BdrvChild *c; 592 593 QLIST_FOREACH(c, &bs->parents, next_parent) { 594 if (c->role != &child_root) { 595 return false; 596 } 597 } 598 599 return true; 600 } 601 602 /* 603 * Return @blk's DriveInfo if any, else null. 604 */ 605 DriveInfo *blk_legacy_dinfo(BlockBackend *blk) 606 { 607 return blk->legacy_dinfo; 608 } 609 610 /* 611 * Set @blk's DriveInfo to @dinfo, and return it. 612 * @blk must not have a DriveInfo set already. 613 * No other BlockBackend may have the same DriveInfo set. 614 */ 615 DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo) 616 { 617 assert(!blk->legacy_dinfo); 618 return blk->legacy_dinfo = dinfo; 619 } 620 621 /* 622 * Return the BlockBackend with DriveInfo @dinfo. 623 * It must exist. 624 */ 625 BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo) 626 { 627 BlockBackend *blk = NULL; 628 629 while ((blk = blk_next(blk)) != NULL) { 630 if (blk->legacy_dinfo == dinfo) { 631 return blk; 632 } 633 } 634 abort(); 635 } 636 637 /* 638 * Returns a pointer to the publicly accessible fields of @blk. 639 */ 640 BlockBackendPublic *blk_get_public(BlockBackend *blk) 641 { 642 return &blk->public; 643 } 644 645 /* 646 * Returns a BlockBackend given the associated @public fields. 647 */ 648 BlockBackend *blk_by_public(BlockBackendPublic *public) 649 { 650 return container_of(public, BlockBackend, public); 651 } 652 653 /* 654 * Disassociates the currently associated BlockDriverState from @blk. 655 */ 656 void blk_remove_bs(BlockBackend *blk) 657 { 658 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 659 BlockDriverState *bs; 660 661 notifier_list_notify(&blk->remove_bs_notifiers, blk); 662 if (tgm->throttle_state) { 663 bs = blk_bs(blk); 664 bdrv_drained_begin(bs); 665 throttle_group_detach_aio_context(tgm); 666 throttle_group_attach_aio_context(tgm, qemu_get_aio_context()); 667 bdrv_drained_end(bs); 668 } 669 670 blk_update_root_state(blk); 671 672 bdrv_root_unref_child(blk->root); 673 blk->root = NULL; 674 } 675 676 /* 677 * Associates a new BlockDriverState with @blk. 678 */ 679 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) 680 { 681 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 682 blk->root = bdrv_root_attach_child(bs, "root", &child_root, 683 blk->perm, blk->shared_perm, blk, errp); 684 if (blk->root == NULL) { 685 return -EPERM; 686 } 687 bdrv_ref(bs); 688 689 notifier_list_notify(&blk->insert_bs_notifiers, blk); 690 if (tgm->throttle_state) { 691 throttle_group_detach_aio_context(tgm); 692 throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs)); 693 } 694 695 return 0; 696 } 697 698 /* 699 * Sets the permission bitmasks that the user of the BlockBackend needs. 700 */ 701 int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, 702 Error **errp) 703 { 704 int ret; 705 706 if (blk->root && !blk->disable_perm) { 707 ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp); 708 if (ret < 0) { 709 return ret; 710 } 711 } 712 713 blk->perm = perm; 714 blk->shared_perm = shared_perm; 715 716 return 0; 717 } 718 719 void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm) 720 { 721 *perm = blk->perm; 722 *shared_perm = blk->shared_perm; 723 } 724 725 static int blk_do_attach_dev(BlockBackend *blk, void *dev) 726 { 727 if (blk->dev) { 728 return -EBUSY; 729 } 730 731 /* While migration is still incoming, we don't need to apply the 732 * permissions of guest device BlockBackends. We might still have a block 733 * job or NBD server writing to the image for storage migration. */ 734 if (runstate_check(RUN_STATE_INMIGRATE)) { 735 blk->disable_perm = true; 736 } 737 738 blk_ref(blk); 739 blk->dev = dev; 740 blk->legacy_dev = false; 741 blk_iostatus_reset(blk); 742 743 return 0; 744 } 745 746 /* 747 * Attach device model @dev to @blk. 748 * Return 0 on success, -EBUSY when a device model is attached already. 749 */ 750 int blk_attach_dev(BlockBackend *blk, DeviceState *dev) 751 { 752 return blk_do_attach_dev(blk, dev); 753 } 754 755 /* 756 * Attach device model @dev to @blk. 757 * @blk must not have a device model attached already. 758 * TODO qdevified devices don't use this, remove when devices are qdevified 759 */ 760 void blk_attach_dev_legacy(BlockBackend *blk, void *dev) 761 { 762 if (blk_do_attach_dev(blk, dev) < 0) { 763 abort(); 764 } 765 blk->legacy_dev = true; 766 } 767 768 /* 769 * Detach device model @dev from @blk. 770 * @dev must be currently attached to @blk. 771 */ 772 void blk_detach_dev(BlockBackend *blk, void *dev) 773 /* TODO change to DeviceState *dev when all users are qdevified */ 774 { 775 assert(blk->dev == dev); 776 blk->dev = NULL; 777 blk->dev_ops = NULL; 778 blk->dev_opaque = NULL; 779 blk->guest_block_size = 512; 780 blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort); 781 blk_unref(blk); 782 } 783 784 /* 785 * Return the device model attached to @blk if any, else null. 786 */ 787 void *blk_get_attached_dev(BlockBackend *blk) 788 /* TODO change to return DeviceState * when all users are qdevified */ 789 { 790 return blk->dev; 791 } 792 793 /* Return the qdev ID, or if no ID is assigned the QOM path, of the block 794 * device attached to the BlockBackend. */ 795 char *blk_get_attached_dev_id(BlockBackend *blk) 796 { 797 DeviceState *dev; 798 799 assert(!blk->legacy_dev); 800 dev = blk->dev; 801 802 if (!dev) { 803 return g_strdup(""); 804 } else if (dev->id) { 805 return g_strdup(dev->id); 806 } 807 return object_get_canonical_path(OBJECT(dev)); 808 } 809 810 /* 811 * Return the BlockBackend which has the device model @dev attached if it 812 * exists, else null. 813 * 814 * @dev must not be null. 815 */ 816 BlockBackend *blk_by_dev(void *dev) 817 { 818 BlockBackend *blk = NULL; 819 820 assert(dev != NULL); 821 while ((blk = blk_all_next(blk)) != NULL) { 822 if (blk->dev == dev) { 823 return blk; 824 } 825 } 826 return NULL; 827 } 828 829 /* 830 * Set @blk's device model callbacks to @ops. 831 * @opaque is the opaque argument to pass to the callbacks. 832 * This is for use by device models. 833 */ 834 void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, 835 void *opaque) 836 { 837 /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep 838 * it that way, so we can assume blk->dev, if present, is a DeviceState if 839 * blk->dev_ops is set. Non-device users may use dev_ops without device. */ 840 assert(!blk->legacy_dev); 841 842 blk->dev_ops = ops; 843 blk->dev_opaque = opaque; 844 845 /* Are we currently quiesced? Should we enforce this right now? */ 846 if (blk->quiesce_counter && ops->drained_begin) { 847 ops->drained_begin(opaque); 848 } 849 } 850 851 /* 852 * Notify @blk's attached device model of media change. 853 * 854 * If @load is true, notify of media load. This action can fail, meaning that 855 * the medium cannot be loaded. @errp is set then. 856 * 857 * If @load is false, notify of media eject. This can never fail. 858 * 859 * Also send DEVICE_TRAY_MOVED events as appropriate. 860 */ 861 void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp) 862 { 863 if (blk->dev_ops && blk->dev_ops->change_media_cb) { 864 bool tray_was_open, tray_is_open; 865 Error *local_err = NULL; 866 867 assert(!blk->legacy_dev); 868 869 tray_was_open = blk_dev_is_tray_open(blk); 870 blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err); 871 if (local_err) { 872 assert(load == true); 873 error_propagate(errp, local_err); 874 return; 875 } 876 tray_is_open = blk_dev_is_tray_open(blk); 877 878 if (tray_was_open != tray_is_open) { 879 char *id = blk_get_attached_dev_id(blk); 880 qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open, 881 &error_abort); 882 g_free(id); 883 } 884 } 885 } 886 887 static void blk_root_change_media(BdrvChild *child, bool load) 888 { 889 blk_dev_change_media_cb(child->opaque, load, NULL); 890 } 891 892 /* 893 * Does @blk's attached device model have removable media? 894 * %true if no device model is attached. 895 */ 896 bool blk_dev_has_removable_media(BlockBackend *blk) 897 { 898 return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb); 899 } 900 901 /* 902 * Does @blk's attached device model have a tray? 903 */ 904 bool blk_dev_has_tray(BlockBackend *blk) 905 { 906 return blk->dev_ops && blk->dev_ops->is_tray_open; 907 } 908 909 /* 910 * Notify @blk's attached device model of a media eject request. 911 * If @force is true, the medium is about to be yanked out forcefully. 912 */ 913 void blk_dev_eject_request(BlockBackend *blk, bool force) 914 { 915 if (blk->dev_ops && blk->dev_ops->eject_request_cb) { 916 blk->dev_ops->eject_request_cb(blk->dev_opaque, force); 917 } 918 } 919 920 /* 921 * Does @blk's attached device model have a tray, and is it open? 922 */ 923 bool blk_dev_is_tray_open(BlockBackend *blk) 924 { 925 if (blk_dev_has_tray(blk)) { 926 return blk->dev_ops->is_tray_open(blk->dev_opaque); 927 } 928 return false; 929 } 930 931 /* 932 * Does @blk's attached device model have the medium locked? 933 * %false if the device model has no such lock. 934 */ 935 bool blk_dev_is_medium_locked(BlockBackend *blk) 936 { 937 if (blk->dev_ops && blk->dev_ops->is_medium_locked) { 938 return blk->dev_ops->is_medium_locked(blk->dev_opaque); 939 } 940 return false; 941 } 942 943 /* 944 * Notify @blk's attached device model of a backend size change. 945 */ 946 static void blk_root_resize(BdrvChild *child) 947 { 948 BlockBackend *blk = child->opaque; 949 950 if (blk->dev_ops && blk->dev_ops->resize_cb) { 951 blk->dev_ops->resize_cb(blk->dev_opaque); 952 } 953 } 954 955 void blk_iostatus_enable(BlockBackend *blk) 956 { 957 blk->iostatus_enabled = true; 958 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 959 } 960 961 /* The I/O status is only enabled if the drive explicitly 962 * enables it _and_ the VM is configured to stop on errors */ 963 bool blk_iostatus_is_enabled(const BlockBackend *blk) 964 { 965 return (blk->iostatus_enabled && 966 (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || 967 blk->on_write_error == BLOCKDEV_ON_ERROR_STOP || 968 blk->on_read_error == BLOCKDEV_ON_ERROR_STOP)); 969 } 970 971 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk) 972 { 973 return blk->iostatus; 974 } 975 976 void blk_iostatus_disable(BlockBackend *blk) 977 { 978 blk->iostatus_enabled = false; 979 } 980 981 void blk_iostatus_reset(BlockBackend *blk) 982 { 983 if (blk_iostatus_is_enabled(blk)) { 984 BlockDriverState *bs = blk_bs(blk); 985 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 986 if (bs && bs->job) { 987 block_job_iostatus_reset(bs->job); 988 } 989 } 990 } 991 992 void blk_iostatus_set_err(BlockBackend *blk, int error) 993 { 994 assert(blk_iostatus_is_enabled(blk)); 995 if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 996 blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : 997 BLOCK_DEVICE_IO_STATUS_FAILED; 998 } 999 } 1000 1001 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow) 1002 { 1003 blk->allow_write_beyond_eof = allow; 1004 } 1005 1006 static int blk_check_byte_request(BlockBackend *blk, int64_t offset, 1007 size_t size) 1008 { 1009 int64_t len; 1010 1011 if (size > INT_MAX) { 1012 return -EIO; 1013 } 1014 1015 if (!blk_is_available(blk)) { 1016 return -ENOMEDIUM; 1017 } 1018 1019 if (offset < 0) { 1020 return -EIO; 1021 } 1022 1023 if (!blk->allow_write_beyond_eof) { 1024 len = blk_getlength(blk); 1025 if (len < 0) { 1026 return len; 1027 } 1028 1029 if (offset > len || len - offset < size) { 1030 return -EIO; 1031 } 1032 } 1033 1034 return 0; 1035 } 1036 1037 int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, 1038 unsigned int bytes, QEMUIOVector *qiov, 1039 BdrvRequestFlags flags) 1040 { 1041 int ret; 1042 BlockDriverState *bs = blk_bs(blk); 1043 1044 trace_blk_co_preadv(blk, bs, offset, bytes, flags); 1045 1046 ret = blk_check_byte_request(blk, offset, bytes); 1047 if (ret < 0) { 1048 return ret; 1049 } 1050 1051 bdrv_inc_in_flight(bs); 1052 1053 /* throttling disk I/O */ 1054 if (blk->public.throttle_group_member.throttle_state) { 1055 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, 1056 bytes, false); 1057 } 1058 1059 ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags); 1060 bdrv_dec_in_flight(bs); 1061 return ret; 1062 } 1063 1064 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, 1065 unsigned int bytes, QEMUIOVector *qiov, 1066 BdrvRequestFlags flags) 1067 { 1068 int ret; 1069 BlockDriverState *bs = blk_bs(blk); 1070 1071 trace_blk_co_pwritev(blk, bs, offset, bytes, flags); 1072 1073 ret = blk_check_byte_request(blk, offset, bytes); 1074 if (ret < 0) { 1075 return ret; 1076 } 1077 1078 bdrv_inc_in_flight(bs); 1079 /* throttling disk I/O */ 1080 if (blk->public.throttle_group_member.throttle_state) { 1081 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, 1082 bytes, true); 1083 } 1084 1085 if (!blk->enable_write_cache) { 1086 flags |= BDRV_REQ_FUA; 1087 } 1088 1089 ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags); 1090 bdrv_dec_in_flight(bs); 1091 return ret; 1092 } 1093 1094 typedef struct BlkRwCo { 1095 BlockBackend *blk; 1096 int64_t offset; 1097 QEMUIOVector *qiov; 1098 int ret; 1099 BdrvRequestFlags flags; 1100 } BlkRwCo; 1101 1102 static void blk_read_entry(void *opaque) 1103 { 1104 BlkRwCo *rwco = opaque; 1105 1106 rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size, 1107 rwco->qiov, rwco->flags); 1108 } 1109 1110 static void blk_write_entry(void *opaque) 1111 { 1112 BlkRwCo *rwco = opaque; 1113 1114 rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size, 1115 rwco->qiov, rwco->flags); 1116 } 1117 1118 static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, 1119 int64_t bytes, CoroutineEntry co_entry, 1120 BdrvRequestFlags flags) 1121 { 1122 QEMUIOVector qiov; 1123 struct iovec iov; 1124 BlkRwCo rwco; 1125 1126 iov = (struct iovec) { 1127 .iov_base = buf, 1128 .iov_len = bytes, 1129 }; 1130 qemu_iovec_init_external(&qiov, &iov, 1); 1131 1132 rwco = (BlkRwCo) { 1133 .blk = blk, 1134 .offset = offset, 1135 .qiov = &qiov, 1136 .flags = flags, 1137 .ret = NOT_DONE, 1138 }; 1139 1140 if (qemu_in_coroutine()) { 1141 /* Fast-path if already in coroutine context */ 1142 co_entry(&rwco); 1143 } else { 1144 Coroutine *co = qemu_coroutine_create(co_entry, &rwco); 1145 bdrv_coroutine_enter(blk_bs(blk), co); 1146 BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); 1147 } 1148 1149 return rwco.ret; 1150 } 1151 1152 int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf, 1153 int count) 1154 { 1155 int ret; 1156 1157 ret = blk_check_byte_request(blk, offset, count); 1158 if (ret < 0) { 1159 return ret; 1160 } 1161 1162 blk_root_drained_begin(blk->root); 1163 ret = blk_pread(blk, offset, buf, count); 1164 blk_root_drained_end(blk->root); 1165 return ret; 1166 } 1167 1168 int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, 1169 int bytes, BdrvRequestFlags flags) 1170 { 1171 return blk_prw(blk, offset, NULL, bytes, blk_write_entry, 1172 flags | BDRV_REQ_ZERO_WRITE); 1173 } 1174 1175 int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) 1176 { 1177 return bdrv_make_zero(blk->root, flags); 1178 } 1179 1180 static void error_callback_bh(void *opaque) 1181 { 1182 struct BlockBackendAIOCB *acb = opaque; 1183 1184 bdrv_dec_in_flight(acb->common.bs); 1185 acb->common.cb(acb->common.opaque, acb->ret); 1186 qemu_aio_unref(acb); 1187 } 1188 1189 BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, 1190 BlockCompletionFunc *cb, 1191 void *opaque, int ret) 1192 { 1193 struct BlockBackendAIOCB *acb; 1194 1195 bdrv_inc_in_flight(blk_bs(blk)); 1196 acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque); 1197 acb->blk = blk; 1198 acb->ret = ret; 1199 1200 aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb); 1201 return &acb->common; 1202 } 1203 1204 typedef struct BlkAioEmAIOCB { 1205 BlockAIOCB common; 1206 BlkRwCo rwco; 1207 int bytes; 1208 bool has_returned; 1209 } BlkAioEmAIOCB; 1210 1211 static const AIOCBInfo blk_aio_em_aiocb_info = { 1212 .aiocb_size = sizeof(BlkAioEmAIOCB), 1213 }; 1214 1215 static void blk_aio_complete(BlkAioEmAIOCB *acb) 1216 { 1217 if (acb->has_returned) { 1218 bdrv_dec_in_flight(acb->common.bs); 1219 acb->common.cb(acb->common.opaque, acb->rwco.ret); 1220 qemu_aio_unref(acb); 1221 } 1222 } 1223 1224 static void blk_aio_complete_bh(void *opaque) 1225 { 1226 BlkAioEmAIOCB *acb = opaque; 1227 assert(acb->has_returned); 1228 blk_aio_complete(acb); 1229 } 1230 1231 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, 1232 QEMUIOVector *qiov, CoroutineEntry co_entry, 1233 BdrvRequestFlags flags, 1234 BlockCompletionFunc *cb, void *opaque) 1235 { 1236 BlkAioEmAIOCB *acb; 1237 Coroutine *co; 1238 1239 bdrv_inc_in_flight(blk_bs(blk)); 1240 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); 1241 acb->rwco = (BlkRwCo) { 1242 .blk = blk, 1243 .offset = offset, 1244 .qiov = qiov, 1245 .flags = flags, 1246 .ret = NOT_DONE, 1247 }; 1248 acb->bytes = bytes; 1249 acb->has_returned = false; 1250 1251 co = qemu_coroutine_create(co_entry, acb); 1252 bdrv_coroutine_enter(blk_bs(blk), co); 1253 1254 acb->has_returned = true; 1255 if (acb->rwco.ret != NOT_DONE) { 1256 aio_bh_schedule_oneshot(blk_get_aio_context(blk), 1257 blk_aio_complete_bh, acb); 1258 } 1259 1260 return &acb->common; 1261 } 1262 1263 static void blk_aio_read_entry(void *opaque) 1264 { 1265 BlkAioEmAIOCB *acb = opaque; 1266 BlkRwCo *rwco = &acb->rwco; 1267 1268 assert(rwco->qiov->size == acb->bytes); 1269 rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes, 1270 rwco->qiov, rwco->flags); 1271 blk_aio_complete(acb); 1272 } 1273 1274 static void blk_aio_write_entry(void *opaque) 1275 { 1276 BlkAioEmAIOCB *acb = opaque; 1277 BlkRwCo *rwco = &acb->rwco; 1278 1279 assert(!rwco->qiov || rwco->qiov->size == acb->bytes); 1280 rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes, 1281 rwco->qiov, rwco->flags); 1282 blk_aio_complete(acb); 1283 } 1284 1285 BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, 1286 int count, BdrvRequestFlags flags, 1287 BlockCompletionFunc *cb, void *opaque) 1288 { 1289 return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry, 1290 flags | BDRV_REQ_ZERO_WRITE, cb, opaque); 1291 } 1292 1293 int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count) 1294 { 1295 int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0); 1296 if (ret < 0) { 1297 return ret; 1298 } 1299 return count; 1300 } 1301 1302 int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count, 1303 BdrvRequestFlags flags) 1304 { 1305 int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry, 1306 flags); 1307 if (ret < 0) { 1308 return ret; 1309 } 1310 return count; 1311 } 1312 1313 int64_t blk_getlength(BlockBackend *blk) 1314 { 1315 if (!blk_is_available(blk)) { 1316 return -ENOMEDIUM; 1317 } 1318 1319 return bdrv_getlength(blk_bs(blk)); 1320 } 1321 1322 void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) 1323 { 1324 if (!blk_bs(blk)) { 1325 *nb_sectors_ptr = 0; 1326 } else { 1327 bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr); 1328 } 1329 } 1330 1331 int64_t blk_nb_sectors(BlockBackend *blk) 1332 { 1333 if (!blk_is_available(blk)) { 1334 return -ENOMEDIUM; 1335 } 1336 1337 return bdrv_nb_sectors(blk_bs(blk)); 1338 } 1339 1340 BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, 1341 QEMUIOVector *qiov, BdrvRequestFlags flags, 1342 BlockCompletionFunc *cb, void *opaque) 1343 { 1344 return blk_aio_prwv(blk, offset, qiov->size, qiov, 1345 blk_aio_read_entry, flags, cb, opaque); 1346 } 1347 1348 BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, 1349 QEMUIOVector *qiov, BdrvRequestFlags flags, 1350 BlockCompletionFunc *cb, void *opaque) 1351 { 1352 return blk_aio_prwv(blk, offset, qiov->size, qiov, 1353 blk_aio_write_entry, flags, cb, opaque); 1354 } 1355 1356 static void blk_aio_flush_entry(void *opaque) 1357 { 1358 BlkAioEmAIOCB *acb = opaque; 1359 BlkRwCo *rwco = &acb->rwco; 1360 1361 rwco->ret = blk_co_flush(rwco->blk); 1362 blk_aio_complete(acb); 1363 } 1364 1365 BlockAIOCB *blk_aio_flush(BlockBackend *blk, 1366 BlockCompletionFunc *cb, void *opaque) 1367 { 1368 return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque); 1369 } 1370 1371 static void blk_aio_pdiscard_entry(void *opaque) 1372 { 1373 BlkAioEmAIOCB *acb = opaque; 1374 BlkRwCo *rwco = &acb->rwco; 1375 1376 rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes); 1377 blk_aio_complete(acb); 1378 } 1379 1380 BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, 1381 int64_t offset, int bytes, 1382 BlockCompletionFunc *cb, void *opaque) 1383 { 1384 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0, 1385 cb, opaque); 1386 } 1387 1388 void blk_aio_cancel(BlockAIOCB *acb) 1389 { 1390 bdrv_aio_cancel(acb); 1391 } 1392 1393 void blk_aio_cancel_async(BlockAIOCB *acb) 1394 { 1395 bdrv_aio_cancel_async(acb); 1396 } 1397 1398 int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf) 1399 { 1400 if (!blk_is_available(blk)) { 1401 return -ENOMEDIUM; 1402 } 1403 1404 return bdrv_co_ioctl(blk_bs(blk), req, buf); 1405 } 1406 1407 static void blk_ioctl_entry(void *opaque) 1408 { 1409 BlkRwCo *rwco = opaque; 1410 rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, 1411 rwco->qiov->iov[0].iov_base); 1412 } 1413 1414 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf) 1415 { 1416 return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0); 1417 } 1418 1419 static void blk_aio_ioctl_entry(void *opaque) 1420 { 1421 BlkAioEmAIOCB *acb = opaque; 1422 BlkRwCo *rwco = &acb->rwco; 1423 1424 rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, 1425 rwco->qiov->iov[0].iov_base); 1426 blk_aio_complete(acb); 1427 } 1428 1429 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, 1430 BlockCompletionFunc *cb, void *opaque) 1431 { 1432 QEMUIOVector qiov; 1433 struct iovec iov; 1434 1435 iov = (struct iovec) { 1436 .iov_base = buf, 1437 .iov_len = 0, 1438 }; 1439 qemu_iovec_init_external(&qiov, &iov, 1); 1440 1441 return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque); 1442 } 1443 1444 int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 1445 { 1446 int ret = blk_check_byte_request(blk, offset, bytes); 1447 if (ret < 0) { 1448 return ret; 1449 } 1450 1451 return bdrv_co_pdiscard(blk_bs(blk), offset, bytes); 1452 } 1453 1454 int blk_co_flush(BlockBackend *blk) 1455 { 1456 if (!blk_is_available(blk)) { 1457 return -ENOMEDIUM; 1458 } 1459 1460 return bdrv_co_flush(blk_bs(blk)); 1461 } 1462 1463 static void blk_flush_entry(void *opaque) 1464 { 1465 BlkRwCo *rwco = opaque; 1466 rwco->ret = blk_co_flush(rwco->blk); 1467 } 1468 1469 int blk_flush(BlockBackend *blk) 1470 { 1471 return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0); 1472 } 1473 1474 void blk_drain(BlockBackend *blk) 1475 { 1476 if (blk_bs(blk)) { 1477 bdrv_drain(blk_bs(blk)); 1478 } 1479 } 1480 1481 void blk_drain_all(void) 1482 { 1483 bdrv_drain_all(); 1484 } 1485 1486 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, 1487 BlockdevOnError on_write_error) 1488 { 1489 blk->on_read_error = on_read_error; 1490 blk->on_write_error = on_write_error; 1491 } 1492 1493 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read) 1494 { 1495 return is_read ? blk->on_read_error : blk->on_write_error; 1496 } 1497 1498 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, 1499 int error) 1500 { 1501 BlockdevOnError on_err = blk_get_on_error(blk, is_read); 1502 1503 switch (on_err) { 1504 case BLOCKDEV_ON_ERROR_ENOSPC: 1505 return (error == ENOSPC) ? 1506 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT; 1507 case BLOCKDEV_ON_ERROR_STOP: 1508 return BLOCK_ERROR_ACTION_STOP; 1509 case BLOCKDEV_ON_ERROR_REPORT: 1510 return BLOCK_ERROR_ACTION_REPORT; 1511 case BLOCKDEV_ON_ERROR_IGNORE: 1512 return BLOCK_ERROR_ACTION_IGNORE; 1513 case BLOCKDEV_ON_ERROR_AUTO: 1514 default: 1515 abort(); 1516 } 1517 } 1518 1519 static void send_qmp_error_event(BlockBackend *blk, 1520 BlockErrorAction action, 1521 bool is_read, int error) 1522 { 1523 IoOperationType optype; 1524 1525 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; 1526 qapi_event_send_block_io_error(blk_name(blk), 1527 bdrv_get_node_name(blk_bs(blk)), optype, 1528 action, blk_iostatus_is_enabled(blk), 1529 error == ENOSPC, strerror(error), 1530 &error_abort); 1531 } 1532 1533 /* This is done by device models because, while the block layer knows 1534 * about the error, it does not know whether an operation comes from 1535 * the device or the block layer (from a job, for example). 1536 */ 1537 void blk_error_action(BlockBackend *blk, BlockErrorAction action, 1538 bool is_read, int error) 1539 { 1540 assert(error >= 0); 1541 1542 if (action == BLOCK_ERROR_ACTION_STOP) { 1543 /* First set the iostatus, so that "info block" returns an iostatus 1544 * that matches the events raised so far (an additional error iostatus 1545 * is fine, but not a lost one). 1546 */ 1547 blk_iostatus_set_err(blk, error); 1548 1549 /* Then raise the request to stop the VM and the event. 1550 * qemu_system_vmstop_request_prepare has two effects. First, 1551 * it ensures that the STOP event always comes after the 1552 * BLOCK_IO_ERROR event. Second, it ensures that even if management 1553 * can observe the STOP event and do a "cont" before the STOP 1554 * event is issued, the VM will not stop. In this case, vm_start() 1555 * also ensures that the STOP/RESUME pair of events is emitted. 1556 */ 1557 qemu_system_vmstop_request_prepare(); 1558 send_qmp_error_event(blk, action, is_read, error); 1559 qemu_system_vmstop_request(RUN_STATE_IO_ERROR); 1560 } else { 1561 send_qmp_error_event(blk, action, is_read, error); 1562 } 1563 } 1564 1565 int blk_is_read_only(BlockBackend *blk) 1566 { 1567 BlockDriverState *bs = blk_bs(blk); 1568 1569 if (bs) { 1570 return bdrv_is_read_only(bs); 1571 } else { 1572 return blk->root_state.read_only; 1573 } 1574 } 1575 1576 int blk_is_sg(BlockBackend *blk) 1577 { 1578 BlockDriverState *bs = blk_bs(blk); 1579 1580 if (!bs) { 1581 return 0; 1582 } 1583 1584 return bdrv_is_sg(bs); 1585 } 1586 1587 int blk_enable_write_cache(BlockBackend *blk) 1588 { 1589 return blk->enable_write_cache; 1590 } 1591 1592 void blk_set_enable_write_cache(BlockBackend *blk, bool wce) 1593 { 1594 blk->enable_write_cache = wce; 1595 } 1596 1597 void blk_invalidate_cache(BlockBackend *blk, Error **errp) 1598 { 1599 BlockDriverState *bs = blk_bs(blk); 1600 1601 if (!bs) { 1602 error_setg(errp, "Device '%s' has no medium", blk->name); 1603 return; 1604 } 1605 1606 bdrv_invalidate_cache(bs, errp); 1607 } 1608 1609 bool blk_is_inserted(BlockBackend *blk) 1610 { 1611 BlockDriverState *bs = blk_bs(blk); 1612 1613 return bs && bdrv_is_inserted(bs); 1614 } 1615 1616 bool blk_is_available(BlockBackend *blk) 1617 { 1618 return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk); 1619 } 1620 1621 void blk_lock_medium(BlockBackend *blk, bool locked) 1622 { 1623 BlockDriverState *bs = blk_bs(blk); 1624 1625 if (bs) { 1626 bdrv_lock_medium(bs, locked); 1627 } 1628 } 1629 1630 void blk_eject(BlockBackend *blk, bool eject_flag) 1631 { 1632 BlockDriverState *bs = blk_bs(blk); 1633 char *id; 1634 1635 /* blk_eject is only called by qdevified devices */ 1636 assert(!blk->legacy_dev); 1637 1638 if (bs) { 1639 bdrv_eject(bs, eject_flag); 1640 } 1641 1642 /* Whether or not we ejected on the backend, 1643 * the frontend experienced a tray event. */ 1644 id = blk_get_attached_dev_id(blk); 1645 qapi_event_send_device_tray_moved(blk_name(blk), id, 1646 eject_flag, &error_abort); 1647 g_free(id); 1648 } 1649 1650 int blk_get_flags(BlockBackend *blk) 1651 { 1652 BlockDriverState *bs = blk_bs(blk); 1653 1654 if (bs) { 1655 return bdrv_get_flags(bs); 1656 } else { 1657 return blk->root_state.open_flags; 1658 } 1659 } 1660 1661 /* Returns the maximum transfer length, in bytes; guaranteed nonzero */ 1662 uint32_t blk_get_max_transfer(BlockBackend *blk) 1663 { 1664 BlockDriverState *bs = blk_bs(blk); 1665 uint32_t max = 0; 1666 1667 if (bs) { 1668 max = bs->bl.max_transfer; 1669 } 1670 return MIN_NON_ZERO(max, INT_MAX); 1671 } 1672 1673 int blk_get_max_iov(BlockBackend *blk) 1674 { 1675 return blk->root->bs->bl.max_iov; 1676 } 1677 1678 void blk_set_guest_block_size(BlockBackend *blk, int align) 1679 { 1680 blk->guest_block_size = align; 1681 } 1682 1683 void *blk_try_blockalign(BlockBackend *blk, size_t size) 1684 { 1685 return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size); 1686 } 1687 1688 void *blk_blockalign(BlockBackend *blk, size_t size) 1689 { 1690 return qemu_blockalign(blk ? blk_bs(blk) : NULL, size); 1691 } 1692 1693 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) 1694 { 1695 BlockDriverState *bs = blk_bs(blk); 1696 1697 if (!bs) { 1698 return false; 1699 } 1700 1701 return bdrv_op_is_blocked(bs, op, errp); 1702 } 1703 1704 void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason) 1705 { 1706 BlockDriverState *bs = blk_bs(blk); 1707 1708 if (bs) { 1709 bdrv_op_unblock(bs, op, reason); 1710 } 1711 } 1712 1713 void blk_op_block_all(BlockBackend *blk, Error *reason) 1714 { 1715 BlockDriverState *bs = blk_bs(blk); 1716 1717 if (bs) { 1718 bdrv_op_block_all(bs, reason); 1719 } 1720 } 1721 1722 void blk_op_unblock_all(BlockBackend *blk, Error *reason) 1723 { 1724 BlockDriverState *bs = blk_bs(blk); 1725 1726 if (bs) { 1727 bdrv_op_unblock_all(bs, reason); 1728 } 1729 } 1730 1731 AioContext *blk_get_aio_context(BlockBackend *blk) 1732 { 1733 BlockDriverState *bs = blk_bs(blk); 1734 1735 if (bs) { 1736 return bdrv_get_aio_context(bs); 1737 } else { 1738 return qemu_get_aio_context(); 1739 } 1740 } 1741 1742 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb) 1743 { 1744 BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb); 1745 return blk_get_aio_context(blk_acb->blk); 1746 } 1747 1748 void blk_set_aio_context(BlockBackend *blk, AioContext *new_context) 1749 { 1750 BlockDriverState *bs = blk_bs(blk); 1751 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 1752 1753 if (bs) { 1754 if (tgm->throttle_state) { 1755 bdrv_drained_begin(bs); 1756 throttle_group_detach_aio_context(tgm); 1757 throttle_group_attach_aio_context(tgm, new_context); 1758 bdrv_drained_end(bs); 1759 } 1760 bdrv_set_aio_context(bs, new_context); 1761 } 1762 } 1763 1764 void blk_add_aio_context_notifier(BlockBackend *blk, 1765 void (*attached_aio_context)(AioContext *new_context, void *opaque), 1766 void (*detach_aio_context)(void *opaque), void *opaque) 1767 { 1768 BlockDriverState *bs = blk_bs(blk); 1769 1770 if (bs) { 1771 bdrv_add_aio_context_notifier(bs, attached_aio_context, 1772 detach_aio_context, opaque); 1773 } 1774 } 1775 1776 void blk_remove_aio_context_notifier(BlockBackend *blk, 1777 void (*attached_aio_context)(AioContext *, 1778 void *), 1779 void (*detach_aio_context)(void *), 1780 void *opaque) 1781 { 1782 BlockDriverState *bs = blk_bs(blk); 1783 1784 if (bs) { 1785 bdrv_remove_aio_context_notifier(bs, attached_aio_context, 1786 detach_aio_context, opaque); 1787 } 1788 } 1789 1790 void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify) 1791 { 1792 notifier_list_add(&blk->remove_bs_notifiers, notify); 1793 } 1794 1795 void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify) 1796 { 1797 notifier_list_add(&blk->insert_bs_notifiers, notify); 1798 } 1799 1800 void blk_io_plug(BlockBackend *blk) 1801 { 1802 BlockDriverState *bs = blk_bs(blk); 1803 1804 if (bs) { 1805 bdrv_io_plug(bs); 1806 } 1807 } 1808 1809 void blk_io_unplug(BlockBackend *blk) 1810 { 1811 BlockDriverState *bs = blk_bs(blk); 1812 1813 if (bs) { 1814 bdrv_io_unplug(bs); 1815 } 1816 } 1817 1818 BlockAcctStats *blk_get_stats(BlockBackend *blk) 1819 { 1820 return &blk->stats; 1821 } 1822 1823 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, 1824 BlockCompletionFunc *cb, void *opaque) 1825 { 1826 return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque); 1827 } 1828 1829 int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, 1830 int bytes, BdrvRequestFlags flags) 1831 { 1832 return blk_co_pwritev(blk, offset, bytes, NULL, 1833 flags | BDRV_REQ_ZERO_WRITE); 1834 } 1835 1836 int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf, 1837 int count) 1838 { 1839 return blk_prw(blk, offset, (void *) buf, count, blk_write_entry, 1840 BDRV_REQ_WRITE_COMPRESSED); 1841 } 1842 1843 int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc, 1844 Error **errp) 1845 { 1846 if (!blk_is_available(blk)) { 1847 error_setg(errp, "No medium inserted"); 1848 return -ENOMEDIUM; 1849 } 1850 1851 return bdrv_truncate(blk->root, offset, prealloc, errp); 1852 } 1853 1854 static void blk_pdiscard_entry(void *opaque) 1855 { 1856 BlkRwCo *rwco = opaque; 1857 rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size); 1858 } 1859 1860 int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 1861 { 1862 return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0); 1863 } 1864 1865 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, 1866 int64_t pos, int size) 1867 { 1868 int ret; 1869 1870 if (!blk_is_available(blk)) { 1871 return -ENOMEDIUM; 1872 } 1873 1874 ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size); 1875 if (ret < 0) { 1876 return ret; 1877 } 1878 1879 if (ret == size && !blk->enable_write_cache) { 1880 ret = bdrv_flush(blk_bs(blk)); 1881 } 1882 1883 return ret < 0 ? ret : size; 1884 } 1885 1886 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size) 1887 { 1888 if (!blk_is_available(blk)) { 1889 return -ENOMEDIUM; 1890 } 1891 1892 return bdrv_load_vmstate(blk_bs(blk), buf, pos, size); 1893 } 1894 1895 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz) 1896 { 1897 if (!blk_is_available(blk)) { 1898 return -ENOMEDIUM; 1899 } 1900 1901 return bdrv_probe_blocksizes(blk_bs(blk), bsz); 1902 } 1903 1904 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo) 1905 { 1906 if (!blk_is_available(blk)) { 1907 return -ENOMEDIUM; 1908 } 1909 1910 return bdrv_probe_geometry(blk_bs(blk), geo); 1911 } 1912 1913 /* 1914 * Updates the BlockBackendRootState object with data from the currently 1915 * attached BlockDriverState. 1916 */ 1917 void blk_update_root_state(BlockBackend *blk) 1918 { 1919 assert(blk->root); 1920 1921 blk->root_state.open_flags = blk->root->bs->open_flags; 1922 blk->root_state.read_only = blk->root->bs->read_only; 1923 blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes; 1924 } 1925 1926 /* 1927 * Returns the detect-zeroes setting to be used for bdrv_open() of a 1928 * BlockDriverState which is supposed to inherit the root state. 1929 */ 1930 bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk) 1931 { 1932 return blk->root_state.detect_zeroes; 1933 } 1934 1935 /* 1936 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is 1937 * supposed to inherit the root state. 1938 */ 1939 int blk_get_open_flags_from_root_state(BlockBackend *blk) 1940 { 1941 int bs_flags; 1942 1943 bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR; 1944 bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR; 1945 1946 return bs_flags; 1947 } 1948 1949 BlockBackendRootState *blk_get_root_state(BlockBackend *blk) 1950 { 1951 return &blk->root_state; 1952 } 1953 1954 int blk_commit_all(void) 1955 { 1956 BlockBackend *blk = NULL; 1957 1958 while ((blk = blk_all_next(blk)) != NULL) { 1959 AioContext *aio_context = blk_get_aio_context(blk); 1960 1961 aio_context_acquire(aio_context); 1962 if (blk_is_inserted(blk) && blk->root->bs->backing) { 1963 int ret = bdrv_commit(blk->root->bs); 1964 if (ret < 0) { 1965 aio_context_release(aio_context); 1966 return ret; 1967 } 1968 } 1969 aio_context_release(aio_context); 1970 } 1971 return 0; 1972 } 1973 1974 1975 /* throttling disk I/O limits */ 1976 void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg) 1977 { 1978 throttle_group_config(&blk->public.throttle_group_member, cfg); 1979 } 1980 1981 void blk_io_limits_disable(BlockBackend *blk) 1982 { 1983 BlockDriverState *bs = blk_bs(blk); 1984 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 1985 assert(tgm->throttle_state); 1986 if (bs) { 1987 bdrv_drained_begin(bs); 1988 } 1989 throttle_group_unregister_tgm(tgm); 1990 if (bs) { 1991 bdrv_drained_end(bs); 1992 } 1993 } 1994 1995 /* should be called before blk_set_io_limits if a limit is set */ 1996 void blk_io_limits_enable(BlockBackend *blk, const char *group) 1997 { 1998 assert(!blk->public.throttle_group_member.throttle_state); 1999 throttle_group_register_tgm(&blk->public.throttle_group_member, 2000 group, blk_get_aio_context(blk)); 2001 } 2002 2003 void blk_io_limits_update_group(BlockBackend *blk, const char *group) 2004 { 2005 /* this BB is not part of any group */ 2006 if (!blk->public.throttle_group_member.throttle_state) { 2007 return; 2008 } 2009 2010 /* this BB is a part of the same group than the one we want */ 2011 if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member), 2012 group)) { 2013 return; 2014 } 2015 2016 /* need to change the group this bs belong to */ 2017 blk_io_limits_disable(blk); 2018 blk_io_limits_enable(blk, group); 2019 } 2020 2021 static void blk_root_drained_begin(BdrvChild *child) 2022 { 2023 BlockBackend *blk = child->opaque; 2024 2025 if (++blk->quiesce_counter == 1) { 2026 if (blk->dev_ops && blk->dev_ops->drained_begin) { 2027 blk->dev_ops->drained_begin(blk->dev_opaque); 2028 } 2029 } 2030 2031 /* Note that blk->root may not be accessible here yet if we are just 2032 * attaching to a BlockDriverState that is drained. Use child instead. */ 2033 2034 if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) { 2035 throttle_group_restart_tgm(&blk->public.throttle_group_member); 2036 } 2037 } 2038 2039 static void blk_root_drained_end(BdrvChild *child) 2040 { 2041 BlockBackend *blk = child->opaque; 2042 assert(blk->quiesce_counter); 2043 2044 assert(blk->public.throttle_group_member.io_limits_disabled); 2045 atomic_dec(&blk->public.throttle_group_member.io_limits_disabled); 2046 2047 if (--blk->quiesce_counter == 0) { 2048 if (blk->dev_ops && blk->dev_ops->drained_end) { 2049 blk->dev_ops->drained_end(blk->dev_opaque); 2050 } 2051 } 2052 } 2053