/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"
#include "trace.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    bool legacy_dev;            /* true if dev is not a DeviceState */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,

    .drained_begin      = blk_root_drained_begin,
    .drained_end        = blk_root_drained_end,
};
/*
 * Create a new BlockBackend with a reference count of one.
 * Cannot fail; return the new BlockBackend.
 */
BlockBackend *blk_new(void)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk_set_enable_write_cache(blk, true);

    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
    qemu_co_queue_init(&blk->public.throttled_reqs[1]);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new();
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);

    return blk;
}

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}
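/*
 * Example (illustrative sketch, not part of this file): a typical caller
 * opens an image and drops its reference when done.  The filename and flags
 * below are hypothetical.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     } else {
 *         ... use blk ...
 *         blk_unref(blk);
 *     }
 */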
void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs;

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (bs) {
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    return bs;
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };

    return bdrv_next(it);
}

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}
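/*
 * Example (illustrative sketch): walking every top-level BDS with the
 * iterator above.
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ... inspect bs ...
 *     }
 */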
/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;
    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_detach_aio_context(&blk->public.throttle_timers);
    }

    blk_update_root_state(blk);

    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}
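/*
 * Example (illustrative sketch): blk_get_public() and blk_by_public() are a
 * container_of() round trip, which is what code holding only the
 * BlockBackendPublic (e.g. the throttle groups) relies on.
 *
 *     BlockBackendPublic *pub = blk_get_public(blk);
 *     assert(blk_by_public(pub) == blk);
 */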
/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_attach_aio_context(
            &blk->public.throttle_timers, bdrv_get_aio_context(bs));
    }
}

static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk->legacy_dev = false;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    return blk_do_attach_dev(blk, dev);
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
    if (blk_do_attach_dev(blk, dev) < 0) {
        abort();
    }
    blk->legacy_dev = true;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
static char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;

    assert(!blk->legacy_dev);
    dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }
    return object_get_canonical_path(OBJECT(dev));
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
     * it that way, so we can assume blk->dev is a DeviceState if blk->dev_ops
     * is set. */
    assert(!blk->legacy_dev);

    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
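/*
 * Example (illustrative sketch): how a qdevified device model might hook
 * itself up during realize.  The ops table, callbacks and device type are
 * hypothetical.
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .resize_cb       = my_resize_cb,
 *     };
 *
 *     if (blk_attach_dev(blk, DEVICE(mydev)) < 0) {
 *         ... handle -EBUSY ...
 *     }
 *     blk_set_dev_ops(blk, &my_block_ops, mydev);
 */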
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        assert(!blk->legacy_dev);

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
                                              &error_abort);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
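/*
 * Example (illustrative sketch): a device model with a tray implements
 * is_tray_open so that blk_dev_change_media_cb() above can compare the tray
 * state before and after change_media_cb.  MyCDState is hypothetical.
 *
 *     static bool my_is_tray_open(void *opaque)
 *     {
 *         MyCDState *s = opaque;
 *
 *         return s->tray_open;
 *     }
 */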
/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;

    trace_blk_co_preadv(blk, blk_bs(blk), offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, false);
    }

    return bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;

    trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    return bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
}
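/*
 * Example (illustrative sketch): issuing a vectored read from coroutine
 * context.  The buffer and size are hypothetical.
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = {
 *         .iov_base = buf,
 *         .iov_len  = 4096,
 *     };
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     ret = blk_co_preadv(blk, 0, qiov.size, &qiov, 0);
 */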
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}

static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    AioContext *aio_context;
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    co = qemu_coroutine_create(co_entry, &rwco);
    qemu_coroutine_enter(co);

    aio_context = blk_get_aio_context(blk);
    while (rwco.ret == NOT_DONE) {
        aio_poll(aio_context, true);
    }

    return rwco.ret;
}

int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int count, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, count, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}
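/*
 * Example (illustrative sketch): the synchronous wrappers built on blk_prw()
 * can be called from non-coroutine context; blk_prw() spawns a coroutine and
 * polls the backend's AioContext until NOT_DONE is overwritten.
 *
 *     uint8_t sector[512];
 *
 *     if (blk_pread(blk, 0, sector, sizeof(sector)) < 0) {
 *         ... handle error ...
 *     }
 */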
typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;

    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    qemu_coroutine_enter(co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}

static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}
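/*
 * Example (illustrative sketch): asynchronous write with a completion
 * callback.  my_write_done and MyReq are hypothetical.
 *
 *     static void my_write_done(void *opaque, int ret)
 *     {
 *         MyReq *req = opaque;
 *
 *         ... ret < 0 means the request failed ...
 *     }
 *
 *     blk_aio_pwritev(blk, offset, &req->qiov, 0, my_write_done, req);
 */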
static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int count,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             rwco->qiov->iov[0].iov_base);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             rwco->qiov->iov[0].iov_base);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    QEMUIOVector qiov;
    struct iovec iov;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = 0,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}

int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk_bs(blk), offset, count);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}

void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
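/*
 * Example (illustrative sketch): how a device model typically combines
 * blk_get_error_action() with blk_error_action() below when a request fails
 * with a negative errno value in ret.
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request for retry after "cont" ...
 *     }
 *     blk_error_action(blk, action, is_read, -ret);
 */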
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk),
                                   bdrv_get_node_name(blk_bs(blk)), optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    /* blk_eject is only called by qdevified devices */
    assert(!blk->legacy_dev);

    if (bs) {
        bdrv_eject(bs, eject_flag);

        id = blk_get_attached_dev_id(blk);
        qapi_event_send_device_tray_moved(blk_name(blk), id,
                                          eject_flag, &error_abort);
        g_free(id);
    }
}
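/*
 * Example (illustrative sketch): callers distinguish "no medium at all" from
 * "medium present but tray open" using the two predicates above; the I/O
 * paths in this file use blk_is_available() and return -ENOMEDIUM when it is
 * false.
 *
 *     if (!blk_is_available(blk)) {
 *         return -ENOMEDIUM;
 *     }
 */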
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        if (blk->public.throttle_state) {
            throttle_timers_detach_aio_context(&blk->public.throttle_timers);
        }
        bdrv_set_aio_context(bs, new_context);
        if (blk->public.throttle_state) {
            throttle_timers_attach_aio_context(&blk->public.throttle_timers,
                                               new_context);
        }
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}
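/*
 * Example (illustrative sketch): registering a notifier that fires when the
 * root BDS is removed.  my_notify and the embedding state struct are
 * hypothetical; notifier_list_notify() in blk_remove_bs() passes the
 * BlockBackend as @data.
 *
 *     static void my_notify(Notifier *n, void *data)
 *     {
 *         BlockBackend *blk = data;
 *
 *         ... react to blk losing its medium ...
 *     }
 *
 *     state->remove_notifier.notify = my_notify;
 *     blk_add_remove_bs_notifier(blk, &state->remove_notifier);
 */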
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int count, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, count, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk_bs(blk), offset);
}

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}
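/*
 * Example (illustrative sketch): the root state saved above lets a caller
 * open a replacement medium with the flags of the old one.  The filename and
 * options are hypothetical.
 *
 *     int flags = blk_get_open_flags_from_root_state(blk);
 *     BlockDriverState *bs = bdrv_open(filename, NULL, options, flags, errp);
 *
 *     if (bs) {
 *         blk_insert_bs(blk, bs);
 *         bdrv_unref(bs);    // blk_insert_bs() took its own reference
 *     }
 */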
/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}


/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(blk, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    assert(blk->public.throttle_state);
    bdrv_drained_begin(blk_bs(blk));
    throttle_group_unregister_blk(blk);
    bdrv_drained_end(blk_bs(blk));
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_state);
    throttle_group_register_blk(blk, group);
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_state) {
        return;
    }

    /* this BB is already a member of the group we want */
    if (!g_strcmp0(throttle_group_get_name(blk), group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}

static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (blk->public.io_limits_disabled++ == 0) {
        throttle_group_restart_blk(blk);
    }
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    assert(blk->public.io_limits_disabled);
    --blk->public.io_limits_disabled;
}