1 /* 2 * Block layer I/O functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "trace.h" 27 #include "sysemu/block-backend.h" 28 #include "block/aio-wait.h" 29 #include "block/blockjob.h" 30 #include "block/blockjob_int.h" 31 #include "block/block_int.h" 32 #include "block/coroutines.h" 33 #include "block/write-threshold.h" 34 #include "qemu/cutils.h" 35 #include "qemu/memalign.h" 36 #include "qapi/error.h" 37 #include "qemu/error-report.h" 38 #include "qemu/main-loop.h" 39 #include "sysemu/replay.h" 40 41 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ 42 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) 43 44 static void bdrv_parent_cb_resize(BlockDriverState *bs); 45 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 46 int64_t offset, int64_t bytes, BdrvRequestFlags flags); 47 48 static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore, 49 bool ignore_bds_parents) 50 { 51 BdrvChild *c, *next; 52 53 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 54 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { 55 continue; 56 } 57 bdrv_parent_drained_begin_single(c, false); 58 } 59 } 60 61 static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c, 62 int *drained_end_counter) 63 { 64 assert(c->parent_quiesce_counter > 0); 65 c->parent_quiesce_counter--; 66 if (c->klass->drained_end) { 67 c->klass->drained_end(c, drained_end_counter); 68 } 69 } 70 71 void bdrv_parent_drained_end_single(BdrvChild *c) 72 { 73 int drained_end_counter = 0; 74 IO_OR_GS_CODE(); 75 bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); 76 BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0); 77 } 78 79 static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, 80 bool ignore_bds_parents, 81 int *drained_end_counter) 82 { 83 BdrvChild *c; 84 85 QLIST_FOREACH(c, &bs->parents, next_parent) { 86 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { 87 continue; 88 } 89 bdrv_parent_drained_end_single_no_poll(c, drained_end_counter); 90 } 91 } 92 93 static bool bdrv_parent_drained_poll_single(BdrvChild *c) 94 { 95 if (c->klass->drained_poll) { 96 return c->klass->drained_poll(c); 97 } 98 return false; 99 } 100 101 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, 102 bool ignore_bds_parents) 103 { 104 BdrvChild *c, *next; 105 bool busy = false; 106 107 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 108 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { 109 continue; 110 } 111 busy |= bdrv_parent_drained_poll_single(c); 112 } 113 114 return busy; 115 } 116 117 void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll) 118 { 119 IO_OR_GS_CODE(); 120 c->parent_quiesce_counter++; 121 if (c->klass->drained_begin) { 122 c->klass->drained_begin(c); 123 } 124 if (poll) { 125 BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c)); 126 } 127 } 128 129 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) 130 { 131 dst->pdiscard_alignment = MAX(dst->pdiscard_alignment, 132 src->pdiscard_alignment); 133 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer); 134 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer); 135 dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer, 136 src->max_hw_transfer); 137 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment, 138 src->opt_mem_alignment); 139 dst->min_mem_alignment = MAX(dst->min_mem_alignment, 140 src->min_mem_alignment); 141 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); 142 dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov); 143 } 144 145 typedef struct BdrvRefreshLimitsState { 146 BlockDriverState *bs; 147 BlockLimits old_bl; 148 } BdrvRefreshLimitsState; 149 150 static void bdrv_refresh_limits_abort(void *opaque) 151 { 152 BdrvRefreshLimitsState *s = opaque; 153 154 s->bs->bl = s->old_bl; 155 } 156 157 static TransactionActionDrv bdrv_refresh_limits_drv = { 158 .abort = bdrv_refresh_limits_abort, 159 .clean = g_free, 160 }; 161 162 /* @tran is allowed to be NULL, in this case no rollback is possible. */ 163 void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) 164 { 165 ERRP_GUARD(); 166 BlockDriver *drv = bs->drv; 167 BdrvChild *c; 168 bool have_limits; 169 170 GLOBAL_STATE_CODE(); 171 172 if (tran) { 173 BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1); 174 *s = (BdrvRefreshLimitsState) { 175 .bs = bs, 176 .old_bl = bs->bl, 177 }; 178 tran_add(tran, &bdrv_refresh_limits_drv, s); 179 } 180 181 memset(&bs->bl, 0, sizeof(bs->bl)); 182 183 if (!drv) { 184 return; 185 } 186 187 /* Default alignment based on whether driver has byte interface */ 188 bs->bl.request_alignment = (drv->bdrv_co_preadv || 189 drv->bdrv_aio_preadv || 190 drv->bdrv_co_preadv_part) ? 1 : 512; 191 192 /* Take some limits from the children as a default */ 193 have_limits = false; 194 QLIST_FOREACH(c, &bs->children, next) { 195 if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW)) 196 { 197 bdrv_merge_limits(&bs->bl, &c->bs->bl); 198 have_limits = true; 199 } 200 } 201 202 if (!have_limits) { 203 bs->bl.min_mem_alignment = 512; 204 bs->bl.opt_mem_alignment = qemu_real_host_page_size(); 205 206 /* Safe default since most protocols use readv()/writev()/etc */ 207 bs->bl.max_iov = IOV_MAX; 208 } 209 210 /* Then let the driver override it */ 211 if (drv->bdrv_refresh_limits) { 212 drv->bdrv_refresh_limits(bs, errp); 213 if (*errp) { 214 return; 215 } 216 } 217 218 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) { 219 error_setg(errp, "Driver requires too large request alignment"); 220 } 221 } 222 223 /** 224 * The copy-on-read flag is actually a reference count so multiple users may 225 * use the feature without worrying about clobbering its previous state. 226 * Copy-on-read stays enabled until all users have called to disable it. 227 */ 228 void bdrv_enable_copy_on_read(BlockDriverState *bs) 229 { 230 IO_CODE(); 231 qatomic_inc(&bs->copy_on_read); 232 } 233 234 void bdrv_disable_copy_on_read(BlockDriverState *bs) 235 { 236 int old = qatomic_fetch_dec(&bs->copy_on_read); 237 IO_CODE(); 238 assert(old >= 1); 239 } 240 241 typedef struct { 242 Coroutine *co; 243 BlockDriverState *bs; 244 bool done; 245 bool begin; 246 bool recursive; 247 bool poll; 248 BdrvChild *parent; 249 bool ignore_bds_parents; 250 int *drained_end_counter; 251 } BdrvCoDrainData; 252 253 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) 254 { 255 BdrvCoDrainData *data = opaque; 256 BlockDriverState *bs = data->bs; 257 258 if (data->begin) { 259 bs->drv->bdrv_co_drain_begin(bs); 260 } else { 261 bs->drv->bdrv_co_drain_end(bs); 262 } 263 264 /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */ 265 qatomic_mb_set(&data->done, true); 266 if (!data->begin) { 267 qatomic_dec(data->drained_end_counter); 268 } 269 bdrv_dec_in_flight(bs); 270 271 g_free(data); 272 } 273 274 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */ 275 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, 276 int *drained_end_counter) 277 { 278 BdrvCoDrainData *data; 279 280 if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) || 281 (!begin && !bs->drv->bdrv_co_drain_end)) { 282 return; 283 } 284 285 data = g_new(BdrvCoDrainData, 1); 286 *data = (BdrvCoDrainData) { 287 .bs = bs, 288 .done = false, 289 .begin = begin, 290 .drained_end_counter = drained_end_counter, 291 }; 292 293 if (!begin) { 294 qatomic_inc(drained_end_counter); 295 } 296 297 /* Make sure the driver callback completes during the polling phase for 298 * drain_begin. */ 299 bdrv_inc_in_flight(bs); 300 data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data); 301 aio_co_schedule(bdrv_get_aio_context(bs), data->co); 302 } 303 304 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ 305 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, 306 BdrvChild *ignore_parent, bool ignore_bds_parents) 307 { 308 BdrvChild *child, *next; 309 IO_OR_GS_CODE(); 310 311 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { 312 return true; 313 } 314 315 if (qatomic_read(&bs->in_flight)) { 316 return true; 317 } 318 319 if (recursive) { 320 assert(!ignore_bds_parents); 321 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 322 if (bdrv_drain_poll(child->bs, recursive, child, false)) { 323 return true; 324 } 325 } 326 } 327 328 return false; 329 } 330 331 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive, 332 BdrvChild *ignore_parent) 333 { 334 return bdrv_drain_poll(bs, recursive, ignore_parent, false); 335 } 336 337 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, 338 BdrvChild *parent, bool ignore_bds_parents, 339 bool poll); 340 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, 341 BdrvChild *parent, bool ignore_bds_parents, 342 int *drained_end_counter); 343 344 static void bdrv_co_drain_bh_cb(void *opaque) 345 { 346 BdrvCoDrainData *data = opaque; 347 Coroutine *co = data->co; 348 BlockDriverState *bs = data->bs; 349 350 if (bs) { 351 AioContext *ctx = bdrv_get_aio_context(bs); 352 aio_context_acquire(ctx); 353 bdrv_dec_in_flight(bs); 354 if (data->begin) { 355 assert(!data->drained_end_counter); 356 bdrv_do_drained_begin(bs, data->recursive, data->parent, 357 data->ignore_bds_parents, data->poll); 358 } else { 359 assert(!data->poll); 360 bdrv_do_drained_end(bs, data->recursive, data->parent, 361 data->ignore_bds_parents, 362 data->drained_end_counter); 363 } 364 aio_context_release(ctx); 365 } else { 366 assert(data->begin); 367 bdrv_drain_all_begin(); 368 } 369 370 data->done = true; 371 aio_co_wake(co); 372 } 373 374 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, 375 bool begin, bool recursive, 376 BdrvChild *parent, 377 bool ignore_bds_parents, 378 bool poll, 379 int *drained_end_counter) 380 { 381 BdrvCoDrainData data; 382 Coroutine *self = qemu_coroutine_self(); 383 AioContext *ctx = bdrv_get_aio_context(bs); 384 AioContext *co_ctx = qemu_coroutine_get_aio_context(self); 385 386 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and 387 * other coroutines run if they were queued by aio_co_enter(). */ 388 389 assert(qemu_in_coroutine()); 390 data = (BdrvCoDrainData) { 391 .co = self, 392 .bs = bs, 393 .done = false, 394 .begin = begin, 395 .recursive = recursive, 396 .parent = parent, 397 .ignore_bds_parents = ignore_bds_parents, 398 .poll = poll, 399 .drained_end_counter = drained_end_counter, 400 }; 401 402 if (bs) { 403 bdrv_inc_in_flight(bs); 404 } 405 406 /* 407 * Temporarily drop the lock across yield or we would get deadlocks. 408 * bdrv_co_drain_bh_cb() reaquires the lock as needed. 409 * 410 * When we yield below, the lock for the current context will be 411 * released, so if this is actually the lock that protects bs, don't drop 412 * it a second time. 413 */ 414 if (ctx != co_ctx) { 415 aio_context_release(ctx); 416 } 417 replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data); 418 419 qemu_coroutine_yield(); 420 /* If we are resumed from some other event (such as an aio completion or a 421 * timer callback), it is a bug in the caller that should be fixed. */ 422 assert(data.done); 423 424 /* Reaquire the AioContext of bs if we dropped it */ 425 if (ctx != co_ctx) { 426 aio_context_acquire(ctx); 427 } 428 } 429 430 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, 431 BdrvChild *parent, bool ignore_bds_parents) 432 { 433 IO_OR_GS_CODE(); 434 assert(!qemu_in_coroutine()); 435 436 /* Stop things in parent-to-child order */ 437 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { 438 aio_disable_external(bdrv_get_aio_context(bs)); 439 } 440 441 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents); 442 bdrv_drain_invoke(bs, true, NULL); 443 } 444 445 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, 446 BdrvChild *parent, bool ignore_bds_parents, 447 bool poll) 448 { 449 BdrvChild *child, *next; 450 451 if (qemu_in_coroutine()) { 452 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents, 453 poll, NULL); 454 return; 455 } 456 457 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents); 458 459 if (recursive) { 460 assert(!ignore_bds_parents); 461 bs->recursive_quiesce_counter++; 462 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 463 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents, 464 false); 465 } 466 } 467 468 /* 469 * Wait for drained requests to finish. 470 * 471 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The 472 * call is needed so things in this AioContext can make progress even 473 * though we don't return to the main AioContext loop - this automatically 474 * includes other nodes in the same AioContext and therefore all child 475 * nodes. 476 */ 477 if (poll) { 478 assert(!ignore_bds_parents); 479 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent)); 480 } 481 } 482 483 void bdrv_drained_begin(BlockDriverState *bs) 484 { 485 IO_OR_GS_CODE(); 486 bdrv_do_drained_begin(bs, false, NULL, false, true); 487 } 488 489 void bdrv_subtree_drained_begin(BlockDriverState *bs) 490 { 491 IO_OR_GS_CODE(); 492 bdrv_do_drained_begin(bs, true, NULL, false, true); 493 } 494 495 /** 496 * This function does not poll, nor must any of its recursively called 497 * functions. The *drained_end_counter pointee will be incremented 498 * once for every background operation scheduled, and decremented once 499 * the operation settles. Therefore, the pointer must remain valid 500 * until the pointee reaches 0. That implies that whoever sets up the 501 * pointee has to poll until it is 0. 502 * 503 * We use atomic operations to access *drained_end_counter, because 504 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of 505 * @bs may contain nodes in different AioContexts, 506 * (2) bdrv_drain_all_end() uses the same counter for all nodes, 507 * regardless of which AioContext they are in. 508 */ 509 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, 510 BdrvChild *parent, bool ignore_bds_parents, 511 int *drained_end_counter) 512 { 513 BdrvChild *child; 514 int old_quiesce_counter; 515 516 assert(drained_end_counter != NULL); 517 518 if (qemu_in_coroutine()) { 519 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents, 520 false, drained_end_counter); 521 return; 522 } 523 assert(bs->quiesce_counter > 0); 524 525 /* Re-enable things in child-to-parent order */ 526 bdrv_drain_invoke(bs, false, drained_end_counter); 527 bdrv_parent_drained_end(bs, parent, ignore_bds_parents, 528 drained_end_counter); 529 530 old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); 531 if (old_quiesce_counter == 1) { 532 aio_enable_external(bdrv_get_aio_context(bs)); 533 } 534 535 if (recursive) { 536 assert(!ignore_bds_parents); 537 bs->recursive_quiesce_counter--; 538 QLIST_FOREACH(child, &bs->children, next) { 539 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents, 540 drained_end_counter); 541 } 542 } 543 } 544 545 void bdrv_drained_end(BlockDriverState *bs) 546 { 547 int drained_end_counter = 0; 548 IO_OR_GS_CODE(); 549 bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter); 550 BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); 551 } 552 553 void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter) 554 { 555 IO_CODE(); 556 bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter); 557 } 558 559 void bdrv_subtree_drained_end(BlockDriverState *bs) 560 { 561 int drained_end_counter = 0; 562 IO_OR_GS_CODE(); 563 bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter); 564 BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); 565 } 566 567 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) 568 { 569 int i; 570 IO_OR_GS_CODE(); 571 572 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) { 573 bdrv_do_drained_begin(child->bs, true, child, false, true); 574 } 575 } 576 577 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) 578 { 579 int drained_end_counter = 0; 580 int i; 581 IO_OR_GS_CODE(); 582 583 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) { 584 bdrv_do_drained_end(child->bs, true, child, false, 585 &drained_end_counter); 586 } 587 588 BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0); 589 } 590 591 void bdrv_drain(BlockDriverState *bs) 592 { 593 IO_OR_GS_CODE(); 594 bdrv_drained_begin(bs); 595 bdrv_drained_end(bs); 596 } 597 598 static void bdrv_drain_assert_idle(BlockDriverState *bs) 599 { 600 BdrvChild *child, *next; 601 602 assert(qatomic_read(&bs->in_flight) == 0); 603 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 604 bdrv_drain_assert_idle(child->bs); 605 } 606 } 607 608 unsigned int bdrv_drain_all_count = 0; 609 610 static bool bdrv_drain_all_poll(void) 611 { 612 BlockDriverState *bs = NULL; 613 bool result = false; 614 GLOBAL_STATE_CODE(); 615 616 /* bdrv_drain_poll() can't make changes to the graph and we are holding the 617 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ 618 while ((bs = bdrv_next_all_states(bs))) { 619 AioContext *aio_context = bdrv_get_aio_context(bs); 620 aio_context_acquire(aio_context); 621 result |= bdrv_drain_poll(bs, false, NULL, true); 622 aio_context_release(aio_context); 623 } 624 625 return result; 626 } 627 628 /* 629 * Wait for pending requests to complete across all BlockDriverStates 630 * 631 * This function does not flush data to disk, use bdrv_flush_all() for that 632 * after calling this function. 633 * 634 * This pauses all block jobs and disables external clients. It must 635 * be paired with bdrv_drain_all_end(). 636 * 637 * NOTE: no new block jobs or BlockDriverStates can be created between 638 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls. 639 */ 640 void bdrv_drain_all_begin(void) 641 { 642 BlockDriverState *bs = NULL; 643 GLOBAL_STATE_CODE(); 644 645 if (qemu_in_coroutine()) { 646 bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL); 647 return; 648 } 649 650 /* 651 * bdrv queue is managed by record/replay, 652 * waiting for finishing the I/O requests may 653 * be infinite 654 */ 655 if (replay_events_enabled()) { 656 return; 657 } 658 659 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main 660 * loop AioContext, so make sure we're in the main context. */ 661 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 662 assert(bdrv_drain_all_count < INT_MAX); 663 bdrv_drain_all_count++; 664 665 /* Quiesce all nodes, without polling in-flight requests yet. The graph 666 * cannot change during this loop. */ 667 while ((bs = bdrv_next_all_states(bs))) { 668 AioContext *aio_context = bdrv_get_aio_context(bs); 669 670 aio_context_acquire(aio_context); 671 bdrv_do_drained_begin(bs, false, NULL, true, false); 672 aio_context_release(aio_context); 673 } 674 675 /* Now poll the in-flight requests */ 676 AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll()); 677 678 while ((bs = bdrv_next_all_states(bs))) { 679 bdrv_drain_assert_idle(bs); 680 } 681 } 682 683 void bdrv_drain_all_end_quiesce(BlockDriverState *bs) 684 { 685 int drained_end_counter = 0; 686 GLOBAL_STATE_CODE(); 687 688 g_assert(bs->quiesce_counter > 0); 689 g_assert(!bs->refcnt); 690 691 while (bs->quiesce_counter) { 692 bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter); 693 } 694 BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); 695 } 696 697 void bdrv_drain_all_end(void) 698 { 699 BlockDriverState *bs = NULL; 700 int drained_end_counter = 0; 701 GLOBAL_STATE_CODE(); 702 703 /* 704 * bdrv queue is managed by record/replay, 705 * waiting for finishing the I/O requests may 706 * be endless 707 */ 708 if (replay_events_enabled()) { 709 return; 710 } 711 712 while ((bs = bdrv_next_all_states(bs))) { 713 AioContext *aio_context = bdrv_get_aio_context(bs); 714 715 aio_context_acquire(aio_context); 716 bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter); 717 aio_context_release(aio_context); 718 } 719 720 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 721 AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0); 722 723 assert(bdrv_drain_all_count > 0); 724 bdrv_drain_all_count--; 725 } 726 727 void bdrv_drain_all(void) 728 { 729 GLOBAL_STATE_CODE(); 730 bdrv_drain_all_begin(); 731 bdrv_drain_all_end(); 732 } 733 734 /** 735 * Remove an active request from the tracked requests list 736 * 737 * This function should be called when a tracked request is completing. 738 */ 739 static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req) 740 { 741 if (req->serialising) { 742 qatomic_dec(&req->bs->serialising_in_flight); 743 } 744 745 qemu_co_mutex_lock(&req->bs->reqs_lock); 746 QLIST_REMOVE(req, list); 747 qemu_co_queue_restart_all(&req->wait_queue); 748 qemu_co_mutex_unlock(&req->bs->reqs_lock); 749 } 750 751 /** 752 * Add an active request to the tracked requests list 753 */ 754 static void tracked_request_begin(BdrvTrackedRequest *req, 755 BlockDriverState *bs, 756 int64_t offset, 757 int64_t bytes, 758 enum BdrvTrackedRequestType type) 759 { 760 bdrv_check_request(offset, bytes, &error_abort); 761 762 *req = (BdrvTrackedRequest){ 763 .bs = bs, 764 .offset = offset, 765 .bytes = bytes, 766 .type = type, 767 .co = qemu_coroutine_self(), 768 .serialising = false, 769 .overlap_offset = offset, 770 .overlap_bytes = bytes, 771 }; 772 773 qemu_co_queue_init(&req->wait_queue); 774 775 qemu_co_mutex_lock(&bs->reqs_lock); 776 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 777 qemu_co_mutex_unlock(&bs->reqs_lock); 778 } 779 780 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 781 int64_t offset, int64_t bytes) 782 { 783 bdrv_check_request(offset, bytes, &error_abort); 784 785 /* aaaa bbbb */ 786 if (offset >= req->overlap_offset + req->overlap_bytes) { 787 return false; 788 } 789 /* bbbb aaaa */ 790 if (req->overlap_offset >= offset + bytes) { 791 return false; 792 } 793 return true; 794 } 795 796 /* Called with self->bs->reqs_lock held */ 797 static BdrvTrackedRequest * 798 bdrv_find_conflicting_request(BdrvTrackedRequest *self) 799 { 800 BdrvTrackedRequest *req; 801 802 QLIST_FOREACH(req, &self->bs->tracked_requests, list) { 803 if (req == self || (!req->serialising && !self->serialising)) { 804 continue; 805 } 806 if (tracked_request_overlaps(req, self->overlap_offset, 807 self->overlap_bytes)) 808 { 809 /* 810 * Hitting this means there was a reentrant request, for 811 * example, a block driver issuing nested requests. This must 812 * never happen since it means deadlock. 813 */ 814 assert(qemu_coroutine_self() != req->co); 815 816 /* 817 * If the request is already (indirectly) waiting for us, or 818 * will wait for us as soon as it wakes up, then just go on 819 * (instead of producing a deadlock in the former case). 820 */ 821 if (!req->waiting_for) { 822 return req; 823 } 824 } 825 } 826 827 return NULL; 828 } 829 830 /* Called with self->bs->reqs_lock held */ 831 static bool coroutine_fn 832 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self) 833 { 834 BdrvTrackedRequest *req; 835 bool waited = false; 836 837 while ((req = bdrv_find_conflicting_request(self))) { 838 self->waiting_for = req; 839 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); 840 self->waiting_for = NULL; 841 waited = true; 842 } 843 844 return waited; 845 } 846 847 /* Called with req->bs->reqs_lock held */ 848 static void tracked_request_set_serialising(BdrvTrackedRequest *req, 849 uint64_t align) 850 { 851 int64_t overlap_offset = req->offset & ~(align - 1); 852 int64_t overlap_bytes = 853 ROUND_UP(req->offset + req->bytes, align) - overlap_offset; 854 855 bdrv_check_request(req->offset, req->bytes, &error_abort); 856 857 if (!req->serialising) { 858 qatomic_inc(&req->bs->serialising_in_flight); 859 req->serialising = true; 860 } 861 862 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 863 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 864 } 865 866 /** 867 * Return the tracked request on @bs for the current coroutine, or 868 * NULL if there is none. 869 */ 870 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) 871 { 872 BdrvTrackedRequest *req; 873 Coroutine *self = qemu_coroutine_self(); 874 IO_CODE(); 875 876 QLIST_FOREACH(req, &bs->tracked_requests, list) { 877 if (req->co == self) { 878 return req; 879 } 880 } 881 882 return NULL; 883 } 884 885 /** 886 * Round a region to cluster boundaries 887 */ 888 void bdrv_round_to_clusters(BlockDriverState *bs, 889 int64_t offset, int64_t bytes, 890 int64_t *cluster_offset, 891 int64_t *cluster_bytes) 892 { 893 BlockDriverInfo bdi; 894 IO_CODE(); 895 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 896 *cluster_offset = offset; 897 *cluster_bytes = bytes; 898 } else { 899 int64_t c = bdi.cluster_size; 900 *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 901 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 902 } 903 } 904 905 static int bdrv_get_cluster_size(BlockDriverState *bs) 906 { 907 BlockDriverInfo bdi; 908 int ret; 909 910 ret = bdrv_get_info(bs, &bdi); 911 if (ret < 0 || bdi.cluster_size == 0) { 912 return bs->bl.request_alignment; 913 } else { 914 return bdi.cluster_size; 915 } 916 } 917 918 void bdrv_inc_in_flight(BlockDriverState *bs) 919 { 920 IO_CODE(); 921 qatomic_inc(&bs->in_flight); 922 } 923 924 void bdrv_wakeup(BlockDriverState *bs) 925 { 926 IO_CODE(); 927 aio_wait_kick(); 928 } 929 930 void bdrv_dec_in_flight(BlockDriverState *bs) 931 { 932 IO_CODE(); 933 qatomic_dec(&bs->in_flight); 934 bdrv_wakeup(bs); 935 } 936 937 static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) 938 { 939 BlockDriverState *bs = self->bs; 940 bool waited = false; 941 942 if (!qatomic_read(&bs->serialising_in_flight)) { 943 return false; 944 } 945 946 qemu_co_mutex_lock(&bs->reqs_lock); 947 waited = bdrv_wait_serialising_requests_locked(self); 948 qemu_co_mutex_unlock(&bs->reqs_lock); 949 950 return waited; 951 } 952 953 bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, 954 uint64_t align) 955 { 956 bool waited; 957 IO_CODE(); 958 959 qemu_co_mutex_lock(&req->bs->reqs_lock); 960 961 tracked_request_set_serialising(req, align); 962 waited = bdrv_wait_serialising_requests_locked(req); 963 964 qemu_co_mutex_unlock(&req->bs->reqs_lock); 965 966 return waited; 967 } 968 969 int bdrv_check_qiov_request(int64_t offset, int64_t bytes, 970 QEMUIOVector *qiov, size_t qiov_offset, 971 Error **errp) 972 { 973 /* 974 * Check generic offset/bytes correctness 975 */ 976 977 if (offset < 0) { 978 error_setg(errp, "offset is negative: %" PRIi64, offset); 979 return -EIO; 980 } 981 982 if (bytes < 0) { 983 error_setg(errp, "bytes is negative: %" PRIi64, bytes); 984 return -EIO; 985 } 986 987 if (bytes > BDRV_MAX_LENGTH) { 988 error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")", 989 bytes, BDRV_MAX_LENGTH); 990 return -EIO; 991 } 992 993 if (offset > BDRV_MAX_LENGTH) { 994 error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")", 995 offset, BDRV_MAX_LENGTH); 996 return -EIO; 997 } 998 999 if (offset > BDRV_MAX_LENGTH - bytes) { 1000 error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") " 1001 "exceeds maximum(%" PRIi64 ")", offset, bytes, 1002 BDRV_MAX_LENGTH); 1003 return -EIO; 1004 } 1005 1006 if (!qiov) { 1007 return 0; 1008 } 1009 1010 /* 1011 * Check qiov and qiov_offset 1012 */ 1013 1014 if (qiov_offset > qiov->size) { 1015 error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)", 1016 qiov_offset, qiov->size); 1017 return -EIO; 1018 } 1019 1020 if (bytes > qiov->size - qiov_offset) { 1021 error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io " 1022 "vector size(%zu)", bytes, qiov_offset, qiov->size); 1023 return -EIO; 1024 } 1025 1026 return 0; 1027 } 1028 1029 int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp) 1030 { 1031 return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp); 1032 } 1033 1034 static int bdrv_check_request32(int64_t offset, int64_t bytes, 1035 QEMUIOVector *qiov, size_t qiov_offset) 1036 { 1037 int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 1038 if (ret < 0) { 1039 return ret; 1040 } 1041 1042 if (bytes > BDRV_REQUEST_MAX_BYTES) { 1043 return -EIO; 1044 } 1045 1046 return 0; 1047 } 1048 1049 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, 1050 int64_t bytes, BdrvRequestFlags flags) 1051 { 1052 IO_CODE(); 1053 return bdrv_pwritev(child, offset, bytes, NULL, 1054 BDRV_REQ_ZERO_WRITE | flags); 1055 } 1056 1057 /* 1058 * Completely zero out a block device with the help of bdrv_pwrite_zeroes. 1059 * The operation is sped up by checking the block status and only writing 1060 * zeroes to the device if they currently do not return zeroes. Optional 1061 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, 1062 * BDRV_REQ_FUA). 1063 * 1064 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite(). 1065 */ 1066 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) 1067 { 1068 int ret; 1069 int64_t target_size, bytes, offset = 0; 1070 BlockDriverState *bs = child->bs; 1071 IO_CODE(); 1072 1073 target_size = bdrv_getlength(bs); 1074 if (target_size < 0) { 1075 return target_size; 1076 } 1077 1078 for (;;) { 1079 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES); 1080 if (bytes <= 0) { 1081 return 0; 1082 } 1083 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL); 1084 if (ret < 0) { 1085 return ret; 1086 } 1087 if (ret & BDRV_BLOCK_ZERO) { 1088 offset += bytes; 1089 continue; 1090 } 1091 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags); 1092 if (ret < 0) { 1093 return ret; 1094 } 1095 offset += bytes; 1096 } 1097 } 1098 1099 /* See bdrv_pwrite() for the return codes */ 1100 int bdrv_pread(BdrvChild *child, int64_t offset, int64_t bytes, void *buf, 1101 BdrvRequestFlags flags) 1102 { 1103 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 1104 IO_CODE(); 1105 1106 if (bytes < 0) { 1107 return -EINVAL; 1108 } 1109 1110 return bdrv_preadv(child, offset, bytes, &qiov, flags); 1111 } 1112 1113 /* Return no. of bytes on success or < 0 on error. Important errors are: 1114 -EIO generic I/O error (may happen for all errors) 1115 -ENOMEDIUM No media inserted. 1116 -EINVAL Invalid offset or number of bytes 1117 -EACCES Trying to write a read-only device 1118 */ 1119 int bdrv_pwrite(BdrvChild *child, int64_t offset, int64_t bytes, 1120 const void *buf, BdrvRequestFlags flags) 1121 { 1122 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 1123 IO_CODE(); 1124 1125 if (bytes < 0) { 1126 return -EINVAL; 1127 } 1128 1129 return bdrv_pwritev(child, offset, bytes, &qiov, flags); 1130 } 1131 1132 /* 1133 * Writes to the file and ensures that no writes are reordered across this 1134 * request (acts as a barrier) 1135 * 1136 * Returns 0 on success, -errno in error cases. 1137 */ 1138 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes, 1139 const void *buf, BdrvRequestFlags flags) 1140 { 1141 int ret; 1142 IO_CODE(); 1143 1144 ret = bdrv_pwrite(child, offset, bytes, buf, flags); 1145 if (ret < 0) { 1146 return ret; 1147 } 1148 1149 ret = bdrv_flush(child->bs); 1150 if (ret < 0) { 1151 return ret; 1152 } 1153 1154 return 0; 1155 } 1156 1157 typedef struct CoroutineIOCompletion { 1158 Coroutine *coroutine; 1159 int ret; 1160 } CoroutineIOCompletion; 1161 1162 static void bdrv_co_io_em_complete(void *opaque, int ret) 1163 { 1164 CoroutineIOCompletion *co = opaque; 1165 1166 co->ret = ret; 1167 aio_co_wake(co->coroutine); 1168 } 1169 1170 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, 1171 int64_t offset, int64_t bytes, 1172 QEMUIOVector *qiov, 1173 size_t qiov_offset, int flags) 1174 { 1175 BlockDriver *drv = bs->drv; 1176 int64_t sector_num; 1177 unsigned int nb_sectors; 1178 QEMUIOVector local_qiov; 1179 int ret; 1180 1181 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1182 assert(!(flags & ~BDRV_REQ_MASK)); 1183 assert(!(flags & BDRV_REQ_NO_FALLBACK)); 1184 1185 if (!drv) { 1186 return -ENOMEDIUM; 1187 } 1188 1189 if (drv->bdrv_co_preadv_part) { 1190 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, 1191 flags); 1192 } 1193 1194 if (qiov_offset > 0 || bytes != qiov->size) { 1195 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1196 qiov = &local_qiov; 1197 } 1198 1199 if (drv->bdrv_co_preadv) { 1200 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); 1201 goto out; 1202 } 1203 1204 if (drv->bdrv_aio_preadv) { 1205 BlockAIOCB *acb; 1206 CoroutineIOCompletion co = { 1207 .coroutine = qemu_coroutine_self(), 1208 }; 1209 1210 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, 1211 bdrv_co_io_em_complete, &co); 1212 if (acb == NULL) { 1213 ret = -EIO; 1214 goto out; 1215 } else { 1216 qemu_coroutine_yield(); 1217 ret = co.ret; 1218 goto out; 1219 } 1220 } 1221 1222 sector_num = offset >> BDRV_SECTOR_BITS; 1223 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1224 1225 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1226 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1227 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1228 assert(drv->bdrv_co_readv); 1229 1230 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 1231 1232 out: 1233 if (qiov == &local_qiov) { 1234 qemu_iovec_destroy(&local_qiov); 1235 } 1236 1237 return ret; 1238 } 1239 1240 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, 1241 int64_t offset, int64_t bytes, 1242 QEMUIOVector *qiov, 1243 size_t qiov_offset, 1244 BdrvRequestFlags flags) 1245 { 1246 BlockDriver *drv = bs->drv; 1247 int64_t sector_num; 1248 unsigned int nb_sectors; 1249 QEMUIOVector local_qiov; 1250 int ret; 1251 1252 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1253 assert(!(flags & ~BDRV_REQ_MASK)); 1254 assert(!(flags & BDRV_REQ_NO_FALLBACK)); 1255 1256 if (!drv) { 1257 return -ENOMEDIUM; 1258 } 1259 1260 if (drv->bdrv_co_pwritev_part) { 1261 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 1262 flags & bs->supported_write_flags); 1263 flags &= ~bs->supported_write_flags; 1264 goto emulate_flags; 1265 } 1266 1267 if (qiov_offset > 0 || bytes != qiov->size) { 1268 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1269 qiov = &local_qiov; 1270 } 1271 1272 if (drv->bdrv_co_pwritev) { 1273 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, 1274 flags & bs->supported_write_flags); 1275 flags &= ~bs->supported_write_flags; 1276 goto emulate_flags; 1277 } 1278 1279 if (drv->bdrv_aio_pwritev) { 1280 BlockAIOCB *acb; 1281 CoroutineIOCompletion co = { 1282 .coroutine = qemu_coroutine_self(), 1283 }; 1284 1285 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, 1286 flags & bs->supported_write_flags, 1287 bdrv_co_io_em_complete, &co); 1288 flags &= ~bs->supported_write_flags; 1289 if (acb == NULL) { 1290 ret = -EIO; 1291 } else { 1292 qemu_coroutine_yield(); 1293 ret = co.ret; 1294 } 1295 goto emulate_flags; 1296 } 1297 1298 sector_num = offset >> BDRV_SECTOR_BITS; 1299 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1300 1301 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1302 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1303 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1304 1305 assert(drv->bdrv_co_writev); 1306 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, 1307 flags & bs->supported_write_flags); 1308 flags &= ~bs->supported_write_flags; 1309 1310 emulate_flags: 1311 if (ret == 0 && (flags & BDRV_REQ_FUA)) { 1312 ret = bdrv_co_flush(bs); 1313 } 1314 1315 if (qiov == &local_qiov) { 1316 qemu_iovec_destroy(&local_qiov); 1317 } 1318 1319 return ret; 1320 } 1321 1322 static int coroutine_fn 1323 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset, 1324 int64_t bytes, QEMUIOVector *qiov, 1325 size_t qiov_offset) 1326 { 1327 BlockDriver *drv = bs->drv; 1328 QEMUIOVector local_qiov; 1329 int ret; 1330 1331 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1332 1333 if (!drv) { 1334 return -ENOMEDIUM; 1335 } 1336 1337 if (!block_driver_can_compress(drv)) { 1338 return -ENOTSUP; 1339 } 1340 1341 if (drv->bdrv_co_pwritev_compressed_part) { 1342 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes, 1343 qiov, qiov_offset); 1344 } 1345 1346 if (qiov_offset == 0) { 1347 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); 1348 } 1349 1350 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1351 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov); 1352 qemu_iovec_destroy(&local_qiov); 1353 1354 return ret; 1355 } 1356 1357 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, 1358 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 1359 size_t qiov_offset, int flags) 1360 { 1361 BlockDriverState *bs = child->bs; 1362 1363 /* Perform I/O through a temporary buffer so that users who scribble over 1364 * their read buffer while the operation is in progress do not end up 1365 * modifying the image file. This is critical for zero-copy guest I/O 1366 * where anything might happen inside guest memory. 1367 */ 1368 void *bounce_buffer = NULL; 1369 1370 BlockDriver *drv = bs->drv; 1371 int64_t cluster_offset; 1372 int64_t cluster_bytes; 1373 int64_t skip_bytes; 1374 int ret; 1375 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 1376 BDRV_REQUEST_MAX_BYTES); 1377 int64_t progress = 0; 1378 bool skip_write; 1379 1380 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1381 1382 if (!drv) { 1383 return -ENOMEDIUM; 1384 } 1385 1386 /* 1387 * Do not write anything when the BDS is inactive. That is not 1388 * allowed, and it would not help. 1389 */ 1390 skip_write = (bs->open_flags & BDRV_O_INACTIVE); 1391 1392 /* FIXME We cannot require callers to have write permissions when all they 1393 * are doing is a read request. If we did things right, write permissions 1394 * would be obtained anyway, but internally by the copy-on-read code. As 1395 * long as it is implemented here rather than in a separate filter driver, 1396 * the copy-on-read code doesn't have its own BdrvChild, however, for which 1397 * it could request permissions. Therefore we have to bypass the permission 1398 * system for the moment. */ 1399 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1400 1401 /* Cover entire cluster so no additional backing file I/O is required when 1402 * allocating cluster in the image file. Note that this value may exceed 1403 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which 1404 * is one reason we loop rather than doing it all at once. 1405 */ 1406 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); 1407 skip_bytes = offset - cluster_offset; 1408 1409 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, 1410 cluster_offset, cluster_bytes); 1411 1412 while (cluster_bytes) { 1413 int64_t pnum; 1414 1415 if (skip_write) { 1416 ret = 1; /* "already allocated", so nothing will be copied */ 1417 pnum = MIN(cluster_bytes, max_transfer); 1418 } else { 1419 ret = bdrv_is_allocated(bs, cluster_offset, 1420 MIN(cluster_bytes, max_transfer), &pnum); 1421 if (ret < 0) { 1422 /* 1423 * Safe to treat errors in querying allocation as if 1424 * unallocated; we'll probably fail again soon on the 1425 * read, but at least that will set a decent errno. 1426 */ 1427 pnum = MIN(cluster_bytes, max_transfer); 1428 } 1429 1430 /* Stop at EOF if the image ends in the middle of the cluster */ 1431 if (ret == 0 && pnum == 0) { 1432 assert(progress >= bytes); 1433 break; 1434 } 1435 1436 assert(skip_bytes < pnum); 1437 } 1438 1439 if (ret <= 0) { 1440 QEMUIOVector local_qiov; 1441 1442 /* Must copy-on-read; use the bounce buffer */ 1443 pnum = MIN(pnum, MAX_BOUNCE_BUFFER); 1444 if (!bounce_buffer) { 1445 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum); 1446 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER); 1447 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed); 1448 1449 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len); 1450 if (!bounce_buffer) { 1451 ret = -ENOMEM; 1452 goto err; 1453 } 1454 } 1455 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); 1456 1457 ret = bdrv_driver_preadv(bs, cluster_offset, pnum, 1458 &local_qiov, 0, 0); 1459 if (ret < 0) { 1460 goto err; 1461 } 1462 1463 bdrv_debug_event(bs, BLKDBG_COR_WRITE); 1464 if (drv->bdrv_co_pwrite_zeroes && 1465 buffer_is_zero(bounce_buffer, pnum)) { 1466 /* FIXME: Should we (perhaps conditionally) be setting 1467 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy 1468 * that still correctly reads as zero? */ 1469 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 1470 BDRV_REQ_WRITE_UNCHANGED); 1471 } else { 1472 /* This does not change the data on the disk, it is not 1473 * necessary to flush even in cache=writethrough mode. 1474 */ 1475 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, 1476 &local_qiov, 0, 1477 BDRV_REQ_WRITE_UNCHANGED); 1478 } 1479 1480 if (ret < 0) { 1481 /* It might be okay to ignore write errors for guest 1482 * requests. If this is a deliberate copy-on-read 1483 * then we don't want to ignore the error. Simply 1484 * report it in all cases. 1485 */ 1486 goto err; 1487 } 1488 1489 if (!(flags & BDRV_REQ_PREFETCH)) { 1490 qemu_iovec_from_buf(qiov, qiov_offset + progress, 1491 bounce_buffer + skip_bytes, 1492 MIN(pnum - skip_bytes, bytes - progress)); 1493 } 1494 } else if (!(flags & BDRV_REQ_PREFETCH)) { 1495 /* Read directly into the destination */ 1496 ret = bdrv_driver_preadv(bs, offset + progress, 1497 MIN(pnum - skip_bytes, bytes - progress), 1498 qiov, qiov_offset + progress, 0); 1499 if (ret < 0) { 1500 goto err; 1501 } 1502 } 1503 1504 cluster_offset += pnum; 1505 cluster_bytes -= pnum; 1506 progress += pnum - skip_bytes; 1507 skip_bytes = 0; 1508 } 1509 ret = 0; 1510 1511 err: 1512 qemu_vfree(bounce_buffer); 1513 return ret; 1514 } 1515 1516 /* 1517 * Forwards an already correctly aligned request to the BlockDriver. This 1518 * handles copy on read, zeroing after EOF, and fragmentation of large 1519 * reads; any other features must be implemented by the caller. 1520 */ 1521 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, 1522 BdrvTrackedRequest *req, int64_t offset, int64_t bytes, 1523 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) 1524 { 1525 BlockDriverState *bs = child->bs; 1526 int64_t total_bytes, max_bytes; 1527 int ret = 0; 1528 int64_t bytes_remaining = bytes; 1529 int max_transfer; 1530 1531 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1532 assert(is_power_of_2(align)); 1533 assert((offset & (align - 1)) == 0); 1534 assert((bytes & (align - 1)) == 0); 1535 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1536 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1537 align); 1538 1539 /* TODO: We would need a per-BDS .supported_read_flags and 1540 * potential fallback support, if we ever implement any read flags 1541 * to pass through to drivers. For now, there aren't any 1542 * passthrough flags. */ 1543 assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH))); 1544 1545 /* Handle Copy on Read and associated serialisation */ 1546 if (flags & BDRV_REQ_COPY_ON_READ) { 1547 /* If we touch the same cluster it counts as an overlap. This 1548 * guarantees that allocating writes will be serialized and not race 1549 * with each other for the same cluster. For example, in copy-on-read 1550 * it ensures that the CoR read and write operations are atomic and 1551 * guest writes cannot interleave between them. */ 1552 bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs)); 1553 } else { 1554 bdrv_wait_serialising_requests(req); 1555 } 1556 1557 if (flags & BDRV_REQ_COPY_ON_READ) { 1558 int64_t pnum; 1559 1560 /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */ 1561 flags &= ~BDRV_REQ_COPY_ON_READ; 1562 1563 ret = bdrv_is_allocated(bs, offset, bytes, &pnum); 1564 if (ret < 0) { 1565 goto out; 1566 } 1567 1568 if (!ret || pnum != bytes) { 1569 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, 1570 qiov, qiov_offset, flags); 1571 goto out; 1572 } else if (flags & BDRV_REQ_PREFETCH) { 1573 goto out; 1574 } 1575 } 1576 1577 /* Forward the request to the BlockDriver, possibly fragmenting it */ 1578 total_bytes = bdrv_getlength(bs); 1579 if (total_bytes < 0) { 1580 ret = total_bytes; 1581 goto out; 1582 } 1583 1584 assert(!(flags & ~bs->supported_read_flags)); 1585 1586 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); 1587 if (bytes <= max_bytes && bytes <= max_transfer) { 1588 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags); 1589 goto out; 1590 } 1591 1592 while (bytes_remaining) { 1593 int64_t num; 1594 1595 if (max_bytes) { 1596 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer)); 1597 assert(num); 1598 1599 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, 1600 num, qiov, 1601 qiov_offset + bytes - bytes_remaining, 1602 flags); 1603 max_bytes -= num; 1604 } else { 1605 num = bytes_remaining; 1606 ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining, 1607 0, bytes_remaining); 1608 } 1609 if (ret < 0) { 1610 goto out; 1611 } 1612 bytes_remaining -= num; 1613 } 1614 1615 out: 1616 return ret < 0 ? ret : 0; 1617 } 1618 1619 /* 1620 * Request padding 1621 * 1622 * |<---- align ----->| |<----- align ---->| 1623 * |<- head ->|<------------- bytes ------------->|<-- tail -->| 1624 * | | | | | | 1625 * -*----------$-------*-------- ... --------*-----$------------*--- 1626 * | | | | | | 1627 * | offset | | end | 1628 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end) 1629 * [buf ... ) [tail_buf ) 1630 * 1631 * @buf is an aligned allocation needed to store @head and @tail paddings. @head 1632 * is placed at the beginning of @buf and @tail at the @end. 1633 * 1634 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk 1635 * around tail, if tail exists. 1636 * 1637 * @merge_reads is true for small requests, 1638 * if @buf_len == @head + bytes + @tail. In this case it is possible that both 1639 * head and tail exist but @buf_len == align and @tail_buf == @buf. 1640 */ 1641 typedef struct BdrvRequestPadding { 1642 uint8_t *buf; 1643 size_t buf_len; 1644 uint8_t *tail_buf; 1645 size_t head; 1646 size_t tail; 1647 bool merge_reads; 1648 QEMUIOVector local_qiov; 1649 } BdrvRequestPadding; 1650 1651 static bool bdrv_init_padding(BlockDriverState *bs, 1652 int64_t offset, int64_t bytes, 1653 BdrvRequestPadding *pad) 1654 { 1655 int64_t align = bs->bl.request_alignment; 1656 int64_t sum; 1657 1658 bdrv_check_request(offset, bytes, &error_abort); 1659 assert(align <= INT_MAX); /* documented in block/block_int.h */ 1660 assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */ 1661 1662 memset(pad, 0, sizeof(*pad)); 1663 1664 pad->head = offset & (align - 1); 1665 pad->tail = ((offset + bytes) & (align - 1)); 1666 if (pad->tail) { 1667 pad->tail = align - pad->tail; 1668 } 1669 1670 if (!pad->head && !pad->tail) { 1671 return false; 1672 } 1673 1674 assert(bytes); /* Nothing good in aligning zero-length requests */ 1675 1676 sum = pad->head + bytes + pad->tail; 1677 pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align; 1678 pad->buf = qemu_blockalign(bs, pad->buf_len); 1679 pad->merge_reads = sum == pad->buf_len; 1680 if (pad->tail) { 1681 pad->tail_buf = pad->buf + pad->buf_len - align; 1682 } 1683 1684 return true; 1685 } 1686 1687 static int bdrv_padding_rmw_read(BdrvChild *child, 1688 BdrvTrackedRequest *req, 1689 BdrvRequestPadding *pad, 1690 bool zero_middle) 1691 { 1692 QEMUIOVector local_qiov; 1693 BlockDriverState *bs = child->bs; 1694 uint64_t align = bs->bl.request_alignment; 1695 int ret; 1696 1697 assert(req->serialising && pad->buf); 1698 1699 if (pad->head || pad->merge_reads) { 1700 int64_t bytes = pad->merge_reads ? pad->buf_len : align; 1701 1702 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); 1703 1704 if (pad->head) { 1705 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 1706 } 1707 if (pad->merge_reads && pad->tail) { 1708 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1709 } 1710 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, 1711 align, &local_qiov, 0, 0); 1712 if (ret < 0) { 1713 return ret; 1714 } 1715 if (pad->head) { 1716 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1717 } 1718 if (pad->merge_reads && pad->tail) { 1719 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1720 } 1721 1722 if (pad->merge_reads) { 1723 goto zero_mem; 1724 } 1725 } 1726 1727 if (pad->tail) { 1728 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); 1729 1730 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1731 ret = bdrv_aligned_preadv( 1732 child, req, 1733 req->overlap_offset + req->overlap_bytes - align, 1734 align, align, &local_qiov, 0, 0); 1735 if (ret < 0) { 1736 return ret; 1737 } 1738 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1739 } 1740 1741 zero_mem: 1742 if (zero_middle) { 1743 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); 1744 } 1745 1746 return 0; 1747 } 1748 1749 static void bdrv_padding_destroy(BdrvRequestPadding *pad) 1750 { 1751 if (pad->buf) { 1752 qemu_vfree(pad->buf); 1753 qemu_iovec_destroy(&pad->local_qiov); 1754 } 1755 memset(pad, 0, sizeof(*pad)); 1756 } 1757 1758 /* 1759 * bdrv_pad_request 1760 * 1761 * Exchange request parameters with padded request if needed. Don't include RMW 1762 * read of padding, bdrv_padding_rmw_read() should be called separately if 1763 * needed. 1764 * 1765 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out: 1766 * - on function start they represent original request 1767 * - on failure or when padding is not needed they are unchanged 1768 * - on success when padding is needed they represent padded request 1769 */ 1770 static int bdrv_pad_request(BlockDriverState *bs, 1771 QEMUIOVector **qiov, size_t *qiov_offset, 1772 int64_t *offset, int64_t *bytes, 1773 BdrvRequestPadding *pad, bool *padded) 1774 { 1775 int ret; 1776 1777 bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort); 1778 1779 if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { 1780 if (padded) { 1781 *padded = false; 1782 } 1783 return 0; 1784 } 1785 1786 ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, 1787 *qiov, *qiov_offset, *bytes, 1788 pad->buf + pad->buf_len - pad->tail, 1789 pad->tail); 1790 if (ret < 0) { 1791 bdrv_padding_destroy(pad); 1792 return ret; 1793 } 1794 *bytes += pad->head + pad->tail; 1795 *offset -= pad->head; 1796 *qiov = &pad->local_qiov; 1797 *qiov_offset = 0; 1798 if (padded) { 1799 *padded = true; 1800 } 1801 1802 return 0; 1803 } 1804 1805 int coroutine_fn bdrv_co_preadv(BdrvChild *child, 1806 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 1807 BdrvRequestFlags flags) 1808 { 1809 IO_CODE(); 1810 return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags); 1811 } 1812 1813 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, 1814 int64_t offset, int64_t bytes, 1815 QEMUIOVector *qiov, size_t qiov_offset, 1816 BdrvRequestFlags flags) 1817 { 1818 BlockDriverState *bs = child->bs; 1819 BdrvTrackedRequest req; 1820 BdrvRequestPadding pad; 1821 int ret; 1822 IO_CODE(); 1823 1824 trace_bdrv_co_preadv_part(bs, offset, bytes, flags); 1825 1826 if (!bdrv_is_inserted(bs)) { 1827 return -ENOMEDIUM; 1828 } 1829 1830 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 1831 if (ret < 0) { 1832 return ret; 1833 } 1834 1835 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 1836 /* 1837 * Aligning zero request is nonsense. Even if driver has special meaning 1838 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass 1839 * it to driver due to request_alignment. 1840 * 1841 * Still, no reason to return an error if someone do unaligned 1842 * zero-length read occasionally. 1843 */ 1844 return 0; 1845 } 1846 1847 bdrv_inc_in_flight(bs); 1848 1849 /* Don't do copy-on-read if we read data before write operation */ 1850 if (qatomic_read(&bs->copy_on_read)) { 1851 flags |= BDRV_REQ_COPY_ON_READ; 1852 } 1853 1854 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 1855 NULL); 1856 if (ret < 0) { 1857 goto fail; 1858 } 1859 1860 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); 1861 ret = bdrv_aligned_preadv(child, &req, offset, bytes, 1862 bs->bl.request_alignment, 1863 qiov, qiov_offset, flags); 1864 tracked_request_end(&req); 1865 bdrv_padding_destroy(&pad); 1866 1867 fail: 1868 bdrv_dec_in_flight(bs); 1869 1870 return ret; 1871 } 1872 1873 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 1874 int64_t offset, int64_t bytes, BdrvRequestFlags flags) 1875 { 1876 BlockDriver *drv = bs->drv; 1877 QEMUIOVector qiov; 1878 void *buf = NULL; 1879 int ret = 0; 1880 bool need_flush = false; 1881 int head = 0; 1882 int tail = 0; 1883 1884 int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, 1885 INT64_MAX); 1886 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, 1887 bs->bl.request_alignment); 1888 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); 1889 1890 bdrv_check_request(offset, bytes, &error_abort); 1891 1892 if (!drv) { 1893 return -ENOMEDIUM; 1894 } 1895 1896 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { 1897 return -ENOTSUP; 1898 } 1899 1900 /* Invalidate the cached block-status data range if this write overlaps */ 1901 bdrv_bsc_invalidate_range(bs, offset, bytes); 1902 1903 assert(alignment % bs->bl.request_alignment == 0); 1904 head = offset % alignment; 1905 tail = (offset + bytes) % alignment; 1906 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); 1907 assert(max_write_zeroes >= bs->bl.request_alignment); 1908 1909 while (bytes > 0 && !ret) { 1910 int64_t num = bytes; 1911 1912 /* Align request. Block drivers can expect the "bulk" of the request 1913 * to be aligned, and that unaligned requests do not cross cluster 1914 * boundaries. 1915 */ 1916 if (head) { 1917 /* Make a small request up to the first aligned sector. For 1918 * convenience, limit this request to max_transfer even if 1919 * we don't need to fall back to writes. */ 1920 num = MIN(MIN(bytes, max_transfer), alignment - head); 1921 head = (head + num) % alignment; 1922 assert(num < max_write_zeroes); 1923 } else if (tail && num > alignment) { 1924 /* Shorten the request to the last aligned sector. */ 1925 num -= tail; 1926 } 1927 1928 /* limit request size */ 1929 if (num > max_write_zeroes) { 1930 num = max_write_zeroes; 1931 } 1932 1933 ret = -ENOTSUP; 1934 /* First try the efficient write zeroes operation */ 1935 if (drv->bdrv_co_pwrite_zeroes) { 1936 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1937 flags & bs->supported_zero_flags); 1938 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1939 !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1940 need_flush = true; 1941 } 1942 } else { 1943 assert(!bs->supported_zero_flags); 1944 } 1945 1946 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { 1947 /* Fall back to bounce buffer if write zeroes is unsupported */ 1948 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1949 1950 if ((flags & BDRV_REQ_FUA) && 1951 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1952 /* No need for bdrv_driver_pwrite() to do a fallback 1953 * flush on each chunk; use just one at the end */ 1954 write_flags &= ~BDRV_REQ_FUA; 1955 need_flush = true; 1956 } 1957 num = MIN(num, max_transfer); 1958 if (buf == NULL) { 1959 buf = qemu_try_blockalign0(bs, num); 1960 if (buf == NULL) { 1961 ret = -ENOMEM; 1962 goto fail; 1963 } 1964 } 1965 qemu_iovec_init_buf(&qiov, buf, num); 1966 1967 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags); 1968 1969 /* Keep bounce buffer around if it is big enough for all 1970 * all future requests. 1971 */ 1972 if (num < max_transfer) { 1973 qemu_vfree(buf); 1974 buf = NULL; 1975 } 1976 } 1977 1978 offset += num; 1979 bytes -= num; 1980 } 1981 1982 fail: 1983 if (ret == 0 && need_flush) { 1984 ret = bdrv_co_flush(bs); 1985 } 1986 qemu_vfree(buf); 1987 return ret; 1988 } 1989 1990 static inline int coroutine_fn 1991 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes, 1992 BdrvTrackedRequest *req, int flags) 1993 { 1994 BlockDriverState *bs = child->bs; 1995 1996 bdrv_check_request(offset, bytes, &error_abort); 1997 1998 if (bdrv_is_read_only(bs)) { 1999 return -EPERM; 2000 } 2001 2002 assert(!(bs->open_flags & BDRV_O_INACTIVE)); 2003 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 2004 assert(!(flags & ~BDRV_REQ_MASK)); 2005 assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING))); 2006 2007 if (flags & BDRV_REQ_SERIALISING) { 2008 QEMU_LOCK_GUARD(&bs->reqs_lock); 2009 2010 tracked_request_set_serialising(req, bdrv_get_cluster_size(bs)); 2011 2012 if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) { 2013 return -EBUSY; 2014 } 2015 2016 bdrv_wait_serialising_requests_locked(req); 2017 } else { 2018 bdrv_wait_serialising_requests(req); 2019 } 2020 2021 assert(req->overlap_offset <= offset); 2022 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 2023 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE || 2024 child->perm & BLK_PERM_RESIZE); 2025 2026 switch (req->type) { 2027 case BDRV_TRACKED_WRITE: 2028 case BDRV_TRACKED_DISCARD: 2029 if (flags & BDRV_REQ_WRITE_UNCHANGED) { 2030 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 2031 } else { 2032 assert(child->perm & BLK_PERM_WRITE); 2033 } 2034 bdrv_write_threshold_check_write(bs, offset, bytes); 2035 return 0; 2036 case BDRV_TRACKED_TRUNCATE: 2037 assert(child->perm & BLK_PERM_RESIZE); 2038 return 0; 2039 default: 2040 abort(); 2041 } 2042 } 2043 2044 static inline void coroutine_fn 2045 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes, 2046 BdrvTrackedRequest *req, int ret) 2047 { 2048 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 2049 BlockDriverState *bs = child->bs; 2050 2051 bdrv_check_request(offset, bytes, &error_abort); 2052 2053 qatomic_inc(&bs->write_gen); 2054 2055 /* 2056 * Discard cannot extend the image, but in error handling cases, such as 2057 * when reverting a qcow2 cluster allocation, the discarded range can pass 2058 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 2059 * here. Instead, just skip it, since semantically a discard request 2060 * beyond EOF cannot expand the image anyway. 2061 */ 2062 if (ret == 0 && 2063 (req->type == BDRV_TRACKED_TRUNCATE || 2064 end_sector > bs->total_sectors) && 2065 req->type != BDRV_TRACKED_DISCARD) { 2066 bs->total_sectors = end_sector; 2067 bdrv_parent_cb_resize(bs); 2068 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 2069 } 2070 if (req->bytes) { 2071 switch (req->type) { 2072 case BDRV_TRACKED_WRITE: 2073 stat64_max(&bs->wr_highest_offset, offset + bytes); 2074 /* fall through, to set dirty bits */ 2075 case BDRV_TRACKED_DISCARD: 2076 bdrv_set_dirty(bs, offset, bytes); 2077 break; 2078 default: 2079 break; 2080 } 2081 } 2082 } 2083 2084 /* 2085 * Forwards an already correctly aligned write request to the BlockDriver, 2086 * after possibly fragmenting it. 2087 */ 2088 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, 2089 BdrvTrackedRequest *req, int64_t offset, int64_t bytes, 2090 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, 2091 BdrvRequestFlags flags) 2092 { 2093 BlockDriverState *bs = child->bs; 2094 BlockDriver *drv = bs->drv; 2095 int ret; 2096 2097 int64_t bytes_remaining = bytes; 2098 int max_transfer; 2099 2100 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 2101 2102 if (!drv) { 2103 return -ENOMEDIUM; 2104 } 2105 2106 if (bdrv_has_readonly_bitmaps(bs)) { 2107 return -EPERM; 2108 } 2109 2110 assert(is_power_of_2(align)); 2111 assert((offset & (align - 1)) == 0); 2112 assert((bytes & (align - 1)) == 0); 2113 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 2114 align); 2115 2116 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 2117 2118 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 2119 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 2120 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 2121 flags |= BDRV_REQ_ZERO_WRITE; 2122 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 2123 flags |= BDRV_REQ_MAY_UNMAP; 2124 } 2125 } 2126 2127 if (ret < 0) { 2128 /* Do nothing, write notifier decided to fail this request */ 2129 } else if (flags & BDRV_REQ_ZERO_WRITE) { 2130 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 2131 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 2132 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 2133 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 2134 qiov, qiov_offset); 2135 } else if (bytes <= max_transfer) { 2136 bdrv_debug_event(bs, BLKDBG_PWRITEV); 2137 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 2138 } else { 2139 bdrv_debug_event(bs, BLKDBG_PWRITEV); 2140 while (bytes_remaining) { 2141 int num = MIN(bytes_remaining, max_transfer); 2142 int local_flags = flags; 2143 2144 assert(num); 2145 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 2146 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 2147 /* If FUA is going to be emulated by flush, we only 2148 * need to flush on the last iteration */ 2149 local_flags &= ~BDRV_REQ_FUA; 2150 } 2151 2152 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 2153 num, qiov, 2154 qiov_offset + bytes - bytes_remaining, 2155 local_flags); 2156 if (ret < 0) { 2157 break; 2158 } 2159 bytes_remaining -= num; 2160 } 2161 } 2162 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 2163 2164 if (ret >= 0) { 2165 ret = 0; 2166 } 2167 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 2168 2169 return ret; 2170 } 2171 2172 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, 2173 int64_t offset, 2174 int64_t bytes, 2175 BdrvRequestFlags flags, 2176 BdrvTrackedRequest *req) 2177 { 2178 BlockDriverState *bs = child->bs; 2179 QEMUIOVector local_qiov; 2180 uint64_t align = bs->bl.request_alignment; 2181 int ret = 0; 2182 bool padding; 2183 BdrvRequestPadding pad; 2184 2185 padding = bdrv_init_padding(bs, offset, bytes, &pad); 2186 if (padding) { 2187 assert(!(flags & BDRV_REQ_NO_WAIT)); 2188 bdrv_make_request_serialising(req, align); 2189 2190 bdrv_padding_rmw_read(child, req, &pad, true); 2191 2192 if (pad.head || pad.merge_reads) { 2193 int64_t aligned_offset = offset & ~(align - 1); 2194 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2195 2196 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2197 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2198 align, &local_qiov, 0, 2199 flags & ~BDRV_REQ_ZERO_WRITE); 2200 if (ret < 0 || pad.merge_reads) { 2201 /* Error or all work is done */ 2202 goto out; 2203 } 2204 offset += write_bytes - pad.head; 2205 bytes -= write_bytes - pad.head; 2206 } 2207 } 2208 2209 assert(!bytes || (offset & (align - 1)) == 0); 2210 if (bytes >= align) { 2211 /* Write the aligned part in the middle. */ 2212 int64_t aligned_bytes = bytes & ~(align - 1); 2213 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2214 NULL, 0, flags); 2215 if (ret < 0) { 2216 goto out; 2217 } 2218 bytes -= aligned_bytes; 2219 offset += aligned_bytes; 2220 } 2221 2222 assert(!bytes || (offset & (align - 1)) == 0); 2223 if (bytes) { 2224 assert(align == pad.tail + bytes); 2225 2226 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2227 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2228 &local_qiov, 0, 2229 flags & ~BDRV_REQ_ZERO_WRITE); 2230 } 2231 2232 out: 2233 bdrv_padding_destroy(&pad); 2234 2235 return ret; 2236 } 2237 2238 /* 2239 * Handle a write request in coroutine context 2240 */ 2241 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2242 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 2243 BdrvRequestFlags flags) 2244 { 2245 IO_CODE(); 2246 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2247 } 2248 2249 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2250 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, 2251 BdrvRequestFlags flags) 2252 { 2253 BlockDriverState *bs = child->bs; 2254 BdrvTrackedRequest req; 2255 uint64_t align = bs->bl.request_alignment; 2256 BdrvRequestPadding pad; 2257 int ret; 2258 bool padded = false; 2259 IO_CODE(); 2260 2261 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); 2262 2263 if (!bdrv_is_inserted(bs)) { 2264 return -ENOMEDIUM; 2265 } 2266 2267 if (flags & BDRV_REQ_ZERO_WRITE) { 2268 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 2269 } else { 2270 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 2271 } 2272 if (ret < 0) { 2273 return ret; 2274 } 2275 2276 /* If the request is misaligned then we can't make it efficient */ 2277 if ((flags & BDRV_REQ_NO_FALLBACK) && 2278 !QEMU_IS_ALIGNED(offset | bytes, align)) 2279 { 2280 return -ENOTSUP; 2281 } 2282 2283 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 2284 /* 2285 * Aligning zero request is nonsense. Even if driver has special meaning 2286 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass 2287 * it to driver due to request_alignment. 2288 * 2289 * Still, no reason to return an error if someone do unaligned 2290 * zero-length write occasionally. 2291 */ 2292 return 0; 2293 } 2294 2295 if (!(flags & BDRV_REQ_ZERO_WRITE)) { 2296 /* 2297 * Pad request for following read-modify-write cycle. 2298 * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do 2299 * alignment only if there is no ZERO flag. 2300 */ 2301 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 2302 &padded); 2303 if (ret < 0) { 2304 return ret; 2305 } 2306 } 2307 2308 bdrv_inc_in_flight(bs); 2309 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 2310 2311 if (flags & BDRV_REQ_ZERO_WRITE) { 2312 assert(!padded); 2313 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 2314 goto out; 2315 } 2316 2317 if (padded) { 2318 /* 2319 * Request was unaligned to request_alignment and therefore 2320 * padded. We are going to do read-modify-write, and must 2321 * serialize the request to prevent interactions of the 2322 * widened region with other transactions. 2323 */ 2324 assert(!(flags & BDRV_REQ_NO_WAIT)); 2325 bdrv_make_request_serialising(&req, align); 2326 bdrv_padding_rmw_read(child, &req, &pad, false); 2327 } 2328 2329 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, 2330 qiov, qiov_offset, flags); 2331 2332 bdrv_padding_destroy(&pad); 2333 2334 out: 2335 tracked_request_end(&req); 2336 bdrv_dec_in_flight(bs); 2337 2338 return ret; 2339 } 2340 2341 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 2342 int64_t bytes, BdrvRequestFlags flags) 2343 { 2344 IO_CODE(); 2345 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); 2346 2347 if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 2348 flags &= ~BDRV_REQ_MAY_UNMAP; 2349 } 2350 2351 return bdrv_co_pwritev(child, offset, bytes, NULL, 2352 BDRV_REQ_ZERO_WRITE | flags); 2353 } 2354 2355 /* 2356 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not. 2357 */ 2358 int bdrv_flush_all(void) 2359 { 2360 BdrvNextIterator it; 2361 BlockDriverState *bs = NULL; 2362 int result = 0; 2363 2364 GLOBAL_STATE_CODE(); 2365 2366 /* 2367 * bdrv queue is managed by record/replay, 2368 * creating new flush request for stopping 2369 * the VM may break the determinism 2370 */ 2371 if (replay_events_enabled()) { 2372 return result; 2373 } 2374 2375 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 2376 AioContext *aio_context = bdrv_get_aio_context(bs); 2377 int ret; 2378 2379 aio_context_acquire(aio_context); 2380 ret = bdrv_flush(bs); 2381 if (ret < 0 && !result) { 2382 result = ret; 2383 } 2384 aio_context_release(aio_context); 2385 } 2386 2387 return result; 2388 } 2389 2390 /* 2391 * Returns the allocation status of the specified sectors. 2392 * Drivers not implementing the functionality are assumed to not support 2393 * backing files, hence all their sectors are reported as allocated. 2394 * 2395 * If 'want_zero' is true, the caller is querying for mapping 2396 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and 2397 * _ZERO where possible; otherwise, the result favors larger 'pnum', 2398 * with a focus on accurate BDRV_BLOCK_ALLOCATED. 2399 * 2400 * If 'offset' is beyond the end of the disk image the return value is 2401 * BDRV_BLOCK_EOF and 'pnum' is set to 0. 2402 * 2403 * 'bytes' is the max value 'pnum' should be set to. If bytes goes 2404 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2405 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2406 * 2407 * 'pnum' is set to the number of bytes (including and immediately 2408 * following the specified offset) that are easily known to be in the 2409 * same allocated/unallocated state. Note that a second call starting 2410 * at the original offset plus returned pnum may have the same status. 2411 * The returned value is non-zero on success except at end-of-file. 2412 * 2413 * Returns negative errno on failure. Otherwise, if the 2414 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2415 * set to the host mapping and BDS corresponding to the guest offset. 2416 */ 2417 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, 2418 bool want_zero, 2419 int64_t offset, int64_t bytes, 2420 int64_t *pnum, int64_t *map, 2421 BlockDriverState **file) 2422 { 2423 int64_t total_size; 2424 int64_t n; /* bytes */ 2425 int ret; 2426 int64_t local_map = 0; 2427 BlockDriverState *local_file = NULL; 2428 int64_t aligned_offset, aligned_bytes; 2429 uint32_t align; 2430 bool has_filtered_child; 2431 2432 assert(pnum); 2433 *pnum = 0; 2434 total_size = bdrv_getlength(bs); 2435 if (total_size < 0) { 2436 ret = total_size; 2437 goto early_out; 2438 } 2439 2440 if (offset >= total_size) { 2441 ret = BDRV_BLOCK_EOF; 2442 goto early_out; 2443 } 2444 if (!bytes) { 2445 ret = 0; 2446 goto early_out; 2447 } 2448 2449 n = total_size - offset; 2450 if (n < bytes) { 2451 bytes = n; 2452 } 2453 2454 /* Must be non-NULL or bdrv_getlength() would have failed */ 2455 assert(bs->drv); 2456 has_filtered_child = bdrv_filter_child(bs); 2457 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2458 *pnum = bytes; 2459 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2460 if (offset + bytes == total_size) { 2461 ret |= BDRV_BLOCK_EOF; 2462 } 2463 if (bs->drv->protocol_name) { 2464 ret |= BDRV_BLOCK_OFFSET_VALID; 2465 local_map = offset; 2466 local_file = bs; 2467 } 2468 goto early_out; 2469 } 2470 2471 bdrv_inc_in_flight(bs); 2472 2473 /* Round out to request_alignment boundaries */ 2474 align = bs->bl.request_alignment; 2475 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2476 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2477 2478 if (bs->drv->bdrv_co_block_status) { 2479 /* 2480 * Use the block-status cache only for protocol nodes: Format 2481 * drivers are generally quick to inquire the status, but protocol 2482 * drivers often need to get information from outside of qemu, so 2483 * we do not have control over the actual implementation. There 2484 * have been cases where inquiring the status took an unreasonably 2485 * long time, and we can do nothing in qemu to fix it. 2486 * This is especially problematic for images with large data areas, 2487 * because finding the few holes in them and giving them special 2488 * treatment does not gain much performance. Therefore, we try to 2489 * cache the last-identified data region. 2490 * 2491 * Second, limiting ourselves to protocol nodes allows us to assume 2492 * the block status for data regions to be DATA | OFFSET_VALID, and 2493 * that the host offset is the same as the guest offset. 2494 * 2495 * Note that it is possible that external writers zero parts of 2496 * the cached regions without the cache being invalidated, and so 2497 * we may report zeroes as data. This is not catastrophic, 2498 * however, because reporting zeroes as data is fine. 2499 */ 2500 if (QLIST_EMPTY(&bs->children) && 2501 bdrv_bsc_is_data(bs, aligned_offset, pnum)) 2502 { 2503 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2504 local_file = bs; 2505 local_map = aligned_offset; 2506 } else { 2507 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2508 aligned_bytes, pnum, &local_map, 2509 &local_file); 2510 2511 /* 2512 * Note that checking QLIST_EMPTY(&bs->children) is also done when 2513 * the cache is queried above. Technically, we do not need to check 2514 * it here; the worst that can happen is that we fill the cache for 2515 * non-protocol nodes, and then it is never used. However, filling 2516 * the cache requires an RCU update, so double check here to avoid 2517 * such an update if possible. 2518 * 2519 * Check want_zero, because we only want to update the cache when we 2520 * have accurate information about what is zero and what is data. 2521 */ 2522 if (want_zero && 2523 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) && 2524 QLIST_EMPTY(&bs->children)) 2525 { 2526 /* 2527 * When a protocol driver reports BLOCK_OFFSET_VALID, the 2528 * returned local_map value must be the same as the offset we 2529 * have passed (aligned_offset), and local_bs must be the node 2530 * itself. 2531 * Assert this, because we follow this rule when reading from 2532 * the cache (see the `local_file = bs` and 2533 * `local_map = aligned_offset` assignments above), and the 2534 * result the cache delivers must be the same as the driver 2535 * would deliver. 2536 */ 2537 assert(local_file == bs); 2538 assert(local_map == aligned_offset); 2539 bdrv_bsc_fill(bs, aligned_offset, *pnum); 2540 } 2541 } 2542 } else { 2543 /* Default code for filters */ 2544 2545 local_file = bdrv_filter_bs(bs); 2546 assert(local_file); 2547 2548 *pnum = aligned_bytes; 2549 local_map = aligned_offset; 2550 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2551 } 2552 if (ret < 0) { 2553 *pnum = 0; 2554 goto out; 2555 } 2556 2557 /* 2558 * The driver's result must be a non-zero multiple of request_alignment. 2559 * Clamp pnum and adjust map to original request. 2560 */ 2561 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2562 align > offset - aligned_offset); 2563 if (ret & BDRV_BLOCK_RECURSE) { 2564 assert(ret & BDRV_BLOCK_DATA); 2565 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2566 assert(!(ret & BDRV_BLOCK_ZERO)); 2567 } 2568 2569 *pnum -= offset - aligned_offset; 2570 if (*pnum > bytes) { 2571 *pnum = bytes; 2572 } 2573 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2574 local_map += offset - aligned_offset; 2575 } 2576 2577 if (ret & BDRV_BLOCK_RAW) { 2578 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2579 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2580 *pnum, pnum, &local_map, &local_file); 2581 goto out; 2582 } 2583 2584 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2585 ret |= BDRV_BLOCK_ALLOCATED; 2586 } else if (bs->drv->supports_backing) { 2587 BlockDriverState *cow_bs = bdrv_cow_bs(bs); 2588 2589 if (!cow_bs) { 2590 ret |= BDRV_BLOCK_ZERO; 2591 } else if (want_zero) { 2592 int64_t size2 = bdrv_getlength(cow_bs); 2593 2594 if (size2 >= 0 && offset >= size2) { 2595 ret |= BDRV_BLOCK_ZERO; 2596 } 2597 } 2598 } 2599 2600 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2601 local_file && local_file != bs && 2602 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2603 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2604 int64_t file_pnum; 2605 int ret2; 2606 2607 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2608 *pnum, &file_pnum, NULL, NULL); 2609 if (ret2 >= 0) { 2610 /* Ignore errors. This is just providing extra information, it 2611 * is useful but not necessary. 2612 */ 2613 if (ret2 & BDRV_BLOCK_EOF && 2614 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2615 /* 2616 * It is valid for the format block driver to read 2617 * beyond the end of the underlying file's current 2618 * size; such areas read as zero. 2619 */ 2620 ret |= BDRV_BLOCK_ZERO; 2621 } else { 2622 /* Limit request to the range reported by the protocol driver */ 2623 *pnum = file_pnum; 2624 ret |= (ret2 & BDRV_BLOCK_ZERO); 2625 } 2626 } 2627 } 2628 2629 out: 2630 bdrv_dec_in_flight(bs); 2631 if (ret >= 0 && offset + *pnum == total_size) { 2632 ret |= BDRV_BLOCK_EOF; 2633 } 2634 early_out: 2635 if (file) { 2636 *file = local_file; 2637 } 2638 if (map) { 2639 *map = local_map; 2640 } 2641 return ret; 2642 } 2643 2644 int coroutine_fn 2645 bdrv_co_common_block_status_above(BlockDriverState *bs, 2646 BlockDriverState *base, 2647 bool include_base, 2648 bool want_zero, 2649 int64_t offset, 2650 int64_t bytes, 2651 int64_t *pnum, 2652 int64_t *map, 2653 BlockDriverState **file, 2654 int *depth) 2655 { 2656 int ret; 2657 BlockDriverState *p; 2658 int64_t eof = 0; 2659 int dummy; 2660 IO_CODE(); 2661 2662 assert(!include_base || base); /* Can't include NULL base */ 2663 2664 if (!depth) { 2665 depth = &dummy; 2666 } 2667 *depth = 0; 2668 2669 if (!include_base && bs == base) { 2670 *pnum = bytes; 2671 return 0; 2672 } 2673 2674 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file); 2675 ++*depth; 2676 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { 2677 return ret; 2678 } 2679 2680 if (ret & BDRV_BLOCK_EOF) { 2681 eof = offset + *pnum; 2682 } 2683 2684 assert(*pnum <= bytes); 2685 bytes = *pnum; 2686 2687 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; 2688 p = bdrv_filter_or_cow_bs(p)) 2689 { 2690 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2691 file); 2692 ++*depth; 2693 if (ret < 0) { 2694 return ret; 2695 } 2696 if (*pnum == 0) { 2697 /* 2698 * The top layer deferred to this layer, and because this layer is 2699 * short, any zeroes that we synthesize beyond EOF behave as if they 2700 * were allocated at this layer. 2701 * 2702 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be 2703 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2704 * below. 2705 */ 2706 assert(ret & BDRV_BLOCK_EOF); 2707 *pnum = bytes; 2708 if (file) { 2709 *file = p; 2710 } 2711 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED; 2712 break; 2713 } 2714 if (ret & BDRV_BLOCK_ALLOCATED) { 2715 /* 2716 * We've found the node and the status, we must break. 2717 * 2718 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be 2719 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2720 * below. 2721 */ 2722 ret &= ~BDRV_BLOCK_EOF; 2723 break; 2724 } 2725 2726 if (p == base) { 2727 assert(include_base); 2728 break; 2729 } 2730 2731 /* 2732 * OK, [offset, offset + *pnum) region is unallocated on this layer, 2733 * let's continue the diving. 2734 */ 2735 assert(*pnum <= bytes); 2736 bytes = *pnum; 2737 } 2738 2739 if (offset + *pnum == eof) { 2740 ret |= BDRV_BLOCK_EOF; 2741 } 2742 2743 return ret; 2744 } 2745 2746 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2747 int64_t offset, int64_t bytes, int64_t *pnum, 2748 int64_t *map, BlockDriverState **file) 2749 { 2750 IO_CODE(); 2751 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2752 pnum, map, file, NULL); 2753 } 2754 2755 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2756 int64_t *pnum, int64_t *map, BlockDriverState **file) 2757 { 2758 IO_CODE(); 2759 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2760 offset, bytes, pnum, map, file); 2761 } 2762 2763 /* 2764 * Check @bs (and its backing chain) to see if the range defined 2765 * by @offset and @bytes is known to read as zeroes. 2766 * Return 1 if that is the case, 0 otherwise and -errno on error. 2767 * This test is meant to be fast rather than accurate so returning 0 2768 * does not guarantee non-zero data. 2769 */ 2770 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2771 int64_t bytes) 2772 { 2773 int ret; 2774 int64_t pnum = bytes; 2775 IO_CODE(); 2776 2777 if (!bytes) { 2778 return 1; 2779 } 2780 2781 ret = bdrv_common_block_status_above(bs, NULL, false, false, offset, 2782 bytes, &pnum, NULL, NULL, NULL); 2783 2784 if (ret < 0) { 2785 return ret; 2786 } 2787 2788 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2789 } 2790 2791 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, 2792 int64_t bytes, int64_t *pnum) 2793 { 2794 int ret; 2795 int64_t dummy; 2796 IO_CODE(); 2797 2798 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2799 bytes, pnum ? pnum : &dummy, NULL, 2800 NULL, NULL); 2801 if (ret < 0) { 2802 return ret; 2803 } 2804 return !!(ret & BDRV_BLOCK_ALLOCATED); 2805 } 2806 2807 /* 2808 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2809 * 2810 * Return a positive depth if (a prefix of) the given range is allocated 2811 * in any image between BASE and TOP (BASE is only included if include_base 2812 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2813 * BASE can be NULL to check if the given offset is allocated in any 2814 * image of the chain. Return 0 otherwise, or negative errno on 2815 * failure. 2816 * 2817 * 'pnum' is set to the number of bytes (including and immediately 2818 * following the specified offset) that are known to be in the same 2819 * allocated/unallocated state. Note that a subsequent call starting 2820 * at 'offset + *pnum' may return the same allocation status (in other 2821 * words, the result is not necessarily the maximum possible range); 2822 * but 'pnum' will only be 0 when end of file is reached. 2823 */ 2824 int bdrv_is_allocated_above(BlockDriverState *top, 2825 BlockDriverState *base, 2826 bool include_base, int64_t offset, 2827 int64_t bytes, int64_t *pnum) 2828 { 2829 int depth; 2830 int ret = bdrv_common_block_status_above(top, base, include_base, false, 2831 offset, bytes, pnum, NULL, NULL, 2832 &depth); 2833 IO_CODE(); 2834 if (ret < 0) { 2835 return ret; 2836 } 2837 2838 if (ret & BDRV_BLOCK_ALLOCATED) { 2839 return depth; 2840 } 2841 return 0; 2842 } 2843 2844 int coroutine_fn 2845 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2846 { 2847 BlockDriver *drv = bs->drv; 2848 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2849 int ret; 2850 IO_CODE(); 2851 2852 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2853 if (ret < 0) { 2854 return ret; 2855 } 2856 2857 if (!drv) { 2858 return -ENOMEDIUM; 2859 } 2860 2861 bdrv_inc_in_flight(bs); 2862 2863 if (drv->bdrv_load_vmstate) { 2864 ret = drv->bdrv_load_vmstate(bs, qiov, pos); 2865 } else if (child_bs) { 2866 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2867 } else { 2868 ret = -ENOTSUP; 2869 } 2870 2871 bdrv_dec_in_flight(bs); 2872 2873 return ret; 2874 } 2875 2876 int coroutine_fn 2877 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2878 { 2879 BlockDriver *drv = bs->drv; 2880 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2881 int ret; 2882 IO_CODE(); 2883 2884 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2885 if (ret < 0) { 2886 return ret; 2887 } 2888 2889 if (!drv) { 2890 return -ENOMEDIUM; 2891 } 2892 2893 bdrv_inc_in_flight(bs); 2894 2895 if (drv->bdrv_save_vmstate) { 2896 ret = drv->bdrv_save_vmstate(bs, qiov, pos); 2897 } else if (child_bs) { 2898 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2899 } else { 2900 ret = -ENOTSUP; 2901 } 2902 2903 bdrv_dec_in_flight(bs); 2904 2905 return ret; 2906 } 2907 2908 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2909 int64_t pos, int size) 2910 { 2911 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2912 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2913 IO_CODE(); 2914 2915 return ret < 0 ? ret : size; 2916 } 2917 2918 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2919 int64_t pos, int size) 2920 { 2921 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2922 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2923 IO_CODE(); 2924 2925 return ret < 0 ? ret : size; 2926 } 2927 2928 /**************************************************************/ 2929 /* async I/Os */ 2930 2931 void bdrv_aio_cancel(BlockAIOCB *acb) 2932 { 2933 IO_CODE(); 2934 qemu_aio_ref(acb); 2935 bdrv_aio_cancel_async(acb); 2936 while (acb->refcnt > 1) { 2937 if (acb->aiocb_info->get_aio_context) { 2938 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2939 } else if (acb->bs) { 2940 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2941 * assert that we're not using an I/O thread. Thread-safe 2942 * code should use bdrv_aio_cancel_async exclusively. 2943 */ 2944 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2945 aio_poll(bdrv_get_aio_context(acb->bs), true); 2946 } else { 2947 abort(); 2948 } 2949 } 2950 qemu_aio_unref(acb); 2951 } 2952 2953 /* Async version of aio cancel. The caller is not blocked if the acb implements 2954 * cancel_async, otherwise we do nothing and let the request normally complete. 2955 * In either case the completion callback must be called. */ 2956 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2957 { 2958 IO_CODE(); 2959 if (acb->aiocb_info->cancel_async) { 2960 acb->aiocb_info->cancel_async(acb); 2961 } 2962 } 2963 2964 /**************************************************************/ 2965 /* Coroutine block device emulation */ 2966 2967 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2968 { 2969 BdrvChild *primary_child = bdrv_primary_child(bs); 2970 BdrvChild *child; 2971 int current_gen; 2972 int ret = 0; 2973 IO_CODE(); 2974 2975 bdrv_inc_in_flight(bs); 2976 2977 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 2978 bdrv_is_sg(bs)) { 2979 goto early_exit; 2980 } 2981 2982 qemu_co_mutex_lock(&bs->reqs_lock); 2983 current_gen = qatomic_read(&bs->write_gen); 2984 2985 /* Wait until any previous flushes are completed */ 2986 while (bs->active_flush_req) { 2987 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2988 } 2989 2990 /* Flushes reach this point in nondecreasing current_gen order. */ 2991 bs->active_flush_req = true; 2992 qemu_co_mutex_unlock(&bs->reqs_lock); 2993 2994 /* Write back all layers by calling one driver function */ 2995 if (bs->drv->bdrv_co_flush) { 2996 ret = bs->drv->bdrv_co_flush(bs); 2997 goto out; 2998 } 2999 3000 /* Write back cached data to the OS even with cache=unsafe */ 3001 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); 3002 if (bs->drv->bdrv_co_flush_to_os) { 3003 ret = bs->drv->bdrv_co_flush_to_os(bs); 3004 if (ret < 0) { 3005 goto out; 3006 } 3007 } 3008 3009 /* But don't actually force it to the disk with cache=unsafe */ 3010 if (bs->open_flags & BDRV_O_NO_FLUSH) { 3011 goto flush_children; 3012 } 3013 3014 /* Check if we really need to flush anything */ 3015 if (bs->flushed_gen == current_gen) { 3016 goto flush_children; 3017 } 3018 3019 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); 3020 if (!bs->drv) { 3021 /* bs->drv->bdrv_co_flush() might have ejected the BDS 3022 * (even in case of apparent success) */ 3023 ret = -ENOMEDIUM; 3024 goto out; 3025 } 3026 if (bs->drv->bdrv_co_flush_to_disk) { 3027 ret = bs->drv->bdrv_co_flush_to_disk(bs); 3028 } else if (bs->drv->bdrv_aio_flush) { 3029 BlockAIOCB *acb; 3030 CoroutineIOCompletion co = { 3031 .coroutine = qemu_coroutine_self(), 3032 }; 3033 3034 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 3035 if (acb == NULL) { 3036 ret = -EIO; 3037 } else { 3038 qemu_coroutine_yield(); 3039 ret = co.ret; 3040 } 3041 } else { 3042 /* 3043 * Some block drivers always operate in either writethrough or unsafe 3044 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 3045 * know how the server works (because the behaviour is hardcoded or 3046 * depends on server-side configuration), so we can't ensure that 3047 * everything is safe on disk. Returning an error doesn't work because 3048 * that would break guests even if the server operates in writethrough 3049 * mode. 3050 * 3051 * Let's hope the user knows what he's doing. 3052 */ 3053 ret = 0; 3054 } 3055 3056 if (ret < 0) { 3057 goto out; 3058 } 3059 3060 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 3061 * in the case of cache=unsafe, so there are no useless flushes. 3062 */ 3063 flush_children: 3064 ret = 0; 3065 QLIST_FOREACH(child, &bs->children, next) { 3066 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 3067 int this_child_ret = bdrv_co_flush(child->bs); 3068 if (!ret) { 3069 ret = this_child_ret; 3070 } 3071 } 3072 } 3073 3074 out: 3075 /* Notify any pending flushes that we have completed */ 3076 if (ret == 0) { 3077 bs->flushed_gen = current_gen; 3078 } 3079 3080 qemu_co_mutex_lock(&bs->reqs_lock); 3081 bs->active_flush_req = false; 3082 /* Return value is ignored - it's ok if wait queue is empty */ 3083 qemu_co_queue_next(&bs->flush_queue); 3084 qemu_co_mutex_unlock(&bs->reqs_lock); 3085 3086 early_exit: 3087 bdrv_dec_in_flight(bs); 3088 return ret; 3089 } 3090 3091 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 3092 int64_t bytes) 3093 { 3094 BdrvTrackedRequest req; 3095 int ret; 3096 int64_t max_pdiscard; 3097 int head, tail, align; 3098 BlockDriverState *bs = child->bs; 3099 IO_CODE(); 3100 3101 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { 3102 return -ENOMEDIUM; 3103 } 3104 3105 if (bdrv_has_readonly_bitmaps(bs)) { 3106 return -EPERM; 3107 } 3108 3109 ret = bdrv_check_request(offset, bytes, NULL); 3110 if (ret < 0) { 3111 return ret; 3112 } 3113 3114 /* Do nothing if disabled. */ 3115 if (!(bs->open_flags & BDRV_O_UNMAP)) { 3116 return 0; 3117 } 3118 3119 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 3120 return 0; 3121 } 3122 3123 /* Invalidate the cached block-status data range if this discard overlaps */ 3124 bdrv_bsc_invalidate_range(bs, offset, bytes); 3125 3126 /* Discard is advisory, but some devices track and coalesce 3127 * unaligned requests, so we must pass everything down rather than 3128 * round here. Still, most devices will just silently ignore 3129 * unaligned requests (by returning -ENOTSUP), so we must fragment 3130 * the request accordingly. */ 3131 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3132 assert(align % bs->bl.request_alignment == 0); 3133 head = offset % align; 3134 tail = (offset + bytes) % align; 3135 3136 bdrv_inc_in_flight(bs); 3137 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3138 3139 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3140 if (ret < 0) { 3141 goto out; 3142 } 3143 3144 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), 3145 align); 3146 assert(max_pdiscard >= bs->bl.request_alignment); 3147 3148 while (bytes > 0) { 3149 int64_t num = bytes; 3150 3151 if (head) { 3152 /* Make small requests to get to alignment boundaries. */ 3153 num = MIN(bytes, align - head); 3154 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3155 num %= bs->bl.request_alignment; 3156 } 3157 head = (head + num) % align; 3158 assert(num < max_pdiscard); 3159 } else if (tail) { 3160 if (num > align) { 3161 /* Shorten the request to the last aligned cluster. */ 3162 num -= tail; 3163 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3164 tail > bs->bl.request_alignment) { 3165 tail %= bs->bl.request_alignment; 3166 num -= tail; 3167 } 3168 } 3169 /* limit request size */ 3170 if (num > max_pdiscard) { 3171 num = max_pdiscard; 3172 } 3173 3174 if (!bs->drv) { 3175 ret = -ENOMEDIUM; 3176 goto out; 3177 } 3178 if (bs->drv->bdrv_co_pdiscard) { 3179 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3180 } else { 3181 BlockAIOCB *acb; 3182 CoroutineIOCompletion co = { 3183 .coroutine = qemu_coroutine_self(), 3184 }; 3185 3186 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3187 bdrv_co_io_em_complete, &co); 3188 if (acb == NULL) { 3189 ret = -EIO; 3190 goto out; 3191 } else { 3192 qemu_coroutine_yield(); 3193 ret = co.ret; 3194 } 3195 } 3196 if (ret && ret != -ENOTSUP) { 3197 goto out; 3198 } 3199 3200 offset += num; 3201 bytes -= num; 3202 } 3203 ret = 0; 3204 out: 3205 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3206 tracked_request_end(&req); 3207 bdrv_dec_in_flight(bs); 3208 return ret; 3209 } 3210 3211 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3212 { 3213 BlockDriver *drv = bs->drv; 3214 CoroutineIOCompletion co = { 3215 .coroutine = qemu_coroutine_self(), 3216 }; 3217 BlockAIOCB *acb; 3218 IO_CODE(); 3219 3220 bdrv_inc_in_flight(bs); 3221 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3222 co.ret = -ENOTSUP; 3223 goto out; 3224 } 3225 3226 if (drv->bdrv_co_ioctl) { 3227 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3228 } else { 3229 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3230 if (!acb) { 3231 co.ret = -ENOTSUP; 3232 goto out; 3233 } 3234 qemu_coroutine_yield(); 3235 } 3236 out: 3237 bdrv_dec_in_flight(bs); 3238 return co.ret; 3239 } 3240 3241 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3242 { 3243 IO_CODE(); 3244 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3245 } 3246 3247 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3248 { 3249 IO_CODE(); 3250 return memset(qemu_blockalign(bs, size), 0, size); 3251 } 3252 3253 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3254 { 3255 size_t align = bdrv_opt_mem_align(bs); 3256 IO_CODE(); 3257 3258 /* Ensure that NULL is never returned on success */ 3259 assert(align > 0); 3260 if (size == 0) { 3261 size = align; 3262 } 3263 3264 return qemu_try_memalign(align, size); 3265 } 3266 3267 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3268 { 3269 void *mem = qemu_try_blockalign(bs, size); 3270 IO_CODE(); 3271 3272 if (mem) { 3273 memset(mem, 0, size); 3274 } 3275 3276 return mem; 3277 } 3278 3279 /* 3280 * Check if all memory in this vector is sector aligned. 3281 */ 3282 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 3283 { 3284 int i; 3285 size_t alignment = bdrv_min_mem_align(bs); 3286 IO_CODE(); 3287 3288 for (i = 0; i < qiov->niov; i++) { 3289 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 3290 return false; 3291 } 3292 if (qiov->iov[i].iov_len % alignment) { 3293 return false; 3294 } 3295 } 3296 3297 return true; 3298 } 3299 3300 void bdrv_io_plug(BlockDriverState *bs) 3301 { 3302 BdrvChild *child; 3303 IO_CODE(); 3304 3305 QLIST_FOREACH(child, &bs->children, next) { 3306 bdrv_io_plug(child->bs); 3307 } 3308 3309 if (qatomic_fetch_inc(&bs->io_plugged) == 0) { 3310 BlockDriver *drv = bs->drv; 3311 if (drv && drv->bdrv_io_plug) { 3312 drv->bdrv_io_plug(bs); 3313 } 3314 } 3315 } 3316 3317 void bdrv_io_unplug(BlockDriverState *bs) 3318 { 3319 BdrvChild *child; 3320 IO_CODE(); 3321 3322 assert(bs->io_plugged); 3323 if (qatomic_fetch_dec(&bs->io_plugged) == 1) { 3324 BlockDriver *drv = bs->drv; 3325 if (drv && drv->bdrv_io_unplug) { 3326 drv->bdrv_io_unplug(bs); 3327 } 3328 } 3329 3330 QLIST_FOREACH(child, &bs->children, next) { 3331 bdrv_io_unplug(child->bs); 3332 } 3333 } 3334 3335 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) 3336 { 3337 BdrvChild *child; 3338 3339 GLOBAL_STATE_CODE(); 3340 if (bs->drv && bs->drv->bdrv_register_buf) { 3341 bs->drv->bdrv_register_buf(bs, host, size); 3342 } 3343 QLIST_FOREACH(child, &bs->children, next) { 3344 bdrv_register_buf(child->bs, host, size); 3345 } 3346 } 3347 3348 void bdrv_unregister_buf(BlockDriverState *bs, void *host) 3349 { 3350 BdrvChild *child; 3351 3352 GLOBAL_STATE_CODE(); 3353 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3354 bs->drv->bdrv_unregister_buf(bs, host); 3355 } 3356 QLIST_FOREACH(child, &bs->children, next) { 3357 bdrv_unregister_buf(child->bs, host); 3358 } 3359 } 3360 3361 static int coroutine_fn bdrv_co_copy_range_internal( 3362 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3363 int64_t dst_offset, int64_t bytes, 3364 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3365 bool recurse_src) 3366 { 3367 BdrvTrackedRequest req; 3368 int ret; 3369 3370 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3371 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3372 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3373 assert(!(read_flags & BDRV_REQ_NO_WAIT)); 3374 assert(!(write_flags & BDRV_REQ_NO_WAIT)); 3375 3376 if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) { 3377 return -ENOMEDIUM; 3378 } 3379 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3380 if (ret) { 3381 return ret; 3382 } 3383 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3384 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3385 } 3386 3387 if (!src || !src->bs || !bdrv_is_inserted(src->bs)) { 3388 return -ENOMEDIUM; 3389 } 3390 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3391 if (ret) { 3392 return ret; 3393 } 3394 3395 if (!src->bs->drv->bdrv_co_copy_range_from 3396 || !dst->bs->drv->bdrv_co_copy_range_to 3397 || src->bs->encrypted || dst->bs->encrypted) { 3398 return -ENOTSUP; 3399 } 3400 3401 if (recurse_src) { 3402 bdrv_inc_in_flight(src->bs); 3403 tracked_request_begin(&req, src->bs, src_offset, bytes, 3404 BDRV_TRACKED_READ); 3405 3406 /* BDRV_REQ_SERIALISING is only for write operation */ 3407 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3408 bdrv_wait_serialising_requests(&req); 3409 3410 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3411 src, src_offset, 3412 dst, dst_offset, 3413 bytes, 3414 read_flags, write_flags); 3415 3416 tracked_request_end(&req); 3417 bdrv_dec_in_flight(src->bs); 3418 } else { 3419 bdrv_inc_in_flight(dst->bs); 3420 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3421 BDRV_TRACKED_WRITE); 3422 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3423 write_flags); 3424 if (!ret) { 3425 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3426 src, src_offset, 3427 dst, dst_offset, 3428 bytes, 3429 read_flags, write_flags); 3430 } 3431 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3432 tracked_request_end(&req); 3433 bdrv_dec_in_flight(dst->bs); 3434 } 3435 3436 return ret; 3437 } 3438 3439 /* Copy range from @src to @dst. 3440 * 3441 * See the comment of bdrv_co_copy_range for the parameter and return value 3442 * semantics. */ 3443 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3444 BdrvChild *dst, int64_t dst_offset, 3445 int64_t bytes, 3446 BdrvRequestFlags read_flags, 3447 BdrvRequestFlags write_flags) 3448 { 3449 IO_CODE(); 3450 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3451 read_flags, write_flags); 3452 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3453 bytes, read_flags, write_flags, true); 3454 } 3455 3456 /* Copy range from @src to @dst. 3457 * 3458 * See the comment of bdrv_co_copy_range for the parameter and return value 3459 * semantics. */ 3460 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3461 BdrvChild *dst, int64_t dst_offset, 3462 int64_t bytes, 3463 BdrvRequestFlags read_flags, 3464 BdrvRequestFlags write_flags) 3465 { 3466 IO_CODE(); 3467 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3468 read_flags, write_flags); 3469 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3470 bytes, read_flags, write_flags, false); 3471 } 3472 3473 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3474 BdrvChild *dst, int64_t dst_offset, 3475 int64_t bytes, BdrvRequestFlags read_flags, 3476 BdrvRequestFlags write_flags) 3477 { 3478 IO_CODE(); 3479 return bdrv_co_copy_range_from(src, src_offset, 3480 dst, dst_offset, 3481 bytes, read_flags, write_flags); 3482 } 3483 3484 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3485 { 3486 BdrvChild *c; 3487 QLIST_FOREACH(c, &bs->parents, next_parent) { 3488 if (c->klass->resize) { 3489 c->klass->resize(c); 3490 } 3491 } 3492 } 3493 3494 /** 3495 * Truncate file to 'offset' bytes (needed only for file protocols) 3496 * 3497 * If 'exact' is true, the file must be resized to exactly the given 3498 * 'offset'. Otherwise, it is sufficient for the node to be at least 3499 * 'offset' bytes in length. 3500 */ 3501 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3502 PreallocMode prealloc, BdrvRequestFlags flags, 3503 Error **errp) 3504 { 3505 BlockDriverState *bs = child->bs; 3506 BdrvChild *filtered, *backing; 3507 BlockDriver *drv = bs->drv; 3508 BdrvTrackedRequest req; 3509 int64_t old_size, new_bytes; 3510 int ret; 3511 IO_CODE(); 3512 3513 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3514 if (!drv) { 3515 error_setg(errp, "No medium inserted"); 3516 return -ENOMEDIUM; 3517 } 3518 if (offset < 0) { 3519 error_setg(errp, "Image size cannot be negative"); 3520 return -EINVAL; 3521 } 3522 3523 ret = bdrv_check_request(offset, 0, errp); 3524 if (ret < 0) { 3525 return ret; 3526 } 3527 3528 old_size = bdrv_getlength(bs); 3529 if (old_size < 0) { 3530 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3531 return old_size; 3532 } 3533 3534 if (bdrv_is_read_only(bs)) { 3535 error_setg(errp, "Image is read-only"); 3536 return -EACCES; 3537 } 3538 3539 if (offset > old_size) { 3540 new_bytes = offset - old_size; 3541 } else { 3542 new_bytes = 0; 3543 } 3544 3545 bdrv_inc_in_flight(bs); 3546 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3547 BDRV_TRACKED_TRUNCATE); 3548 3549 /* If we are growing the image and potentially using preallocation for the 3550 * new area, we need to make sure that no write requests are made to it 3551 * concurrently or they might be overwritten by preallocation. */ 3552 if (new_bytes) { 3553 bdrv_make_request_serialising(&req, 1); 3554 } 3555 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3556 0); 3557 if (ret < 0) { 3558 error_setg_errno(errp, -ret, 3559 "Failed to prepare request for truncation"); 3560 goto out; 3561 } 3562 3563 filtered = bdrv_filter_child(bs); 3564 backing = bdrv_cow_child(bs); 3565 3566 /* 3567 * If the image has a backing file that is large enough that it would 3568 * provide data for the new area, we cannot leave it unallocated because 3569 * then the backing file content would become visible. Instead, zero-fill 3570 * the new area. 3571 * 3572 * Note that if the image has a backing file, but was opened without the 3573 * backing file, taking care of keeping things consistent with that backing 3574 * file is the user's responsibility. 3575 */ 3576 if (new_bytes && backing) { 3577 int64_t backing_len; 3578 3579 backing_len = bdrv_getlength(backing->bs); 3580 if (backing_len < 0) { 3581 ret = backing_len; 3582 error_setg_errno(errp, -ret, "Could not get backing file size"); 3583 goto out; 3584 } 3585 3586 if (backing_len > old_size) { 3587 flags |= BDRV_REQ_ZERO_WRITE; 3588 } 3589 } 3590 3591 if (drv->bdrv_co_truncate) { 3592 if (flags & ~bs->supported_truncate_flags) { 3593 error_setg(errp, "Block driver does not support requested flags"); 3594 ret = -ENOTSUP; 3595 goto out; 3596 } 3597 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3598 } else if (filtered) { 3599 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3600 } else { 3601 error_setg(errp, "Image format driver does not support resize"); 3602 ret = -ENOTSUP; 3603 goto out; 3604 } 3605 if (ret < 0) { 3606 goto out; 3607 } 3608 3609 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3610 if (ret < 0) { 3611 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3612 } else { 3613 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3614 } 3615 /* It's possible that truncation succeeded but refresh_total_sectors 3616 * failed, but the latter doesn't affect how we should finish the request. 3617 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */ 3618 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3619 3620 out: 3621 tracked_request_end(&req); 3622 bdrv_dec_in_flight(bs); 3623 3624 return ret; 3625 } 3626 3627 void bdrv_cancel_in_flight(BlockDriverState *bs) 3628 { 3629 GLOBAL_STATE_CODE(); 3630 if (!bs || !bs->drv) { 3631 return; 3632 } 3633 3634 if (bs->drv->bdrv_cancel_in_flight) { 3635 bs->drv->bdrv_cancel_in_flight(bs); 3636 } 3637 } 3638 3639 int coroutine_fn 3640 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, 3641 QEMUIOVector *qiov, size_t qiov_offset) 3642 { 3643 BlockDriverState *bs = child->bs; 3644 BlockDriver *drv = bs->drv; 3645 int ret; 3646 IO_CODE(); 3647 3648 if (!drv) { 3649 return -ENOMEDIUM; 3650 } 3651 3652 if (!drv->bdrv_co_preadv_snapshot) { 3653 return -ENOTSUP; 3654 } 3655 3656 bdrv_inc_in_flight(bs); 3657 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); 3658 bdrv_dec_in_flight(bs); 3659 3660 return ret; 3661 } 3662 3663 int coroutine_fn 3664 bdrv_co_snapshot_block_status(BlockDriverState *bs, 3665 bool want_zero, int64_t offset, int64_t bytes, 3666 int64_t *pnum, int64_t *map, 3667 BlockDriverState **file) 3668 { 3669 BlockDriver *drv = bs->drv; 3670 int ret; 3671 IO_CODE(); 3672 3673 if (!drv) { 3674 return -ENOMEDIUM; 3675 } 3676 3677 if (!drv->bdrv_co_snapshot_block_status) { 3678 return -ENOTSUP; 3679 } 3680 3681 bdrv_inc_in_flight(bs); 3682 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, 3683 pnum, map, file); 3684 bdrv_dec_in_flight(bs); 3685 3686 return ret; 3687 } 3688 3689 int coroutine_fn 3690 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) 3691 { 3692 BlockDriver *drv = bs->drv; 3693 int ret; 3694 IO_CODE(); 3695 3696 if (!drv) { 3697 return -ENOMEDIUM; 3698 } 3699 3700 if (!drv->bdrv_co_pdiscard_snapshot) { 3701 return -ENOTSUP; 3702 } 3703 3704 bdrv_inc_in_flight(bs); 3705 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); 3706 bdrv_dec_in_flight(bs); 3707 3708 return ret; 3709 } 3710