1 /* 2 * Block layer I/O functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "trace.h" 27 #include "sysemu/block-backend.h" 28 #include "block/aio-wait.h" 29 #include "block/blockjob.h" 30 #include "block/blockjob_int.h" 31 #include "block/block_int.h" 32 #include "block/coroutines.h" 33 #include "block/dirty-bitmap.h" 34 #include "block/write-threshold.h" 35 #include "qemu/cutils.h" 36 #include "qemu/memalign.h" 37 #include "qapi/error.h" 38 #include "qemu/error-report.h" 39 #include "qemu/main-loop.h" 40 #include "sysemu/replay.h" 41 42 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ 43 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) 44 45 static void bdrv_parent_cb_resize(BlockDriverState *bs); 46 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 47 int64_t offset, int64_t bytes, BdrvRequestFlags flags); 48 49 static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) 50 { 51 BdrvChild *c, *next; 52 53 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 54 if (c == ignore) { 55 continue; 56 } 57 bdrv_parent_drained_begin_single(c); 58 } 59 } 60 61 void bdrv_parent_drained_end_single(BdrvChild *c) 62 { 63 IO_OR_GS_CODE(); 64 65 assert(c->quiesced_parent); 66 c->quiesced_parent = false; 67 68 if (c->klass->drained_end) { 69 c->klass->drained_end(c); 70 } 71 } 72 73 static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) 74 { 75 BdrvChild *c; 76 77 QLIST_FOREACH(c, &bs->parents, next_parent) { 78 if (c == ignore) { 79 continue; 80 } 81 bdrv_parent_drained_end_single(c); 82 } 83 } 84 85 bool bdrv_parent_drained_poll_single(BdrvChild *c) 86 { 87 if (c->klass->drained_poll) { 88 return c->klass->drained_poll(c); 89 } 90 return false; 91 } 92 93 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, 94 bool ignore_bds_parents) 95 { 96 BdrvChild *c, *next; 97 bool busy = false; 98 99 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 100 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { 101 continue; 102 } 103 busy |= bdrv_parent_drained_poll_single(c); 104 } 105 106 return busy; 107 } 108 109 void bdrv_parent_drained_begin_single(BdrvChild *c) 110 { 111 IO_OR_GS_CODE(); 112 113 assert(!c->quiesced_parent); 114 c->quiesced_parent = true; 115 116 if (c->klass->drained_begin) { 117 c->klass->drained_begin(c); 118 } 119 } 120 121 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) 122 { 123 dst->pdiscard_alignment = MAX(dst->pdiscard_alignment, 124 src->pdiscard_alignment); 125 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer); 126 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer); 127 dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer, 128 src->max_hw_transfer); 129 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment, 130 src->opt_mem_alignment); 131 dst->min_mem_alignment = MAX(dst->min_mem_alignment, 132 src->min_mem_alignment); 133 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); 134 dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov); 135 } 136 137 typedef struct BdrvRefreshLimitsState { 138 BlockDriverState *bs; 139 BlockLimits old_bl; 140 } BdrvRefreshLimitsState; 141 142 static void bdrv_refresh_limits_abort(void *opaque) 143 { 144 BdrvRefreshLimitsState *s = opaque; 145 146 s->bs->bl = s->old_bl; 147 } 148 149 static TransactionActionDrv bdrv_refresh_limits_drv = { 150 .abort = bdrv_refresh_limits_abort, 151 .clean = g_free, 152 }; 153 154 /* @tran is allowed to be NULL, in this case no rollback is possible. */ 155 void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) 156 { 157 ERRP_GUARD(); 158 BlockDriver *drv = bs->drv; 159 BdrvChild *c; 160 bool have_limits; 161 162 GLOBAL_STATE_CODE(); 163 assume_graph_lock(); /* FIXME */ 164 165 if (tran) { 166 BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1); 167 *s = (BdrvRefreshLimitsState) { 168 .bs = bs, 169 .old_bl = bs->bl, 170 }; 171 tran_add(tran, &bdrv_refresh_limits_drv, s); 172 } 173 174 memset(&bs->bl, 0, sizeof(bs->bl)); 175 176 if (!drv) { 177 return; 178 } 179 180 /* Default alignment based on whether driver has byte interface */ 181 bs->bl.request_alignment = (drv->bdrv_co_preadv || 182 drv->bdrv_aio_preadv || 183 drv->bdrv_co_preadv_part) ? 1 : 512; 184 185 /* Take some limits from the children as a default */ 186 have_limits = false; 187 QLIST_FOREACH(c, &bs->children, next) { 188 if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW)) 189 { 190 bdrv_merge_limits(&bs->bl, &c->bs->bl); 191 have_limits = true; 192 } 193 194 if (c->role & BDRV_CHILD_FILTERED) { 195 bs->bl.has_variable_length |= c->bs->bl.has_variable_length; 196 } 197 } 198 199 if (!have_limits) { 200 bs->bl.min_mem_alignment = 512; 201 bs->bl.opt_mem_alignment = qemu_real_host_page_size(); 202 203 /* Safe default since most protocols use readv()/writev()/etc */ 204 bs->bl.max_iov = IOV_MAX; 205 } 206 207 /* Then let the driver override it */ 208 if (drv->bdrv_refresh_limits) { 209 drv->bdrv_refresh_limits(bs, errp); 210 if (*errp) { 211 return; 212 } 213 } 214 215 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) { 216 error_setg(errp, "Driver requires too large request alignment"); 217 } 218 } 219 220 /** 221 * The copy-on-read flag is actually a reference count so multiple users may 222 * use the feature without worrying about clobbering its previous state. 223 * Copy-on-read stays enabled until all users have called to disable it. 224 */ 225 void bdrv_enable_copy_on_read(BlockDriverState *bs) 226 { 227 IO_CODE(); 228 qatomic_inc(&bs->copy_on_read); 229 } 230 231 void bdrv_disable_copy_on_read(BlockDriverState *bs) 232 { 233 int old = qatomic_fetch_dec(&bs->copy_on_read); 234 IO_CODE(); 235 assert(old >= 1); 236 } 237 238 typedef struct { 239 Coroutine *co; 240 BlockDriverState *bs; 241 bool done; 242 bool begin; 243 bool poll; 244 BdrvChild *parent; 245 } BdrvCoDrainData; 246 247 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ 248 bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, 249 bool ignore_bds_parents) 250 { 251 IO_OR_GS_CODE(); 252 253 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { 254 return true; 255 } 256 257 if (qatomic_read(&bs->in_flight)) { 258 return true; 259 } 260 261 return false; 262 } 263 264 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, 265 BdrvChild *ignore_parent) 266 { 267 return bdrv_drain_poll(bs, ignore_parent, false); 268 } 269 270 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, 271 bool poll); 272 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent); 273 274 static void bdrv_co_drain_bh_cb(void *opaque) 275 { 276 BdrvCoDrainData *data = opaque; 277 Coroutine *co = data->co; 278 BlockDriverState *bs = data->bs; 279 280 if (bs) { 281 AioContext *ctx = bdrv_get_aio_context(bs); 282 aio_context_acquire(ctx); 283 bdrv_dec_in_flight(bs); 284 if (data->begin) { 285 bdrv_do_drained_begin(bs, data->parent, data->poll); 286 } else { 287 assert(!data->poll); 288 bdrv_do_drained_end(bs, data->parent); 289 } 290 aio_context_release(ctx); 291 } else { 292 assert(data->begin); 293 bdrv_drain_all_begin(); 294 } 295 296 data->done = true; 297 aio_co_wake(co); 298 } 299 300 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, 301 bool begin, 302 BdrvChild *parent, 303 bool poll) 304 { 305 BdrvCoDrainData data; 306 Coroutine *self = qemu_coroutine_self(); 307 AioContext *ctx = bdrv_get_aio_context(bs); 308 AioContext *co_ctx = qemu_coroutine_get_aio_context(self); 309 310 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and 311 * other coroutines run if they were queued by aio_co_enter(). */ 312 313 assert(qemu_in_coroutine()); 314 data = (BdrvCoDrainData) { 315 .co = self, 316 .bs = bs, 317 .done = false, 318 .begin = begin, 319 .parent = parent, 320 .poll = poll, 321 }; 322 323 if (bs) { 324 bdrv_inc_in_flight(bs); 325 } 326 327 /* 328 * Temporarily drop the lock across yield or we would get deadlocks. 329 * bdrv_co_drain_bh_cb() reaquires the lock as needed. 330 * 331 * When we yield below, the lock for the current context will be 332 * released, so if this is actually the lock that protects bs, don't drop 333 * it a second time. 334 */ 335 if (ctx != co_ctx) { 336 aio_context_release(ctx); 337 } 338 replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data); 339 340 qemu_coroutine_yield(); 341 /* If we are resumed from some other event (such as an aio completion or a 342 * timer callback), it is a bug in the caller that should be fixed. */ 343 assert(data.done); 344 345 /* Reaquire the AioContext of bs if we dropped it */ 346 if (ctx != co_ctx) { 347 aio_context_acquire(ctx); 348 } 349 } 350 351 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, 352 bool poll) 353 { 354 IO_OR_GS_CODE(); 355 356 if (qemu_in_coroutine()) { 357 bdrv_co_yield_to_drain(bs, true, parent, poll); 358 return; 359 } 360 361 /* Stop things in parent-to-child order */ 362 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { 363 aio_disable_external(bdrv_get_aio_context(bs)); 364 bdrv_parent_drained_begin(bs, parent); 365 if (bs->drv && bs->drv->bdrv_drain_begin) { 366 bs->drv->bdrv_drain_begin(bs); 367 } 368 } 369 370 /* 371 * Wait for drained requests to finish. 372 * 373 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The 374 * call is needed so things in this AioContext can make progress even 375 * though we don't return to the main AioContext loop - this automatically 376 * includes other nodes in the same AioContext and therefore all child 377 * nodes. 378 */ 379 if (poll) { 380 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); 381 } 382 } 383 384 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent) 385 { 386 bdrv_do_drained_begin(bs, parent, false); 387 } 388 389 void bdrv_drained_begin(BlockDriverState *bs) 390 { 391 IO_OR_GS_CODE(); 392 bdrv_do_drained_begin(bs, NULL, true); 393 } 394 395 /** 396 * This function does not poll, nor must any of its recursively called 397 * functions. 398 */ 399 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) 400 { 401 int old_quiesce_counter; 402 403 if (qemu_in_coroutine()) { 404 bdrv_co_yield_to_drain(bs, false, parent, false); 405 return; 406 } 407 assert(bs->quiesce_counter > 0); 408 409 /* Re-enable things in child-to-parent order */ 410 old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); 411 if (old_quiesce_counter == 1) { 412 if (bs->drv && bs->drv->bdrv_drain_end) { 413 bs->drv->bdrv_drain_end(bs); 414 } 415 bdrv_parent_drained_end(bs, parent); 416 aio_enable_external(bdrv_get_aio_context(bs)); 417 } 418 } 419 420 void bdrv_drained_end(BlockDriverState *bs) 421 { 422 IO_OR_GS_CODE(); 423 bdrv_do_drained_end(bs, NULL); 424 } 425 426 void bdrv_drain(BlockDriverState *bs) 427 { 428 IO_OR_GS_CODE(); 429 bdrv_drained_begin(bs); 430 bdrv_drained_end(bs); 431 } 432 433 static void bdrv_drain_assert_idle(BlockDriverState *bs) 434 { 435 BdrvChild *child, *next; 436 437 assert(qatomic_read(&bs->in_flight) == 0); 438 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 439 bdrv_drain_assert_idle(child->bs); 440 } 441 } 442 443 unsigned int bdrv_drain_all_count = 0; 444 445 static bool bdrv_drain_all_poll(void) 446 { 447 BlockDriverState *bs = NULL; 448 bool result = false; 449 GLOBAL_STATE_CODE(); 450 451 /* bdrv_drain_poll() can't make changes to the graph and we are holding the 452 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ 453 while ((bs = bdrv_next_all_states(bs))) { 454 AioContext *aio_context = bdrv_get_aio_context(bs); 455 aio_context_acquire(aio_context); 456 result |= bdrv_drain_poll(bs, NULL, true); 457 aio_context_release(aio_context); 458 } 459 460 return result; 461 } 462 463 /* 464 * Wait for pending requests to complete across all BlockDriverStates 465 * 466 * This function does not flush data to disk, use bdrv_flush_all() for that 467 * after calling this function. 468 * 469 * This pauses all block jobs and disables external clients. It must 470 * be paired with bdrv_drain_all_end(). 471 * 472 * NOTE: no new block jobs or BlockDriverStates can be created between 473 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls. 474 */ 475 void bdrv_drain_all_begin_nopoll(void) 476 { 477 BlockDriverState *bs = NULL; 478 GLOBAL_STATE_CODE(); 479 480 /* 481 * bdrv queue is managed by record/replay, 482 * waiting for finishing the I/O requests may 483 * be infinite 484 */ 485 if (replay_events_enabled()) { 486 return; 487 } 488 489 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main 490 * loop AioContext, so make sure we're in the main context. */ 491 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 492 assert(bdrv_drain_all_count < INT_MAX); 493 bdrv_drain_all_count++; 494 495 /* Quiesce all nodes, without polling in-flight requests yet. The graph 496 * cannot change during this loop. */ 497 while ((bs = bdrv_next_all_states(bs))) { 498 AioContext *aio_context = bdrv_get_aio_context(bs); 499 500 aio_context_acquire(aio_context); 501 bdrv_do_drained_begin(bs, NULL, false); 502 aio_context_release(aio_context); 503 } 504 } 505 506 void bdrv_drain_all_begin(void) 507 { 508 BlockDriverState *bs = NULL; 509 510 if (qemu_in_coroutine()) { 511 bdrv_co_yield_to_drain(NULL, true, NULL, true); 512 return; 513 } 514 515 /* 516 * bdrv queue is managed by record/replay, 517 * waiting for finishing the I/O requests may 518 * be infinite 519 */ 520 if (replay_events_enabled()) { 521 return; 522 } 523 524 bdrv_drain_all_begin_nopoll(); 525 526 /* Now poll the in-flight requests */ 527 AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll()); 528 529 while ((bs = bdrv_next_all_states(bs))) { 530 bdrv_drain_assert_idle(bs); 531 } 532 } 533 534 void bdrv_drain_all_end_quiesce(BlockDriverState *bs) 535 { 536 GLOBAL_STATE_CODE(); 537 538 g_assert(bs->quiesce_counter > 0); 539 g_assert(!bs->refcnt); 540 541 while (bs->quiesce_counter) { 542 bdrv_do_drained_end(bs, NULL); 543 } 544 } 545 546 void bdrv_drain_all_end(void) 547 { 548 BlockDriverState *bs = NULL; 549 GLOBAL_STATE_CODE(); 550 551 /* 552 * bdrv queue is managed by record/replay, 553 * waiting for finishing the I/O requests may 554 * be endless 555 */ 556 if (replay_events_enabled()) { 557 return; 558 } 559 560 while ((bs = bdrv_next_all_states(bs))) { 561 AioContext *aio_context = bdrv_get_aio_context(bs); 562 563 aio_context_acquire(aio_context); 564 bdrv_do_drained_end(bs, NULL); 565 aio_context_release(aio_context); 566 } 567 568 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 569 assert(bdrv_drain_all_count > 0); 570 bdrv_drain_all_count--; 571 } 572 573 void bdrv_drain_all(void) 574 { 575 GLOBAL_STATE_CODE(); 576 bdrv_drain_all_begin(); 577 bdrv_drain_all_end(); 578 } 579 580 /** 581 * Remove an active request from the tracked requests list 582 * 583 * This function should be called when a tracked request is completing. 584 */ 585 static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req) 586 { 587 if (req->serialising) { 588 qatomic_dec(&req->bs->serialising_in_flight); 589 } 590 591 qemu_co_mutex_lock(&req->bs->reqs_lock); 592 QLIST_REMOVE(req, list); 593 qemu_co_queue_restart_all(&req->wait_queue); 594 qemu_co_mutex_unlock(&req->bs->reqs_lock); 595 } 596 597 /** 598 * Add an active request to the tracked requests list 599 */ 600 static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req, 601 BlockDriverState *bs, 602 int64_t offset, 603 int64_t bytes, 604 enum BdrvTrackedRequestType type) 605 { 606 bdrv_check_request(offset, bytes, &error_abort); 607 608 *req = (BdrvTrackedRequest){ 609 .bs = bs, 610 .offset = offset, 611 .bytes = bytes, 612 .type = type, 613 .co = qemu_coroutine_self(), 614 .serialising = false, 615 .overlap_offset = offset, 616 .overlap_bytes = bytes, 617 }; 618 619 qemu_co_queue_init(&req->wait_queue); 620 621 qemu_co_mutex_lock(&bs->reqs_lock); 622 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 623 qemu_co_mutex_unlock(&bs->reqs_lock); 624 } 625 626 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 627 int64_t offset, int64_t bytes) 628 { 629 bdrv_check_request(offset, bytes, &error_abort); 630 631 /* aaaa bbbb */ 632 if (offset >= req->overlap_offset + req->overlap_bytes) { 633 return false; 634 } 635 /* bbbb aaaa */ 636 if (req->overlap_offset >= offset + bytes) { 637 return false; 638 } 639 return true; 640 } 641 642 /* Called with self->bs->reqs_lock held */ 643 static coroutine_fn BdrvTrackedRequest * 644 bdrv_find_conflicting_request(BdrvTrackedRequest *self) 645 { 646 BdrvTrackedRequest *req; 647 648 QLIST_FOREACH(req, &self->bs->tracked_requests, list) { 649 if (req == self || (!req->serialising && !self->serialising)) { 650 continue; 651 } 652 if (tracked_request_overlaps(req, self->overlap_offset, 653 self->overlap_bytes)) 654 { 655 /* 656 * Hitting this means there was a reentrant request, for 657 * example, a block driver issuing nested requests. This must 658 * never happen since it means deadlock. 659 */ 660 assert(qemu_coroutine_self() != req->co); 661 662 /* 663 * If the request is already (indirectly) waiting for us, or 664 * will wait for us as soon as it wakes up, then just go on 665 * (instead of producing a deadlock in the former case). 666 */ 667 if (!req->waiting_for) { 668 return req; 669 } 670 } 671 } 672 673 return NULL; 674 } 675 676 /* Called with self->bs->reqs_lock held */ 677 static void coroutine_fn 678 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self) 679 { 680 BdrvTrackedRequest *req; 681 682 while ((req = bdrv_find_conflicting_request(self))) { 683 self->waiting_for = req; 684 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); 685 self->waiting_for = NULL; 686 } 687 } 688 689 /* Called with req->bs->reqs_lock held */ 690 static void tracked_request_set_serialising(BdrvTrackedRequest *req, 691 uint64_t align) 692 { 693 int64_t overlap_offset = req->offset & ~(align - 1); 694 int64_t overlap_bytes = 695 ROUND_UP(req->offset + req->bytes, align) - overlap_offset; 696 697 bdrv_check_request(req->offset, req->bytes, &error_abort); 698 699 if (!req->serialising) { 700 qatomic_inc(&req->bs->serialising_in_flight); 701 req->serialising = true; 702 } 703 704 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 705 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 706 } 707 708 /** 709 * Return the tracked request on @bs for the current coroutine, or 710 * NULL if there is none. 711 */ 712 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) 713 { 714 BdrvTrackedRequest *req; 715 Coroutine *self = qemu_coroutine_self(); 716 IO_CODE(); 717 718 QLIST_FOREACH(req, &bs->tracked_requests, list) { 719 if (req->co == self) { 720 return req; 721 } 722 } 723 724 return NULL; 725 } 726 727 /** 728 * Round a region to cluster boundaries 729 */ 730 void coroutine_fn GRAPH_RDLOCK 731 bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes, 732 int64_t *cluster_offset, int64_t *cluster_bytes) 733 { 734 BlockDriverInfo bdi; 735 IO_CODE(); 736 if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 737 *cluster_offset = offset; 738 *cluster_bytes = bytes; 739 } else { 740 int64_t c = bdi.cluster_size; 741 *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 742 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 743 } 744 } 745 746 static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs) 747 { 748 BlockDriverInfo bdi; 749 int ret; 750 751 ret = bdrv_co_get_info(bs, &bdi); 752 if (ret < 0 || bdi.cluster_size == 0) { 753 return bs->bl.request_alignment; 754 } else { 755 return bdi.cluster_size; 756 } 757 } 758 759 void bdrv_inc_in_flight(BlockDriverState *bs) 760 { 761 IO_CODE(); 762 qatomic_inc(&bs->in_flight); 763 } 764 765 void bdrv_wakeup(BlockDriverState *bs) 766 { 767 IO_CODE(); 768 aio_wait_kick(); 769 } 770 771 void bdrv_dec_in_flight(BlockDriverState *bs) 772 { 773 IO_CODE(); 774 qatomic_dec(&bs->in_flight); 775 bdrv_wakeup(bs); 776 } 777 778 static void coroutine_fn 779 bdrv_wait_serialising_requests(BdrvTrackedRequest *self) 780 { 781 BlockDriverState *bs = self->bs; 782 783 if (!qatomic_read(&bs->serialising_in_flight)) { 784 return; 785 } 786 787 qemu_co_mutex_lock(&bs->reqs_lock); 788 bdrv_wait_serialising_requests_locked(self); 789 qemu_co_mutex_unlock(&bs->reqs_lock); 790 } 791 792 void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, 793 uint64_t align) 794 { 795 IO_CODE(); 796 797 qemu_co_mutex_lock(&req->bs->reqs_lock); 798 799 tracked_request_set_serialising(req, align); 800 bdrv_wait_serialising_requests_locked(req); 801 802 qemu_co_mutex_unlock(&req->bs->reqs_lock); 803 } 804 805 int bdrv_check_qiov_request(int64_t offset, int64_t bytes, 806 QEMUIOVector *qiov, size_t qiov_offset, 807 Error **errp) 808 { 809 /* 810 * Check generic offset/bytes correctness 811 */ 812 813 if (offset < 0) { 814 error_setg(errp, "offset is negative: %" PRIi64, offset); 815 return -EIO; 816 } 817 818 if (bytes < 0) { 819 error_setg(errp, "bytes is negative: %" PRIi64, bytes); 820 return -EIO; 821 } 822 823 if (bytes > BDRV_MAX_LENGTH) { 824 error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")", 825 bytes, BDRV_MAX_LENGTH); 826 return -EIO; 827 } 828 829 if (offset > BDRV_MAX_LENGTH) { 830 error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")", 831 offset, BDRV_MAX_LENGTH); 832 return -EIO; 833 } 834 835 if (offset > BDRV_MAX_LENGTH - bytes) { 836 error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") " 837 "exceeds maximum(%" PRIi64 ")", offset, bytes, 838 BDRV_MAX_LENGTH); 839 return -EIO; 840 } 841 842 if (!qiov) { 843 return 0; 844 } 845 846 /* 847 * Check qiov and qiov_offset 848 */ 849 850 if (qiov_offset > qiov->size) { 851 error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)", 852 qiov_offset, qiov->size); 853 return -EIO; 854 } 855 856 if (bytes > qiov->size - qiov_offset) { 857 error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io " 858 "vector size(%zu)", bytes, qiov_offset, qiov->size); 859 return -EIO; 860 } 861 862 return 0; 863 } 864 865 int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp) 866 { 867 return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp); 868 } 869 870 static int bdrv_check_request32(int64_t offset, int64_t bytes, 871 QEMUIOVector *qiov, size_t qiov_offset) 872 { 873 int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 874 if (ret < 0) { 875 return ret; 876 } 877 878 if (bytes > BDRV_REQUEST_MAX_BYTES) { 879 return -EIO; 880 } 881 882 return 0; 883 } 884 885 /* 886 * Completely zero out a block device with the help of bdrv_pwrite_zeroes. 887 * The operation is sped up by checking the block status and only writing 888 * zeroes to the device if they currently do not return zeroes. Optional 889 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, 890 * BDRV_REQ_FUA). 891 * 892 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite(). 893 */ 894 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) 895 { 896 int ret; 897 int64_t target_size, bytes, offset = 0; 898 BlockDriverState *bs = child->bs; 899 IO_CODE(); 900 901 target_size = bdrv_getlength(bs); 902 if (target_size < 0) { 903 return target_size; 904 } 905 906 for (;;) { 907 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES); 908 if (bytes <= 0) { 909 return 0; 910 } 911 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL); 912 if (ret < 0) { 913 return ret; 914 } 915 if (ret & BDRV_BLOCK_ZERO) { 916 offset += bytes; 917 continue; 918 } 919 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags); 920 if (ret < 0) { 921 return ret; 922 } 923 offset += bytes; 924 } 925 } 926 927 /* 928 * Writes to the file and ensures that no writes are reordered across this 929 * request (acts as a barrier) 930 * 931 * Returns 0 on success, -errno in error cases. 932 */ 933 int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, 934 int64_t bytes, const void *buf, 935 BdrvRequestFlags flags) 936 { 937 int ret; 938 IO_CODE(); 939 assert_bdrv_graph_readable(); 940 941 ret = bdrv_co_pwrite(child, offset, bytes, buf, flags); 942 if (ret < 0) { 943 return ret; 944 } 945 946 ret = bdrv_co_flush(child->bs); 947 if (ret < 0) { 948 return ret; 949 } 950 951 return 0; 952 } 953 954 typedef struct CoroutineIOCompletion { 955 Coroutine *coroutine; 956 int ret; 957 } CoroutineIOCompletion; 958 959 static void bdrv_co_io_em_complete(void *opaque, int ret) 960 { 961 CoroutineIOCompletion *co = opaque; 962 963 co->ret = ret; 964 aio_co_wake(co->coroutine); 965 } 966 967 static int coroutine_fn GRAPH_RDLOCK 968 bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, 969 QEMUIOVector *qiov, size_t qiov_offset, int flags) 970 { 971 BlockDriver *drv = bs->drv; 972 int64_t sector_num; 973 unsigned int nb_sectors; 974 QEMUIOVector local_qiov; 975 int ret; 976 assert_bdrv_graph_readable(); 977 978 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 979 assert(!(flags & ~bs->supported_read_flags)); 980 981 if (!drv) { 982 return -ENOMEDIUM; 983 } 984 985 if (drv->bdrv_co_preadv_part) { 986 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, 987 flags); 988 } 989 990 if (qiov_offset > 0 || bytes != qiov->size) { 991 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 992 qiov = &local_qiov; 993 } 994 995 if (drv->bdrv_co_preadv) { 996 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); 997 goto out; 998 } 999 1000 if (drv->bdrv_aio_preadv) { 1001 BlockAIOCB *acb; 1002 CoroutineIOCompletion co = { 1003 .coroutine = qemu_coroutine_self(), 1004 }; 1005 1006 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, 1007 bdrv_co_io_em_complete, &co); 1008 if (acb == NULL) { 1009 ret = -EIO; 1010 goto out; 1011 } else { 1012 qemu_coroutine_yield(); 1013 ret = co.ret; 1014 goto out; 1015 } 1016 } 1017 1018 sector_num = offset >> BDRV_SECTOR_BITS; 1019 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1020 1021 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1022 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1023 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1024 assert(drv->bdrv_co_readv); 1025 1026 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 1027 1028 out: 1029 if (qiov == &local_qiov) { 1030 qemu_iovec_destroy(&local_qiov); 1031 } 1032 1033 return ret; 1034 } 1035 1036 static int coroutine_fn GRAPH_RDLOCK 1037 bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, 1038 QEMUIOVector *qiov, size_t qiov_offset, 1039 BdrvRequestFlags flags) 1040 { 1041 BlockDriver *drv = bs->drv; 1042 bool emulate_fua = false; 1043 int64_t sector_num; 1044 unsigned int nb_sectors; 1045 QEMUIOVector local_qiov; 1046 int ret; 1047 assert_bdrv_graph_readable(); 1048 1049 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1050 1051 if (!drv) { 1052 return -ENOMEDIUM; 1053 } 1054 1055 if ((flags & BDRV_REQ_FUA) && 1056 (~bs->supported_write_flags & BDRV_REQ_FUA)) { 1057 flags &= ~BDRV_REQ_FUA; 1058 emulate_fua = true; 1059 } 1060 1061 flags &= bs->supported_write_flags; 1062 1063 if (drv->bdrv_co_pwritev_part) { 1064 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 1065 flags); 1066 goto emulate_flags; 1067 } 1068 1069 if (qiov_offset > 0 || bytes != qiov->size) { 1070 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1071 qiov = &local_qiov; 1072 } 1073 1074 if (drv->bdrv_co_pwritev) { 1075 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags); 1076 goto emulate_flags; 1077 } 1078 1079 if (drv->bdrv_aio_pwritev) { 1080 BlockAIOCB *acb; 1081 CoroutineIOCompletion co = { 1082 .coroutine = qemu_coroutine_self(), 1083 }; 1084 1085 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags, 1086 bdrv_co_io_em_complete, &co); 1087 if (acb == NULL) { 1088 ret = -EIO; 1089 } else { 1090 qemu_coroutine_yield(); 1091 ret = co.ret; 1092 } 1093 goto emulate_flags; 1094 } 1095 1096 sector_num = offset >> BDRV_SECTOR_BITS; 1097 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1098 1099 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1100 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1101 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1102 1103 assert(drv->bdrv_co_writev); 1104 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags); 1105 1106 emulate_flags: 1107 if (ret == 0 && emulate_fua) { 1108 ret = bdrv_co_flush(bs); 1109 } 1110 1111 if (qiov == &local_qiov) { 1112 qemu_iovec_destroy(&local_qiov); 1113 } 1114 1115 return ret; 1116 } 1117 1118 static int coroutine_fn GRAPH_RDLOCK 1119 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset, 1120 int64_t bytes, QEMUIOVector *qiov, 1121 size_t qiov_offset) 1122 { 1123 BlockDriver *drv = bs->drv; 1124 QEMUIOVector local_qiov; 1125 int ret; 1126 assert_bdrv_graph_readable(); 1127 1128 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1129 1130 if (!drv) { 1131 return -ENOMEDIUM; 1132 } 1133 1134 if (!block_driver_can_compress(drv)) { 1135 return -ENOTSUP; 1136 } 1137 1138 if (drv->bdrv_co_pwritev_compressed_part) { 1139 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes, 1140 qiov, qiov_offset); 1141 } 1142 1143 if (qiov_offset == 0) { 1144 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); 1145 } 1146 1147 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1148 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov); 1149 qemu_iovec_destroy(&local_qiov); 1150 1151 return ret; 1152 } 1153 1154 static int coroutine_fn GRAPH_RDLOCK 1155 bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, 1156 QEMUIOVector *qiov, size_t qiov_offset, int flags) 1157 { 1158 BlockDriverState *bs = child->bs; 1159 1160 /* Perform I/O through a temporary buffer so that users who scribble over 1161 * their read buffer while the operation is in progress do not end up 1162 * modifying the image file. This is critical for zero-copy guest I/O 1163 * where anything might happen inside guest memory. 1164 */ 1165 void *bounce_buffer = NULL; 1166 1167 BlockDriver *drv = bs->drv; 1168 int64_t cluster_offset; 1169 int64_t cluster_bytes; 1170 int64_t skip_bytes; 1171 int ret; 1172 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 1173 BDRV_REQUEST_MAX_BYTES); 1174 int64_t progress = 0; 1175 bool skip_write; 1176 1177 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1178 1179 if (!drv) { 1180 return -ENOMEDIUM; 1181 } 1182 1183 /* 1184 * Do not write anything when the BDS is inactive. That is not 1185 * allowed, and it would not help. 1186 */ 1187 skip_write = (bs->open_flags & BDRV_O_INACTIVE); 1188 1189 /* FIXME We cannot require callers to have write permissions when all they 1190 * are doing is a read request. If we did things right, write permissions 1191 * would be obtained anyway, but internally by the copy-on-read code. As 1192 * long as it is implemented here rather than in a separate filter driver, 1193 * the copy-on-read code doesn't have its own BdrvChild, however, for which 1194 * it could request permissions. Therefore we have to bypass the permission 1195 * system for the moment. */ 1196 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1197 1198 /* Cover entire cluster so no additional backing file I/O is required when 1199 * allocating cluster in the image file. Note that this value may exceed 1200 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which 1201 * is one reason we loop rather than doing it all at once. 1202 */ 1203 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); 1204 skip_bytes = offset - cluster_offset; 1205 1206 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, 1207 cluster_offset, cluster_bytes); 1208 1209 while (cluster_bytes) { 1210 int64_t pnum; 1211 1212 if (skip_write) { 1213 ret = 1; /* "already allocated", so nothing will be copied */ 1214 pnum = MIN(cluster_bytes, max_transfer); 1215 } else { 1216 ret = bdrv_is_allocated(bs, cluster_offset, 1217 MIN(cluster_bytes, max_transfer), &pnum); 1218 if (ret < 0) { 1219 /* 1220 * Safe to treat errors in querying allocation as if 1221 * unallocated; we'll probably fail again soon on the 1222 * read, but at least that will set a decent errno. 1223 */ 1224 pnum = MIN(cluster_bytes, max_transfer); 1225 } 1226 1227 /* Stop at EOF if the image ends in the middle of the cluster */ 1228 if (ret == 0 && pnum == 0) { 1229 assert(progress >= bytes); 1230 break; 1231 } 1232 1233 assert(skip_bytes < pnum); 1234 } 1235 1236 if (ret <= 0) { 1237 QEMUIOVector local_qiov; 1238 1239 /* Must copy-on-read; use the bounce buffer */ 1240 pnum = MIN(pnum, MAX_BOUNCE_BUFFER); 1241 if (!bounce_buffer) { 1242 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum); 1243 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER); 1244 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed); 1245 1246 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len); 1247 if (!bounce_buffer) { 1248 ret = -ENOMEM; 1249 goto err; 1250 } 1251 } 1252 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); 1253 1254 ret = bdrv_driver_preadv(bs, cluster_offset, pnum, 1255 &local_qiov, 0, 0); 1256 if (ret < 0) { 1257 goto err; 1258 } 1259 1260 bdrv_co_debug_event(bs, BLKDBG_COR_WRITE); 1261 if (drv->bdrv_co_pwrite_zeroes && 1262 buffer_is_zero(bounce_buffer, pnum)) { 1263 /* FIXME: Should we (perhaps conditionally) be setting 1264 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy 1265 * that still correctly reads as zero? */ 1266 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 1267 BDRV_REQ_WRITE_UNCHANGED); 1268 } else { 1269 /* This does not change the data on the disk, it is not 1270 * necessary to flush even in cache=writethrough mode. 1271 */ 1272 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, 1273 &local_qiov, 0, 1274 BDRV_REQ_WRITE_UNCHANGED); 1275 } 1276 1277 if (ret < 0) { 1278 /* It might be okay to ignore write errors for guest 1279 * requests. If this is a deliberate copy-on-read 1280 * then we don't want to ignore the error. Simply 1281 * report it in all cases. 1282 */ 1283 goto err; 1284 } 1285 1286 if (!(flags & BDRV_REQ_PREFETCH)) { 1287 qemu_iovec_from_buf(qiov, qiov_offset + progress, 1288 bounce_buffer + skip_bytes, 1289 MIN(pnum - skip_bytes, bytes - progress)); 1290 } 1291 } else if (!(flags & BDRV_REQ_PREFETCH)) { 1292 /* Read directly into the destination */ 1293 ret = bdrv_driver_preadv(bs, offset + progress, 1294 MIN(pnum - skip_bytes, bytes - progress), 1295 qiov, qiov_offset + progress, 0); 1296 if (ret < 0) { 1297 goto err; 1298 } 1299 } 1300 1301 cluster_offset += pnum; 1302 cluster_bytes -= pnum; 1303 progress += pnum - skip_bytes; 1304 skip_bytes = 0; 1305 } 1306 ret = 0; 1307 1308 err: 1309 qemu_vfree(bounce_buffer); 1310 return ret; 1311 } 1312 1313 /* 1314 * Forwards an already correctly aligned request to the BlockDriver. This 1315 * handles copy on read, zeroing after EOF, and fragmentation of large 1316 * reads; any other features must be implemented by the caller. 1317 */ 1318 static int coroutine_fn GRAPH_RDLOCK 1319 bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req, 1320 int64_t offset, int64_t bytes, int64_t align, 1321 QEMUIOVector *qiov, size_t qiov_offset, int flags) 1322 { 1323 BlockDriverState *bs = child->bs; 1324 int64_t total_bytes, max_bytes; 1325 int ret = 0; 1326 int64_t bytes_remaining = bytes; 1327 int max_transfer; 1328 1329 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1330 assert(is_power_of_2(align)); 1331 assert((offset & (align - 1)) == 0); 1332 assert((bytes & (align - 1)) == 0); 1333 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1334 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1335 align); 1336 1337 /* 1338 * TODO: We would need a per-BDS .supported_read_flags and 1339 * potential fallback support, if we ever implement any read flags 1340 * to pass through to drivers. For now, there aren't any 1341 * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint. 1342 */ 1343 assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH | 1344 BDRV_REQ_REGISTERED_BUF))); 1345 1346 /* Handle Copy on Read and associated serialisation */ 1347 if (flags & BDRV_REQ_COPY_ON_READ) { 1348 /* If we touch the same cluster it counts as an overlap. This 1349 * guarantees that allocating writes will be serialized and not race 1350 * with each other for the same cluster. For example, in copy-on-read 1351 * it ensures that the CoR read and write operations are atomic and 1352 * guest writes cannot interleave between them. */ 1353 bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs)); 1354 } else { 1355 bdrv_wait_serialising_requests(req); 1356 } 1357 1358 if (flags & BDRV_REQ_COPY_ON_READ) { 1359 int64_t pnum; 1360 1361 /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */ 1362 flags &= ~BDRV_REQ_COPY_ON_READ; 1363 1364 ret = bdrv_is_allocated(bs, offset, bytes, &pnum); 1365 if (ret < 0) { 1366 goto out; 1367 } 1368 1369 if (!ret || pnum != bytes) { 1370 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, 1371 qiov, qiov_offset, flags); 1372 goto out; 1373 } else if (flags & BDRV_REQ_PREFETCH) { 1374 goto out; 1375 } 1376 } 1377 1378 /* Forward the request to the BlockDriver, possibly fragmenting it */ 1379 total_bytes = bdrv_getlength(bs); 1380 if (total_bytes < 0) { 1381 ret = total_bytes; 1382 goto out; 1383 } 1384 1385 assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF))); 1386 1387 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); 1388 if (bytes <= max_bytes && bytes <= max_transfer) { 1389 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags); 1390 goto out; 1391 } 1392 1393 while (bytes_remaining) { 1394 int64_t num; 1395 1396 if (max_bytes) { 1397 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer)); 1398 assert(num); 1399 1400 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, 1401 num, qiov, 1402 qiov_offset + bytes - bytes_remaining, 1403 flags); 1404 max_bytes -= num; 1405 } else { 1406 num = bytes_remaining; 1407 ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining, 1408 0, bytes_remaining); 1409 } 1410 if (ret < 0) { 1411 goto out; 1412 } 1413 bytes_remaining -= num; 1414 } 1415 1416 out: 1417 return ret < 0 ? ret : 0; 1418 } 1419 1420 /* 1421 * Request padding 1422 * 1423 * |<---- align ----->| |<----- align ---->| 1424 * |<- head ->|<------------- bytes ------------->|<-- tail -->| 1425 * | | | | | | 1426 * -*----------$-------*-------- ... --------*-----$------------*--- 1427 * | | | | | | 1428 * | offset | | end | 1429 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end) 1430 * [buf ... ) [tail_buf ) 1431 * 1432 * @buf is an aligned allocation needed to store @head and @tail paddings. @head 1433 * is placed at the beginning of @buf and @tail at the @end. 1434 * 1435 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk 1436 * around tail, if tail exists. 1437 * 1438 * @merge_reads is true for small requests, 1439 * if @buf_len == @head + bytes + @tail. In this case it is possible that both 1440 * head and tail exist but @buf_len == align and @tail_buf == @buf. 1441 */ 1442 typedef struct BdrvRequestPadding { 1443 uint8_t *buf; 1444 size_t buf_len; 1445 uint8_t *tail_buf; 1446 size_t head; 1447 size_t tail; 1448 bool merge_reads; 1449 QEMUIOVector local_qiov; 1450 } BdrvRequestPadding; 1451 1452 static bool bdrv_init_padding(BlockDriverState *bs, 1453 int64_t offset, int64_t bytes, 1454 BdrvRequestPadding *pad) 1455 { 1456 int64_t align = bs->bl.request_alignment; 1457 int64_t sum; 1458 1459 bdrv_check_request(offset, bytes, &error_abort); 1460 assert(align <= INT_MAX); /* documented in block/block_int.h */ 1461 assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */ 1462 1463 memset(pad, 0, sizeof(*pad)); 1464 1465 pad->head = offset & (align - 1); 1466 pad->tail = ((offset + bytes) & (align - 1)); 1467 if (pad->tail) { 1468 pad->tail = align - pad->tail; 1469 } 1470 1471 if (!pad->head && !pad->tail) { 1472 return false; 1473 } 1474 1475 assert(bytes); /* Nothing good in aligning zero-length requests */ 1476 1477 sum = pad->head + bytes + pad->tail; 1478 pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align; 1479 pad->buf = qemu_blockalign(bs, pad->buf_len); 1480 pad->merge_reads = sum == pad->buf_len; 1481 if (pad->tail) { 1482 pad->tail_buf = pad->buf + pad->buf_len - align; 1483 } 1484 1485 return true; 1486 } 1487 1488 static int coroutine_fn GRAPH_RDLOCK 1489 bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req, 1490 BdrvRequestPadding *pad, bool zero_middle) 1491 { 1492 QEMUIOVector local_qiov; 1493 BlockDriverState *bs = child->bs; 1494 uint64_t align = bs->bl.request_alignment; 1495 int ret; 1496 1497 assert(req->serialising && pad->buf); 1498 1499 if (pad->head || pad->merge_reads) { 1500 int64_t bytes = pad->merge_reads ? pad->buf_len : align; 1501 1502 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); 1503 1504 if (pad->head) { 1505 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 1506 } 1507 if (pad->merge_reads && pad->tail) { 1508 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1509 } 1510 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, 1511 align, &local_qiov, 0, 0); 1512 if (ret < 0) { 1513 return ret; 1514 } 1515 if (pad->head) { 1516 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1517 } 1518 if (pad->merge_reads && pad->tail) { 1519 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1520 } 1521 1522 if (pad->merge_reads) { 1523 goto zero_mem; 1524 } 1525 } 1526 1527 if (pad->tail) { 1528 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); 1529 1530 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1531 ret = bdrv_aligned_preadv( 1532 child, req, 1533 req->overlap_offset + req->overlap_bytes - align, 1534 align, align, &local_qiov, 0, 0); 1535 if (ret < 0) { 1536 return ret; 1537 } 1538 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1539 } 1540 1541 zero_mem: 1542 if (zero_middle) { 1543 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); 1544 } 1545 1546 return 0; 1547 } 1548 1549 static void bdrv_padding_destroy(BdrvRequestPadding *pad) 1550 { 1551 if (pad->buf) { 1552 qemu_vfree(pad->buf); 1553 qemu_iovec_destroy(&pad->local_qiov); 1554 } 1555 memset(pad, 0, sizeof(*pad)); 1556 } 1557 1558 /* 1559 * bdrv_pad_request 1560 * 1561 * Exchange request parameters with padded request if needed. Don't include RMW 1562 * read of padding, bdrv_padding_rmw_read() should be called separately if 1563 * needed. 1564 * 1565 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out: 1566 * - on function start they represent original request 1567 * - on failure or when padding is not needed they are unchanged 1568 * - on success when padding is needed they represent padded request 1569 */ 1570 static int bdrv_pad_request(BlockDriverState *bs, 1571 QEMUIOVector **qiov, size_t *qiov_offset, 1572 int64_t *offset, int64_t *bytes, 1573 BdrvRequestPadding *pad, bool *padded, 1574 BdrvRequestFlags *flags) 1575 { 1576 int ret; 1577 1578 bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort); 1579 1580 if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { 1581 if (padded) { 1582 *padded = false; 1583 } 1584 return 0; 1585 } 1586 1587 ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, 1588 *qiov, *qiov_offset, *bytes, 1589 pad->buf + pad->buf_len - pad->tail, 1590 pad->tail); 1591 if (ret < 0) { 1592 bdrv_padding_destroy(pad); 1593 return ret; 1594 } 1595 *bytes += pad->head + pad->tail; 1596 *offset -= pad->head; 1597 *qiov = &pad->local_qiov; 1598 *qiov_offset = 0; 1599 if (padded) { 1600 *padded = true; 1601 } 1602 if (flags) { 1603 /* Can't use optimization hint with bounce buffer */ 1604 *flags &= ~BDRV_REQ_REGISTERED_BUF; 1605 } 1606 1607 return 0; 1608 } 1609 1610 int coroutine_fn bdrv_co_preadv(BdrvChild *child, 1611 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 1612 BdrvRequestFlags flags) 1613 { 1614 IO_CODE(); 1615 return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags); 1616 } 1617 1618 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, 1619 int64_t offset, int64_t bytes, 1620 QEMUIOVector *qiov, size_t qiov_offset, 1621 BdrvRequestFlags flags) 1622 { 1623 BlockDriverState *bs = child->bs; 1624 BdrvTrackedRequest req; 1625 BdrvRequestPadding pad; 1626 int ret; 1627 IO_CODE(); 1628 1629 trace_bdrv_co_preadv_part(bs, offset, bytes, flags); 1630 1631 if (!bdrv_co_is_inserted(bs)) { 1632 return -ENOMEDIUM; 1633 } 1634 1635 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 1636 if (ret < 0) { 1637 return ret; 1638 } 1639 1640 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 1641 /* 1642 * Aligning zero request is nonsense. Even if driver has special meaning 1643 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass 1644 * it to driver due to request_alignment. 1645 * 1646 * Still, no reason to return an error if someone do unaligned 1647 * zero-length read occasionally. 1648 */ 1649 return 0; 1650 } 1651 1652 bdrv_inc_in_flight(bs); 1653 1654 /* Don't do copy-on-read if we read data before write operation */ 1655 if (qatomic_read(&bs->copy_on_read)) { 1656 flags |= BDRV_REQ_COPY_ON_READ; 1657 } 1658 1659 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 1660 NULL, &flags); 1661 if (ret < 0) { 1662 goto fail; 1663 } 1664 1665 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); 1666 ret = bdrv_aligned_preadv(child, &req, offset, bytes, 1667 bs->bl.request_alignment, 1668 qiov, qiov_offset, flags); 1669 tracked_request_end(&req); 1670 bdrv_padding_destroy(&pad); 1671 1672 fail: 1673 bdrv_dec_in_flight(bs); 1674 1675 return ret; 1676 } 1677 1678 static int coroutine_fn GRAPH_RDLOCK 1679 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, 1680 BdrvRequestFlags flags) 1681 { 1682 BlockDriver *drv = bs->drv; 1683 QEMUIOVector qiov; 1684 void *buf = NULL; 1685 int ret = 0; 1686 bool need_flush = false; 1687 int head = 0; 1688 int tail = 0; 1689 1690 int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, 1691 INT64_MAX); 1692 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, 1693 bs->bl.request_alignment); 1694 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); 1695 1696 assert_bdrv_graph_readable(); 1697 bdrv_check_request(offset, bytes, &error_abort); 1698 1699 if (!drv) { 1700 return -ENOMEDIUM; 1701 } 1702 1703 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { 1704 return -ENOTSUP; 1705 } 1706 1707 /* By definition there is no user buffer so this flag doesn't make sense */ 1708 if (flags & BDRV_REQ_REGISTERED_BUF) { 1709 return -EINVAL; 1710 } 1711 1712 /* Invalidate the cached block-status data range if this write overlaps */ 1713 bdrv_bsc_invalidate_range(bs, offset, bytes); 1714 1715 assert(alignment % bs->bl.request_alignment == 0); 1716 head = offset % alignment; 1717 tail = (offset + bytes) % alignment; 1718 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); 1719 assert(max_write_zeroes >= bs->bl.request_alignment); 1720 1721 while (bytes > 0 && !ret) { 1722 int64_t num = bytes; 1723 1724 /* Align request. Block drivers can expect the "bulk" of the request 1725 * to be aligned, and that unaligned requests do not cross cluster 1726 * boundaries. 1727 */ 1728 if (head) { 1729 /* Make a small request up to the first aligned sector. For 1730 * convenience, limit this request to max_transfer even if 1731 * we don't need to fall back to writes. */ 1732 num = MIN(MIN(bytes, max_transfer), alignment - head); 1733 head = (head + num) % alignment; 1734 assert(num < max_write_zeroes); 1735 } else if (tail && num > alignment) { 1736 /* Shorten the request to the last aligned sector. */ 1737 num -= tail; 1738 } 1739 1740 /* limit request size */ 1741 if (num > max_write_zeroes) { 1742 num = max_write_zeroes; 1743 } 1744 1745 ret = -ENOTSUP; 1746 /* First try the efficient write zeroes operation */ 1747 if (drv->bdrv_co_pwrite_zeroes) { 1748 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1749 flags & bs->supported_zero_flags); 1750 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1751 !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1752 need_flush = true; 1753 } 1754 } else { 1755 assert(!bs->supported_zero_flags); 1756 } 1757 1758 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { 1759 /* Fall back to bounce buffer if write zeroes is unsupported */ 1760 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1761 1762 if ((flags & BDRV_REQ_FUA) && 1763 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1764 /* No need for bdrv_driver_pwrite() to do a fallback 1765 * flush on each chunk; use just one at the end */ 1766 write_flags &= ~BDRV_REQ_FUA; 1767 need_flush = true; 1768 } 1769 num = MIN(num, max_transfer); 1770 if (buf == NULL) { 1771 buf = qemu_try_blockalign0(bs, num); 1772 if (buf == NULL) { 1773 ret = -ENOMEM; 1774 goto fail; 1775 } 1776 } 1777 qemu_iovec_init_buf(&qiov, buf, num); 1778 1779 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags); 1780 1781 /* Keep bounce buffer around if it is big enough for all 1782 * all future requests. 1783 */ 1784 if (num < max_transfer) { 1785 qemu_vfree(buf); 1786 buf = NULL; 1787 } 1788 } 1789 1790 offset += num; 1791 bytes -= num; 1792 } 1793 1794 fail: 1795 if (ret == 0 && need_flush) { 1796 ret = bdrv_co_flush(bs); 1797 } 1798 qemu_vfree(buf); 1799 return ret; 1800 } 1801 1802 static inline int coroutine_fn GRAPH_RDLOCK 1803 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes, 1804 BdrvTrackedRequest *req, int flags) 1805 { 1806 BlockDriverState *bs = child->bs; 1807 1808 bdrv_check_request(offset, bytes, &error_abort); 1809 1810 if (bdrv_is_read_only(bs)) { 1811 return -EPERM; 1812 } 1813 1814 assert(!(bs->open_flags & BDRV_O_INACTIVE)); 1815 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1816 assert(!(flags & ~BDRV_REQ_MASK)); 1817 assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING))); 1818 1819 if (flags & BDRV_REQ_SERIALISING) { 1820 QEMU_LOCK_GUARD(&bs->reqs_lock); 1821 1822 tracked_request_set_serialising(req, bdrv_get_cluster_size(bs)); 1823 1824 if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) { 1825 return -EBUSY; 1826 } 1827 1828 bdrv_wait_serialising_requests_locked(req); 1829 } else { 1830 bdrv_wait_serialising_requests(req); 1831 } 1832 1833 assert(req->overlap_offset <= offset); 1834 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 1835 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE || 1836 child->perm & BLK_PERM_RESIZE); 1837 1838 switch (req->type) { 1839 case BDRV_TRACKED_WRITE: 1840 case BDRV_TRACKED_DISCARD: 1841 if (flags & BDRV_REQ_WRITE_UNCHANGED) { 1842 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1843 } else { 1844 assert(child->perm & BLK_PERM_WRITE); 1845 } 1846 bdrv_write_threshold_check_write(bs, offset, bytes); 1847 return 0; 1848 case BDRV_TRACKED_TRUNCATE: 1849 assert(child->perm & BLK_PERM_RESIZE); 1850 return 0; 1851 default: 1852 abort(); 1853 } 1854 } 1855 1856 static inline void coroutine_fn 1857 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes, 1858 BdrvTrackedRequest *req, int ret) 1859 { 1860 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 1861 BlockDriverState *bs = child->bs; 1862 1863 bdrv_check_request(offset, bytes, &error_abort); 1864 1865 qatomic_inc(&bs->write_gen); 1866 1867 /* 1868 * Discard cannot extend the image, but in error handling cases, such as 1869 * when reverting a qcow2 cluster allocation, the discarded range can pass 1870 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 1871 * here. Instead, just skip it, since semantically a discard request 1872 * beyond EOF cannot expand the image anyway. 1873 */ 1874 if (ret == 0 && 1875 (req->type == BDRV_TRACKED_TRUNCATE || 1876 end_sector > bs->total_sectors) && 1877 req->type != BDRV_TRACKED_DISCARD) { 1878 bs->total_sectors = end_sector; 1879 bdrv_parent_cb_resize(bs); 1880 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 1881 } 1882 if (req->bytes) { 1883 switch (req->type) { 1884 case BDRV_TRACKED_WRITE: 1885 stat64_max(&bs->wr_highest_offset, offset + bytes); 1886 /* fall through, to set dirty bits */ 1887 case BDRV_TRACKED_DISCARD: 1888 bdrv_set_dirty(bs, offset, bytes); 1889 break; 1890 default: 1891 break; 1892 } 1893 } 1894 } 1895 1896 /* 1897 * Forwards an already correctly aligned write request to the BlockDriver, 1898 * after possibly fragmenting it. 1899 */ 1900 static int coroutine_fn GRAPH_RDLOCK 1901 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, 1902 int64_t offset, int64_t bytes, int64_t align, 1903 QEMUIOVector *qiov, size_t qiov_offset, 1904 BdrvRequestFlags flags) 1905 { 1906 BlockDriverState *bs = child->bs; 1907 BlockDriver *drv = bs->drv; 1908 int ret; 1909 1910 int64_t bytes_remaining = bytes; 1911 int max_transfer; 1912 1913 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1914 1915 if (!drv) { 1916 return -ENOMEDIUM; 1917 } 1918 1919 if (bdrv_has_readonly_bitmaps(bs)) { 1920 return -EPERM; 1921 } 1922 1923 assert(is_power_of_2(align)); 1924 assert((offset & (align - 1)) == 0); 1925 assert((bytes & (align - 1)) == 0); 1926 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1927 align); 1928 1929 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 1930 1931 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1932 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 1933 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 1934 flags |= BDRV_REQ_ZERO_WRITE; 1935 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 1936 flags |= BDRV_REQ_MAY_UNMAP; 1937 } 1938 1939 /* Can't use optimization hint with bufferless zero write */ 1940 flags &= ~BDRV_REQ_REGISTERED_BUF; 1941 } 1942 1943 if (ret < 0) { 1944 /* Do nothing, write notifier decided to fail this request */ 1945 } else if (flags & BDRV_REQ_ZERO_WRITE) { 1946 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO); 1947 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 1948 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 1949 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 1950 qiov, qiov_offset); 1951 } else if (bytes <= max_transfer) { 1952 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 1953 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 1954 } else { 1955 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 1956 while (bytes_remaining) { 1957 int num = MIN(bytes_remaining, max_transfer); 1958 int local_flags = flags; 1959 1960 assert(num); 1961 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 1962 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1963 /* If FUA is going to be emulated by flush, we only 1964 * need to flush on the last iteration */ 1965 local_flags &= ~BDRV_REQ_FUA; 1966 } 1967 1968 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 1969 num, qiov, 1970 qiov_offset + bytes - bytes_remaining, 1971 local_flags); 1972 if (ret < 0) { 1973 break; 1974 } 1975 bytes_remaining -= num; 1976 } 1977 } 1978 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE); 1979 1980 if (ret >= 0) { 1981 ret = 0; 1982 } 1983 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 1984 1985 return ret; 1986 } 1987 1988 static int coroutine_fn GRAPH_RDLOCK 1989 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, 1990 BdrvRequestFlags flags, BdrvTrackedRequest *req) 1991 { 1992 BlockDriverState *bs = child->bs; 1993 QEMUIOVector local_qiov; 1994 uint64_t align = bs->bl.request_alignment; 1995 int ret = 0; 1996 bool padding; 1997 BdrvRequestPadding pad; 1998 1999 /* This flag doesn't make sense for padding or zero writes */ 2000 flags &= ~BDRV_REQ_REGISTERED_BUF; 2001 2002 padding = bdrv_init_padding(bs, offset, bytes, &pad); 2003 if (padding) { 2004 assert(!(flags & BDRV_REQ_NO_WAIT)); 2005 bdrv_make_request_serialising(req, align); 2006 2007 bdrv_padding_rmw_read(child, req, &pad, true); 2008 2009 if (pad.head || pad.merge_reads) { 2010 int64_t aligned_offset = offset & ~(align - 1); 2011 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2012 2013 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2014 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2015 align, &local_qiov, 0, 2016 flags & ~BDRV_REQ_ZERO_WRITE); 2017 if (ret < 0 || pad.merge_reads) { 2018 /* Error or all work is done */ 2019 goto out; 2020 } 2021 offset += write_bytes - pad.head; 2022 bytes -= write_bytes - pad.head; 2023 } 2024 } 2025 2026 assert(!bytes || (offset & (align - 1)) == 0); 2027 if (bytes >= align) { 2028 /* Write the aligned part in the middle. */ 2029 int64_t aligned_bytes = bytes & ~(align - 1); 2030 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2031 NULL, 0, flags); 2032 if (ret < 0) { 2033 goto out; 2034 } 2035 bytes -= aligned_bytes; 2036 offset += aligned_bytes; 2037 } 2038 2039 assert(!bytes || (offset & (align - 1)) == 0); 2040 if (bytes) { 2041 assert(align == pad.tail + bytes); 2042 2043 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2044 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2045 &local_qiov, 0, 2046 flags & ~BDRV_REQ_ZERO_WRITE); 2047 } 2048 2049 out: 2050 bdrv_padding_destroy(&pad); 2051 2052 return ret; 2053 } 2054 2055 /* 2056 * Handle a write request in coroutine context 2057 */ 2058 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2059 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 2060 BdrvRequestFlags flags) 2061 { 2062 IO_CODE(); 2063 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2064 } 2065 2066 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2067 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, 2068 BdrvRequestFlags flags) 2069 { 2070 BlockDriverState *bs = child->bs; 2071 BdrvTrackedRequest req; 2072 uint64_t align = bs->bl.request_alignment; 2073 BdrvRequestPadding pad; 2074 int ret; 2075 bool padded = false; 2076 IO_CODE(); 2077 2078 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); 2079 2080 if (!bdrv_co_is_inserted(bs)) { 2081 return -ENOMEDIUM; 2082 } 2083 2084 if (flags & BDRV_REQ_ZERO_WRITE) { 2085 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 2086 } else { 2087 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 2088 } 2089 if (ret < 0) { 2090 return ret; 2091 } 2092 2093 /* If the request is misaligned then we can't make it efficient */ 2094 if ((flags & BDRV_REQ_NO_FALLBACK) && 2095 !QEMU_IS_ALIGNED(offset | bytes, align)) 2096 { 2097 return -ENOTSUP; 2098 } 2099 2100 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 2101 /* 2102 * Aligning zero request is nonsense. Even if driver has special meaning 2103 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass 2104 * it to driver due to request_alignment. 2105 * 2106 * Still, no reason to return an error if someone do unaligned 2107 * zero-length write occasionally. 2108 */ 2109 return 0; 2110 } 2111 2112 if (!(flags & BDRV_REQ_ZERO_WRITE)) { 2113 /* 2114 * Pad request for following read-modify-write cycle. 2115 * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do 2116 * alignment only if there is no ZERO flag. 2117 */ 2118 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 2119 &padded, &flags); 2120 if (ret < 0) { 2121 return ret; 2122 } 2123 } 2124 2125 bdrv_inc_in_flight(bs); 2126 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 2127 2128 if (flags & BDRV_REQ_ZERO_WRITE) { 2129 assert(!padded); 2130 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 2131 goto out; 2132 } 2133 2134 if (padded) { 2135 /* 2136 * Request was unaligned to request_alignment and therefore 2137 * padded. We are going to do read-modify-write, and must 2138 * serialize the request to prevent interactions of the 2139 * widened region with other transactions. 2140 */ 2141 assert(!(flags & BDRV_REQ_NO_WAIT)); 2142 bdrv_make_request_serialising(&req, align); 2143 bdrv_padding_rmw_read(child, &req, &pad, false); 2144 } 2145 2146 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, 2147 qiov, qiov_offset, flags); 2148 2149 bdrv_padding_destroy(&pad); 2150 2151 out: 2152 tracked_request_end(&req); 2153 bdrv_dec_in_flight(bs); 2154 2155 return ret; 2156 } 2157 2158 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 2159 int64_t bytes, BdrvRequestFlags flags) 2160 { 2161 IO_CODE(); 2162 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); 2163 assert_bdrv_graph_readable(); 2164 2165 if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 2166 flags &= ~BDRV_REQ_MAY_UNMAP; 2167 } 2168 2169 return bdrv_co_pwritev(child, offset, bytes, NULL, 2170 BDRV_REQ_ZERO_WRITE | flags); 2171 } 2172 2173 /* 2174 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not. 2175 */ 2176 int bdrv_flush_all(void) 2177 { 2178 BdrvNextIterator it; 2179 BlockDriverState *bs = NULL; 2180 int result = 0; 2181 2182 GLOBAL_STATE_CODE(); 2183 2184 /* 2185 * bdrv queue is managed by record/replay, 2186 * creating new flush request for stopping 2187 * the VM may break the determinism 2188 */ 2189 if (replay_events_enabled()) { 2190 return result; 2191 } 2192 2193 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 2194 AioContext *aio_context = bdrv_get_aio_context(bs); 2195 int ret; 2196 2197 aio_context_acquire(aio_context); 2198 ret = bdrv_flush(bs); 2199 if (ret < 0 && !result) { 2200 result = ret; 2201 } 2202 aio_context_release(aio_context); 2203 } 2204 2205 return result; 2206 } 2207 2208 /* 2209 * Returns the allocation status of the specified sectors. 2210 * Drivers not implementing the functionality are assumed to not support 2211 * backing files, hence all their sectors are reported as allocated. 2212 * 2213 * If 'want_zero' is true, the caller is querying for mapping 2214 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and 2215 * _ZERO where possible; otherwise, the result favors larger 'pnum', 2216 * with a focus on accurate BDRV_BLOCK_ALLOCATED. 2217 * 2218 * If 'offset' is beyond the end of the disk image the return value is 2219 * BDRV_BLOCK_EOF and 'pnum' is set to 0. 2220 * 2221 * 'bytes' is the max value 'pnum' should be set to. If bytes goes 2222 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2223 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2224 * 2225 * 'pnum' is set to the number of bytes (including and immediately 2226 * following the specified offset) that are easily known to be in the 2227 * same allocated/unallocated state. Note that a second call starting 2228 * at the original offset plus returned pnum may have the same status. 2229 * The returned value is non-zero on success except at end-of-file. 2230 * 2231 * Returns negative errno on failure. Otherwise, if the 2232 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2233 * set to the host mapping and BDS corresponding to the guest offset. 2234 */ 2235 static int coroutine_fn GRAPH_RDLOCK 2236 bdrv_co_block_status(BlockDriverState *bs, bool want_zero, 2237 int64_t offset, int64_t bytes, 2238 int64_t *pnum, int64_t *map, BlockDriverState **file) 2239 { 2240 int64_t total_size; 2241 int64_t n; /* bytes */ 2242 int ret; 2243 int64_t local_map = 0; 2244 BlockDriverState *local_file = NULL; 2245 int64_t aligned_offset, aligned_bytes; 2246 uint32_t align; 2247 bool has_filtered_child; 2248 2249 assert(pnum); 2250 assert_bdrv_graph_readable(); 2251 *pnum = 0; 2252 total_size = bdrv_getlength(bs); 2253 if (total_size < 0) { 2254 ret = total_size; 2255 goto early_out; 2256 } 2257 2258 if (offset >= total_size) { 2259 ret = BDRV_BLOCK_EOF; 2260 goto early_out; 2261 } 2262 if (!bytes) { 2263 ret = 0; 2264 goto early_out; 2265 } 2266 2267 n = total_size - offset; 2268 if (n < bytes) { 2269 bytes = n; 2270 } 2271 2272 /* Must be non-NULL or bdrv_getlength() would have failed */ 2273 assert(bs->drv); 2274 has_filtered_child = bdrv_filter_child(bs); 2275 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2276 *pnum = bytes; 2277 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2278 if (offset + bytes == total_size) { 2279 ret |= BDRV_BLOCK_EOF; 2280 } 2281 if (bs->drv->protocol_name) { 2282 ret |= BDRV_BLOCK_OFFSET_VALID; 2283 local_map = offset; 2284 local_file = bs; 2285 } 2286 goto early_out; 2287 } 2288 2289 bdrv_inc_in_flight(bs); 2290 2291 /* Round out to request_alignment boundaries */ 2292 align = bs->bl.request_alignment; 2293 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2294 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2295 2296 if (bs->drv->bdrv_co_block_status) { 2297 /* 2298 * Use the block-status cache only for protocol nodes: Format 2299 * drivers are generally quick to inquire the status, but protocol 2300 * drivers often need to get information from outside of qemu, so 2301 * we do not have control over the actual implementation. There 2302 * have been cases where inquiring the status took an unreasonably 2303 * long time, and we can do nothing in qemu to fix it. 2304 * This is especially problematic for images with large data areas, 2305 * because finding the few holes in them and giving them special 2306 * treatment does not gain much performance. Therefore, we try to 2307 * cache the last-identified data region. 2308 * 2309 * Second, limiting ourselves to protocol nodes allows us to assume 2310 * the block status for data regions to be DATA | OFFSET_VALID, and 2311 * that the host offset is the same as the guest offset. 2312 * 2313 * Note that it is possible that external writers zero parts of 2314 * the cached regions without the cache being invalidated, and so 2315 * we may report zeroes as data. This is not catastrophic, 2316 * however, because reporting zeroes as data is fine. 2317 */ 2318 if (QLIST_EMPTY(&bs->children) && 2319 bdrv_bsc_is_data(bs, aligned_offset, pnum)) 2320 { 2321 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2322 local_file = bs; 2323 local_map = aligned_offset; 2324 } else { 2325 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2326 aligned_bytes, pnum, &local_map, 2327 &local_file); 2328 2329 /* 2330 * Note that checking QLIST_EMPTY(&bs->children) is also done when 2331 * the cache is queried above. Technically, we do not need to check 2332 * it here; the worst that can happen is that we fill the cache for 2333 * non-protocol nodes, and then it is never used. However, filling 2334 * the cache requires an RCU update, so double check here to avoid 2335 * such an update if possible. 2336 * 2337 * Check want_zero, because we only want to update the cache when we 2338 * have accurate information about what is zero and what is data. 2339 */ 2340 if (want_zero && 2341 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) && 2342 QLIST_EMPTY(&bs->children)) 2343 { 2344 /* 2345 * When a protocol driver reports BLOCK_OFFSET_VALID, the 2346 * returned local_map value must be the same as the offset we 2347 * have passed (aligned_offset), and local_bs must be the node 2348 * itself. 2349 * Assert this, because we follow this rule when reading from 2350 * the cache (see the `local_file = bs` and 2351 * `local_map = aligned_offset` assignments above), and the 2352 * result the cache delivers must be the same as the driver 2353 * would deliver. 2354 */ 2355 assert(local_file == bs); 2356 assert(local_map == aligned_offset); 2357 bdrv_bsc_fill(bs, aligned_offset, *pnum); 2358 } 2359 } 2360 } else { 2361 /* Default code for filters */ 2362 2363 local_file = bdrv_filter_bs(bs); 2364 assert(local_file); 2365 2366 *pnum = aligned_bytes; 2367 local_map = aligned_offset; 2368 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2369 } 2370 if (ret < 0) { 2371 *pnum = 0; 2372 goto out; 2373 } 2374 2375 /* 2376 * The driver's result must be a non-zero multiple of request_alignment. 2377 * Clamp pnum and adjust map to original request. 2378 */ 2379 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2380 align > offset - aligned_offset); 2381 if (ret & BDRV_BLOCK_RECURSE) { 2382 assert(ret & BDRV_BLOCK_DATA); 2383 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2384 assert(!(ret & BDRV_BLOCK_ZERO)); 2385 } 2386 2387 *pnum -= offset - aligned_offset; 2388 if (*pnum > bytes) { 2389 *pnum = bytes; 2390 } 2391 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2392 local_map += offset - aligned_offset; 2393 } 2394 2395 if (ret & BDRV_BLOCK_RAW) { 2396 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2397 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2398 *pnum, pnum, &local_map, &local_file); 2399 goto out; 2400 } 2401 2402 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2403 ret |= BDRV_BLOCK_ALLOCATED; 2404 } else if (bs->drv->supports_backing) { 2405 BlockDriverState *cow_bs = bdrv_cow_bs(bs); 2406 2407 if (!cow_bs) { 2408 ret |= BDRV_BLOCK_ZERO; 2409 } else if (want_zero) { 2410 int64_t size2 = bdrv_getlength(cow_bs); 2411 2412 if (size2 >= 0 && offset >= size2) { 2413 ret |= BDRV_BLOCK_ZERO; 2414 } 2415 } 2416 } 2417 2418 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2419 local_file && local_file != bs && 2420 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2421 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2422 int64_t file_pnum; 2423 int ret2; 2424 2425 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2426 *pnum, &file_pnum, NULL, NULL); 2427 if (ret2 >= 0) { 2428 /* Ignore errors. This is just providing extra information, it 2429 * is useful but not necessary. 2430 */ 2431 if (ret2 & BDRV_BLOCK_EOF && 2432 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2433 /* 2434 * It is valid for the format block driver to read 2435 * beyond the end of the underlying file's current 2436 * size; such areas read as zero. 2437 */ 2438 ret |= BDRV_BLOCK_ZERO; 2439 } else { 2440 /* Limit request to the range reported by the protocol driver */ 2441 *pnum = file_pnum; 2442 ret |= (ret2 & BDRV_BLOCK_ZERO); 2443 } 2444 } 2445 } 2446 2447 out: 2448 bdrv_dec_in_flight(bs); 2449 if (ret >= 0 && offset + *pnum == total_size) { 2450 ret |= BDRV_BLOCK_EOF; 2451 } 2452 early_out: 2453 if (file) { 2454 *file = local_file; 2455 } 2456 if (map) { 2457 *map = local_map; 2458 } 2459 return ret; 2460 } 2461 2462 int coroutine_fn 2463 bdrv_co_common_block_status_above(BlockDriverState *bs, 2464 BlockDriverState *base, 2465 bool include_base, 2466 bool want_zero, 2467 int64_t offset, 2468 int64_t bytes, 2469 int64_t *pnum, 2470 int64_t *map, 2471 BlockDriverState **file, 2472 int *depth) 2473 { 2474 int ret; 2475 BlockDriverState *p; 2476 int64_t eof = 0; 2477 int dummy; 2478 IO_CODE(); 2479 2480 assert(!include_base || base); /* Can't include NULL base */ 2481 assert_bdrv_graph_readable(); 2482 2483 if (!depth) { 2484 depth = &dummy; 2485 } 2486 *depth = 0; 2487 2488 if (!include_base && bs == base) { 2489 *pnum = bytes; 2490 return 0; 2491 } 2492 2493 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file); 2494 ++*depth; 2495 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { 2496 return ret; 2497 } 2498 2499 if (ret & BDRV_BLOCK_EOF) { 2500 eof = offset + *pnum; 2501 } 2502 2503 assert(*pnum <= bytes); 2504 bytes = *pnum; 2505 2506 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; 2507 p = bdrv_filter_or_cow_bs(p)) 2508 { 2509 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2510 file); 2511 ++*depth; 2512 if (ret < 0) { 2513 return ret; 2514 } 2515 if (*pnum == 0) { 2516 /* 2517 * The top layer deferred to this layer, and because this layer is 2518 * short, any zeroes that we synthesize beyond EOF behave as if they 2519 * were allocated at this layer. 2520 * 2521 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be 2522 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2523 * below. 2524 */ 2525 assert(ret & BDRV_BLOCK_EOF); 2526 *pnum = bytes; 2527 if (file) { 2528 *file = p; 2529 } 2530 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED; 2531 break; 2532 } 2533 if (ret & BDRV_BLOCK_ALLOCATED) { 2534 /* 2535 * We've found the node and the status, we must break. 2536 * 2537 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be 2538 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2539 * below. 2540 */ 2541 ret &= ~BDRV_BLOCK_EOF; 2542 break; 2543 } 2544 2545 if (p == base) { 2546 assert(include_base); 2547 break; 2548 } 2549 2550 /* 2551 * OK, [offset, offset + *pnum) region is unallocated on this layer, 2552 * let's continue the diving. 2553 */ 2554 assert(*pnum <= bytes); 2555 bytes = *pnum; 2556 } 2557 2558 if (offset + *pnum == eof) { 2559 ret |= BDRV_BLOCK_EOF; 2560 } 2561 2562 return ret; 2563 } 2564 2565 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2566 BlockDriverState *base, 2567 int64_t offset, int64_t bytes, 2568 int64_t *pnum, int64_t *map, 2569 BlockDriverState **file) 2570 { 2571 IO_CODE(); 2572 return bdrv_co_common_block_status_above(bs, base, false, true, offset, 2573 bytes, pnum, map, file, NULL); 2574 } 2575 2576 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2577 int64_t offset, int64_t bytes, int64_t *pnum, 2578 int64_t *map, BlockDriverState **file) 2579 { 2580 IO_CODE(); 2581 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2582 pnum, map, file, NULL); 2583 } 2584 2585 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2586 int64_t *pnum, int64_t *map, BlockDriverState **file) 2587 { 2588 IO_CODE(); 2589 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2590 offset, bytes, pnum, map, file); 2591 } 2592 2593 /* 2594 * Check @bs (and its backing chain) to see if the range defined 2595 * by @offset and @bytes is known to read as zeroes. 2596 * Return 1 if that is the case, 0 otherwise and -errno on error. 2597 * This test is meant to be fast rather than accurate so returning 0 2598 * does not guarantee non-zero data. 2599 */ 2600 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2601 int64_t bytes) 2602 { 2603 int ret; 2604 int64_t pnum = bytes; 2605 IO_CODE(); 2606 2607 if (!bytes) { 2608 return 1; 2609 } 2610 2611 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, 2612 bytes, &pnum, NULL, NULL, NULL); 2613 2614 if (ret < 0) { 2615 return ret; 2616 } 2617 2618 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2619 } 2620 2621 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, 2622 int64_t bytes, int64_t *pnum) 2623 { 2624 int ret; 2625 int64_t dummy; 2626 IO_CODE(); 2627 2628 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, 2629 bytes, pnum ? pnum : &dummy, NULL, 2630 NULL, NULL); 2631 if (ret < 0) { 2632 return ret; 2633 } 2634 return !!(ret & BDRV_BLOCK_ALLOCATED); 2635 } 2636 2637 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, 2638 int64_t *pnum) 2639 { 2640 int ret; 2641 int64_t dummy; 2642 IO_CODE(); 2643 2644 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2645 bytes, pnum ? pnum : &dummy, NULL, 2646 NULL, NULL); 2647 if (ret < 0) { 2648 return ret; 2649 } 2650 return !!(ret & BDRV_BLOCK_ALLOCATED); 2651 } 2652 2653 /* See bdrv_is_allocated_above for documentation */ 2654 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, 2655 BlockDriverState *base, 2656 bool include_base, int64_t offset, 2657 int64_t bytes, int64_t *pnum) 2658 { 2659 int depth; 2660 int ret; 2661 IO_CODE(); 2662 2663 ret = bdrv_co_common_block_status_above(top, base, include_base, false, 2664 offset, bytes, pnum, NULL, NULL, 2665 &depth); 2666 if (ret < 0) { 2667 return ret; 2668 } 2669 2670 if (ret & BDRV_BLOCK_ALLOCATED) { 2671 return depth; 2672 } 2673 return 0; 2674 } 2675 2676 /* 2677 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2678 * 2679 * Return a positive depth if (a prefix of) the given range is allocated 2680 * in any image between BASE and TOP (BASE is only included if include_base 2681 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2682 * BASE can be NULL to check if the given offset is allocated in any 2683 * image of the chain. Return 0 otherwise, or negative errno on 2684 * failure. 2685 * 2686 * 'pnum' is set to the number of bytes (including and immediately 2687 * following the specified offset) that are known to be in the same 2688 * allocated/unallocated state. Note that a subsequent call starting 2689 * at 'offset + *pnum' may return the same allocation status (in other 2690 * words, the result is not necessarily the maximum possible range); 2691 * but 'pnum' will only be 0 when end of file is reached. 2692 */ 2693 int bdrv_is_allocated_above(BlockDriverState *top, 2694 BlockDriverState *base, 2695 bool include_base, int64_t offset, 2696 int64_t bytes, int64_t *pnum) 2697 { 2698 int depth; 2699 int ret; 2700 IO_CODE(); 2701 2702 ret = bdrv_common_block_status_above(top, base, include_base, false, 2703 offset, bytes, pnum, NULL, NULL, 2704 &depth); 2705 if (ret < 0) { 2706 return ret; 2707 } 2708 2709 if (ret & BDRV_BLOCK_ALLOCATED) { 2710 return depth; 2711 } 2712 return 0; 2713 } 2714 2715 int coroutine_fn 2716 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2717 { 2718 BlockDriver *drv = bs->drv; 2719 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2720 int ret; 2721 IO_CODE(); 2722 assert_bdrv_graph_readable(); 2723 2724 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2725 if (ret < 0) { 2726 return ret; 2727 } 2728 2729 if (!drv) { 2730 return -ENOMEDIUM; 2731 } 2732 2733 bdrv_inc_in_flight(bs); 2734 2735 if (drv->bdrv_co_load_vmstate) { 2736 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); 2737 } else if (child_bs) { 2738 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2739 } else { 2740 ret = -ENOTSUP; 2741 } 2742 2743 bdrv_dec_in_flight(bs); 2744 2745 return ret; 2746 } 2747 2748 int coroutine_fn 2749 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2750 { 2751 BlockDriver *drv = bs->drv; 2752 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2753 int ret; 2754 IO_CODE(); 2755 assert_bdrv_graph_readable(); 2756 2757 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2758 if (ret < 0) { 2759 return ret; 2760 } 2761 2762 if (!drv) { 2763 return -ENOMEDIUM; 2764 } 2765 2766 bdrv_inc_in_flight(bs); 2767 2768 if (drv->bdrv_co_save_vmstate) { 2769 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); 2770 } else if (child_bs) { 2771 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2772 } else { 2773 ret = -ENOTSUP; 2774 } 2775 2776 bdrv_dec_in_flight(bs); 2777 2778 return ret; 2779 } 2780 2781 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2782 int64_t pos, int size) 2783 { 2784 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2785 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2786 IO_CODE(); 2787 2788 return ret < 0 ? ret : size; 2789 } 2790 2791 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2792 int64_t pos, int size) 2793 { 2794 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2795 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2796 IO_CODE(); 2797 2798 return ret < 0 ? ret : size; 2799 } 2800 2801 /**************************************************************/ 2802 /* async I/Os */ 2803 2804 void bdrv_aio_cancel(BlockAIOCB *acb) 2805 { 2806 IO_CODE(); 2807 qemu_aio_ref(acb); 2808 bdrv_aio_cancel_async(acb); 2809 while (acb->refcnt > 1) { 2810 if (acb->aiocb_info->get_aio_context) { 2811 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2812 } else if (acb->bs) { 2813 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2814 * assert that we're not using an I/O thread. Thread-safe 2815 * code should use bdrv_aio_cancel_async exclusively. 2816 */ 2817 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2818 aio_poll(bdrv_get_aio_context(acb->bs), true); 2819 } else { 2820 abort(); 2821 } 2822 } 2823 qemu_aio_unref(acb); 2824 } 2825 2826 /* Async version of aio cancel. The caller is not blocked if the acb implements 2827 * cancel_async, otherwise we do nothing and let the request normally complete. 2828 * In either case the completion callback must be called. */ 2829 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2830 { 2831 IO_CODE(); 2832 if (acb->aiocb_info->cancel_async) { 2833 acb->aiocb_info->cancel_async(acb); 2834 } 2835 } 2836 2837 /**************************************************************/ 2838 /* Coroutine block device emulation */ 2839 2840 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2841 { 2842 BdrvChild *primary_child = bdrv_primary_child(bs); 2843 BdrvChild *child; 2844 int current_gen; 2845 int ret = 0; 2846 IO_CODE(); 2847 2848 assert_bdrv_graph_readable(); 2849 bdrv_inc_in_flight(bs); 2850 2851 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) || 2852 bdrv_is_sg(bs)) { 2853 goto early_exit; 2854 } 2855 2856 qemu_co_mutex_lock(&bs->reqs_lock); 2857 current_gen = qatomic_read(&bs->write_gen); 2858 2859 /* Wait until any previous flushes are completed */ 2860 while (bs->active_flush_req) { 2861 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2862 } 2863 2864 /* Flushes reach this point in nondecreasing current_gen order. */ 2865 bs->active_flush_req = true; 2866 qemu_co_mutex_unlock(&bs->reqs_lock); 2867 2868 /* Write back all layers by calling one driver function */ 2869 if (bs->drv->bdrv_co_flush) { 2870 ret = bs->drv->bdrv_co_flush(bs); 2871 goto out; 2872 } 2873 2874 /* Write back cached data to the OS even with cache=unsafe */ 2875 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); 2876 if (bs->drv->bdrv_co_flush_to_os) { 2877 ret = bs->drv->bdrv_co_flush_to_os(bs); 2878 if (ret < 0) { 2879 goto out; 2880 } 2881 } 2882 2883 /* But don't actually force it to the disk with cache=unsafe */ 2884 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2885 goto flush_children; 2886 } 2887 2888 /* Check if we really need to flush anything */ 2889 if (bs->flushed_gen == current_gen) { 2890 goto flush_children; 2891 } 2892 2893 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); 2894 if (!bs->drv) { 2895 /* bs->drv->bdrv_co_flush() might have ejected the BDS 2896 * (even in case of apparent success) */ 2897 ret = -ENOMEDIUM; 2898 goto out; 2899 } 2900 if (bs->drv->bdrv_co_flush_to_disk) { 2901 ret = bs->drv->bdrv_co_flush_to_disk(bs); 2902 } else if (bs->drv->bdrv_aio_flush) { 2903 BlockAIOCB *acb; 2904 CoroutineIOCompletion co = { 2905 .coroutine = qemu_coroutine_self(), 2906 }; 2907 2908 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 2909 if (acb == NULL) { 2910 ret = -EIO; 2911 } else { 2912 qemu_coroutine_yield(); 2913 ret = co.ret; 2914 } 2915 } else { 2916 /* 2917 * Some block drivers always operate in either writethrough or unsafe 2918 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 2919 * know how the server works (because the behaviour is hardcoded or 2920 * depends on server-side configuration), so we can't ensure that 2921 * everything is safe on disk. Returning an error doesn't work because 2922 * that would break guests even if the server operates in writethrough 2923 * mode. 2924 * 2925 * Let's hope the user knows what he's doing. 2926 */ 2927 ret = 0; 2928 } 2929 2930 if (ret < 0) { 2931 goto out; 2932 } 2933 2934 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 2935 * in the case of cache=unsafe, so there are no useless flushes. 2936 */ 2937 flush_children: 2938 ret = 0; 2939 QLIST_FOREACH(child, &bs->children, next) { 2940 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 2941 int this_child_ret = bdrv_co_flush(child->bs); 2942 if (!ret) { 2943 ret = this_child_ret; 2944 } 2945 } 2946 } 2947 2948 out: 2949 /* Notify any pending flushes that we have completed */ 2950 if (ret == 0) { 2951 bs->flushed_gen = current_gen; 2952 } 2953 2954 qemu_co_mutex_lock(&bs->reqs_lock); 2955 bs->active_flush_req = false; 2956 /* Return value is ignored - it's ok if wait queue is empty */ 2957 qemu_co_queue_next(&bs->flush_queue); 2958 qemu_co_mutex_unlock(&bs->reqs_lock); 2959 2960 early_exit: 2961 bdrv_dec_in_flight(bs); 2962 return ret; 2963 } 2964 2965 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2966 int64_t bytes) 2967 { 2968 BdrvTrackedRequest req; 2969 int ret; 2970 int64_t max_pdiscard; 2971 int head, tail, align; 2972 BlockDriverState *bs = child->bs; 2973 IO_CODE(); 2974 assert_bdrv_graph_readable(); 2975 2976 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { 2977 return -ENOMEDIUM; 2978 } 2979 2980 if (bdrv_has_readonly_bitmaps(bs)) { 2981 return -EPERM; 2982 } 2983 2984 ret = bdrv_check_request(offset, bytes, NULL); 2985 if (ret < 0) { 2986 return ret; 2987 } 2988 2989 /* Do nothing if disabled. */ 2990 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2991 return 0; 2992 } 2993 2994 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2995 return 0; 2996 } 2997 2998 /* Invalidate the cached block-status data range if this discard overlaps */ 2999 bdrv_bsc_invalidate_range(bs, offset, bytes); 3000 3001 /* Discard is advisory, but some devices track and coalesce 3002 * unaligned requests, so we must pass everything down rather than 3003 * round here. Still, most devices will just silently ignore 3004 * unaligned requests (by returning -ENOTSUP), so we must fragment 3005 * the request accordingly. */ 3006 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3007 assert(align % bs->bl.request_alignment == 0); 3008 head = offset % align; 3009 tail = (offset + bytes) % align; 3010 3011 bdrv_inc_in_flight(bs); 3012 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3013 3014 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3015 if (ret < 0) { 3016 goto out; 3017 } 3018 3019 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), 3020 align); 3021 assert(max_pdiscard >= bs->bl.request_alignment); 3022 3023 while (bytes > 0) { 3024 int64_t num = bytes; 3025 3026 if (head) { 3027 /* Make small requests to get to alignment boundaries. */ 3028 num = MIN(bytes, align - head); 3029 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3030 num %= bs->bl.request_alignment; 3031 } 3032 head = (head + num) % align; 3033 assert(num < max_pdiscard); 3034 } else if (tail) { 3035 if (num > align) { 3036 /* Shorten the request to the last aligned cluster. */ 3037 num -= tail; 3038 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3039 tail > bs->bl.request_alignment) { 3040 tail %= bs->bl.request_alignment; 3041 num -= tail; 3042 } 3043 } 3044 /* limit request size */ 3045 if (num > max_pdiscard) { 3046 num = max_pdiscard; 3047 } 3048 3049 if (!bs->drv) { 3050 ret = -ENOMEDIUM; 3051 goto out; 3052 } 3053 if (bs->drv->bdrv_co_pdiscard) { 3054 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3055 } else { 3056 BlockAIOCB *acb; 3057 CoroutineIOCompletion co = { 3058 .coroutine = qemu_coroutine_self(), 3059 }; 3060 3061 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3062 bdrv_co_io_em_complete, &co); 3063 if (acb == NULL) { 3064 ret = -EIO; 3065 goto out; 3066 } else { 3067 qemu_coroutine_yield(); 3068 ret = co.ret; 3069 } 3070 } 3071 if (ret && ret != -ENOTSUP) { 3072 goto out; 3073 } 3074 3075 offset += num; 3076 bytes -= num; 3077 } 3078 ret = 0; 3079 out: 3080 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3081 tracked_request_end(&req); 3082 bdrv_dec_in_flight(bs); 3083 return ret; 3084 } 3085 3086 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3087 { 3088 BlockDriver *drv = bs->drv; 3089 CoroutineIOCompletion co = { 3090 .coroutine = qemu_coroutine_self(), 3091 }; 3092 BlockAIOCB *acb; 3093 IO_CODE(); 3094 assert_bdrv_graph_readable(); 3095 3096 bdrv_inc_in_flight(bs); 3097 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3098 co.ret = -ENOTSUP; 3099 goto out; 3100 } 3101 3102 if (drv->bdrv_co_ioctl) { 3103 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3104 } else { 3105 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3106 if (!acb) { 3107 co.ret = -ENOTSUP; 3108 goto out; 3109 } 3110 qemu_coroutine_yield(); 3111 } 3112 out: 3113 bdrv_dec_in_flight(bs); 3114 return co.ret; 3115 } 3116 3117 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3118 { 3119 IO_CODE(); 3120 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3121 } 3122 3123 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3124 { 3125 IO_CODE(); 3126 return memset(qemu_blockalign(bs, size), 0, size); 3127 } 3128 3129 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3130 { 3131 size_t align = bdrv_opt_mem_align(bs); 3132 IO_CODE(); 3133 3134 /* Ensure that NULL is never returned on success */ 3135 assert(align > 0); 3136 if (size == 0) { 3137 size = align; 3138 } 3139 3140 return qemu_try_memalign(align, size); 3141 } 3142 3143 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3144 { 3145 void *mem = qemu_try_blockalign(bs, size); 3146 IO_CODE(); 3147 3148 if (mem) { 3149 memset(mem, 0, size); 3150 } 3151 3152 return mem; 3153 } 3154 3155 void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs) 3156 { 3157 BdrvChild *child; 3158 IO_CODE(); 3159 assert_bdrv_graph_readable(); 3160 3161 QLIST_FOREACH(child, &bs->children, next) { 3162 bdrv_co_io_plug(child->bs); 3163 } 3164 3165 if (qatomic_fetch_inc(&bs->io_plugged) == 0) { 3166 BlockDriver *drv = bs->drv; 3167 if (drv && drv->bdrv_co_io_plug) { 3168 drv->bdrv_co_io_plug(bs); 3169 } 3170 } 3171 } 3172 3173 void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs) 3174 { 3175 BdrvChild *child; 3176 IO_CODE(); 3177 assert_bdrv_graph_readable(); 3178 3179 assert(bs->io_plugged); 3180 if (qatomic_fetch_dec(&bs->io_plugged) == 1) { 3181 BlockDriver *drv = bs->drv; 3182 if (drv && drv->bdrv_co_io_unplug) { 3183 drv->bdrv_co_io_unplug(bs); 3184 } 3185 } 3186 3187 QLIST_FOREACH(child, &bs->children, next) { 3188 bdrv_co_io_unplug(child->bs); 3189 } 3190 } 3191 3192 /* Helper that undoes bdrv_register_buf() when it fails partway through */ 3193 static void GRAPH_RDLOCK 3194 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, 3195 BdrvChild *final_child) 3196 { 3197 BdrvChild *child; 3198 3199 GLOBAL_STATE_CODE(); 3200 assert_bdrv_graph_readable(); 3201 3202 QLIST_FOREACH(child, &bs->children, next) { 3203 if (child == final_child) { 3204 break; 3205 } 3206 3207 bdrv_unregister_buf(child->bs, host, size); 3208 } 3209 3210 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3211 bs->drv->bdrv_unregister_buf(bs, host, size); 3212 } 3213 } 3214 3215 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, 3216 Error **errp) 3217 { 3218 BdrvChild *child; 3219 3220 GLOBAL_STATE_CODE(); 3221 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3222 3223 if (bs->drv && bs->drv->bdrv_register_buf) { 3224 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { 3225 return false; 3226 } 3227 } 3228 QLIST_FOREACH(child, &bs->children, next) { 3229 if (!bdrv_register_buf(child->bs, host, size, errp)) { 3230 bdrv_register_buf_rollback(bs, host, size, child); 3231 return false; 3232 } 3233 } 3234 return true; 3235 } 3236 3237 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) 3238 { 3239 BdrvChild *child; 3240 3241 GLOBAL_STATE_CODE(); 3242 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3243 3244 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3245 bs->drv->bdrv_unregister_buf(bs, host, size); 3246 } 3247 QLIST_FOREACH(child, &bs->children, next) { 3248 bdrv_unregister_buf(child->bs, host, size); 3249 } 3250 } 3251 3252 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal( 3253 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3254 int64_t dst_offset, int64_t bytes, 3255 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3256 bool recurse_src) 3257 { 3258 BdrvTrackedRequest req; 3259 int ret; 3260 assert_bdrv_graph_readable(); 3261 3262 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3263 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3264 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3265 assert(!(read_flags & BDRV_REQ_NO_WAIT)); 3266 assert(!(write_flags & BDRV_REQ_NO_WAIT)); 3267 3268 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { 3269 return -ENOMEDIUM; 3270 } 3271 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3272 if (ret) { 3273 return ret; 3274 } 3275 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3276 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3277 } 3278 3279 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) { 3280 return -ENOMEDIUM; 3281 } 3282 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3283 if (ret) { 3284 return ret; 3285 } 3286 3287 if (!src->bs->drv->bdrv_co_copy_range_from 3288 || !dst->bs->drv->bdrv_co_copy_range_to 3289 || src->bs->encrypted || dst->bs->encrypted) { 3290 return -ENOTSUP; 3291 } 3292 3293 if (recurse_src) { 3294 bdrv_inc_in_flight(src->bs); 3295 tracked_request_begin(&req, src->bs, src_offset, bytes, 3296 BDRV_TRACKED_READ); 3297 3298 /* BDRV_REQ_SERIALISING is only for write operation */ 3299 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3300 bdrv_wait_serialising_requests(&req); 3301 3302 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3303 src, src_offset, 3304 dst, dst_offset, 3305 bytes, 3306 read_flags, write_flags); 3307 3308 tracked_request_end(&req); 3309 bdrv_dec_in_flight(src->bs); 3310 } else { 3311 bdrv_inc_in_flight(dst->bs); 3312 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3313 BDRV_TRACKED_WRITE); 3314 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3315 write_flags); 3316 if (!ret) { 3317 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3318 src, src_offset, 3319 dst, dst_offset, 3320 bytes, 3321 read_flags, write_flags); 3322 } 3323 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3324 tracked_request_end(&req); 3325 bdrv_dec_in_flight(dst->bs); 3326 } 3327 3328 return ret; 3329 } 3330 3331 /* Copy range from @src to @dst. 3332 * 3333 * See the comment of bdrv_co_copy_range for the parameter and return value 3334 * semantics. */ 3335 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3336 BdrvChild *dst, int64_t dst_offset, 3337 int64_t bytes, 3338 BdrvRequestFlags read_flags, 3339 BdrvRequestFlags write_flags) 3340 { 3341 IO_CODE(); 3342 assert_bdrv_graph_readable(); 3343 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3344 read_flags, write_flags); 3345 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3346 bytes, read_flags, write_flags, true); 3347 } 3348 3349 /* Copy range from @src to @dst. 3350 * 3351 * See the comment of bdrv_co_copy_range for the parameter and return value 3352 * semantics. */ 3353 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3354 BdrvChild *dst, int64_t dst_offset, 3355 int64_t bytes, 3356 BdrvRequestFlags read_flags, 3357 BdrvRequestFlags write_flags) 3358 { 3359 IO_CODE(); 3360 assert_bdrv_graph_readable(); 3361 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3362 read_flags, write_flags); 3363 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3364 bytes, read_flags, write_flags, false); 3365 } 3366 3367 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3368 BdrvChild *dst, int64_t dst_offset, 3369 int64_t bytes, BdrvRequestFlags read_flags, 3370 BdrvRequestFlags write_flags) 3371 { 3372 IO_CODE(); 3373 assert_bdrv_graph_readable(); 3374 3375 return bdrv_co_copy_range_from(src, src_offset, 3376 dst, dst_offset, 3377 bytes, read_flags, write_flags); 3378 } 3379 3380 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3381 { 3382 BdrvChild *c; 3383 QLIST_FOREACH(c, &bs->parents, next_parent) { 3384 if (c->klass->resize) { 3385 c->klass->resize(c); 3386 } 3387 } 3388 } 3389 3390 /** 3391 * Truncate file to 'offset' bytes (needed only for file protocols) 3392 * 3393 * If 'exact' is true, the file must be resized to exactly the given 3394 * 'offset'. Otherwise, it is sufficient for the node to be at least 3395 * 'offset' bytes in length. 3396 */ 3397 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3398 PreallocMode prealloc, BdrvRequestFlags flags, 3399 Error **errp) 3400 { 3401 BlockDriverState *bs = child->bs; 3402 BdrvChild *filtered, *backing; 3403 BlockDriver *drv = bs->drv; 3404 BdrvTrackedRequest req; 3405 int64_t old_size, new_bytes; 3406 int ret; 3407 IO_CODE(); 3408 assert_bdrv_graph_readable(); 3409 3410 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3411 if (!drv) { 3412 error_setg(errp, "No medium inserted"); 3413 return -ENOMEDIUM; 3414 } 3415 if (offset < 0) { 3416 error_setg(errp, "Image size cannot be negative"); 3417 return -EINVAL; 3418 } 3419 3420 ret = bdrv_check_request(offset, 0, errp); 3421 if (ret < 0) { 3422 return ret; 3423 } 3424 3425 old_size = bdrv_getlength(bs); 3426 if (old_size < 0) { 3427 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3428 return old_size; 3429 } 3430 3431 if (bdrv_is_read_only(bs)) { 3432 error_setg(errp, "Image is read-only"); 3433 return -EACCES; 3434 } 3435 3436 if (offset > old_size) { 3437 new_bytes = offset - old_size; 3438 } else { 3439 new_bytes = 0; 3440 } 3441 3442 bdrv_inc_in_flight(bs); 3443 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3444 BDRV_TRACKED_TRUNCATE); 3445 3446 /* If we are growing the image and potentially using preallocation for the 3447 * new area, we need to make sure that no write requests are made to it 3448 * concurrently or they might be overwritten by preallocation. */ 3449 if (new_bytes) { 3450 bdrv_make_request_serialising(&req, 1); 3451 } 3452 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3453 0); 3454 if (ret < 0) { 3455 error_setg_errno(errp, -ret, 3456 "Failed to prepare request for truncation"); 3457 goto out; 3458 } 3459 3460 filtered = bdrv_filter_child(bs); 3461 backing = bdrv_cow_child(bs); 3462 3463 /* 3464 * If the image has a backing file that is large enough that it would 3465 * provide data for the new area, we cannot leave it unallocated because 3466 * then the backing file content would become visible. Instead, zero-fill 3467 * the new area. 3468 * 3469 * Note that if the image has a backing file, but was opened without the 3470 * backing file, taking care of keeping things consistent with that backing 3471 * file is the user's responsibility. 3472 */ 3473 if (new_bytes && backing) { 3474 int64_t backing_len; 3475 3476 backing_len = bdrv_co_getlength(backing->bs); 3477 if (backing_len < 0) { 3478 ret = backing_len; 3479 error_setg_errno(errp, -ret, "Could not get backing file size"); 3480 goto out; 3481 } 3482 3483 if (backing_len > old_size) { 3484 flags |= BDRV_REQ_ZERO_WRITE; 3485 } 3486 } 3487 3488 if (drv->bdrv_co_truncate) { 3489 if (flags & ~bs->supported_truncate_flags) { 3490 error_setg(errp, "Block driver does not support requested flags"); 3491 ret = -ENOTSUP; 3492 goto out; 3493 } 3494 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3495 } else if (filtered) { 3496 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3497 } else { 3498 error_setg(errp, "Image format driver does not support resize"); 3499 ret = -ENOTSUP; 3500 goto out; 3501 } 3502 if (ret < 0) { 3503 goto out; 3504 } 3505 3506 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3507 if (ret < 0) { 3508 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3509 } else { 3510 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3511 } 3512 /* 3513 * It's possible that truncation succeeded but bdrv_refresh_total_sectors 3514 * failed, but the latter doesn't affect how we should finish the request. 3515 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. 3516 */ 3517 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3518 3519 out: 3520 tracked_request_end(&req); 3521 bdrv_dec_in_flight(bs); 3522 3523 return ret; 3524 } 3525 3526 void bdrv_cancel_in_flight(BlockDriverState *bs) 3527 { 3528 GLOBAL_STATE_CODE(); 3529 if (!bs || !bs->drv) { 3530 return; 3531 } 3532 3533 if (bs->drv->bdrv_cancel_in_flight) { 3534 bs->drv->bdrv_cancel_in_flight(bs); 3535 } 3536 } 3537 3538 int coroutine_fn 3539 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, 3540 QEMUIOVector *qiov, size_t qiov_offset) 3541 { 3542 BlockDriverState *bs = child->bs; 3543 BlockDriver *drv = bs->drv; 3544 int ret; 3545 IO_CODE(); 3546 assert_bdrv_graph_readable(); 3547 3548 if (!drv) { 3549 return -ENOMEDIUM; 3550 } 3551 3552 if (!drv->bdrv_co_preadv_snapshot) { 3553 return -ENOTSUP; 3554 } 3555 3556 bdrv_inc_in_flight(bs); 3557 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); 3558 bdrv_dec_in_flight(bs); 3559 3560 return ret; 3561 } 3562 3563 int coroutine_fn 3564 bdrv_co_snapshot_block_status(BlockDriverState *bs, 3565 bool want_zero, int64_t offset, int64_t bytes, 3566 int64_t *pnum, int64_t *map, 3567 BlockDriverState **file) 3568 { 3569 BlockDriver *drv = bs->drv; 3570 int ret; 3571 IO_CODE(); 3572 assert_bdrv_graph_readable(); 3573 3574 if (!drv) { 3575 return -ENOMEDIUM; 3576 } 3577 3578 if (!drv->bdrv_co_snapshot_block_status) { 3579 return -ENOTSUP; 3580 } 3581 3582 bdrv_inc_in_flight(bs); 3583 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, 3584 pnum, map, file); 3585 bdrv_dec_in_flight(bs); 3586 3587 return ret; 3588 } 3589 3590 int coroutine_fn 3591 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) 3592 { 3593 BlockDriver *drv = bs->drv; 3594 int ret; 3595 IO_CODE(); 3596 assert_bdrv_graph_readable(); 3597 3598 if (!drv) { 3599 return -ENOMEDIUM; 3600 } 3601 3602 if (!drv->bdrv_co_pdiscard_snapshot) { 3603 return -ENOTSUP; 3604 } 3605 3606 bdrv_inc_in_flight(bs); 3607 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); 3608 bdrv_dec_in_flight(bs); 3609 3610 return ret; 3611 } 3612