/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}
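/*
 * Example (illustrative sketch, not part of the build): a parent that
 * honours the quiesce contract above might implement the BdrvChildClass
 * drain hooks as follows. The drained_begin/drained_poll/drained_end
 * members are the real callbacks; the ExampleParent type and its fields
 * are hypothetical.
 *
 *     static void example_drained_begin(BdrvChild *c)
 *     {
 *         ExampleParent *p = c->opaque;
 *         p->quiesced = true;         stop submitting new requests
 *     }
 *
 *     static bool example_drained_poll(BdrvChild *c)
 *     {
 *         ExampleParent *p = c->opaque;
 *         return p->in_flight > 0;    true while requests are pending
 *     }
 *
 *     static void example_drained_end(BdrvChild *c)
 *     {
 *         ExampleParent *p = c->opaque;
 *         p->quiesced = false;        resume submitting requests
 *     }
 */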
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
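/*
 * Worked example for bdrv_merge_limits() (hypothetical numbers): a
 * parent with max_transfer = 0 (no limit) merging a child with
 * max_transfer = 128K ends up with 128K, because MIN_NON_ZERO() treats
 * zero as "unlimited" rather than as the minimum. Alignments combine
 * with MAX() instead, so opt_mem_alignment = 512 merged with 4096
 * yields 4096: the stricter requirement wins.
 */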
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    IO_OR_GS_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}
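/*
 * Sketch of the control flow implemented by bdrv_co_yield_to_drain()
 * below (assuming a coroutine C running in some AioContext):
 *
 *     coroutine C                        BH in that AioContext
 *     -----------                        ---------------------
 *     schedule bdrv_co_drain_bh_cb()
 *     qemu_coroutine_yield()
 *                                        bdrv_do_drained_begin/end()
 *                                        data->done = true
 *                                        aio_co_wake(C)
 *     resumes; assert(data.done)
 *
 * Bouncing through a bottom half lets coroutines queued by
 * aio_co_enter() run before the drain logic starts polling.
 */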
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
        aio_enable_external(bdrv_get_aio_context(bs));
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
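/*
 * Typical usage (sketch): a caller that needs bs quiescent while it
 * manipulates the node brackets the work in a drained section:
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests start; in-flight requests have completed ...
 *     bdrv_drained_end(bs);
 *
 * Sections nest: bs->quiesce_counter tracks the depth, and bdrv_drain()
 * above is simply an empty drained section.
 */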
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish might never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}

void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish might never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish might never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /* aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
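/*
 * Sketch of the lifecycle (as used by the read and write paths later in
 * this file):
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
 *     ... issue the I/O, possibly after making req serialising ...
 *     tracked_request_end(&req);
 *
 * While the request is on bs->tracked_requests, overlapping serialising
 * requests can find it and wait on req.wait_queue.
 */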
/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs,
                                         int64_t offset, int64_t bytes,
                                         int64_t *cluster_offset,
                                         int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static coroutine_fn int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
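/*
 * Worked example for tracked_request_set_serialising() (hypothetical
 * numbers): with req->offset = 66560, req->bytes = 4096 and
 * align = 65536 (one cluster), the serialised window becomes
 *
 *     overlap_offset = 66560 & ~(65536 - 1)           = 65536
 *     overlap_bytes  = ROUND_UP(70656, 65536) - 65536 = 65536
 *
 * i.e. the whole cluster is serialised, not just the 4K actually
 * written, so allocating writes to the same cluster cannot race.
 */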
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
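/*
 * Sketch of bdrv_make_zero() on a sparse image (hypothetical status):
 * extents that bdrv_block_status() already reports as BDRV_BLOCK_ZERO
 * are merely skipped, so only the extents holding data trigger an
 * explicit bdrv_pwrite_zeroes() call. On a fully-zero image the loop
 * issues no writes at all.
 */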
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();

    assume_graph_lock(); /* FIXME */

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
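/*
 * Sketch of how CoroutineIOCompletion bridges the callback-based AIO
 * driver interface and coroutines (the pattern used by the read and
 * write paths below): submit with bdrv_co_io_em_complete as the
 * completion callback, yield, and read the result after being woken:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *
 *     acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
 *                                bdrv_co_io_em_complete, &co);
 *     if (acb) {
 *         qemu_coroutine_yield();
 *         ret = co.ret;           (set by bdrv_co_io_em_complete())
 *     }
 */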
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset,
                                            BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assume_graph_lock(); /* FIXME */

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    assume_graph_lock(); /* FIXME */

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them.
         */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
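/*
 * Example of the fragmentation above (hypothetical limits): with
 * bl.max_transfer = 1M, a 3M read at offset 0 on a large enough image
 * is forwarded as three 1M bdrv_driver_preadv() calls. If the image
 * ended at 2.5M instead, max_bytes would cap the driver reads and the
 * final 512K of the qiov would be zeroed with qemu_iovec_memset(),
 * implementing the documented "zeroing after EOF".
 */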
/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf               ... )                 [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
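/*
 * Worked example for bdrv_init_padding() (hypothetical numbers,
 * request_alignment = 512):
 *
 *   offset = 1000, bytes = 2000:
 *       head = 1000 & 511 = 488, tail = 512 - (3000 & 511) = 72,
 *       sum = 2560 > align, so buf_len = 2 * 512 and merge_reads is
 *       false (head and tail use separate align-sized chunks).
 *
 *   offset = 100, bytes = 100:
 *       head = 100, tail = 312, sum = 512 == buf_len = align, so
 *       merge_reads is true and tail_buf == buf: one aligned read
 *       covers both paddings.
 */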
static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child,
                                              BdrvTrackedRequest *req,
                                              BdrvRequestPadding *pad,
                                              bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
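/*
 * Example of the in-out convention above (hypothetical numbers,
 * request_alignment = 512): calling bdrv_pad_request() with
 * *offset = 1000, *bytes = 2000 returns with *offset = 512,
 * *bytes = 2560, *qiov = &pad->local_qiov and *qiov_offset = 0; the
 * padded local_qiov stitches together head padding, the caller's
 * vector, and tail padding.
 */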
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver has
         * a special meaning for zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass such a request to
         * the driver due to request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_destroy(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    assume_graph_lock(); /* FIXME */

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
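/*
 * Example of the alignment handling above (hypothetical limits,
 * pwrite_zeroes_alignment = 64K): zeroing offset = 62K, bytes = 70K is
 * issued as three driver calls:
 *
 *     62K..64K    2K unaligned head
 *     64K..128K   64K aligned bulk
 *     128K..132K  4K unaligned tail
 *
 * so the driver only ever sees one sub-alignment request at each end.
 */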
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    assume_graph_lock(); /* FIXME */

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }

        /* Can't use optimization hint with bufferless zero write */
        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
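/*
 * Example of the FUA handling above (hypothetical limits): a 3M
 * BDRV_REQ_FUA write on a driver without native FUA support and
 * max_transfer = 1M is split into three 1M chunks. The first two are
 * submitted with the FUA flag cleared; only the final chunk keeps it,
 * so bdrv_driver_pwritev() issues exactly one emulating flush instead
 * of one per fragment.
 */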
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;
    bool padded = false;
    IO_CODE();

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    } else {
        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    }
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver
         * assigns special meaning to zero-length requests (as
         * qcow2_co_pwritev_compressed_part does), we cannot pass such a
         * request down to the driver because of request_alignment.
         *
         * Still, there is no reason to return an error if someone happens
         * to issue an unaligned zero-length write.
         */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad the request for the following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does its own alignment, so we only
         * pad here when the ZERO flag is absent.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                               &padded, &flags);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        assert(!padded);
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * The request was unaligned to request_alignment and therefore
         * padded. We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
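
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * running in coroutine context with an attached BdrvChild might submit a
 * buffered write like this; "child", "buf", "offset" and "len" are
 * assumptions of the sketch:
 *
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
 *     int ret = bdrv_co_pwritev(child, offset, len, &qiov, 0);
 *     if (ret < 0) {
 *         return ret;
 *     }
 */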
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    IO_CODE();
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
    assert_bdrv_graph_readable();

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
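
/*
 * Usage sketch (illustrative): zeroing a region with permission to unmap.
 * BDRV_REQ_MAY_UNMAP is silently dropped above when the node was opened
 * without BDRV_O_UNMAP, so callers may pass it unconditionally:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, bytes,
 *                                 BDRV_REQ_MAY_UNMAP);
 */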
/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; creating a new flush
     * request for stopping the VM may break determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Returns the allocation status of the specified range.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their bytes are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state. Note that a second call starting
 * at the original offset plus the returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure. Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
                     int64_t offset, int64_t bytes,
                     int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    assert_bdrv_graph_readable();
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        /*
         * Use the block-status cache only for protocol nodes: Format
         * drivers are generally quick to inquire the status, but protocol
         * drivers often need to get information from outside of qemu, so
         * we do not have control over the actual implementation. There
         * have been cases where inquiring the status took an unreasonably
         * long time, and we can do nothing in qemu to fix it.
         * This is especially problematic for images with large data areas,
         * because finding the few holes in them and giving them special
         * treatment does not gain much performance. Therefore, we try to
         * cache the last-identified data region.
         *
         * Second, limiting ourselves to protocol nodes allows us to assume
         * the block status for data regions to be DATA | OFFSET_VALID, and
         * that the host offset is the same as the guest offset.
         *
         * Note that it is possible that external writers zero parts of
         * the cached regions without the cache being invalidated, and so
         * we may report zeroes as data. This is not catastrophic,
         * however, because reporting zeroes as data is fine.
         */
        if (QLIST_EMPTY(&bs->children) &&
            bdrv_bsc_is_data(bs, aligned_offset, pnum))
        {
            ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
            local_file = bs;
            local_map = aligned_offset;
        } else {
            ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                                aligned_bytes, pnum, &local_map,
                                                &local_file);

            /*
             * Note that checking QLIST_EMPTY(&bs->children) is also done when
             * the cache is queried above. Technically, we do not need to check
             * it here; the worst that can happen is that we fill the cache for
             * non-protocol nodes, and then it is never used. However, filling
             * the cache requires an RCU update, so double-check here to avoid
             * such an update if possible.
             *
             * Check want_zero, because we only want to update the cache when we
             * have accurate information about what is zero and what is data.
             */
            if (want_zero &&
                ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
                QLIST_EMPTY(&bs->children))
            {
                /*
                 * When a protocol driver reports BLOCK_OFFSET_VALID, the
                 * returned local_map value must be the same as the offset we
                 * have passed (aligned_offset), and local_file must be the
                 * node itself.
                 * Assert this, because we follow this rule when reading from
                 * the cache (see the `local_file = bs` and
                 * `local_map = aligned_offset` assignments above), and the
                 * result the cache delivers must be the same as the driver
                 * would deliver.
                 */
                assert(local_file == bs);
                assert(local_map == aligned_offset);
                bdrv_bsc_fill(bs, aligned_offset, *pnum);
            }
        }
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to the original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /*
             * Ignore errors. This is just providing extra information;
             * it is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;
    IO_CODE();

    assert(!include_base || base); /* Can't include a NULL base */
    assert_bdrv_graph_readable();

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if
             * they were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF in ret, as the upper layer may
             * be larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
             * function, see below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status; we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which may
             * be larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
             * function, see below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, the [offset, offset + *pnum) region is unallocated on this
         * layer; continue diving into the backing chain.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}
2557 */ 2558 assert(*pnum <= bytes); 2559 bytes = *pnum; 2560 } 2561 2562 if (offset + *pnum == eof) { 2563 ret |= BDRV_BLOCK_EOF; 2564 } 2565 2566 return ret; 2567 } 2568 2569 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2570 BlockDriverState *base, 2571 int64_t offset, int64_t bytes, 2572 int64_t *pnum, int64_t *map, 2573 BlockDriverState **file) 2574 { 2575 IO_CODE(); 2576 return bdrv_co_common_block_status_above(bs, base, false, true, offset, 2577 bytes, pnum, map, file, NULL); 2578 } 2579 2580 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2581 int64_t offset, int64_t bytes, int64_t *pnum, 2582 int64_t *map, BlockDriverState **file) 2583 { 2584 IO_CODE(); 2585 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2586 pnum, map, file, NULL); 2587 } 2588 2589 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2590 int64_t *pnum, int64_t *map, BlockDriverState **file) 2591 { 2592 IO_CODE(); 2593 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2594 offset, bytes, pnum, map, file); 2595 } 2596 2597 /* 2598 * Check @bs (and its backing chain) to see if the range defined 2599 * by @offset and @bytes is known to read as zeroes. 2600 * Return 1 if that is the case, 0 otherwise and -errno on error. 2601 * This test is meant to be fast rather than accurate so returning 0 2602 * does not guarantee non-zero data. 2603 */ 2604 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2605 int64_t bytes) 2606 { 2607 int ret; 2608 int64_t pnum = bytes; 2609 IO_CODE(); 2610 2611 if (!bytes) { 2612 return 1; 2613 } 2614 2615 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, 2616 bytes, &pnum, NULL, NULL, NULL); 2617 2618 if (ret < 0) { 2619 return ret; 2620 } 2621 2622 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2623 } 2624 2625 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, 2626 int64_t bytes, int64_t *pnum) 2627 { 2628 int ret; 2629 int64_t dummy; 2630 IO_CODE(); 2631 2632 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, 2633 bytes, pnum ? pnum : &dummy, NULL, 2634 NULL, NULL); 2635 if (ret < 0) { 2636 return ret; 2637 } 2638 return !!(ret & BDRV_BLOCK_ALLOCATED); 2639 } 2640 2641 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, 2642 int64_t *pnum) 2643 { 2644 int ret; 2645 int64_t dummy; 2646 IO_CODE(); 2647 2648 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2649 bytes, pnum ? pnum : &dummy, NULL, 2650 NULL, NULL); 2651 if (ret < 0) { 2652 return ret; 2653 } 2654 return !!(ret & BDRV_BLOCK_ALLOCATED); 2655 } 2656 2657 /* See bdrv_is_allocated_above for documentation */ 2658 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, 2659 BlockDriverState *base, 2660 bool include_base, int64_t offset, 2661 int64_t bytes, int64_t *pnum) 2662 { 2663 int depth; 2664 int ret; 2665 IO_CODE(); 2666 2667 ret = bdrv_co_common_block_status_above(top, base, include_base, false, 2668 offset, bytes, pnum, NULL, NULL, 2669 &depth); 2670 if (ret < 0) { 2671 return ret; 2672 } 2673 2674 if (ret & BDRV_BLOCK_ALLOCATED) { 2675 return depth; 2676 } 2677 return 0; 2678 } 2679 2680 /* 2681 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2682 * 2683 * Return a positive depth if (a prefix of) the given range is allocated 2684 * in any image between BASE and TOP (BASE is only included if include_base 2685 * is set). 
/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate, so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;
    IO_CODE();

    if (!bytes) {
        return 1;
    }

    ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
                                            bytes, &pnum, NULL, NULL, NULL);

    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}

int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
                                            bytes, pnum ? pnum : &dummy, NULL,
                                            NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            bool include_base, int64_t offset,
                                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
                                            offset, bytes, pnum, NULL, NULL,
                                            &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain. Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state. Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_common_block_status_above(top, base, include_base, false,
                                         offset, bytes, pnum, NULL, NULL,
                                         &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}
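
/*
 * Worked example (illustrative): for a chain base <- mid <- top, a call to
 * bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum) returns
 * 1 if (a prefix of) the range is allocated in "top", 2 if it is
 * unallocated there but allocated in "mid", and 0 if neither layer above
 * "base" allocates it.
 */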
int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_load_vmstate) {
        ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_save_vmstate) {
        ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}
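
/*
 * Usage sketch (illustrative): storing and retrieving a blob in the
 * vmstate area of a node whose driver (or primary child) supports it;
 * both buffer-based wrappers return the byte count on success and
 * negative errno on failure:
 *
 *     uint8_t buf[512];
 *     ...
 *     ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *     ...
 *     ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 */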
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    IO_CODE();
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /*
             * qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread. Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/*
 * Async version of aio cancel. The caller is not blocked if the acb
 * implements cancel_async; otherwise we do nothing and let the request
 * complete normally. In either case the completion callback must be called.
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    IO_CODE();
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;
    IO_CODE();

    assert_bdrv_graph_readable();
    bdrv_inc_in_flight(bs);

    if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /*
         * bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success)
         */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /*
     * Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if the wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
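
/*
 * Usage sketch (illustrative): persisting completed writes from coroutine
 * context. The generation counters above make this a no-op (returning 0)
 * when nothing has been written since the last successful flush:
 *
 *     ret = bdrv_co_flush(bs);
 *     if (ret < 0) {
 *         ... report or propagate the error ...
 *     }
 */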
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /*
     * Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here. Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.
     */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster. */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
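
/*
 * Usage sketch (illustrative): discarding a range from coroutine context.
 * The call quietly succeeds without doing anything unless the node was
 * opened with BDRV_O_UNMAP and the driver implements discard; unaligned
 * head and tail fragments are handled by the loop above:
 *
 *     ret = bdrv_co_pdiscard(child, offset, bytes);
 */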
int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;
    IO_CODE();
    assert_bdrv_graph_readable();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
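
/*
 * Usage sketch (illustrative): allocating an I/O buffer that satisfies the
 * node's memory alignment; the try_ variants return NULL on failure
 * instead of aborting, and buffers are released with qemu_vfree():
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (!buf) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */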
void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_co_io_plug(child->bs);
    }

    if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_co_io_plug) {
            drv->bdrv_co_io_plug(bs);
        }
    }
}

void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();

    assert(bs->io_plugged);
    if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_co_io_unplug) {
            drv->bdrv_co_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_co_io_unplug(child->bs);
    }
}

/* Helper that undoes bdrv_register_buf() when it fails partway through */
static void bdrv_register_buf_rollback(BlockDriverState *bs,
                                       void *host,
                                       size_t size,
                                       BdrvChild *final_child)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        if (child == final_child) {
            break;
        }

        bdrv_unregister_buf(child->bs, host, size);
    }

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
}

bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }
    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }
    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}
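
/*
 * Usage sketch (illustrative): registering a long-lived I/O buffer for the
 * whole subtree so that drivers can map or pin it once. Registration is
 * all-or-nothing thanks to the rollback helper above:
 *
 *     Error *local_err = NULL;
 *
 *     if (bdrv_register_buf(bs, buf, len, &local_err)) {
 *         ... submit I/O with BDRV_REQ_REGISTERED_BUF ...
 *         bdrv_unregister_buf(bs, buf, len);
 *     }
 */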
static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operations */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/*
 * Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics.
 */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    IO_CODE();
    assume_graph_lock(); /* FIXME */
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/*
 * Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics.
 */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    IO_CODE();
    assume_graph_lock(); /* FIXME */
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    IO_CODE();
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}

static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}
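
/*
 * Usage sketch (illustrative): offloaded copy between two attached
 * children. There is no automatic bounce-buffer fallback here, so the
 * caller must handle -ENOTSUP (e.g. when either driver lacks copy-range
 * support or the nodes are encrypted):
 *
 *     ret = bdrv_co_copy_range(src_child, src_offset,
 *                              dst_child, dst_offset, bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         ... fall back to an ordinary read/write loop ...
 *     }
 */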
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'. Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /*
     * If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation.
     */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but bdrv_co_refresh_total_sectors
     * failed; the latter does not affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}
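
/*
 * Usage sketch (illustrative) for bdrv_co_truncate() defined earlier in
 * this section: growing an image to exactly 1 GiB without preallocation;
 * the Error ** convention is the usual QAPI one, and GiB is the constant
 * from "qemu/units.h":
 *
 *     ret = bdrv_co_truncate(child, 1 * GiB, true, PREALLOC_MODE_OFF,
 *                            0, errp);
 */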