/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void GRAPH_RDLOCK
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void GRAPH_RDLOCK
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool GRAPH_RDLOCK
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                         bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}
void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        /* called with rdlock taken, but it doesn't really need it. */
        c->klass->drained_begin(c);
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL; in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
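/*
 * Editor's note: a minimal sketch (not called anywhere in QEMU) of the merge
 * semantics above. Alignments and optimal sizes combine with MAX() so the
 * result satisfies both children, while hard maxima use MIN_NON_ZERO() so
 * that 0 keeps meaning "no limit".
 */
static G_GNUC_UNUSED void bdrv_merge_limits_example(void)
{
    BlockLimits a = { .opt_mem_alignment = 512, .max_transfer = 0 };
    BlockLimits b = { .opt_mem_alignment = 4096, .max_transfer = 1048576 };

    bdrv_merge_limits(&a, &b);

    assert(a.opt_mem_alignment == 4096); /* MAX: stricter alignment wins */
    assert(a.max_transfer == 1048576);   /* MIN_NON_ZERO: 0 is "unlimited" */
}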
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}
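/*
 * Editor's note: an illustrative sketch of the reference-count semantics
 * documented above, assuming @bs has no other copy-on-read users. Not part
 * of the real call graph.
 */
static G_GNUC_UNUSED void
bdrv_copy_on_read_refcount_example(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);       /* first user */
    bdrv_enable_copy_on_read(bs);       /* second, independent user */

    bdrv_disable_copy_on_read(bs);      /* COR stays on for the other user */
    assert(qatomic_read(&bs->copy_on_read) == 1);

    bdrv_disable_copy_on_read(bs);      /* last user gone, COR is off */
    assert(qatomic_read(&bs->copy_on_read) == 0);
}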
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }

    /* At this point, we should always be running in the main loop. */
    GLOBAL_STATE_CODE();
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;
static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could take forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}

void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could take forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could take forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
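/*
 * Editor's note: a sketch of the canonical drained-section pattern built from
 * the primitives above (illustrative only). Between begin and end, parents
 * are quiesced and no requests are in flight, so @bs can be manipulated
 * safely.
 */
static G_GNUC_UNUSED void bdrv_drained_section_example(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);     /* quiesce parents, poll in-flight I/O */

    /* ... work that requires @bs to be quiescent, e.g. graph changes ... */

    bdrv_drained_end(bs);       /* re-enable things in child-to-parent order */
}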
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
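/*
 * Editor's note: a worked example for tracked_request_overlaps()
 * (illustrative only). Ranges are half-open, so a request covering
 * [4096, 8192) merely touches [8192, 12288) and does not overlap it, while
 * sharing a single byte with [0, 4097) counts as an overlap.
 */
static G_GNUC_UNUSED void tracked_request_overlaps_example(void)
{
    BdrvTrackedRequest req = {
        .overlap_offset = 4096,
        .overlap_bytes = 4096,                   /* covers [4096, 8192) */
    };

    assert(!tracked_request_overlaps(&req, 8192, 4096)); /* adjacent only */
    assert(tracked_request_overlaps(&req, 0, 4097));     /* one shared byte */
}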
/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to subcluster (if supported) or cluster boundaries
 */
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          int64_t *align_offset, int64_t *align_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
        *align_offset = offset;
        *align_bytes = bytes;
    } else {
        int64_t c = bdi.subcluster_size;
        *align_offset = QEMU_ALIGN_DOWN(offset, c);
        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
    }
}

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}
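/*
 * Editor's note: a sketch of the tracked-request lifecycle that the read and
 * write paths below follow (illustrative; must run in coroutine context). A
 * request is registered, optionally marked serialising so that overlapping
 * serialising requests wait for each other, and unregistered when the I/O is
 * done.
 */
static G_GNUC_UNUSED void coroutine_fn
tracked_request_lifecycle_example(BlockDriverState *bs)
{
    BdrvTrackedRequest req;

    tracked_request_begin(&req, bs, 0, 4096, BDRV_TRACKED_WRITE);

    /* Serialise against overlapping requests at 4 KiB granularity */
    bdrv_make_request_serialising(&req, 4096);

    /* ... perform the actual I/O here ... */

    tracked_request_end(&req);  /* wakes up requests waiting on this one */
}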
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
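/*
 * Editor's note: illustrative cases for the validation helpers above (not
 * called anywhere). Negative values and totals beyond BDRV_MAX_LENGTH are
 * rejected with -EIO.
 */
static G_GNUC_UNUSED void bdrv_check_request_example(void)
{
    assert(bdrv_check_request(0, 4096, NULL) == 0);
    assert(bdrv_check_request(-1, 4096, NULL) == -EIO);  /* negative offset */
    /* offset + bytes would exceed BDRV_MAX_LENGTH */
    assert(bdrv_check_request(BDRV_MAX_LENGTH, 1, NULL) == -EIO);
}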
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
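/*
 * Editor's note: the AIO-to-coroutine bridge used by bdrv_driver_preadv()
 * above (and bdrv_driver_pwritev() below), distilled into a sketch. The
 * @submit callback stands in for a driver's bdrv_aio_*() hook and is
 * hypothetical. The coroutine fires off the request with
 * bdrv_co_io_em_complete() as completion callback and yields; the callback
 * stores the result and wakes the coroutine up again.
 */
static G_GNUC_UNUSED int coroutine_fn
bdrv_aio_bridge_example(BlockAIOCB *(*submit)(BlockCompletionFunc *cb,
                                              void *opaque))
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb = submit(bdrv_co_io_em_complete, &co);

    if (acb == NULL) {
        return -EIO;
    }
    qemu_coroutine_yield();     /* woken by bdrv_co_io_em_complete() */
    return co.ret;
}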
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t align_offset;
    int64_t align_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
    skip_bytes = offset - align_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   align_offset, align_bytes);

    while (align_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(align_bytes, max_transfer);
        } else {
            ret = bdrv_co_is_allocated(bs, align_offset,
                                       MIN(align_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(align_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, align_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, align_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, align_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        align_offset += pnum;
        align_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_co_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer, corresponding to the align-sized
 * chunk around the tail, if the tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 *
 * @write is true for write requests, false for read requests.
 *
 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
 * merge existing vector elements into a single one. @collapse_bounce_buf acts
 * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse
 * I/O vector elements, so for read requests, the data can be copied back after
 * the read is done.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    bool write;
    QEMUIOVector local_qiov;

    uint8_t *collapse_bounce_buf;
    size_t collapse_len;
    QEMUIOVector pre_collapse_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              bool write,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    pad->write = write;

    return true;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}
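/*
 * Editor's note: a worked example for bdrv_init_padding() (illustrative
 * only; assumes bs->bl.request_alignment == 512). A request at offset 700
 * of 1000 bytes gets head = 700 % 512 = 188 and
 * tail = 512 - (1700 % 512) = 348; head + bytes + tail = 1536 spans more
 * than one alignment chunk, so two align-sized buffers are allocated and
 * the two RMW reads cannot be merged.
 */
static G_GNUC_UNUSED void bdrv_init_padding_example(BlockDriverState *bs)
{
    BdrvRequestPadding pad;

    if (bdrv_init_padding(bs, 700, 1000, false, &pad)) {
        assert(pad.head == 188);
        assert(pad.tail == 348);
        assert(pad.buf_len == 1024);    /* 2 * align: head and tail chunks */
        assert(!pad.merge_reads);       /* 188 + 1000 + 348 > 1024 */
        qemu_vfree(pad.buf);            /* pad.local_qiov was never built */
    }
}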
/**
 * Free *pad's associated buffers, and perform any necessary finalization steps.
 */
static void bdrv_padding_finalize(BdrvRequestPadding *pad)
{
    if (pad->collapse_bounce_buf) {
        if (!pad->write) {
            /*
             * If padding required elements in the vector to be collapsed into a
             * bounce buffer, copy the bounce buffer content back
             */
            qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
                                pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_vfree(pad->collapse_bounce_buf);
        qemu_iovec_destroy(&pad->pre_collapse_qiov);
    }
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
/*
 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
 * ensuring that the resulting vector will not exceed IOV_MAX elements.
 *
 * To ensure this, when necessary, the first two or three elements of @iov are
 * merged into pad->collapse_bounce_buf and replaced by a reference to that
 * bounce buffer in pad->local_qiov.
 *
 * After performing a read request, the data from the bounce buffer must be
 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
 */
static int bdrv_create_padded_qiov(BlockDriverState *bs,
                                   BdrvRequestPadding *pad,
                                   struct iovec *iov, int niov,
                                   size_t iov_offset, size_t bytes)
{
    int padded_niov, surplus_count, collapse_count;

    /* Assert this invariant */
    assert(niov <= IOV_MAX);

    /*
     * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error
     * to the guest is not ideal, but there is little else we can do. At least
     * this will practically never happen on 64-bit systems.
     */
    if (SIZE_MAX - pad->head < bytes ||
        SIZE_MAX - pad->head - bytes < pad->tail)
    {
        return -EINVAL;
    }

    /* Length of the resulting IOV if we just concatenated everything */
    padded_niov = !!pad->head + niov + !!pad->tail;

    qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));

    if (pad->head) {
        qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
    }

    /*
     * If padded_niov > IOV_MAX, we cannot just concatenate everything.
     * Instead, merge the first two or three elements of @iov to reduce the
     * number of vector elements as necessary.
     */
    if (padded_niov > IOV_MAX) {
        /*
         * Only head and tail can have led to the number of entries exceeding
         * IOV_MAX, so we can exceed it by the head and tail at most. We need
         * to reduce the number of elements by `surplus_count`, so we merge that
         * many elements plus one into one element.
         */
        surplus_count = padded_niov - IOV_MAX;
        assert(surplus_count <= !!pad->head + !!pad->tail);
        collapse_count = surplus_count + 1;

        /*
         * Move the elements to collapse into `pad->pre_collapse_qiov`, then
         * advance `iov` (and associated variables) by those elements.
         */
        qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
        qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
                              collapse_count, iov_offset, SIZE_MAX);
        iov += collapse_count;
        iov_offset = 0;
        niov -= collapse_count;
        bytes -= pad->pre_collapse_qiov.size;

        /*
         * Construct the bounce buffer to match the length of the to-collapse
         * vector elements, and for write requests, initialize it with the data
         * from those elements. Then add it to `pad->local_qiov`.
         */
        pad->collapse_len = pad->pre_collapse_qiov.size;
        pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
        if (pad->write) {
            qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
                              pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_iovec_add(&pad->local_qiov,
                       pad->collapse_bounce_buf, pad->collapse_len);
    }

    qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);

    if (pad->tail) {
        qemu_iovec_add(&pad->local_qiov,
                       pad->buf + pad->buf_len - pad->tail, pad->tail);
    }

    assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
    return 0;
}

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * @write is true for write requests, false for read requests.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            bool write,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;
    struct iovec *sliced_iov;
    int sliced_niov;
    size_t sliced_head, sliced_tail;

    /* Should have been checked by the caller already */
    ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
                                  &sliced_head, &sliced_tail,
                                  &sliced_niov);

    /* Guaranteed by bdrv_check_request32() */
    assert(*bytes <= SIZE_MAX);
    ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
                                  sliced_head, *bytes);
    if (ret < 0) {
        bdrv_padding_finalize(pad);
        return ret;
    }
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
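/*
 * Editor's note: a sketch of how bdrv_pad_request() exchanges the request
 * parameters (illustrative; assumes request_alignment == 512 and a @qiov of
 * at least 1000 bytes). The unaligned request [700, 1700) becomes the
 * aligned request [512, 2048) against pad.local_qiov.
 */
static G_GNUC_UNUSED int
bdrv_pad_request_example(BlockDriverState *bs, QEMUIOVector *qiov)
{
    BdrvRequestPadding pad;
    size_t qiov_offset = 0;
    int64_t offset = 700, bytes = 1000;
    bool padded;
    int ret;

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes,
                           false, &pad, &padded, NULL);
    if (ret < 0) {
        return ret;
    }
    if (padded) {
        assert(offset == 512 && bytes == 1536 && qiov_offset == 0);
        /* ... issue the aligned request, then undo the padding ... */
        bdrv_padding_finalize(&pad);
    }
    return 0;
}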
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver gives
         * zero-length requests a special meaning (like
         * qcow2_co_pwritev_compressed_part), we can't pass one through due to
         * request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
                           &pad, NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_finalize(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}

static inline int coroutine_fn GRAPH_RDLOCK
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
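/*
 * Editor's note: a worked example of the alignment loop in
 * bdrv_co_do_pwrite_zeroes() above (illustrative only). With
 * pwrite_zeroes_alignment = 4096, the request [1024, 10240) is carved into
 * an unaligned head, an aligned bulk and an unaligned tail, so a driver sees
 * at most one short fragment at either edge.
 */
static G_GNUC_UNUSED void pwrite_zeroes_fragmentation_example(void)
{
    const int alignment = 4096;
    int64_t offset = 1024, bytes = 9216;        /* request [1024, 10240) */
    int head = offset % alignment;              /* 1024 */
    int tail = (offset + bytes) % alignment;    /* 2048 */

    assert(alignment - head == 3072);   /* head fragment: [1024, 4096) */
    assert(tail == 2048);               /* tail fragment: [8192, 10240) */
    /* aligned bulk in between:           [4096, 8192) */
}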
DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 2035 BlockDriverState *bs = child->bs; 2036 2037 bdrv_check_request(offset, bytes, &error_abort); 2038 2039 qatomic_inc(&bs->write_gen); 2040 2041 /* 2042 * Discard cannot extend the image, but in error handling cases, such as 2043 * when reverting a qcow2 cluster allocation, the discarded range can pass 2044 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 2045 * here. Instead, just skip it, since semantically a discard request 2046 * beyond EOF cannot expand the image anyway. 2047 */ 2048 if (ret == 0 && 2049 (req->type == BDRV_TRACKED_TRUNCATE || 2050 end_sector > bs->total_sectors) && 2051 req->type != BDRV_TRACKED_DISCARD) { 2052 bs->total_sectors = end_sector; 2053 bdrv_parent_cb_resize(bs); 2054 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 2055 } 2056 if (req->bytes) { 2057 switch (req->type) { 2058 case BDRV_TRACKED_WRITE: 2059 stat64_max(&bs->wr_highest_offset, offset + bytes); 2060 /* fall through, to set dirty bits */ 2061 case BDRV_TRACKED_DISCARD: 2062 bdrv_set_dirty(bs, offset, bytes); 2063 break; 2064 default: 2065 break; 2066 } 2067 } 2068 } 2069 2070 /* 2071 * Forwards an already correctly aligned write request to the BlockDriver, 2072 * after possibly fragmenting it. 2073 */ 2074 static int coroutine_fn GRAPH_RDLOCK 2075 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, 2076 int64_t offset, int64_t bytes, int64_t align, 2077 QEMUIOVector *qiov, size_t qiov_offset, 2078 BdrvRequestFlags flags) 2079 { 2080 BlockDriverState *bs = child->bs; 2081 BlockDriver *drv = bs->drv; 2082 int ret; 2083 2084 int64_t bytes_remaining = bytes; 2085 int max_transfer; 2086 2087 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 2088 2089 if (!drv) { 2090 return -ENOMEDIUM; 2091 } 2092 2093 if (bdrv_has_readonly_bitmaps(bs)) { 2094 return -EPERM; 2095 } 2096 2097 assert(is_power_of_2(align)); 2098 assert((offset & (align - 1)) == 0); 2099 assert((bytes & (align - 1)) == 0); 2100 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 2101 align); 2102 2103 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 2104 2105 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 2106 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 2107 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 2108 flags |= BDRV_REQ_ZERO_WRITE; 2109 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 2110 flags |= BDRV_REQ_MAY_UNMAP; 2111 } 2112 2113 /* Can't use optimization hint with bufferless zero write */ 2114 flags &= ~BDRV_REQ_REGISTERED_BUF; 2115 } 2116 2117 if (ret < 0) { 2118 /* Do nothing, write notifier decided to fail this request */ 2119 } else if (flags & BDRV_REQ_ZERO_WRITE) { 2120 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO); 2121 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 2122 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 2123 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 2124 qiov, qiov_offset); 2125 } else if (bytes <= max_transfer) { 2126 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 2127 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 2128 } else { 2129 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 2130 while (bytes_remaining) { 2131 int num = MIN(bytes_remaining, max_transfer); 2132 int local_flags = flags; 2133 2134 assert(num); 2135 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 2136 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 2137 /* If FUA is going 
to be emulated by flush, we only 2138 * need to flush on the last iteration */ 2139 local_flags &= ~BDRV_REQ_FUA; 2140 } 2141 2142 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 2143 num, qiov, 2144 qiov_offset + bytes - bytes_remaining, 2145 local_flags); 2146 if (ret < 0) { 2147 break; 2148 } 2149 bytes_remaining -= num; 2150 } 2151 } 2152 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE); 2153 2154 if (ret >= 0) { 2155 ret = 0; 2156 } 2157 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 2158 2159 return ret; 2160 } 2161 2162 static int coroutine_fn GRAPH_RDLOCK 2163 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, 2164 BdrvRequestFlags flags, BdrvTrackedRequest *req) 2165 { 2166 BlockDriverState *bs = child->bs; 2167 QEMUIOVector local_qiov; 2168 uint64_t align = bs->bl.request_alignment; 2169 int ret = 0; 2170 bool padding; 2171 BdrvRequestPadding pad; 2172 2173 /* This flag doesn't make sense for padding or zero writes */ 2174 flags &= ~BDRV_REQ_REGISTERED_BUF; 2175 2176 padding = bdrv_init_padding(bs, offset, bytes, true, &pad); 2177 if (padding) { 2178 assert(!(flags & BDRV_REQ_NO_WAIT)); 2179 bdrv_make_request_serialising(req, align); 2180 2181 bdrv_padding_rmw_read(child, req, &pad, true); 2182 2183 if (pad.head || pad.merge_reads) { 2184 int64_t aligned_offset = offset & ~(align - 1); 2185 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2186 2187 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2188 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2189 align, &local_qiov, 0, 2190 flags & ~BDRV_REQ_ZERO_WRITE); 2191 if (ret < 0 || pad.merge_reads) { 2192 /* Error or all work is done */ 2193 goto out; 2194 } 2195 offset += write_bytes - pad.head; 2196 bytes -= write_bytes - pad.head; 2197 } 2198 } 2199 2200 assert(!bytes || (offset & (align - 1)) == 0); 2201 if (bytes >= align) { 2202 /* Write the aligned part in the middle. 
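 * (A padded request is split into up to three sub-requests: the RMW head
 * above, this aligned middle, and the RMW tail below. Illustrative
 * numbers: with align == 512, offset == 300 and bytes == 1000, the head
 * write covers [0, 512), this write covers [512, 1024) and the tail
 * write covers [1024, 1536).)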
*/
2203 int64_t aligned_bytes = bytes & ~(align - 1);
2204 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2205 NULL, 0, flags);
2206 if (ret < 0) {
2207 goto out;
2208 }
2209 bytes -= aligned_bytes;
2210 offset += aligned_bytes;
2211 }
2212
2213 assert(!bytes || (offset & (align - 1)) == 0);
2214 if (bytes) {
2215 assert(align == pad.tail + bytes);
2216
2217 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2218 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2219 &local_qiov, 0,
2220 flags & ~BDRV_REQ_ZERO_WRITE);
2221 }
2222
2223 out:
2224 bdrv_padding_finalize(&pad);
2225
2226 return ret;
2227 }
2228
2229 /*
2230 * Handle a write request in coroutine context
2231 */
2232 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2233 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2234 BdrvRequestFlags flags)
2235 {
2236 IO_CODE();
2237 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2238 }
2239
2240 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2241 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2242 BdrvRequestFlags flags)
2243 {
2244 BlockDriverState *bs = child->bs;
2245 BdrvTrackedRequest req;
2246 uint64_t align = bs->bl.request_alignment;
2247 BdrvRequestPadding pad;
2248 int ret;
2249 bool padded = false;
2250 IO_CODE();
2251
2252 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2253
2254 if (!bdrv_co_is_inserted(bs)) {
2255 return -ENOMEDIUM;
2256 }
2257
2258 if (flags & BDRV_REQ_ZERO_WRITE) {
2259 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2260 } else {
2261 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2262 }
2263 if (ret < 0) {
2264 return ret;
2265 }
2266
2267 /* If the request is misaligned then we can't make it efficient */
2268 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2269 !QEMU_IS_ALIGNED(offset | bytes, align))
2270 {
2271 return -ENOTSUP;
2272 }
2273
2274 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2275 /*
2276 * Aligning a zero-length request is nonsense. Even if the driver gives
2277 * special meaning to zero-length requests (like
2278 * qcow2_co_pwritev_compressed_part), we can't pass them to the driver
2279 * due to request_alignment.
2280 *
2281 * Still, no reason to return an error if someone does an unaligned
2282 * zero-length write occasionally.
2283 */
2284 return 0;
2285 }
2286
2287 if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2288 /*
2289 * Pad the request for the following read-modify-write cycle.
2290 * bdrv_co_do_zero_pwritev() does the aligning by itself, so we do
2291 * the alignment here only if there is no ZERO flag.
2291 */
2292 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
2293 &pad, &padded, &flags);
2294 if (ret < 0) {
2295 return ret;
2296 }
2297 }
2298
2299 bdrv_inc_in_flight(bs);
2300 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2301
2302 if (flags & BDRV_REQ_ZERO_WRITE) {
2303 assert(!padded);
2304 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2305 goto out;
2306 }
2307
2308 if (padded) {
2309 /*
2310 * Request was unaligned to request_alignment and therefore
2311 * padded. We are going to do read-modify-write, and must
2312 * serialize the request to prevent interactions of the
2313 * widened region with other transactions.
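 * Otherwise, a concurrent write touching the padded head or tail area
 * between our padding read and our write-back could be silently undone
 * when the merged buffer is written out.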
2314 */
2315 assert(!(flags & BDRV_REQ_NO_WAIT));
2316 bdrv_make_request_serialising(&req, align);
2317 bdrv_padding_rmw_read(child, &req, &pad, false);
2318 }
2319
2320 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2321 qiov, qiov_offset, flags);
2322
2323 bdrv_padding_finalize(&pad);
2324
2325 out:
2326 tracked_request_end(&req);
2327 bdrv_dec_in_flight(bs);
2328
2329 return ret;
2330 }
2331
2332 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2333 int64_t bytes, BdrvRequestFlags flags)
2334 {
2335 IO_CODE();
2336 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2337 assert_bdrv_graph_readable();
2338
2339 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2340 flags &= ~BDRV_REQ_MAY_UNMAP;
2341 }
2342
2343 return bdrv_co_pwritev(child, offset, bytes, NULL,
2344 BDRV_REQ_ZERO_WRITE | flags);
2345 }
2346
2347 /*
2348 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
2349 */
2350 int bdrv_flush_all(void)
2351 {
2352 BdrvNextIterator it;
2353 BlockDriverState *bs = NULL;
2354 int result = 0;
2355
2356 GLOBAL_STATE_CODE();
2357 GRAPH_RDLOCK_GUARD_MAINLOOP();
2358
2359 /*
2360 * The bdrv queue is managed by record/replay;
2361 * creating a new flush request for stopping
2362 * the VM may break determinism.
2363 */
2364 if (replay_events_enabled()) {
2365 return result;
2366 }
2367
2368 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2369 AioContext *aio_context = bdrv_get_aio_context(bs);
2370 int ret;
2371
2372 aio_context_acquire(aio_context);
2373 ret = bdrv_flush(bs);
2374 if (ret < 0 && !result) {
2375 result = ret;
2376 }
2377 aio_context_release(aio_context);
2378 }
2379
2380 return result;
2381 }
2382
2383 /*
2384 * Returns the allocation status of the specified range.
2385 * Drivers not implementing the functionality are assumed not to support
2386 * backing files, hence all their sectors are reported as allocated.
2387 *
2388 * If 'want_zero' is true, the caller is querying for mapping
2389 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2390 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2391 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2392 *
2393 * If 'offset' is beyond the end of the disk image the return value is
2394 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2395 *
2396 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2397 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2398 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2399 *
2400 * 'pnum' is set to the number of bytes (including and immediately
2401 * following the specified offset) that are easily known to be in the
2402 * same allocated/unallocated state. Note that a second call starting
2403 * at the original offset plus returned pnum may have the same status.
2404 * The returned value is non-zero on success except at end-of-file.
2405 *
2406 * Returns negative errno on failure. Otherwise, if the
2407 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2408 * set to the host mapping and BDS corresponding to the guest offset.
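 *
 * For example (illustrative): querying a hole in a sparse protocol file
 * typically yields BDRV_BLOCK_ZERO with 'pnum' covering the hole, while
 * a data extent on a protocol node typically yields BDRV_BLOCK_DATA |
 * BDRV_BLOCK_OFFSET_VALID with 'map' set to the host offset of the data.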
2409 */ 2410 static int coroutine_fn GRAPH_RDLOCK 2411 bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, 2412 int64_t offset, int64_t bytes, 2413 int64_t *pnum, int64_t *map, BlockDriverState **file) 2414 { 2415 int64_t total_size; 2416 int64_t n; /* bytes */ 2417 int ret; 2418 int64_t local_map = 0; 2419 BlockDriverState *local_file = NULL; 2420 int64_t aligned_offset, aligned_bytes; 2421 uint32_t align; 2422 bool has_filtered_child; 2423 2424 assert(pnum); 2425 assert_bdrv_graph_readable(); 2426 *pnum = 0; 2427 total_size = bdrv_co_getlength(bs); 2428 if (total_size < 0) { 2429 ret = total_size; 2430 goto early_out; 2431 } 2432 2433 if (offset >= total_size) { 2434 ret = BDRV_BLOCK_EOF; 2435 goto early_out; 2436 } 2437 if (!bytes) { 2438 ret = 0; 2439 goto early_out; 2440 } 2441 2442 n = total_size - offset; 2443 if (n < bytes) { 2444 bytes = n; 2445 } 2446 2447 /* Must be non-NULL or bdrv_co_getlength() would have failed */ 2448 assert(bs->drv); 2449 has_filtered_child = bdrv_filter_child(bs); 2450 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2451 *pnum = bytes; 2452 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2453 if (offset + bytes == total_size) { 2454 ret |= BDRV_BLOCK_EOF; 2455 } 2456 if (bs->drv->protocol_name) { 2457 ret |= BDRV_BLOCK_OFFSET_VALID; 2458 local_map = offset; 2459 local_file = bs; 2460 } 2461 goto early_out; 2462 } 2463 2464 bdrv_inc_in_flight(bs); 2465 2466 /* Round out to request_alignment boundaries */ 2467 align = bs->bl.request_alignment; 2468 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2469 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2470 2471 if (bs->drv->bdrv_co_block_status) { 2472 /* 2473 * Use the block-status cache only for protocol nodes: Format 2474 * drivers are generally quick to inquire the status, but protocol 2475 * drivers often need to get information from outside of qemu, so 2476 * we do not have control over the actual implementation. There 2477 * have been cases where inquiring the status took an unreasonably 2478 * long time, and we can do nothing in qemu to fix it. 2479 * This is especially problematic for images with large data areas, 2480 * because finding the few holes in them and giving them special 2481 * treatment does not gain much performance. Therefore, we try to 2482 * cache the last-identified data region. 2483 * 2484 * Second, limiting ourselves to protocol nodes allows us to assume 2485 * the block status for data regions to be DATA | OFFSET_VALID, and 2486 * that the host offset is the same as the guest offset. 2487 * 2488 * Note that it is possible that external writers zero parts of 2489 * the cached regions without the cache being invalidated, and so 2490 * we may report zeroes as data. This is not catastrophic, 2491 * however, because reporting zeroes as data is fine. 2492 */ 2493 if (QLIST_EMPTY(&bs->children) && 2494 bdrv_bsc_is_data(bs, aligned_offset, pnum)) 2495 { 2496 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2497 local_file = bs; 2498 local_map = aligned_offset; 2499 } else { 2500 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2501 aligned_bytes, pnum, &local_map, 2502 &local_file); 2503 2504 /* 2505 * Note that checking QLIST_EMPTY(&bs->children) is also done when 2506 * the cache is queried above. Technically, we do not need to check 2507 * it here; the worst that can happen is that we fill the cache for 2508 * non-protocol nodes, and then it is never used. 
However, filling
2509 * the cache requires an RCU update, so double check here to avoid
2510 * such an update if possible.
2511 *
2512 * Check want_zero, because we only want to update the cache when we
2513 * have accurate information about what is zero and what is data.
2514 */
2515 if (want_zero &&
2516 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2517 QLIST_EMPTY(&bs->children))
2518 {
2519 /*
2520 * When a protocol driver reports BLOCK_OFFSET_VALID, the
2521 * returned local_map value must be the same as the offset we
2522 * have passed (aligned_offset), and local_file must be the node
2523 * itself.
2524 * Assert this, because we follow this rule when reading from
2525 * the cache (see the `local_file = bs` and
2526 * `local_map = aligned_offset` assignments above), and the
2527 * result the cache delivers must be the same as the driver
2528 * would deliver.
2529 */
2530 assert(local_file == bs);
2531 assert(local_map == aligned_offset);
2532 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2533 }
2534 }
2535 } else {
2536 /* Default code for filters */
2537
2538 local_file = bdrv_filter_bs(bs);
2539 assert(local_file);
2540
2541 *pnum = aligned_bytes;
2542 local_map = aligned_offset;
2543 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2544 }
2545 if (ret < 0) {
2546 *pnum = 0;
2547 goto out;
2548 }
2549
2550 /*
2551 * The driver's result must be a non-zero multiple of request_alignment.
2552 * Clamp pnum and adjust map to the original request.
2553 */
2554 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2555 align > offset - aligned_offset);
2556 if (ret & BDRV_BLOCK_RECURSE) {
2557 assert(ret & BDRV_BLOCK_DATA);
2558 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2559 assert(!(ret & BDRV_BLOCK_ZERO));
2560 }
2561
2562 *pnum -= offset - aligned_offset;
2563 if (*pnum > bytes) {
2564 *pnum = bytes;
2565 }
2566 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2567 local_map += offset - aligned_offset;
2568 }
2569
2570 if (ret & BDRV_BLOCK_RAW) {
2571 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2572 ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
2573 *pnum, pnum, &local_map, &local_file);
2574 goto out;
2575 }
2576
2577 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2578 ret |= BDRV_BLOCK_ALLOCATED;
2579 } else if (bs->drv->supports_backing) {
2580 BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2581
2582 if (!cow_bs) {
2583 ret |= BDRV_BLOCK_ZERO;
2584 } else if (want_zero) {
2585 int64_t size2 = bdrv_co_getlength(cow_bs);
2586
2587 if (size2 >= 0 && offset >= size2) {
2588 ret |= BDRV_BLOCK_ZERO;
2589 }
2590 }
2591 }
2592
2593 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2594 local_file && local_file != bs &&
2595 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2596 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2597 int64_t file_pnum;
2598 int ret2;
2599
2600 ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
2601 *pnum, &file_pnum, NULL, NULL);
2602 if (ret2 >= 0) {
2603 /* Ignore errors. This is just providing extra information; it
2604 * is useful but not necessary.
2605 */
2606 if (ret2 & BDRV_BLOCK_EOF &&
2607 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2608 /*
2609 * It is valid for the format block driver to read
2610 * beyond the end of the underlying file's current
2611 * size; such areas read as zero.
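 * (A typical case: a sparse image format whose virtual disk is larger
 * than the protocol file currently backing it; clusters mapped past
 * the protocol file's EOF simply read back as zeroes.)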
2612 */
2613 ret |= BDRV_BLOCK_ZERO;
2614 } else {
2615 /* Limit the request to the range reported by the protocol driver */
2616 *pnum = file_pnum;
2617 ret |= (ret2 & BDRV_BLOCK_ZERO);
2618 }
2619 }
2620 }
2621
2622 out:
2623 bdrv_dec_in_flight(bs);
2624 if (ret >= 0 && offset + *pnum == total_size) {
2625 ret |= BDRV_BLOCK_EOF;
2626 }
2627 early_out:
2628 if (file) {
2629 *file = local_file;
2630 }
2631 if (map) {
2632 *map = local_map;
2633 }
2634 return ret;
2635 }
2636
2637 int coroutine_fn
2638 bdrv_co_common_block_status_above(BlockDriverState *bs,
2639 BlockDriverState *base,
2640 bool include_base,
2641 bool want_zero,
2642 int64_t offset,
2643 int64_t bytes,
2644 int64_t *pnum,
2645 int64_t *map,
2646 BlockDriverState **file,
2647 int *depth)
2648 {
2649 int ret;
2650 BlockDriverState *p;
2651 int64_t eof = 0;
2652 int dummy;
2653 IO_CODE();
2654
2655 assert(!include_base || base); /* Can't include a NULL base */
2656 assert_bdrv_graph_readable();
2657
2658 if (!depth) {
2659 depth = &dummy;
2660 }
2661 *depth = 0;
2662
2663 if (!include_base && bs == base) {
2664 *pnum = bytes;
2665 return 0;
2666 }
2667
2668 ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
2669 map, file);
2670 ++*depth;
2671 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2672 return ret;
2673 }
2674
2675 if (ret & BDRV_BLOCK_EOF) {
2676 eof = offset + *pnum;
2677 }
2678
2679 assert(*pnum <= bytes);
2680 bytes = *pnum;
2681
2682 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2683 p = bdrv_filter_or_cow_bs(p))
2684 {
2685 ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
2686 map, file);
2687 ++*depth;
2688 if (ret < 0) {
2689 return ret;
2690 }
2691 if (*pnum == 0) {
2692 /*
2693 * The top layer deferred to this layer, and because this layer is
2694 * short, any zeroes that we synthesize beyond EOF behave as if they
2695 * were allocated at this layer.
2696 *
2697 * We don't include BDRV_BLOCK_EOF into ret, as the upper layer may
2698 * be larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
2699 * function, see below.
2700 */
2701 assert(ret & BDRV_BLOCK_EOF);
2702 *pnum = bytes;
2703 if (file) {
2704 *file = p;
2705 }
2706 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2707 break;
2708 }
2709 if (ret & BDRV_BLOCK_ALLOCATED) {
2710 /*
2711 * We've found the node and the status; stop the loop here.
2712 *
2713 * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which may
2714 * be larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
2715 * function, see below.
2716 */
2717 ret &= ~BDRV_BLOCK_EOF;
2718 break;
2719 }
2720
2721 if (p == base) {
2722 assert(include_base);
2723 break;
2724 }
2725
2726 /*
2727 * OK, the [offset, offset + *pnum) region is unallocated on this
2728 * layer; let's continue diving down the chain.
2729 */ 2730 assert(*pnum <= bytes); 2731 bytes = *pnum; 2732 } 2733 2734 if (offset + *pnum == eof) { 2735 ret |= BDRV_BLOCK_EOF; 2736 } 2737 2738 return ret; 2739 } 2740 2741 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2742 BlockDriverState *base, 2743 int64_t offset, int64_t bytes, 2744 int64_t *pnum, int64_t *map, 2745 BlockDriverState **file) 2746 { 2747 IO_CODE(); 2748 return bdrv_co_common_block_status_above(bs, base, false, true, offset, 2749 bytes, pnum, map, file, NULL); 2750 } 2751 2752 int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset, 2753 int64_t bytes, int64_t *pnum, 2754 int64_t *map, BlockDriverState **file) 2755 { 2756 IO_CODE(); 2757 return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2758 offset, bytes, pnum, map, file); 2759 } 2760 2761 /* 2762 * Check @bs (and its backing chain) to see if the range defined 2763 * by @offset and @bytes is known to read as zeroes. 2764 * Return 1 if that is the case, 0 otherwise and -errno on error. 2765 * This test is meant to be fast rather than accurate so returning 0 2766 * does not guarantee non-zero data. 2767 */ 2768 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2769 int64_t bytes) 2770 { 2771 int ret; 2772 int64_t pnum = bytes; 2773 IO_CODE(); 2774 2775 if (!bytes) { 2776 return 1; 2777 } 2778 2779 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, 2780 bytes, &pnum, NULL, NULL, NULL); 2781 2782 if (ret < 0) { 2783 return ret; 2784 } 2785 2786 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2787 } 2788 2789 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, 2790 int64_t bytes, int64_t *pnum) 2791 { 2792 int ret; 2793 int64_t dummy; 2794 IO_CODE(); 2795 2796 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, 2797 bytes, pnum ? pnum : &dummy, NULL, 2798 NULL, NULL); 2799 if (ret < 0) { 2800 return ret; 2801 } 2802 return !!(ret & BDRV_BLOCK_ALLOCATED); 2803 } 2804 2805 /* 2806 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2807 * 2808 * Return a positive depth if (a prefix of) the given range is allocated 2809 * in any image between BASE and TOP (BASE is only included if include_base 2810 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2811 * BASE can be NULL to check if the given offset is allocated in any 2812 * image of the chain. Return 0 otherwise, or negative errno on 2813 * failure. 2814 * 2815 * 'pnum' is set to the number of bytes (including and immediately 2816 * following the specified offset) that are known to be in the same 2817 * allocated/unallocated state. Note that a subsequent call starting 2818 * at 'offset + *pnum' may return the same allocation status (in other 2819 * words, the result is not necessarily the maximum possible range); 2820 * but 'pnum' will only be 0 when end of file is reached. 
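 *
 * For example, in a chain base <- mid <- top where a range is allocated
 * only in 'mid', the result is depth 2: 'top' (depth 1) defers to its
 * backing file, and 'mid' (depth 2) reports the range as allocated.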
2821 */ 2822 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs, 2823 BlockDriverState *base, 2824 bool include_base, int64_t offset, 2825 int64_t bytes, int64_t *pnum) 2826 { 2827 int depth; 2828 int ret; 2829 IO_CODE(); 2830 2831 ret = bdrv_co_common_block_status_above(bs, base, include_base, false, 2832 offset, bytes, pnum, NULL, NULL, 2833 &depth); 2834 if (ret < 0) { 2835 return ret; 2836 } 2837 2838 if (ret & BDRV_BLOCK_ALLOCATED) { 2839 return depth; 2840 } 2841 return 0; 2842 } 2843 2844 int coroutine_fn 2845 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2846 { 2847 BlockDriver *drv = bs->drv; 2848 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2849 int ret; 2850 IO_CODE(); 2851 assert_bdrv_graph_readable(); 2852 2853 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2854 if (ret < 0) { 2855 return ret; 2856 } 2857 2858 if (!drv) { 2859 return -ENOMEDIUM; 2860 } 2861 2862 bdrv_inc_in_flight(bs); 2863 2864 if (drv->bdrv_co_load_vmstate) { 2865 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); 2866 } else if (child_bs) { 2867 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2868 } else { 2869 ret = -ENOTSUP; 2870 } 2871 2872 bdrv_dec_in_flight(bs); 2873 2874 return ret; 2875 } 2876 2877 int coroutine_fn 2878 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2879 { 2880 BlockDriver *drv = bs->drv; 2881 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2882 int ret; 2883 IO_CODE(); 2884 assert_bdrv_graph_readable(); 2885 2886 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2887 if (ret < 0) { 2888 return ret; 2889 } 2890 2891 if (!drv) { 2892 return -ENOMEDIUM; 2893 } 2894 2895 bdrv_inc_in_flight(bs); 2896 2897 if (drv->bdrv_co_save_vmstate) { 2898 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); 2899 } else if (child_bs) { 2900 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2901 } else { 2902 ret = -ENOTSUP; 2903 } 2904 2905 bdrv_dec_in_flight(bs); 2906 2907 return ret; 2908 } 2909 2910 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2911 int64_t pos, int size) 2912 { 2913 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2914 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2915 IO_CODE(); 2916 2917 return ret < 0 ? ret : size; 2918 } 2919 2920 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2921 int64_t pos, int size) 2922 { 2923 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2924 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2925 IO_CODE(); 2926 2927 return ret < 0 ? ret : size; 2928 } 2929 2930 /**************************************************************/ 2931 /* async I/Os */ 2932 2933 /** 2934 * Synchronously cancels an acb. Must be called with the BQL held and the acb 2935 * must be processed with the BQL held too (IOThreads are not allowed). 2936 * 2937 * Use bdrv_aio_cancel_async() instead when possible. 2938 */ 2939 void bdrv_aio_cancel(BlockAIOCB *acb) 2940 { 2941 GLOBAL_STATE_CODE(); 2942 qemu_aio_ref(acb); 2943 bdrv_aio_cancel_async(acb); 2944 AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1); 2945 qemu_aio_unref(acb); 2946 } 2947 2948 /* Async version of aio cancel. The caller is not blocked if the acb implements 2949 * cancel_async, otherwise we do nothing and let the request normally complete. 2950 * In either case the completion callback must be called. 
*/
2951 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2952 {
2953 IO_CODE();
2954 if (acb->aiocb_info->cancel_async) {
2955 acb->aiocb_info->cancel_async(acb);
2956 }
2957 }
2958
2959 /**************************************************************/
2960 /* Coroutine block device emulation */
2961
2962 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2963 {
2964 BdrvChild *primary_child = bdrv_primary_child(bs);
2965 BdrvChild *child;
2966 int current_gen;
2967 int ret = 0;
2968 IO_CODE();
2969
2970 assert_bdrv_graph_readable();
2971 bdrv_inc_in_flight(bs);
2972
2973 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
2974 bdrv_is_sg(bs)) {
2975 goto early_exit;
2976 }
2977
2978 qemu_mutex_lock(&bs->reqs_lock);
2979 current_gen = qatomic_read(&bs->write_gen);
2980
2981 /* Wait until any previous flushes are completed */
2982 while (bs->active_flush_req) {
2983 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2984 }
2985
2986 /* Flushes reach this point in nondecreasing current_gen order. */
2987 bs->active_flush_req = true;
2988 qemu_mutex_unlock(&bs->reqs_lock);
2989
2990 /* Write back all layers by calling one driver function */
2991 if (bs->drv->bdrv_co_flush) {
2992 ret = bs->drv->bdrv_co_flush(bs);
2993 goto out;
2994 }
2995
2996 /* Write back cached data to the OS even with cache=unsafe */
2997 BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2998 if (bs->drv->bdrv_co_flush_to_os) {
2999 ret = bs->drv->bdrv_co_flush_to_os(bs);
3000 if (ret < 0) {
3001 goto out;
3002 }
3003 }
3004
3005 /* But don't actually force it to the disk with cache=unsafe */
3006 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3007 goto flush_children;
3008 }
3009
3010 /* Check if we really need to flush anything */
3011 if (bs->flushed_gen == current_gen) {
3012 goto flush_children;
3013 }
3014
3015 BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
3016 if (!bs->drv) {
3017 /* bs->drv->bdrv_co_flush() might have ejected the BDS
3018 * (even in case of apparent success) */
3019 ret = -ENOMEDIUM;
3020 goto out;
3021 }
3022 if (bs->drv->bdrv_co_flush_to_disk) {
3023 ret = bs->drv->bdrv_co_flush_to_disk(bs);
3024 } else if (bs->drv->bdrv_aio_flush) {
3025 BlockAIOCB *acb;
3026 CoroutineIOCompletion co = {
3027 .coroutine = qemu_coroutine_self(),
3028 };
3029
3030 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3031 if (acb == NULL) {
3032 ret = -EIO;
3033 } else {
3034 qemu_coroutine_yield();
3035 ret = co.ret;
3036 }
3037 } else {
3038 /*
3039 * Some block drivers always operate in either writethrough or unsafe
3040 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3041 * know how the server works (because the behaviour is hardcoded or
3042 * depends on server-side configuration), so we can't ensure that
3043 * everything is safe on disk. Returning an error doesn't work because
3044 * that would break guests even if the server operates in writethrough
3045 * mode.
3046 *
3047 * Let's hope the user knows what they're doing.
3048 */
3049 ret = 0;
3050 }
3051
3052 if (ret < 0) {
3053 goto out;
3054 }
3055
3056 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3057 * in the case of cache=unsafe, so there are no useless flushes.
3058 */ 3059 flush_children: 3060 ret = 0; 3061 QLIST_FOREACH(child, &bs->children, next) { 3062 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 3063 int this_child_ret = bdrv_co_flush(child->bs); 3064 if (!ret) { 3065 ret = this_child_ret; 3066 } 3067 } 3068 } 3069 3070 out: 3071 /* Notify any pending flushes that we have completed */ 3072 if (ret == 0) { 3073 bs->flushed_gen = current_gen; 3074 } 3075 3076 qemu_mutex_lock(&bs->reqs_lock); 3077 bs->active_flush_req = false; 3078 /* Return value is ignored - it's ok if wait queue is empty */ 3079 qemu_co_queue_next(&bs->flush_queue); 3080 qemu_mutex_unlock(&bs->reqs_lock); 3081 3082 early_exit: 3083 bdrv_dec_in_flight(bs); 3084 return ret; 3085 } 3086 3087 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 3088 int64_t bytes) 3089 { 3090 BdrvTrackedRequest req; 3091 int ret; 3092 int64_t max_pdiscard; 3093 int head, tail, align; 3094 BlockDriverState *bs = child->bs; 3095 IO_CODE(); 3096 assert_bdrv_graph_readable(); 3097 3098 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { 3099 return -ENOMEDIUM; 3100 } 3101 3102 if (bdrv_has_readonly_bitmaps(bs)) { 3103 return -EPERM; 3104 } 3105 3106 ret = bdrv_check_request(offset, bytes, NULL); 3107 if (ret < 0) { 3108 return ret; 3109 } 3110 3111 /* Do nothing if disabled. */ 3112 if (!(bs->open_flags & BDRV_O_UNMAP)) { 3113 return 0; 3114 } 3115 3116 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 3117 return 0; 3118 } 3119 3120 /* Invalidate the cached block-status data range if this discard overlaps */ 3121 bdrv_bsc_invalidate_range(bs, offset, bytes); 3122 3123 /* Discard is advisory, but some devices track and coalesce 3124 * unaligned requests, so we must pass everything down rather than 3125 * round here. Still, most devices will just silently ignore 3126 * unaligned requests (by returning -ENOTSUP), so we must fragment 3127 * the request accordingly. */ 3128 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3129 assert(align % bs->bl.request_alignment == 0); 3130 head = offset % align; 3131 tail = (offset + bytes) % align; 3132 3133 bdrv_inc_in_flight(bs); 3134 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3135 3136 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3137 if (ret < 0) { 3138 goto out; 3139 } 3140 3141 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), 3142 align); 3143 assert(max_pdiscard >= bs->bl.request_alignment); 3144 3145 while (bytes > 0) { 3146 int64_t num = bytes; 3147 3148 if (head) { 3149 /* Make small requests to get to alignment boundaries. */ 3150 num = MIN(bytes, align - head); 3151 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3152 num %= bs->bl.request_alignment; 3153 } 3154 head = (head + num) % align; 3155 assert(num < max_pdiscard); 3156 } else if (tail) { 3157 if (num > align) { 3158 /* Shorten the request to the last aligned cluster. 
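 * If instead the request is too short to reach the next alignment
 * boundary, the else branch below trims it down to a request_alignment
 * multiple, leaving the sub-request_alignment remainder for the next
 * iteration.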
*/ 3159 num -= tail; 3160 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3161 tail > bs->bl.request_alignment) { 3162 tail %= bs->bl.request_alignment; 3163 num -= tail; 3164 } 3165 } 3166 /* limit request size */ 3167 if (num > max_pdiscard) { 3168 num = max_pdiscard; 3169 } 3170 3171 if (!bs->drv) { 3172 ret = -ENOMEDIUM; 3173 goto out; 3174 } 3175 if (bs->drv->bdrv_co_pdiscard) { 3176 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3177 } else { 3178 BlockAIOCB *acb; 3179 CoroutineIOCompletion co = { 3180 .coroutine = qemu_coroutine_self(), 3181 }; 3182 3183 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3184 bdrv_co_io_em_complete, &co); 3185 if (acb == NULL) { 3186 ret = -EIO; 3187 goto out; 3188 } else { 3189 qemu_coroutine_yield(); 3190 ret = co.ret; 3191 } 3192 } 3193 if (ret && ret != -ENOTSUP) { 3194 goto out; 3195 } 3196 3197 offset += num; 3198 bytes -= num; 3199 } 3200 ret = 0; 3201 out: 3202 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3203 tracked_request_end(&req); 3204 bdrv_dec_in_flight(bs); 3205 return ret; 3206 } 3207 3208 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3209 { 3210 BlockDriver *drv = bs->drv; 3211 CoroutineIOCompletion co = { 3212 .coroutine = qemu_coroutine_self(), 3213 }; 3214 BlockAIOCB *acb; 3215 IO_CODE(); 3216 assert_bdrv_graph_readable(); 3217 3218 bdrv_inc_in_flight(bs); 3219 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3220 co.ret = -ENOTSUP; 3221 goto out; 3222 } 3223 3224 if (drv->bdrv_co_ioctl) { 3225 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3226 } else { 3227 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3228 if (!acb) { 3229 co.ret = -ENOTSUP; 3230 goto out; 3231 } 3232 qemu_coroutine_yield(); 3233 } 3234 out: 3235 bdrv_dec_in_flight(bs); 3236 return co.ret; 3237 } 3238 3239 int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset, 3240 unsigned int *nr_zones, 3241 BlockZoneDescriptor *zones) 3242 { 3243 BlockDriver *drv = bs->drv; 3244 CoroutineIOCompletion co = { 3245 .coroutine = qemu_coroutine_self(), 3246 }; 3247 IO_CODE(); 3248 3249 bdrv_inc_in_flight(bs); 3250 if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { 3251 co.ret = -ENOTSUP; 3252 goto out; 3253 } 3254 co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones); 3255 out: 3256 bdrv_dec_in_flight(bs); 3257 return co.ret; 3258 } 3259 3260 int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, 3261 int64_t offset, int64_t len) 3262 { 3263 BlockDriver *drv = bs->drv; 3264 CoroutineIOCompletion co = { 3265 .coroutine = qemu_coroutine_self(), 3266 }; 3267 IO_CODE(); 3268 3269 bdrv_inc_in_flight(bs); 3270 if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { 3271 co.ret = -ENOTSUP; 3272 goto out; 3273 } 3274 co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len); 3275 out: 3276 bdrv_dec_in_flight(bs); 3277 return co.ret; 3278 } 3279 3280 int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset, 3281 QEMUIOVector *qiov, 3282 BdrvRequestFlags flags) 3283 { 3284 int ret; 3285 BlockDriver *drv = bs->drv; 3286 CoroutineIOCompletion co = { 3287 .coroutine = qemu_coroutine_self(), 3288 }; 3289 IO_CODE(); 3290 3291 ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL); 3292 if (ret < 0) { 3293 return ret; 3294 } 3295 3296 bdrv_inc_in_flight(bs); 3297 if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { 3298 co.ret = -ENOTSUP; 3299 goto out; 3300 } 3301 co.ret = 
drv->bdrv_co_zone_append(bs, offset, qiov, flags); 3302 out: 3303 bdrv_dec_in_flight(bs); 3304 return co.ret; 3305 } 3306 3307 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3308 { 3309 IO_CODE(); 3310 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3311 } 3312 3313 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3314 { 3315 IO_CODE(); 3316 return memset(qemu_blockalign(bs, size), 0, size); 3317 } 3318 3319 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3320 { 3321 size_t align = bdrv_opt_mem_align(bs); 3322 IO_CODE(); 3323 3324 /* Ensure that NULL is never returned on success */ 3325 assert(align > 0); 3326 if (size == 0) { 3327 size = align; 3328 } 3329 3330 return qemu_try_memalign(align, size); 3331 } 3332 3333 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3334 { 3335 void *mem = qemu_try_blockalign(bs, size); 3336 IO_CODE(); 3337 3338 if (mem) { 3339 memset(mem, 0, size); 3340 } 3341 3342 return mem; 3343 } 3344 3345 /* Helper that undoes bdrv_register_buf() when it fails partway through */ 3346 static void GRAPH_RDLOCK 3347 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, 3348 BdrvChild *final_child) 3349 { 3350 BdrvChild *child; 3351 3352 GLOBAL_STATE_CODE(); 3353 assert_bdrv_graph_readable(); 3354 3355 QLIST_FOREACH(child, &bs->children, next) { 3356 if (child == final_child) { 3357 break; 3358 } 3359 3360 bdrv_unregister_buf(child->bs, host, size); 3361 } 3362 3363 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3364 bs->drv->bdrv_unregister_buf(bs, host, size); 3365 } 3366 } 3367 3368 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, 3369 Error **errp) 3370 { 3371 BdrvChild *child; 3372 3373 GLOBAL_STATE_CODE(); 3374 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3375 3376 if (bs->drv && bs->drv->bdrv_register_buf) { 3377 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { 3378 return false; 3379 } 3380 } 3381 QLIST_FOREACH(child, &bs->children, next) { 3382 if (!bdrv_register_buf(child->bs, host, size, errp)) { 3383 bdrv_register_buf_rollback(bs, host, size, child); 3384 return false; 3385 } 3386 } 3387 return true; 3388 } 3389 3390 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) 3391 { 3392 BdrvChild *child; 3393 3394 GLOBAL_STATE_CODE(); 3395 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3396 3397 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3398 bs->drv->bdrv_unregister_buf(bs, host, size); 3399 } 3400 QLIST_FOREACH(child, &bs->children, next) { 3401 bdrv_unregister_buf(child->bs, host, size); 3402 } 3403 } 3404 3405 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal( 3406 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3407 int64_t dst_offset, int64_t bytes, 3408 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3409 bool recurse_src) 3410 { 3411 BdrvTrackedRequest req; 3412 int ret; 3413 assert_bdrv_graph_readable(); 3414 3415 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3416 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3417 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3418 assert(!(read_flags & BDRV_REQ_NO_WAIT)); 3419 assert(!(write_flags & BDRV_REQ_NO_WAIT)); 3420 3421 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { 3422 return -ENOMEDIUM; 3423 } 3424 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3425 if (ret) { 3426 return ret; 3427 } 3428 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3429 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3430 } 3431 3432 if (!src || !src->bs || 
!bdrv_co_is_inserted(src->bs)) { 3433 return -ENOMEDIUM; 3434 } 3435 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3436 if (ret) { 3437 return ret; 3438 } 3439 3440 if (!src->bs->drv->bdrv_co_copy_range_from 3441 || !dst->bs->drv->bdrv_co_copy_range_to 3442 || src->bs->encrypted || dst->bs->encrypted) { 3443 return -ENOTSUP; 3444 } 3445 3446 if (recurse_src) { 3447 bdrv_inc_in_flight(src->bs); 3448 tracked_request_begin(&req, src->bs, src_offset, bytes, 3449 BDRV_TRACKED_READ); 3450 3451 /* BDRV_REQ_SERIALISING is only for write operation */ 3452 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3453 bdrv_wait_serialising_requests(&req); 3454 3455 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3456 src, src_offset, 3457 dst, dst_offset, 3458 bytes, 3459 read_flags, write_flags); 3460 3461 tracked_request_end(&req); 3462 bdrv_dec_in_flight(src->bs); 3463 } else { 3464 bdrv_inc_in_flight(dst->bs); 3465 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3466 BDRV_TRACKED_WRITE); 3467 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3468 write_flags); 3469 if (!ret) { 3470 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3471 src, src_offset, 3472 dst, dst_offset, 3473 bytes, 3474 read_flags, write_flags); 3475 } 3476 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3477 tracked_request_end(&req); 3478 bdrv_dec_in_flight(dst->bs); 3479 } 3480 3481 return ret; 3482 } 3483 3484 /* Copy range from @src to @dst. 3485 * 3486 * See the comment of bdrv_co_copy_range for the parameter and return value 3487 * semantics. */ 3488 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3489 BdrvChild *dst, int64_t dst_offset, 3490 int64_t bytes, 3491 BdrvRequestFlags read_flags, 3492 BdrvRequestFlags write_flags) 3493 { 3494 IO_CODE(); 3495 assert_bdrv_graph_readable(); 3496 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3497 read_flags, write_flags); 3498 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3499 bytes, read_flags, write_flags, true); 3500 } 3501 3502 /* Copy range from @src to @dst. 3503 * 3504 * See the comment of bdrv_co_copy_range for the parameter and return value 3505 * semantics. */ 3506 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3507 BdrvChild *dst, int64_t dst_offset, 3508 int64_t bytes, 3509 BdrvRequestFlags read_flags, 3510 BdrvRequestFlags write_flags) 3511 { 3512 IO_CODE(); 3513 assert_bdrv_graph_readable(); 3514 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3515 read_flags, write_flags); 3516 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3517 bytes, read_flags, write_flags, false); 3518 } 3519 3520 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3521 BdrvChild *dst, int64_t dst_offset, 3522 int64_t bytes, BdrvRequestFlags read_flags, 3523 BdrvRequestFlags write_flags) 3524 { 3525 IO_CODE(); 3526 assert_bdrv_graph_readable(); 3527 3528 return bdrv_co_copy_range_from(src, src_offset, 3529 dst, dst_offset, 3530 bytes, read_flags, write_flags); 3531 } 3532 3533 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3534 { 3535 BdrvChild *c; 3536 QLIST_FOREACH(c, &bs->parents, next_parent) { 3537 if (c->klass->resize) { 3538 c->klass->resize(c); 3539 } 3540 } 3541 } 3542 3543 /** 3544 * Truncate file to 'offset' bytes (needed only for file protocols) 3545 * 3546 * If 'exact' is true, the file must be resized to exactly the given 3547 * 'offset'. 
Otherwise, it is sufficient for the node to be at least 3548 * 'offset' bytes in length. 3549 */ 3550 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3551 PreallocMode prealloc, BdrvRequestFlags flags, 3552 Error **errp) 3553 { 3554 BlockDriverState *bs = child->bs; 3555 BdrvChild *filtered, *backing; 3556 BlockDriver *drv = bs->drv; 3557 BdrvTrackedRequest req; 3558 int64_t old_size, new_bytes; 3559 int ret; 3560 IO_CODE(); 3561 assert_bdrv_graph_readable(); 3562 3563 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3564 if (!drv) { 3565 error_setg(errp, "No medium inserted"); 3566 return -ENOMEDIUM; 3567 } 3568 if (offset < 0) { 3569 error_setg(errp, "Image size cannot be negative"); 3570 return -EINVAL; 3571 } 3572 3573 ret = bdrv_check_request(offset, 0, errp); 3574 if (ret < 0) { 3575 return ret; 3576 } 3577 3578 old_size = bdrv_co_getlength(bs); 3579 if (old_size < 0) { 3580 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3581 return old_size; 3582 } 3583 3584 if (bdrv_is_read_only(bs)) { 3585 error_setg(errp, "Image is read-only"); 3586 return -EACCES; 3587 } 3588 3589 if (offset > old_size) { 3590 new_bytes = offset - old_size; 3591 } else { 3592 new_bytes = 0; 3593 } 3594 3595 bdrv_inc_in_flight(bs); 3596 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3597 BDRV_TRACKED_TRUNCATE); 3598 3599 /* If we are growing the image and potentially using preallocation for the 3600 * new area, we need to make sure that no write requests are made to it 3601 * concurrently or they might be overwritten by preallocation. */ 3602 if (new_bytes) { 3603 bdrv_make_request_serialising(&req, 1); 3604 } 3605 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3606 0); 3607 if (ret < 0) { 3608 error_setg_errno(errp, -ret, 3609 "Failed to prepare request for truncation"); 3610 goto out; 3611 } 3612 3613 filtered = bdrv_filter_child(bs); 3614 backing = bdrv_cow_child(bs); 3615 3616 /* 3617 * If the image has a backing file that is large enough that it would 3618 * provide data for the new area, we cannot leave it unallocated because 3619 * then the backing file content would become visible. Instead, zero-fill 3620 * the new area. 3621 * 3622 * Note that if the image has a backing file, but was opened without the 3623 * backing file, taking care of keeping things consistent with that backing 3624 * file is the user's responsibility. 
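 *
 * Illustrative numbers: growing a 1 GiB overlay to 2 GiB on top of a
 * 1.5 GiB backing file zero-fills the new area; otherwise the backing
 * data in [1 GiB, 1.5 GiB) would suddenly become visible to the guest.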
3625 */ 3626 if (new_bytes && backing) { 3627 int64_t backing_len; 3628 3629 backing_len = bdrv_co_getlength(backing->bs); 3630 if (backing_len < 0) { 3631 ret = backing_len; 3632 error_setg_errno(errp, -ret, "Could not get backing file size"); 3633 goto out; 3634 } 3635 3636 if (backing_len > old_size) { 3637 flags |= BDRV_REQ_ZERO_WRITE; 3638 } 3639 } 3640 3641 if (drv->bdrv_co_truncate) { 3642 if (flags & ~bs->supported_truncate_flags) { 3643 error_setg(errp, "Block driver does not support requested flags"); 3644 ret = -ENOTSUP; 3645 goto out; 3646 } 3647 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3648 } else if (filtered) { 3649 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3650 } else { 3651 error_setg(errp, "Image format driver does not support resize"); 3652 ret = -ENOTSUP; 3653 goto out; 3654 } 3655 if (ret < 0) { 3656 goto out; 3657 } 3658 3659 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3660 if (ret < 0) { 3661 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3662 } else { 3663 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3664 } 3665 /* 3666 * It's possible that truncation succeeded but bdrv_refresh_total_sectors 3667 * failed, but the latter doesn't affect how we should finish the request. 3668 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. 3669 */ 3670 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3671 3672 out: 3673 tracked_request_end(&req); 3674 bdrv_dec_in_flight(bs); 3675 3676 return ret; 3677 } 3678 3679 void bdrv_cancel_in_flight(BlockDriverState *bs) 3680 { 3681 GLOBAL_STATE_CODE(); 3682 if (!bs || !bs->drv) { 3683 return; 3684 } 3685 3686 if (bs->drv->bdrv_cancel_in_flight) { 3687 bs->drv->bdrv_cancel_in_flight(bs); 3688 } 3689 } 3690 3691 int coroutine_fn 3692 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, 3693 QEMUIOVector *qiov, size_t qiov_offset) 3694 { 3695 BlockDriverState *bs = child->bs; 3696 BlockDriver *drv = bs->drv; 3697 int ret; 3698 IO_CODE(); 3699 assert_bdrv_graph_readable(); 3700 3701 if (!drv) { 3702 return -ENOMEDIUM; 3703 } 3704 3705 if (!drv->bdrv_co_preadv_snapshot) { 3706 return -ENOTSUP; 3707 } 3708 3709 bdrv_inc_in_flight(bs); 3710 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); 3711 bdrv_dec_in_flight(bs); 3712 3713 return ret; 3714 } 3715 3716 int coroutine_fn 3717 bdrv_co_snapshot_block_status(BlockDriverState *bs, 3718 bool want_zero, int64_t offset, int64_t bytes, 3719 int64_t *pnum, int64_t *map, 3720 BlockDriverState **file) 3721 { 3722 BlockDriver *drv = bs->drv; 3723 int ret; 3724 IO_CODE(); 3725 assert_bdrv_graph_readable(); 3726 3727 if (!drv) { 3728 return -ENOMEDIUM; 3729 } 3730 3731 if (!drv->bdrv_co_snapshot_block_status) { 3732 return -ENOTSUP; 3733 } 3734 3735 bdrv_inc_in_flight(bs); 3736 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, 3737 pnum, map, file); 3738 bdrv_dec_in_flight(bs); 3739 3740 return ret; 3741 } 3742 3743 int coroutine_fn 3744 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) 3745 { 3746 BlockDriver *drv = bs->drv; 3747 int ret; 3748 IO_CODE(); 3749 assert_bdrv_graph_readable(); 3750 3751 if (!drv) { 3752 return -ENOMEDIUM; 3753 } 3754 3755 if (!drv->bdrv_co_pdiscard_snapshot) { 3756 return -ENOTSUP; 3757 } 3758 3759 bdrv_inc_in_flight(bs); 3760 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); 3761 bdrv_dec_in_flight(bs); 3762 3763 return ret; 
3764 } 3765
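
/*
 * Usage sketch (illustrative only, hence #if 0; the example_* name is
 * ours and not part of the block layer API): how a caller in coroutine
 * context, holding the graph rdlock, might combine the primitives above
 * to zero a region durably.
 */
#if 0
static int coroutine_fn GRAPH_RDLOCK
example_zero_region_durably(BdrvChild *child, int64_t offset, int64_t bytes)
{
    /* Write zeroes, letting the driver unmap the range if it can */
    int ret = bdrv_co_pwrite_zeroes(child, offset, bytes,
                                    BDRV_REQ_MAY_UNMAP);
    if (ret < 0) {
        return ret;
    }
    /* Make the result durable even for writeback-cached nodes */
    return bdrv_co_flush(child->bs);
}
#endif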