/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_group_config(bs, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;
    bdrv_start_throttled_reqs(bs);
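    /* With limits already cleared, restarting the throttled queues above
     * flushes any requests that were still waiting, so none remain queued on
     * this BDS when it is unregistered from its throttle group below. */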
    throttle_group_unregister_bs(bs);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    assert(!bs->io_limits_enabled);
    throttle_group_register_bs(bs, group);
    bs->io_limits_enabled = true;
}

void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!bs->throttle_state) {
        return;
    }

    /* this bs is already part of the same group as the one we want */
    if (!g_strcmp0(throttle_group_get_name(bs), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}

void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing_hd->bl.min_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
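 * (bdrv_co_do_preadv() checks the counter and turns reads into copy-on-read
 * requests while it is non-zero.)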
203 */ 204 void bdrv_enable_copy_on_read(BlockDriverState *bs) 205 { 206 bs->copy_on_read++; 207 } 208 209 void bdrv_disable_copy_on_read(BlockDriverState *bs) 210 { 211 assert(bs->copy_on_read > 0); 212 bs->copy_on_read--; 213 } 214 215 /* Check if any requests are in-flight (including throttled requests) */ 216 static bool bdrv_requests_pending(BlockDriverState *bs) 217 { 218 if (!QLIST_EMPTY(&bs->tracked_requests)) { 219 return true; 220 } 221 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { 222 return true; 223 } 224 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { 225 return true; 226 } 227 if (bs->file && bdrv_requests_pending(bs->file)) { 228 return true; 229 } 230 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { 231 return true; 232 } 233 return false; 234 } 235 236 static bool bdrv_drain_one(BlockDriverState *bs) 237 { 238 bool bs_busy; 239 240 bdrv_flush_io_queue(bs); 241 bdrv_start_throttled_reqs(bs); 242 bs_busy = bdrv_requests_pending(bs); 243 bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy); 244 return bs_busy; 245 } 246 247 /* 248 * Wait for pending requests to complete on a single BlockDriverState subtree 249 * 250 * See the warning in bdrv_drain_all(). This function can only be called if 251 * you are sure nothing can generate I/O because you have op blockers 252 * installed. 253 * 254 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState 255 * AioContext. 256 */ 257 void bdrv_drain(BlockDriverState *bs) 258 { 259 while (bdrv_drain_one(bs)) { 260 /* Keep iterating */ 261 } 262 } 263 264 /* 265 * Wait for pending requests to complete across all BlockDriverStates 266 * 267 * This function does not flush data to disk, use bdrv_flush_all() for that 268 * after calling this function. 269 * 270 * Note that completion of an asynchronous I/O operation can trigger any 271 * number of other I/O operations on other devices---for example a coroutine 272 * can be arbitrarily complex and a constant flow of I/O can come until the 273 * coroutine is complete. Because of this, it is not possible to have a 274 * function to drain a single device's I/O queue. 275 */ 276 void bdrv_drain_all(void) 277 { 278 /* Always run first iteration so any pending completion BHs run */ 279 bool busy = true; 280 BlockDriverState *bs = NULL; 281 282 while ((bs = bdrv_next(bs))) { 283 AioContext *aio_context = bdrv_get_aio_context(bs); 284 285 aio_context_acquire(aio_context); 286 if (bs->job) { 287 block_job_pause(bs->job); 288 } 289 aio_context_release(aio_context); 290 } 291 292 while (busy) { 293 busy = false; 294 bs = NULL; 295 296 while ((bs = bdrv_next(bs))) { 297 AioContext *aio_context = bdrv_get_aio_context(bs); 298 299 aio_context_acquire(aio_context); 300 busy |= bdrv_drain_one(bs); 301 aio_context_release(aio_context); 302 } 303 } 304 305 bs = NULL; 306 while ((bs = bdrv_next(bs))) { 307 AioContext *aio_context = bdrv_get_aio_context(bs); 308 309 aio_context_acquire(aio_context); 310 if (bs->job) { 311 block_job_resume(bs->job); 312 } 313 aio_context_release(aio_context); 314 } 315 } 316 317 /** 318 * Remove an active request from the tracked requests list 319 * 320 * This function should be called when a tracked request is completing. 
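 * It removes the request from bs->tracked_requests and wakes up any requests
 * that are blocked on it in wait_serialising_requests().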
321 */ 322 static void tracked_request_end(BdrvTrackedRequest *req) 323 { 324 if (req->serialising) { 325 req->bs->serialising_in_flight--; 326 } 327 328 QLIST_REMOVE(req, list); 329 qemu_co_queue_restart_all(&req->wait_queue); 330 } 331 332 /** 333 * Add an active request to the tracked requests list 334 */ 335 static void tracked_request_begin(BdrvTrackedRequest *req, 336 BlockDriverState *bs, 337 int64_t offset, 338 unsigned int bytes, bool is_write) 339 { 340 *req = (BdrvTrackedRequest){ 341 .bs = bs, 342 .offset = offset, 343 .bytes = bytes, 344 .is_write = is_write, 345 .co = qemu_coroutine_self(), 346 .serialising = false, 347 .overlap_offset = offset, 348 .overlap_bytes = bytes, 349 }; 350 351 qemu_co_queue_init(&req->wait_queue); 352 353 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 354 } 355 356 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) 357 { 358 int64_t overlap_offset = req->offset & ~(align - 1); 359 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) 360 - overlap_offset; 361 362 if (!req->serialising) { 363 req->bs->serialising_in_flight++; 364 req->serialising = true; 365 } 366 367 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 368 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 369 } 370 371 /** 372 * Round a region to cluster boundaries 373 */ 374 void bdrv_round_to_clusters(BlockDriverState *bs, 375 int64_t sector_num, int nb_sectors, 376 int64_t *cluster_sector_num, 377 int *cluster_nb_sectors) 378 { 379 BlockDriverInfo bdi; 380 381 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 382 *cluster_sector_num = sector_num; 383 *cluster_nb_sectors = nb_sectors; 384 } else { 385 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; 386 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); 387 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + 388 nb_sectors, c); 389 } 390 } 391 392 static int bdrv_get_cluster_size(BlockDriverState *bs) 393 { 394 BlockDriverInfo bdi; 395 int ret; 396 397 ret = bdrv_get_info(bs, &bdi); 398 if (ret < 0 || bdi.cluster_size == 0) { 399 return bs->request_alignment; 400 } else { 401 return bdi.cluster_size; 402 } 403 } 404 405 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 406 int64_t offset, unsigned int bytes) 407 { 408 /* aaaa bbbb */ 409 if (offset >= req->overlap_offset + req->overlap_bytes) { 410 return false; 411 } 412 /* bbbb aaaa */ 413 if (req->overlap_offset >= offset + bytes) { 414 return false; 415 } 416 return true; 417 } 418 419 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) 420 { 421 BlockDriverState *bs = self->bs; 422 BdrvTrackedRequest *req; 423 bool retry; 424 bool waited = false; 425 426 if (!bs->serialising_in_flight) { 427 return false; 428 } 429 430 do { 431 retry = false; 432 QLIST_FOREACH(req, &bs->tracked_requests, list) { 433 if (req == self || (!req->serialising && !self->serialising)) { 434 continue; 435 } 436 if (tracked_request_overlaps(req, self->overlap_offset, 437 self->overlap_bytes)) 438 { 439 /* Hitting this means there was a reentrant request, for 440 * example, a block driver issuing nested requests. This must 441 * never happen since it means deadlock. 442 */ 443 assert(qemu_coroutine_self() != req->co); 444 445 /* If the request is already (indirectly) waiting for us, or 446 * will wait for us as soon as it wakes up, then just go on 447 * (instead of producing a deadlock in the former case). 
*/ 448 if (!req->waiting_for) { 449 self->waiting_for = req; 450 qemu_co_queue_wait(&req->wait_queue); 451 self->waiting_for = NULL; 452 retry = true; 453 waited = true; 454 break; 455 } 456 } 457 } 458 } while (retry); 459 460 return waited; 461 } 462 463 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 464 size_t size) 465 { 466 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) { 467 return -EIO; 468 } 469 470 if (!bdrv_is_inserted(bs)) { 471 return -ENOMEDIUM; 472 } 473 474 if (offset < 0) { 475 return -EIO; 476 } 477 478 return 0; 479 } 480 481 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, 482 int nb_sectors) 483 { 484 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 485 return -EIO; 486 } 487 488 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, 489 nb_sectors * BDRV_SECTOR_SIZE); 490 } 491 492 typedef struct RwCo { 493 BlockDriverState *bs; 494 int64_t offset; 495 QEMUIOVector *qiov; 496 bool is_write; 497 int ret; 498 BdrvRequestFlags flags; 499 } RwCo; 500 501 static void coroutine_fn bdrv_rw_co_entry(void *opaque) 502 { 503 RwCo *rwco = opaque; 504 505 if (!rwco->is_write) { 506 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, 507 rwco->qiov->size, rwco->qiov, 508 rwco->flags); 509 } else { 510 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, 511 rwco->qiov->size, rwco->qiov, 512 rwco->flags); 513 } 514 } 515 516 /* 517 * Process a vectored synchronous request using coroutines 518 */ 519 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, 520 QEMUIOVector *qiov, bool is_write, 521 BdrvRequestFlags flags) 522 { 523 Coroutine *co; 524 RwCo rwco = { 525 .bs = bs, 526 .offset = offset, 527 .qiov = qiov, 528 .is_write = is_write, 529 .ret = NOT_DONE, 530 .flags = flags, 531 }; 532 533 /** 534 * In sync call context, when the vcpu is blocked, this throttling timer 535 * will not fire; so the I/O throttling function has to be disabled here 536 * if it has been enabled. 537 */ 538 if (bs->io_limits_enabled) { 539 fprintf(stderr, "Disabling I/O throttling on '%s' due " 540 "to synchronous I/O.\n", bdrv_get_device_name(bs)); 541 bdrv_io_limits_disable(bs); 542 } 543 544 if (qemu_in_coroutine()) { 545 /* Fast-path if already in coroutine context */ 546 bdrv_rw_co_entry(&rwco); 547 } else { 548 AioContext *aio_context = bdrv_get_aio_context(bs); 549 550 co = qemu_coroutine_create(bdrv_rw_co_entry); 551 qemu_coroutine_enter(co, &rwco); 552 while (rwco.ret == NOT_DONE) { 553 aio_poll(aio_context, true); 554 } 555 } 556 return rwco.ret; 557 } 558 559 /* 560 * Process a synchronous request using coroutines 561 */ 562 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, 563 int nb_sectors, bool is_write, BdrvRequestFlags flags) 564 { 565 QEMUIOVector qiov; 566 struct iovec iov = { 567 .iov_base = (void *)buf, 568 .iov_len = nb_sectors * BDRV_SECTOR_SIZE, 569 }; 570 571 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 572 return -EINVAL; 573 } 574 575 qemu_iovec_init_external(&qiov, &iov, 1); 576 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, 577 &qiov, is_write, flags); 578 } 579 580 /* return < 0 if error. 
See bdrv_write() for the return codes */ 581 int bdrv_read(BlockDriverState *bs, int64_t sector_num, 582 uint8_t *buf, int nb_sectors) 583 { 584 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); 585 } 586 587 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */ 588 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, 589 uint8_t *buf, int nb_sectors) 590 { 591 bool enabled; 592 int ret; 593 594 enabled = bs->io_limits_enabled; 595 bs->io_limits_enabled = false; 596 ret = bdrv_read(bs, sector_num, buf, nb_sectors); 597 bs->io_limits_enabled = enabled; 598 return ret; 599 } 600 601 /* Return < 0 if error. Important errors are: 602 -EIO generic I/O error (may happen for all errors) 603 -ENOMEDIUM No media inserted. 604 -EINVAL Invalid sector number or nb_sectors 605 -EACCES Trying to write a read-only device 606 */ 607 int bdrv_write(BlockDriverState *bs, int64_t sector_num, 608 const uint8_t *buf, int nb_sectors) 609 { 610 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); 611 } 612 613 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, 614 int nb_sectors, BdrvRequestFlags flags) 615 { 616 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, 617 BDRV_REQ_ZERO_WRITE | flags); 618 } 619 620 /* 621 * Completely zero out a block device with the help of bdrv_write_zeroes. 622 * The operation is sped up by checking the block status and only writing 623 * zeroes to the device if they currently do not return zeroes. Optional 624 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). 625 * 626 * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 627 */ 628 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) 629 { 630 int64_t target_sectors, ret, nb_sectors, sector_num = 0; 631 int n; 632 633 target_sectors = bdrv_nb_sectors(bs); 634 if (target_sectors < 0) { 635 return target_sectors; 636 } 637 638 for (;;) { 639 nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS); 640 if (nb_sectors <= 0) { 641 return 0; 642 } 643 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); 644 if (ret < 0) { 645 error_report("error getting block status at sector %" PRId64 ": %s", 646 sector_num, strerror(-ret)); 647 return ret; 648 } 649 if (ret & BDRV_BLOCK_ZERO) { 650 sector_num += n; 651 continue; 652 } 653 ret = bdrv_write_zeroes(bs, sector_num, n, flags); 654 if (ret < 0) { 655 error_report("error writing zeroes at sector %" PRId64 ": %s", 656 sector_num, strerror(-ret)); 657 return ret; 658 } 659 sector_num += n; 660 } 661 } 662 663 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) 664 { 665 QEMUIOVector qiov; 666 struct iovec iov = { 667 .iov_base = (void *)buf, 668 .iov_len = bytes, 669 }; 670 int ret; 671 672 if (bytes < 0) { 673 return -EINVAL; 674 } 675 676 qemu_iovec_init_external(&qiov, &iov, 1); 677 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); 678 if (ret < 0) { 679 return ret; 680 } 681 682 return bytes; 683 } 684 685 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) 686 { 687 int ret; 688 689 ret = bdrv_prwv_co(bs, offset, qiov, true, 0); 690 if (ret < 0) { 691 return ret; 692 } 693 694 return qiov->size; 695 } 696 697 int bdrv_pwrite(BlockDriverState *bs, int64_t offset, 698 const void *buf, int bytes) 699 { 700 QEMUIOVector qiov; 701 struct iovec iov = { 702 .iov_base = (void *) buf, 703 .iov_len = bytes, 704 }; 705 706 if (bytes < 0) { 707 return -EINVAL; 708 } 709 710 
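    /* Wrap the caller's linear buffer in a single-element QEMUIOVector so the
     * vectored bdrv_pwritev() path can be reused. */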
qemu_iovec_init_external(&qiov, &iov, 1); 711 return bdrv_pwritev(bs, offset, &qiov); 712 } 713 714 /* 715 * Writes to the file and ensures that no writes are reordered across this 716 * request (acts as a barrier) 717 * 718 * Returns 0 on success, -errno in error cases. 719 */ 720 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, 721 const void *buf, int count) 722 { 723 int ret; 724 725 ret = bdrv_pwrite(bs, offset, buf, count); 726 if (ret < 0) { 727 return ret; 728 } 729 730 /* No flush needed for cache modes that already do it */ 731 if (bs->enable_write_cache) { 732 bdrv_flush(bs); 733 } 734 735 return 0; 736 } 737 738 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, 739 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 740 { 741 /* Perform I/O through a temporary buffer so that users who scribble over 742 * their read buffer while the operation is in progress do not end up 743 * modifying the image file. This is critical for zero-copy guest I/O 744 * where anything might happen inside guest memory. 745 */ 746 void *bounce_buffer; 747 748 BlockDriver *drv = bs->drv; 749 struct iovec iov; 750 QEMUIOVector bounce_qiov; 751 int64_t cluster_sector_num; 752 int cluster_nb_sectors; 753 size_t skip_bytes; 754 int ret; 755 756 /* Cover entire cluster so no additional backing file I/O is required when 757 * allocating cluster in the image file. 758 */ 759 bdrv_round_to_clusters(bs, sector_num, nb_sectors, 760 &cluster_sector_num, &cluster_nb_sectors); 761 762 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, 763 cluster_sector_num, cluster_nb_sectors); 764 765 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; 766 iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); 767 if (bounce_buffer == NULL) { 768 ret = -ENOMEM; 769 goto err; 770 } 771 772 qemu_iovec_init_external(&bounce_qiov, &iov, 1); 773 774 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, 775 &bounce_qiov); 776 if (ret < 0) { 777 goto err; 778 } 779 780 if (drv->bdrv_co_write_zeroes && 781 buffer_is_zero(bounce_buffer, iov.iov_len)) { 782 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, 783 cluster_nb_sectors, 0); 784 } else { 785 /* This does not change the data on the disk, it is not necessary 786 * to flush even in cache=writethrough mode. 787 */ 788 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, 789 &bounce_qiov); 790 } 791 792 if (ret < 0) { 793 /* It might be okay to ignore write errors for guest requests. If this 794 * is a deliberate copy-on-read then we don't want to ignore the error. 795 * Simply report it in all cases. 796 */ 797 goto err; 798 } 799 800 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; 801 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, 802 nb_sectors * BDRV_SECTOR_SIZE); 803 804 err: 805 qemu_vfree(bounce_buffer); 806 return ret; 807 } 808 809 /* 810 * Forwards an already correctly aligned request to the BlockDriver. This 811 * handles copy on read and zeroing after EOF; any other features must be 812 * implemented by the caller. 
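 * (Alignment padding, throttling and request tracking are expected to have
 * been done by the caller, e.g. bdrv_co_do_preadv().)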
813 */ 814 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, 815 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 816 int64_t align, QEMUIOVector *qiov, int flags) 817 { 818 BlockDriver *drv = bs->drv; 819 int ret; 820 821 int64_t sector_num = offset >> BDRV_SECTOR_BITS; 822 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 823 824 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 825 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 826 assert(!qiov || bytes == qiov->size); 827 828 /* Handle Copy on Read and associated serialisation */ 829 if (flags & BDRV_REQ_COPY_ON_READ) { 830 /* If we touch the same cluster it counts as an overlap. This 831 * guarantees that allocating writes will be serialized and not race 832 * with each other for the same cluster. For example, in copy-on-read 833 * it ensures that the CoR read and write operations are atomic and 834 * guest writes cannot interleave between them. */ 835 mark_request_serialising(req, bdrv_get_cluster_size(bs)); 836 } 837 838 wait_serialising_requests(req); 839 840 if (flags & BDRV_REQ_COPY_ON_READ) { 841 int pnum; 842 843 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); 844 if (ret < 0) { 845 goto out; 846 } 847 848 if (!ret || pnum != nb_sectors) { 849 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); 850 goto out; 851 } 852 } 853 854 /* Forward the request to the BlockDriver */ 855 if (!bs->zero_beyond_eof) { 856 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 857 } else { 858 /* Read zeros after EOF */ 859 int64_t total_sectors, max_nb_sectors; 860 861 total_sectors = bdrv_nb_sectors(bs); 862 if (total_sectors < 0) { 863 ret = total_sectors; 864 goto out; 865 } 866 867 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num), 868 align >> BDRV_SECTOR_BITS); 869 if (nb_sectors < max_nb_sectors) { 870 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 871 } else if (max_nb_sectors > 0) { 872 QEMUIOVector local_qiov; 873 874 qemu_iovec_init(&local_qiov, qiov->niov); 875 qemu_iovec_concat(&local_qiov, qiov, 0, 876 max_nb_sectors * BDRV_SECTOR_SIZE); 877 878 ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors, 879 &local_qiov); 880 881 qemu_iovec_destroy(&local_qiov); 882 } else { 883 ret = 0; 884 } 885 886 /* Reading beyond end of file is supposed to produce zeroes */ 887 if (ret == 0 && total_sectors < sector_num + nb_sectors) { 888 uint64_t offset = MAX(0, total_sectors - sector_num); 889 uint64_t bytes = (sector_num + nb_sectors - offset) * 890 BDRV_SECTOR_SIZE; 891 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); 892 } 893 } 894 895 out: 896 return ret; 897 } 898 899 /* 900 * Handle a read request in coroutine context 901 */ 902 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, 903 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 904 BdrvRequestFlags flags) 905 { 906 BlockDriver *drv = bs->drv; 907 BdrvTrackedRequest req; 908 909 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 910 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 911 uint8_t *head_buf = NULL; 912 uint8_t *tail_buf = NULL; 913 QEMUIOVector local_qiov; 914 bool use_local_qiov = false; 915 int ret; 916 917 if (!drv) { 918 return -ENOMEDIUM; 919 } 920 921 ret = bdrv_check_byte_request(bs, offset, bytes); 922 if (ret < 0) { 923 return ret; 924 } 925 926 if (bs->copy_on_read) { 927 flags |= BDRV_REQ_COPY_ON_READ; 928 } 929 930 /* throttling disk I/O */ 931 if (bs->io_limits_enabled) { 932 
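        /* This may suspend the coroutine until the throttle group admits the
         * request; 'false' accounts it as a read. */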
throttle_group_co_io_limits_intercept(bs, bytes, false); 933 } 934 935 /* Align read if necessary by padding qiov */ 936 if (offset & (align - 1)) { 937 head_buf = qemu_blockalign(bs, align); 938 qemu_iovec_init(&local_qiov, qiov->niov + 2); 939 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 940 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 941 use_local_qiov = true; 942 943 bytes += offset & (align - 1); 944 offset = offset & ~(align - 1); 945 } 946 947 if ((offset + bytes) & (align - 1)) { 948 if (!use_local_qiov) { 949 qemu_iovec_init(&local_qiov, qiov->niov + 1); 950 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 951 use_local_qiov = true; 952 } 953 tail_buf = qemu_blockalign(bs, align); 954 qemu_iovec_add(&local_qiov, tail_buf, 955 align - ((offset + bytes) & (align - 1))); 956 957 bytes = ROUND_UP(bytes, align); 958 } 959 960 tracked_request_begin(&req, bs, offset, bytes, false); 961 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, 962 use_local_qiov ? &local_qiov : qiov, 963 flags); 964 tracked_request_end(&req); 965 966 if (use_local_qiov) { 967 qemu_iovec_destroy(&local_qiov); 968 qemu_vfree(head_buf); 969 qemu_vfree(tail_buf); 970 } 971 972 return ret; 973 } 974 975 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, 976 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 977 BdrvRequestFlags flags) 978 { 979 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 980 return -EINVAL; 981 } 982 983 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS, 984 nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 985 } 986 987 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, 988 int nb_sectors, QEMUIOVector *qiov) 989 { 990 trace_bdrv_co_readv(bs, sector_num, nb_sectors); 991 992 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); 993 } 994 995 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, 996 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 997 { 998 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors); 999 1000 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 1001 BDRV_REQ_COPY_ON_READ); 1002 } 1003 1004 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768 1005 1006 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, 1007 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) 1008 { 1009 BlockDriver *drv = bs->drv; 1010 QEMUIOVector qiov; 1011 struct iovec iov = {0}; 1012 int ret = 0; 1013 1014 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes, 1015 BDRV_REQUEST_MAX_SECTORS); 1016 1017 while (nb_sectors > 0 && !ret) { 1018 int num = nb_sectors; 1019 1020 /* Align request. Block drivers can expect the "bulk" of the request 1021 * to be aligned. 1022 */ 1023 if (bs->bl.write_zeroes_alignment 1024 && num > bs->bl.write_zeroes_alignment) { 1025 if (sector_num % bs->bl.write_zeroes_alignment != 0) { 1026 /* Make a small request up to the first aligned sector. */ 1027 num = bs->bl.write_zeroes_alignment; 1028 num -= sector_num % bs->bl.write_zeroes_alignment; 1029 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) { 1030 /* Shorten the request to the last aligned sector. num cannot 1031 * underflow because num > bs->bl.write_zeroes_alignment. 
1032 */ 1033 num -= (sector_num + num) % bs->bl.write_zeroes_alignment; 1034 } 1035 } 1036 1037 /* limit request size */ 1038 if (num > max_write_zeroes) { 1039 num = max_write_zeroes; 1040 } 1041 1042 ret = -ENOTSUP; 1043 /* First try the efficient write zeroes operation */ 1044 if (drv->bdrv_co_write_zeroes) { 1045 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags); 1046 } 1047 1048 if (ret == -ENOTSUP) { 1049 /* Fall back to bounce buffer if write zeroes is unsupported */ 1050 int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, 1051 MAX_WRITE_ZEROES_BOUNCE_BUFFER); 1052 num = MIN(num, max_xfer_len); 1053 iov.iov_len = num * BDRV_SECTOR_SIZE; 1054 if (iov.iov_base == NULL) { 1055 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE); 1056 if (iov.iov_base == NULL) { 1057 ret = -ENOMEM; 1058 goto fail; 1059 } 1060 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE); 1061 } 1062 qemu_iovec_init_external(&qiov, &iov, 1); 1063 1064 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov); 1065 1066 /* Keep bounce buffer around if it is big enough for all 1067 * all future requests. 1068 */ 1069 if (num < max_xfer_len) { 1070 qemu_vfree(iov.iov_base); 1071 iov.iov_base = NULL; 1072 } 1073 } 1074 1075 sector_num += num; 1076 nb_sectors -= num; 1077 } 1078 1079 fail: 1080 qemu_vfree(iov.iov_base); 1081 return ret; 1082 } 1083 1084 /* 1085 * Forwards an already correctly aligned write request to the BlockDriver. 1086 */ 1087 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 1088 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1089 QEMUIOVector *qiov, int flags) 1090 { 1091 BlockDriver *drv = bs->drv; 1092 bool waited; 1093 int ret; 1094 1095 int64_t sector_num = offset >> BDRV_SECTOR_BITS; 1096 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 1097 1098 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1099 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1100 assert(!qiov || bytes == qiov->size); 1101 1102 waited = wait_serialising_requests(req); 1103 assert(!waited || !req->serialising); 1104 assert(req->overlap_offset <= offset); 1105 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 1106 1107 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 1108 1109 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1110 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && 1111 qemu_iovec_is_zero(qiov)) { 1112 flags |= BDRV_REQ_ZERO_WRITE; 1113 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 1114 flags |= BDRV_REQ_MAY_UNMAP; 1115 } 1116 } 1117 1118 if (ret < 0) { 1119 /* Do nothing, write notifier decided to fail this request */ 1120 } else if (flags & BDRV_REQ_ZERO_WRITE) { 1121 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); 1122 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); 1123 } else { 1124 BLKDBG_EVENT(bs, BLKDBG_PWRITEV); 1125 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 1126 } 1127 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); 1128 1129 if (ret == 0 && !bs->enable_write_cache) { 1130 ret = bdrv_co_flush(bs); 1131 } 1132 1133 bdrv_set_dirty(bs, sector_num, nb_sectors); 1134 1135 block_acct_highest_sector(&bs->stats, sector_num, nb_sectors); 1136 1137 if (ret >= 0) { 1138 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 1139 } 1140 1141 return ret; 1142 } 1143 1144 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 1145 int64_t offset, 1146 unsigned int bytes, 1147 BdrvRequestFlags flags, 1148 
BdrvTrackedRequest *req) 1149 { 1150 uint8_t *buf = NULL; 1151 QEMUIOVector local_qiov; 1152 struct iovec iov; 1153 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 1154 unsigned int head_padding_bytes, tail_padding_bytes; 1155 int ret = 0; 1156 1157 head_padding_bytes = offset & (align - 1); 1158 tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 1159 1160 1161 assert(flags & BDRV_REQ_ZERO_WRITE); 1162 if (head_padding_bytes || tail_padding_bytes) { 1163 buf = qemu_blockalign(bs, align); 1164 iov = (struct iovec) { 1165 .iov_base = buf, 1166 .iov_len = align, 1167 }; 1168 qemu_iovec_init_external(&local_qiov, &iov, 1); 1169 } 1170 if (head_padding_bytes) { 1171 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 1172 1173 /* RMW the unaligned part before head. */ 1174 mark_request_serialising(req, align); 1175 wait_serialising_requests(req); 1176 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); 1177 ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 1178 align, &local_qiov, 0); 1179 if (ret < 0) { 1180 goto fail; 1181 } 1182 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1183 1184 memset(buf + head_padding_bytes, 0, zero_bytes); 1185 ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 1186 &local_qiov, 1187 flags & ~BDRV_REQ_ZERO_WRITE); 1188 if (ret < 0) { 1189 goto fail; 1190 } 1191 offset += zero_bytes; 1192 bytes -= zero_bytes; 1193 } 1194 1195 assert(!bytes || (offset & (align - 1)) == 0); 1196 if (bytes >= align) { 1197 /* Write the aligned part in the middle. */ 1198 uint64_t aligned_bytes = bytes & ~(align - 1); 1199 ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, 1200 NULL, flags); 1201 if (ret < 0) { 1202 goto fail; 1203 } 1204 bytes -= aligned_bytes; 1205 offset += aligned_bytes; 1206 } 1207 1208 assert(!bytes || (offset & (align - 1)) == 0); 1209 if (bytes) { 1210 assert(align == tail_padding_bytes + bytes); 1211 /* RMW the unaligned part after tail. */ 1212 mark_request_serialising(req, align); 1213 wait_serialising_requests(req); 1214 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); 1215 ret = bdrv_aligned_preadv(bs, req, offset, align, 1216 align, &local_qiov, 0); 1217 if (ret < 0) { 1218 goto fail; 1219 } 1220 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1221 1222 memset(buf, 0, bytes); 1223 ret = bdrv_aligned_pwritev(bs, req, offset, align, 1224 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 1225 } 1226 fail: 1227 qemu_vfree(buf); 1228 return ret; 1229 1230 } 1231 1232 /* 1233 * Handle a write request in coroutine context 1234 */ 1235 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, 1236 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 1237 BdrvRequestFlags flags) 1238 { 1239 BdrvTrackedRequest req; 1240 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 1241 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 1242 uint8_t *head_buf = NULL; 1243 uint8_t *tail_buf = NULL; 1244 QEMUIOVector local_qiov; 1245 bool use_local_qiov = false; 1246 int ret; 1247 1248 if (!bs->drv) { 1249 return -ENOMEDIUM; 1250 } 1251 if (bs->read_only) { 1252 return -EPERM; 1253 } 1254 1255 ret = bdrv_check_byte_request(bs, offset, bytes); 1256 if (ret < 0) { 1257 return ret; 1258 } 1259 1260 /* throttling disk I/O */ 1261 if (bs->io_limits_enabled) { 1262 throttle_group_co_io_limits_intercept(bs, bytes, true); 1263 } 1264 1265 /* 1266 * Align write if necessary by performing a read-modify-write cycle. 
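 * (The unaligned head and tail are first read into bounce buffers and combined
 * with the guest data so that only whole, aligned blocks are written back.)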
1267 * Pad qiov with the read parts and be sure to have a tracked request not 1268 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 1269 */ 1270 tracked_request_begin(&req, bs, offset, bytes, true); 1271 1272 if (!qiov) { 1273 ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 1274 goto out; 1275 } 1276 1277 if (offset & (align - 1)) { 1278 QEMUIOVector head_qiov; 1279 struct iovec head_iov; 1280 1281 mark_request_serialising(&req, align); 1282 wait_serialising_requests(&req); 1283 1284 head_buf = qemu_blockalign(bs, align); 1285 head_iov = (struct iovec) { 1286 .iov_base = head_buf, 1287 .iov_len = align, 1288 }; 1289 qemu_iovec_init_external(&head_qiov, &head_iov, 1); 1290 1291 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); 1292 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 1293 align, &head_qiov, 0); 1294 if (ret < 0) { 1295 goto fail; 1296 } 1297 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1298 1299 qemu_iovec_init(&local_qiov, qiov->niov + 2); 1300 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 1301 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 1302 use_local_qiov = true; 1303 1304 bytes += offset & (align - 1); 1305 offset = offset & ~(align - 1); 1306 } 1307 1308 if ((offset + bytes) & (align - 1)) { 1309 QEMUIOVector tail_qiov; 1310 struct iovec tail_iov; 1311 size_t tail_bytes; 1312 bool waited; 1313 1314 mark_request_serialising(&req, align); 1315 waited = wait_serialising_requests(&req); 1316 assert(!waited || !use_local_qiov); 1317 1318 tail_buf = qemu_blockalign(bs, align); 1319 tail_iov = (struct iovec) { 1320 .iov_base = tail_buf, 1321 .iov_len = align, 1322 }; 1323 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 1324 1325 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); 1326 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 1327 align, &tail_qiov, 0); 1328 if (ret < 0) { 1329 goto fail; 1330 } 1331 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1332 1333 if (!use_local_qiov) { 1334 qemu_iovec_init(&local_qiov, qiov->niov + 1); 1335 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 1336 use_local_qiov = true; 1337 } 1338 1339 tail_bytes = (offset + bytes) & (align - 1); 1340 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 1341 1342 bytes = ROUND_UP(bytes, align); 1343 } 1344 1345 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, 1346 use_local_qiov ? 
&local_qiov : qiov, 1347 flags); 1348 1349 fail: 1350 1351 if (use_local_qiov) { 1352 qemu_iovec_destroy(&local_qiov); 1353 } 1354 qemu_vfree(head_buf); 1355 qemu_vfree(tail_buf); 1356 out: 1357 tracked_request_end(&req); 1358 return ret; 1359 } 1360 1361 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 1362 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 1363 BdrvRequestFlags flags) 1364 { 1365 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 1366 return -EINVAL; 1367 } 1368 1369 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS, 1370 nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 1371 } 1372 1373 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, 1374 int nb_sectors, QEMUIOVector *qiov) 1375 { 1376 trace_bdrv_co_writev(bs, sector_num, nb_sectors); 1377 1378 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); 1379 } 1380 1381 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, 1382 int64_t sector_num, int nb_sectors, 1383 BdrvRequestFlags flags) 1384 { 1385 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); 1386 1387 if (!(bs->open_flags & BDRV_O_UNMAP)) { 1388 flags &= ~BDRV_REQ_MAY_UNMAP; 1389 } 1390 1391 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, 1392 BDRV_REQ_ZERO_WRITE | flags); 1393 } 1394 1395 int bdrv_flush_all(void) 1396 { 1397 BlockDriverState *bs = NULL; 1398 int result = 0; 1399 1400 while ((bs = bdrv_next(bs))) { 1401 AioContext *aio_context = bdrv_get_aio_context(bs); 1402 int ret; 1403 1404 aio_context_acquire(aio_context); 1405 ret = bdrv_flush(bs); 1406 if (ret < 0 && !result) { 1407 result = ret; 1408 } 1409 aio_context_release(aio_context); 1410 } 1411 1412 return result; 1413 } 1414 1415 typedef struct BdrvCoGetBlockStatusData { 1416 BlockDriverState *bs; 1417 BlockDriverState *base; 1418 int64_t sector_num; 1419 int nb_sectors; 1420 int *pnum; 1421 int64_t ret; 1422 bool done; 1423 } BdrvCoGetBlockStatusData; 1424 1425 /* 1426 * Returns the allocation status of the specified sectors. 1427 * Drivers not implementing the functionality are assumed to not support 1428 * backing files, hence all their sectors are reported as allocated. 1429 * 1430 * If 'sector_num' is beyond the end of the disk image the return value is 0 1431 * and 'pnum' is set to 0. 1432 * 1433 * 'pnum' is set to the number of sectors (including and immediately following 1434 * the specified sector) that are known to be in the same 1435 * allocated/unallocated state. 1436 * 1437 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 1438 * beyond the end of the disk image it will be clamped. 
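 * The result is a bitmask of BDRV_BLOCK_* flags; if BDRV_BLOCK_OFFSET_VALID is
 * set, it also encodes the offset at which the data can be found in bs->file.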
1439 */ 1440 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 1441 int64_t sector_num, 1442 int nb_sectors, int *pnum) 1443 { 1444 int64_t total_sectors; 1445 int64_t n; 1446 int64_t ret, ret2; 1447 1448 total_sectors = bdrv_nb_sectors(bs); 1449 if (total_sectors < 0) { 1450 return total_sectors; 1451 } 1452 1453 if (sector_num >= total_sectors) { 1454 *pnum = 0; 1455 return 0; 1456 } 1457 1458 n = total_sectors - sector_num; 1459 if (n < nb_sectors) { 1460 nb_sectors = n; 1461 } 1462 1463 if (!bs->drv->bdrv_co_get_block_status) { 1464 *pnum = nb_sectors; 1465 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 1466 if (bs->drv->protocol_name) { 1467 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 1468 } 1469 return ret; 1470 } 1471 1472 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); 1473 if (ret < 0) { 1474 *pnum = 0; 1475 return ret; 1476 } 1477 1478 if (ret & BDRV_BLOCK_RAW) { 1479 assert(ret & BDRV_BLOCK_OFFSET_VALID); 1480 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 1481 *pnum, pnum); 1482 } 1483 1484 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 1485 ret |= BDRV_BLOCK_ALLOCATED; 1486 } else { 1487 if (bdrv_unallocated_blocks_are_zero(bs)) { 1488 ret |= BDRV_BLOCK_ZERO; 1489 } else if (bs->backing_hd) { 1490 BlockDriverState *bs2 = bs->backing_hd; 1491 int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 1492 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { 1493 ret |= BDRV_BLOCK_ZERO; 1494 } 1495 } 1496 } 1497 1498 if (bs->file && 1499 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 1500 (ret & BDRV_BLOCK_OFFSET_VALID)) { 1501 int file_pnum; 1502 1503 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 1504 *pnum, &file_pnum); 1505 if (ret2 >= 0) { 1506 /* Ignore errors. This is just providing extra information, it 1507 * is useful but not necessary. 1508 */ 1509 if (!file_pnum) { 1510 /* !file_pnum indicates an offset at or beyond the EOF; it is 1511 * perfectly valid for the format block driver to point to such 1512 * offsets, so catch it and mark everything as zero */ 1513 ret |= BDRV_BLOCK_ZERO; 1514 } else { 1515 /* Limit request to the range reported by the protocol driver */ 1516 *pnum = file_pnum; 1517 ret |= (ret2 & BDRV_BLOCK_ZERO); 1518 } 1519 } 1520 } 1521 1522 return ret; 1523 } 1524 1525 /* Coroutine wrapper for bdrv_get_block_status() */ 1526 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) 1527 { 1528 BdrvCoGetBlockStatusData *data = opaque; 1529 BlockDriverState *bs = data->bs; 1530 1531 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, 1532 data->pnum); 1533 data->done = true; 1534 } 1535 1536 /* 1537 * Synchronous wrapper around bdrv_co_get_block_status(). 1538 * 1539 * See bdrv_co_get_block_status() for details. 
1540 */ 1541 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, 1542 int nb_sectors, int *pnum) 1543 { 1544 Coroutine *co; 1545 BdrvCoGetBlockStatusData data = { 1546 .bs = bs, 1547 .sector_num = sector_num, 1548 .nb_sectors = nb_sectors, 1549 .pnum = pnum, 1550 .done = false, 1551 }; 1552 1553 if (qemu_in_coroutine()) { 1554 /* Fast-path if already in coroutine context */ 1555 bdrv_get_block_status_co_entry(&data); 1556 } else { 1557 AioContext *aio_context = bdrv_get_aio_context(bs); 1558 1559 co = qemu_coroutine_create(bdrv_get_block_status_co_entry); 1560 qemu_coroutine_enter(co, &data); 1561 while (!data.done) { 1562 aio_poll(aio_context, true); 1563 } 1564 } 1565 return data.ret; 1566 } 1567 1568 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 1569 int nb_sectors, int *pnum) 1570 { 1571 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); 1572 if (ret < 0) { 1573 return ret; 1574 } 1575 return !!(ret & BDRV_BLOCK_ALLOCATED); 1576 } 1577 1578 /* 1579 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 1580 * 1581 * Return true if the given sector is allocated in any image between 1582 * BASE and TOP (inclusive). BASE can be NULL to check if the given 1583 * sector is allocated in any image of the chain. Return false otherwise. 1584 * 1585 * 'pnum' is set to the number of sectors (including and immediately following 1586 * the specified sector) that are known to be in the same 1587 * allocated/unallocated state. 1588 * 1589 */ 1590 int bdrv_is_allocated_above(BlockDriverState *top, 1591 BlockDriverState *base, 1592 int64_t sector_num, 1593 int nb_sectors, int *pnum) 1594 { 1595 BlockDriverState *intermediate; 1596 int ret, n = nb_sectors; 1597 1598 intermediate = top; 1599 while (intermediate && intermediate != base) { 1600 int pnum_inter; 1601 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 1602 &pnum_inter); 1603 if (ret < 0) { 1604 return ret; 1605 } else if (ret) { 1606 *pnum = pnum_inter; 1607 return 1; 1608 } 1609 1610 /* 1611 * [sector_num, nb_sectors] is unallocated on top but intermediate 1612 * might have 1613 * 1614 * [sector_num+x, nr_sectors] allocated. 
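 * In that case we must be careful not to report more sectors than are known
 * to be unallocated in every layer, hence n may be clamped below.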
1615 */ 1616 if (n > pnum_inter && 1617 (intermediate == top || 1618 sector_num + pnum_inter < intermediate->total_sectors)) { 1619 n = pnum_inter; 1620 } 1621 1622 intermediate = intermediate->backing_hd; 1623 } 1624 1625 *pnum = n; 1626 return 0; 1627 } 1628 1629 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 1630 const uint8_t *buf, int nb_sectors) 1631 { 1632 BlockDriver *drv = bs->drv; 1633 int ret; 1634 1635 if (!drv) { 1636 return -ENOMEDIUM; 1637 } 1638 if (!drv->bdrv_write_compressed) { 1639 return -ENOTSUP; 1640 } 1641 ret = bdrv_check_request(bs, sector_num, nb_sectors); 1642 if (ret < 0) { 1643 return ret; 1644 } 1645 1646 assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 1647 1648 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 1649 } 1650 1651 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 1652 int64_t pos, int size) 1653 { 1654 QEMUIOVector qiov; 1655 struct iovec iov = { 1656 .iov_base = (void *) buf, 1657 .iov_len = size, 1658 }; 1659 1660 qemu_iovec_init_external(&qiov, &iov, 1); 1661 return bdrv_writev_vmstate(bs, &qiov, pos); 1662 } 1663 1664 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 1665 { 1666 BlockDriver *drv = bs->drv; 1667 1668 if (!drv) { 1669 return -ENOMEDIUM; 1670 } else if (drv->bdrv_save_vmstate) { 1671 return drv->bdrv_save_vmstate(bs, qiov, pos); 1672 } else if (bs->file) { 1673 return bdrv_writev_vmstate(bs->file, qiov, pos); 1674 } 1675 1676 return -ENOTSUP; 1677 } 1678 1679 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 1680 int64_t pos, int size) 1681 { 1682 BlockDriver *drv = bs->drv; 1683 if (!drv) 1684 return -ENOMEDIUM; 1685 if (drv->bdrv_load_vmstate) 1686 return drv->bdrv_load_vmstate(bs, buf, pos, size); 1687 if (bs->file) 1688 return bdrv_load_vmstate(bs->file, buf, pos, size); 1689 return -ENOTSUP; 1690 } 1691 1692 /**************************************************************/ 1693 /* async I/Os */ 1694 1695 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 1696 QEMUIOVector *qiov, int nb_sectors, 1697 BlockCompletionFunc *cb, void *opaque) 1698 { 1699 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 1700 1701 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 1702 cb, opaque, false); 1703 } 1704 1705 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 1706 QEMUIOVector *qiov, int nb_sectors, 1707 BlockCompletionFunc *cb, void *opaque) 1708 { 1709 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 1710 1711 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 1712 cb, opaque, true); 1713 } 1714 1715 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, 1716 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, 1717 BlockCompletionFunc *cb, void *opaque) 1718 { 1719 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); 1720 1721 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, 1722 BDRV_REQ_ZERO_WRITE | flags, 1723 cb, opaque, true); 1724 } 1725 1726 1727 typedef struct MultiwriteCB { 1728 int error; 1729 int num_requests; 1730 int num_callbacks; 1731 struct { 1732 BlockCompletionFunc *cb; 1733 void *opaque; 1734 QEMUIOVector *free_qiov; 1735 } callbacks[]; 1736 } MultiwriteCB; 1737 1738 static void multiwrite_user_cb(MultiwriteCB *mcb) 1739 { 1740 int i; 1741 1742 for (i = 0; i < mcb->num_callbacks; i++) { 1743 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 1744 if (mcb->callbacks[i].free_qiov) { 1745 
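            /* qiovs created by multiwrite_merge() are owned by the
             * MultiwriteCB and are released here after the user callback
             * has run. */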
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
                            int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                            reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);

    return outidx + 1;
}

/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1, and any of the
 * requests may or may not have been submitted yet.
In particular, this means that the 1864 * callback will be called for some of the requests, for others it won't. The 1865 * caller must check the error field of the BlockRequest to wait for the right 1866 * callbacks (if error != 0, no callback will be called). 1867 * 1868 * The implementation may modify the contents of the reqs array, e.g. to merge 1869 * requests. However, the fields opaque and error are left unmodified as they 1870 * are used to signal failure for a single request to the caller. 1871 */ 1872 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) 1873 { 1874 MultiwriteCB *mcb; 1875 int i; 1876 1877 /* don't submit writes if we don't have a medium */ 1878 if (bs->drv == NULL) { 1879 for (i = 0; i < num_reqs; i++) { 1880 reqs[i].error = -ENOMEDIUM; 1881 } 1882 return -1; 1883 } 1884 1885 if (num_reqs == 0) { 1886 return 0; 1887 } 1888 1889 // Create MultiwriteCB structure 1890 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); 1891 mcb->num_requests = 0; 1892 mcb->num_callbacks = num_reqs; 1893 1894 for (i = 0; i < num_reqs; i++) { 1895 mcb->callbacks[i].cb = reqs[i].cb; 1896 mcb->callbacks[i].opaque = reqs[i].opaque; 1897 } 1898 1899 // Check for mergable requests 1900 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); 1901 1902 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); 1903 1904 /* Run the aio requests. */ 1905 mcb->num_requests = num_reqs; 1906 for (i = 0; i < num_reqs; i++) { 1907 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, 1908 reqs[i].nb_sectors, reqs[i].flags, 1909 multiwrite_cb, mcb, 1910 true); 1911 } 1912 1913 return 0; 1914 } 1915 1916 void bdrv_aio_cancel(BlockAIOCB *acb) 1917 { 1918 qemu_aio_ref(acb); 1919 bdrv_aio_cancel_async(acb); 1920 while (acb->refcnt > 1) { 1921 if (acb->aiocb_info->get_aio_context) { 1922 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 1923 } else if (acb->bs) { 1924 aio_poll(bdrv_get_aio_context(acb->bs), true); 1925 } else { 1926 abort(); 1927 } 1928 } 1929 qemu_aio_unref(acb); 1930 } 1931 1932 /* Async version of aio cancel. The caller is not blocked if the acb implements 1933 * cancel_async, otherwise we do nothing and let the request normally complete. 1934 * In either case the completion callback must be called. 
*/ 1935 void bdrv_aio_cancel_async(BlockAIOCB *acb) 1936 { 1937 if (acb->aiocb_info->cancel_async) { 1938 acb->aiocb_info->cancel_async(acb); 1939 } 1940 } 1941 1942 /**************************************************************/ 1943 /* async block device emulation */ 1944 1945 typedef struct BlockAIOCBSync { 1946 BlockAIOCB common; 1947 QEMUBH *bh; 1948 int ret; 1949 /* vector translation state */ 1950 QEMUIOVector *qiov; 1951 uint8_t *bounce; 1952 int is_write; 1953 } BlockAIOCBSync; 1954 1955 static const AIOCBInfo bdrv_em_aiocb_info = { 1956 .aiocb_size = sizeof(BlockAIOCBSync), 1957 }; 1958 1959 static void bdrv_aio_bh_cb(void *opaque) 1960 { 1961 BlockAIOCBSync *acb = opaque; 1962 1963 if (!acb->is_write && acb->ret >= 0) { 1964 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); 1965 } 1966 qemu_vfree(acb->bounce); 1967 acb->common.cb(acb->common.opaque, acb->ret); 1968 qemu_bh_delete(acb->bh); 1969 acb->bh = NULL; 1970 qemu_aio_unref(acb); 1971 } 1972 1973 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, 1974 int64_t sector_num, 1975 QEMUIOVector *qiov, 1976 int nb_sectors, 1977 BlockCompletionFunc *cb, 1978 void *opaque, 1979 int is_write) 1980 1981 { 1982 BlockAIOCBSync *acb; 1983 1984 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); 1985 acb->is_write = is_write; 1986 acb->qiov = qiov; 1987 acb->bounce = qemu_try_blockalign(bs, qiov->size); 1988 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); 1989 1990 if (acb->bounce == NULL) { 1991 acb->ret = -ENOMEM; 1992 } else if (is_write) { 1993 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); 1994 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 1995 } else { 1996 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 1997 } 1998 1999 qemu_bh_schedule(acb->bh); 2000 2001 return &acb->common; 2002 } 2003 2004 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 2005 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 2006 BlockCompletionFunc *cb, void *opaque) 2007 { 2008 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 2009 } 2010 2011 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 2012 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 2013 BlockCompletionFunc *cb, void *opaque) 2014 { 2015 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 2016 } 2017 2018 2019 typedef struct BlockAIOCBCoroutine { 2020 BlockAIOCB common; 2021 BlockRequest req; 2022 bool is_write; 2023 bool need_bh; 2024 bool *done; 2025 QEMUBH* bh; 2026 } BlockAIOCBCoroutine; 2027 2028 static const AIOCBInfo bdrv_em_co_aiocb_info = { 2029 .aiocb_size = sizeof(BlockAIOCBCoroutine), 2030 }; 2031 2032 static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 2033 { 2034 if (!acb->need_bh) { 2035 acb->common.cb(acb->common.opaque, acb->req.error); 2036 qemu_aio_unref(acb); 2037 } 2038 } 2039 2040 static void bdrv_co_em_bh(void *opaque) 2041 { 2042 BlockAIOCBCoroutine *acb = opaque; 2043 2044 assert(!acb->need_bh); 2045 qemu_bh_delete(acb->bh); 2046 bdrv_co_complete(acb); 2047 } 2048 2049 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 2050 { 2051 acb->need_bh = false; 2052 if (acb->req.error != -EINPROGRESS) { 2053 BlockDriverState *bs = acb->common.bs; 2054 2055 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 2056 qemu_bh_schedule(acb->bh); 2057 } 2058 } 2059 2060 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 2061 static void coroutine_fn 

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_slice_free1(acb->aiocb_info->aiocb_size, acb);
    }
}
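
/*
 * The usual lifecycle of an ACB built on the helpers above, as a sketch
 * (MyAIOCB, my_aiocb_info, my_complete and my_submit are hypothetical names,
 * shown only to illustrate the reference counting):
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;          // must stay the first member
 *         int ret;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     static void my_complete(MyAIOCB *acb)
 *     {
 *         // run the caller's callback, then drop the request's reference
 *         acb->common.cb(acb->common.opaque, acb->ret);
 *         qemu_aio_unref(acb);
 *     }
 *
 *     static BlockAIOCB *my_submit(BlockDriverState *bs,
 *                                  BlockCompletionFunc *cb, void *opaque)
 *     {
 *         // allocation: the ACB starts out with refcnt == 1
 *         MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *         ...
 *         return &acb->common;
 *     }
 *
 * bdrv_aio_cancel() takes an extra reference of its own before polling,
 * which is why it waits for refcnt to drop back to 1 rather than to 0.
 */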

/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
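
/*
 * Putting the pieces of bdrv_co_flush() together: a flush first asks the
 * format driver to push its own state out to the OS (bdrv_co_flush_to_os),
 * then, unless the image was opened with BDRV_O_NO_FLUSH (cache=unsafe),
 * forces the data to stable storage through whichever of
 * bdrv_co_flush_to_disk or bdrv_aio_flush the driver implements, and finally
 * recurses into bs->file so that the protocol layer underneath is flushed as
 * well.
 */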

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Pass the chunk size computed above, not the full remaining
             * request, so the AIO path splits the discard the same way as
             * the coroutine path. */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
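
/*
 * A small usage sketch for the discard entry points above (start_sector and
 * num_sectors are hypothetical values; error_report() is only one possible
 * way to surface the failure):
 *
 *     int ret = bdrv_discard(bs, start_sector, num_sectors);
 *     if (ret < 0) {
 *         error_report("discard failed: %s", strerror(-ret));
 *     }
 *
 * bdrv_discard() may be called either from coroutine context (where it runs
 * bdrv_co_discard() directly) or from outside one (where it spawns the
 * coroutine and polls the AioContext until it finishes).  A return value of
 * 0 also covers the cases where discard is disabled (BDRV_O_UNMAP not set)
 * or simply not implemented by the driver.
 */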

/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_plug) {
        drv->bdrv_io_plug(bs);
    } else if (bs->file) {
        bdrv_io_plug(bs->file);
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_unplug) {
        drv->bdrv_io_unplug(bs);
    } else if (bs->file) {
        bdrv_io_unplug(bs->file);
    }
}

void bdrv_flush_io_queue(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_flush_io_queue) {
        drv->bdrv_flush_io_queue(bs);
    } else if (bs->file) {
        bdrv_flush_io_queue(bs->file);
    }
}
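
/*
 * The plug/unplug hooks above are meant to bracket a burst of submissions so
 * that drivers which support it can queue the requests internally and hand
 * them to the host in a single batch.  A sketch of the intended call pattern
 * (my_cb and the reqs array are hypothetical caller state):
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
 *                         reqs[i].nb_sectors, my_cb, &reqs[i]);
 *     }
 *     bdrv_io_unplug(bs);
 */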