/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block_int.h"

enum {
    /*
     * Size of data buffer for populating the image file. This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

/* Length of one rate-limiting time slice, in nanoseconds (100 ms) */
#define SLICE_TIME 100000000ULL /* ns */

/*
 * Simple slice-based rate limiter.  Time is divided into SLICE_TIME slices;
 * up to slice_quota units may be dispatched per slice, and callers that
 * exceed the quota are told how long to wait until the next slice begins.
 */
typedef struct {
    int64_t next_slice_time;    /* rt_clock time (ns) when the current slice
                                 * expires and the quota resets */
    uint64_t slice_quota;       /* max units allowed per slice */
    uint64_t dispatched;        /* units consumed in the current slice */
} RateLimit;

/*
 * Account for n units of work against the rate limit.
 *
 * Returns 0 if the caller may proceed now, otherwise the number of
 * nanoseconds to sleep before retrying (time left in the current slice).
 *
 * Note that when the quota would be exceeded, n is recorded as the first
 * dispatch of the upcoming slice, so the caller is expected to proceed
 * after sleeping without calling this function again for the same n.
 */
static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
    int64_t now = qemu_get_clock_ns(rt_clock);

    if (limit->next_slice_time < now) {
        /* Previous slice has expired; start a new one */
        limit->next_slice_time = now + SLICE_TIME;
        limit->dispatched = 0;
    }
    /* The first request of a slice always goes through, even if it is
     * larger than the whole quota; otherwise it could never be dispatched.
     */
    if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) {
        limit->dispatched += n;
        return 0;
    } else {
        limit->dispatched = n;
        return limit->next_slice_time - now;
    }
}

/*
 * Set the rate limit.  speed is in units per second and is converted to a
 * per-slice quota.  Note: integer division means speeds below one unit per
 * slice (10 slices/sec) yield a quota of 0, i.e. one dispatch per slice.
 */
static void ratelimit_set_speed(RateLimit *limit, uint64_t speed)
{
    limit->slice_quota = speed / (1000000000ULL / SLICE_TIME);
}

/* State for a single image-streaming block job */
typedef struct StreamBlockJob {
    BlockJob common;            /* must be first: jobs are container_of'd */
    RateLimit limit;            /* throttles copying when common.speed set */
    BlockDriverState *base;     /* stream stops at this image; NULL means
                                 * stream the entire backing chain */
    char backing_file_id[1024]; /* backing file string written into the top
                                 * image when the job completes */
} StreamBlockJob;

/*
 * Copy the given sector range from the backing file chain into bs by
 * issuing a copy-on-read request.  Returns the bdrv_co_copy_on_readv()
 * result (0 on success, negative errno on failure).
 */
static int coroutine_fn stream_populate(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        void *buf)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
    };
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Copy-on-read the unallocated clusters */
    return bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, &qiov);
}

/*
 * Drop the intermediate images between top and base now that all of their
 * data has been streamed into top: unlink and delete each one, then make
 * base the direct backing file of top.  If base is NULL the whole backing
 * chain below top is deleted.
 */
static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
                                const char *base_id)
{
    BlockDriverState *intermediate;
    intermediate = top->backing_hd;

    while (intermediate) {
        BlockDriverState *unused;

        /* reached base */
        if (intermediate == base) {
            break;
        }

        unused = intermediate;
        intermediate = intermediate->backing_hd;
        /* Detach before deleting so bdrv_delete() does not recurse into
         * the rest of the chain.
         */
        unused->backing_hd = NULL;
        bdrv_delete(unused);
    }
    top->backing_hd = base;
}

/*
 * Given an image chain: [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return 1 if the given sector is allocated in top.
 * Return 0 if the given sector is allocated in an intermediate image
 * (i.e. it still needs to be streamed into top).
 * Return 1 if it is unallocated everywhere between base and top, so
 * reading it falls through to base and no copy is needed.
 * Return a negative errno on error.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
static int coroutine_fn is_allocated_base(BlockDriverState *top,
                                          BlockDriverState *base,
                                          int64_t sector_num,
                                          int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n;

    ret = bdrv_co_is_allocated(top, sector_num, nb_sectors, &n);
    if (ret) {
        /* Allocated in top (or error): nothing to stream for this range */
        *pnum = n;
        return ret;
    }

    /*
     * Is the unallocated chunk [sector_num, n] also
     * unallocated between base and top?
     */
    intermediate = top->backing_hd;

    while (intermediate != base) {
        int pnum_inter;

        ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
                                   &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            /* Allocated in an intermediate image: must be streamed */
            *pnum = pnum_inter;
            return 0;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        /* Shrink the range so every returned sector really has the same
         * state across all images inspected so far.
         */
        if (n > pnum_inter) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 1;
}

/*
 * Coroutine that performs the streaming job: walk the image from sector 0
 * to the end, copying any range not yet allocated in the top image (and not
 * already present in an intermediate image above base), honoring the rate
 * limit and cancellation.  On full success, rewrite the backing file link
 * and delete the now-unused intermediate images, then complete the job.
 */
static void coroutine_fn stream_run(void *opaque)
{
    StreamBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    BlockDriverState *base = s->base;
    int64_t sector_num, end;
    int ret = 0;
    int n = 0;
    void *buf;

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        /* bdrv_getlength() failed; report its errno as the job result */
        block_job_complete(&s->common, s->common.len);
        return;
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);

    /* Turn on copy-on-read for the whole block device so that guest read
     * requests help us make progress. Only do this when copying the entire
     * backing chain since the copy-on-read operation does not take base into
     * account.
     */
    if (!base) {
        bdrv_enable_copy_on_read(bs);
    }

    for (sector_num = 0; sector_num < end; sector_num += n) {
        uint64_t delay_ns = 0;

wait:
        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that qemu_aio_flush() returns.
         */
        block_job_sleep_ns(&s->common, rt_clock, delay_ns);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        ret = is_allocated_base(bs, base, sector_num,
                                STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
        trace_stream_one_iteration(s, sector_num, n, ret);
        if (ret == 0) {
            /* Range lives in an intermediate image: copy it into bs,
             * throttling first if a speed limit is set.
             */
            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, n);
                if (delay_ns > 0) {
                    goto wait;
                }
            }
            ret = stream_populate(bs, sector_num, n, buf);
        }
        if (ret < 0) {
            break;
        }
        /* ret may be 1 ("already allocated / skip"); normalize so the
         * final job result is 0 on success.
         */
        ret = 0;

        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;
    }

    if (!base) {
        bdrv_disable_copy_on_read(bs);
    }

    /* Only rewrite the backing link if we streamed everything without
     * error or cancellation.
     */
    if (!block_job_is_cancelled(&s->common) && sector_num == end && ret == 0) {
        const char *base_id = NULL, *base_fmt = NULL;
        if (base) {
            base_id = s->backing_file_id;
            if (base->drv) {
                base_fmt = base->drv->format_name;
            }
        }
        ret = bdrv_change_backing_file(bs, base_id, base_fmt);
        close_unused_images(bs, base, base_id);
    }

    qemu_vfree(buf);
    block_job_complete(&s->common, ret);
}

/*
 * BlockJobType.set_speed callback: validate and apply a new speed limit
 * (in bytes/sec, converted to sectors/sec for the rate limiter).  Sets
 * errp and leaves the limit unchanged if speed is negative.
 */
static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE);
}

static BlockJobType stream_job_type = {
    .instance_size = sizeof(StreamBlockJob),
    .job_type      = "stream",
    .set_speed     = stream_set_speed,
};

/*
 * Start an image-streaming job on bs.
 *
 * base:    stop streaming at this image; NULL streams the whole chain
 * base_id: backing file string to record in bs on completion (copied;
 *          truncated to the size of StreamBlockJob.backing_file_id)
 * speed:   initial rate limit in bytes/sec (0 for unlimited)
 * cb/opaque: completion callback and its argument
 * errp:    set on failure to create the job (e.g. a job already exists);
 *          in that case nothing is started
 *
 * The job coroutine is entered immediately.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    StreamBlockJob *s;

    s = block_job_create(&stream_job_type, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->base = base;
    if (base_id) {
        pstrcpy(s->backing_file_id, sizeof(s->backing_file_id), base_id);
    }

    s->common.co = qemu_coroutine_create(stream_run);
    trace_stream_start(bs, base, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}