/* qemu/block/stream.c (revision 29b62a10) */
/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qdict.h"
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"
#include "block/copy-on-read.h"

enum {
    /*
     * Maximum chunk size to feed to copy-on-read.  This should be
     * large enough to process multiple clusters in a single call, so
     * that populating contiguous regions of the image is efficient.
     */
    STREAM_CHUNK = 512 * 1024, /* in bytes */
};

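/*
 * State of a single image streaming job; retrieved from the Job pointer via
 * container_of(job, StreamBlockJob, common.job).
 */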
typedef struct StreamBlockJob {
    BlockJob common;
    BlockBackend *blk;
    BlockDriverState *base_overlay; /* COW overlay (stream from this) */
    BlockDriverState *above_base;   /* Node directly above the base */
    BlockDriverState *cor_filter_bs;
    BlockDriverState *target_bs;
    BlockdevOnError on_error;
    char *backing_file_str;
    bool bs_read_only;
} StreamBlockJob;

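/*
 * Read @bytes bytes at @offset through the copy-on-read filter.
 * BDRV_REQ_PREFETCH with a NULL qiov means no data is returned to the
 * caller; the read is issued purely for its side effect of making the
 * copy-on-read filter copy the data into the top image.
 */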
static int coroutine_fn stream_populate(BlockBackend *blk,
                                        int64_t offset, uint64_t bytes)
{
    assert(bytes < SIZE_MAX);

    return blk_co_preadv(blk, offset, bytes, NULL, BDRV_REQ_PREFETCH);
}

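/*
 * Runs when the job completes successfully: drop the copy-on-read filter,
 * attach the streamed-into node to its new base, and record the new backing
 * file string in the image header.
 */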
static int stream_prepare(Job *job)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
    BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
    BlockDriverState *base;
    BlockDriverState *unfiltered_base;
    Error *local_err = NULL;
    int ret = 0;

    /* The filter must be dropped now, as it is what holds the backing chain */
    bdrv_cor_filter_drop(s->cor_filter_bs);
    s->cor_filter_bs = NULL;

    /*
     * bdrv_set_backing_hd() requires that unfiltered_bs is drained. Drain
     * already here and use bdrv_set_backing_hd_drained() instead because
     * the polling during drained_begin() might change the graph, and if we do
     * this only later, we may end up working with the wrong base node (or it
     * might even have gone away by the time we want to use it).
     */
    bdrv_drained_begin(unfiltered_bs);

    base = bdrv_filter_or_cow_bs(s->above_base);
    unfiltered_base = bdrv_skip_filters(base);

    if (bdrv_cow_child(unfiltered_bs)) {
        const char *base_id = NULL, *base_fmt = NULL;
        if (unfiltered_base) {
            base_id = s->backing_file_str ?: unfiltered_base->filename;
            if (unfiltered_base->drv) {
                base_fmt = unfiltered_base->drv->format_name;
            }
        }

        bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err);
        if (local_err) {
            /* Do not rewrite the image header below if the graph change failed */
            error_report_err(local_err);
            ret = -EPERM;
            goto out;
        }

        /*
         * This call will do I/O, so the graph can change again from here on.
         * We have already completed the graph change, so we are not in danger
         * of operating on the wrong node any more if this happens.
         */
        ret = bdrv_change_backing_file(unfiltered_bs, base_id, base_fmt, false);
    }

out:
    bdrv_drained_end(unfiltered_bs);
    return ret;
}

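/*
 * Runs on every exit path, whether the job succeeded, failed or was
 * cancelled: drop the filter if stream_prepare() did not get to do it,
 * release the BlockBackend, and restore read-only mode if the image had to
 * be reopened read-write.
 */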
static void stream_clean(Job *job)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);

    if (s->cor_filter_bs) {
        bdrv_cor_filter_drop(s->cor_filter_bs);
        s->cor_filter_bs = NULL;
    }

    blk_unref(s->blk);
    s->blk = NULL;

    /* Reopen the image back in read-only mode if necessary */
    if (s->bs_read_only) {
        /* Give up write permissions before making it read-only */
        bdrv_reopen_set_read_only(s->target_bs, true, NULL);
    }

    g_free(s->backing_file_str);
}

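/*
 * The job coroutine: walk the image in STREAM_CHUNK-sized steps, query each
 * chunk's allocation status, and populate chunks that live in an
 * intermediate image by reading them through the copy-on-read filter,
 * honouring the configured rate limit and on-error policy.
 */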
static int coroutine_fn stream_run(Job *job, Error **errp)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
    BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
    int64_t len;
    int64_t offset = 0;
    uint64_t delay_ns = 0;
    int error = 0;
    int64_t n = 0; /* bytes */

    if (unfiltered_bs == s->base_overlay) {
        /* Nothing to stream */
        return 0;
    }

    len = bdrv_getlength(s->target_bs);
    if (len < 0) {
        return len;
    }
    job_progress_set_remaining(&s->common.job, len);

    for ( ; offset < len; offset += n) {
        bool copy;
        int ret;

        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job)) {
            break;
        }

        copy = false;

        ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
        if (ret == 1) {
            /* Allocated in the top, no need to copy.  */
        } else if (ret >= 0) {
            /* Copy if allocated in the intermediate images.  Limit to the
             * known-unallocated area [offset, offset+n).  */
            ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
                                          s->base_overlay, true,
                                          offset, n, &n);
            /* Finish early if end of backing file has been reached */
            if (ret == 0 && n == 0) {
                n = len - offset;
            }

            copy = (ret > 0);
        }
        trace_stream_one_iteration(s, offset, n, ret);
        if (copy) {
            ret = stream_populate(s->blk, offset, n);
        }
        if (ret < 0) {
            BlockErrorAction action =
                block_job_error_action(&s->common, s->on_error, true, -ret);
            if (action == BLOCK_ERROR_ACTION_STOP) {
                n = 0;
                continue;
            }
            if (error == 0) {
                error = ret;
            }
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                break;
            }
        }

        /* Publish progress */
        job_progress_update(&s->common.job, n);
        if (copy) {
            delay_ns = block_job_ratelimit_get_delay(&s->common, n);
        } else {
            delay_ns = 0;
        }
    }

    /* Do not remove the backing file if an error was there but ignored. */
    return error;
}

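/*
 * Note that .prepare is not invoked if the job has already failed, while
 * .clean runs on every exit path (see stream_prepare() and stream_clean()).
 */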
static const BlockJobDriver stream_job_driver = {
    .job_driver = {
        .instance_size = sizeof(StreamBlockJob),
        .job_type      = JOB_TYPE_STREAM,
        .free          = block_job_free,
        .run           = stream_run,
        .prepare       = stream_prepare,
        .clean         = stream_clean,
        .user_resume   = block_job_user_resume,
    },
};

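/*
 * Create and start an image streaming job on @bs.  A copy-on-read filter is
 * inserted above @bs so that both the job and concurrent guest reads
 * populate the top image from the backing chain; all intermediate nodes
 * between @bs and @base are blocked, because they disappear from the chain
 * once the job completes.
 *
 * This is the backend of the QMP 'block-stream' command, e.g. (argument
 * values are illustrative):
 *
 *   { "execute": "block-stream",
 *     "arguments": { "device": "drive0", "base": "base.qcow2" } }
 */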
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  BlockDriverState *bottom,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error,
                  const char *filter_node_name,
                  Error **errp)
{
    StreamBlockJob *s = NULL;
    BlockDriverState *iter;
    bool bs_read_only;
    int basic_flags = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
    BlockDriverState *base_overlay;
    BlockDriverState *cor_filter_bs = NULL;
    BlockDriverState *above_base;
    QDict *opts;
    int ret;

    GLOBAL_STATE_CODE();

    assert(!(base && bottom));
    assert(!(backing_file_str && bottom));

    if (bottom) {
        /*
         * New simple interface. The code below is still written in terms of
         * the old interface with its @base parameter (it does not freeze the
         * link to base, though, so in that respect the old code is already
         * correct for the new interface). So, for now, just emulate
         * base_overlay and above_base. Once the old interface is finally
         * removed, this code should be refactored to use only "bottom" and
         * no "*base*" variables.
         */
        assert(!bottom->drv->is_filter);
        base_overlay = above_base = bottom;
    } else {
        base_overlay = bdrv_find_overlay(bs, base);
        if (!base_overlay) {
            error_setg(errp, "'%s' is not in the backing chain of '%s'",
                       base->node_name, bs->node_name);
            return;
        }

        /*
         * Find the node directly above @base.  @base_overlay is a COW overlay,
         * so it must have a bdrv_cow_child(), but it is the immediate overlay
         * of @base, so between the two there can only be filters.
         */
        above_base = base_overlay;
        if (bdrv_cow_bs(above_base) != base) {
            above_base = bdrv_cow_bs(above_base);
            while (bdrv_filter_bs(above_base) != base) {
                above_base = bdrv_filter_bs(above_base);
            }
        }
    }

    /* Make sure that the image is opened in read-write mode */
    bs_read_only = bdrv_is_read_only(bs);
    if (bs_read_only) {
        int ret;
        /* Hold the chain during reopen */
        if (bdrv_freeze_backing_chain(bs, above_base, errp) < 0) {
            return;
        }

        ret = bdrv_reopen_set_read_only(bs, false, errp);

        /* Unfreeze again: either we failed, or from now on the cor-filter
         * will hold the chain */
        bdrv_unfreeze_backing_chain(bs, above_base);

        if (ret < 0) {
            return;
        }
    }

    opts = qdict_new();

    qdict_put_str(opts, "driver", "copy-on-read");
    qdict_put_str(opts, "file", bdrv_get_node_name(bs));
    /* Pass the base_overlay node name as 'bottom' to COR driver */
    qdict_put_str(opts, "bottom", base_overlay->node_name);
    if (filter_node_name) {
        qdict_put_str(opts, "node-name", filter_node_name);
    }

    cor_filter_bs = bdrv_insert_node(bs, opts, BDRV_O_RDWR, errp);
    if (!cor_filter_bs) {
        goto fail;
    }

    if (!filter_node_name) {
        cor_filter_bs->implicit = true;
    }

    s = block_job_create(job_id, &stream_job_driver, NULL, cor_filter_bs,
                         0, BLK_PERM_ALL,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        goto fail;
    }

    s->blk = blk_new_with_bs(cor_filter_bs, BLK_PERM_CONSISTENT_READ,
                             basic_flags | BLK_PERM_WRITE, errp);
    if (!s->blk) {
        goto fail;
    }
    /*
     * Disable request queuing in the BlockBackend to avoid deadlocks on drain:
     * The job reports that it's busy until it reaches a pause point.
     */
    blk_set_disable_request_queuing(s->blk, true);
    blk_set_allow_aio_context_change(s->blk, true);

    /*
     * Prevent concurrent jobs trying to modify the graph structure here, we
     * already have our own plans. Also don't allow resize as the image size is
     * queried only at the job start and then cached.
     */
    if (block_job_add_bdrv(&s->common, "active node", bs, 0,
                           basic_flags | BLK_PERM_WRITE, errp)) {
        goto fail;
    }

    /*
     * Block all intermediate nodes between bs and base, because they will
     * disappear from the chain after this operation. The streaming job reads
     * every block only once, assuming that it doesn't change, so forbid
     * writes and resizes. Reassign the base node pointer, because the
     * backing BS of the above_base node might have changed after the call
     * to bdrv_reopen_set_read_only() due to parallel block jobs running.
     */
    base = bdrv_filter_or_cow_bs(above_base);
    for (iter = bdrv_filter_or_cow_bs(bs); iter != base;
         iter = bdrv_filter_or_cow_bs(iter))
    {
        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 basic_flags, errp);
        if (ret < 0) {
            goto fail;
        }
    }

    s->base_overlay = base_overlay;
    s->above_base = above_base;
    s->backing_file_str = g_strdup(backing_file_str);
    s->cor_filter_bs = cor_filter_bs;
    s->target_bs = bs;
    s->bs_read_only = bs_read_only;

    s->on_error = on_error;
    trace_stream_start(bs, base, s);
    job_start(&s->common.job);
    return;

fail:
    if (s) {
        job_early_fail(&s->common.job);
    }
    if (cor_filter_bs) {
        bdrv_cor_filter_drop(cor_filter_bs);
    }
    if (bs_read_only) {
        bdrv_reopen_set_read_only(bs, true, NULL);
    }
}