xref: /qemu/block/qcow2-cluster.c (revision a193352f)
1 /*
2  * Block driver for the QCOW version 2 format
3  *
4  * Copyright (c) 2004-2006 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include <zlib.h>
27 
28 #include "qapi/error.h"
29 #include "qemu-common.h"
30 #include "block/block_int.h"
31 #include "qcow2.h"
32 #include "qemu/bswap.h"
33 #include "trace.h"
34 
35 int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
36 {
37     BDRVQcow2State *s = bs->opaque;
38     int new_l1_size, i, ret;
39 
40     if (exact_size >= s->l1_size) {
41         return 0;
42     }
43 
44     new_l1_size = exact_size;
45 
46 #ifdef DEBUG_ALLOC2
47     fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
48 #endif
49 
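    /*
     * First zero out the tail of the on-disk L1 table and flush it, so that
     * the image never references the L2 clusters freed below, even if we
     * crash in between.
     */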
50     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
51     ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
52                                        new_l1_size * sizeof(uint64_t),
53                              (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
54     if (ret < 0) {
55         goto fail;
56     }
57 
58     ret = bdrv_flush(bs->file->bs);
59     if (ret < 0) {
60         goto fail;
61     }
62 
63     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
64     for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
65         if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
66             continue;
67         }
68         qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
69                             s->cluster_size, QCOW2_DISCARD_ALWAYS);
70         s->l1_table[i] = 0;
71     }
72     return 0;
73 
74 fail:
75     /*
76      * If the write in the l1_table failed the image may contain a partially
77      * overwritten l1_table. In this case it would be better to clear the
78      * l1_table in memory to avoid possible image corruption.
79      */
80     memset(s->l1_table + new_l1_size, 0,
81            (s->l1_size - new_l1_size) * sizeof(uint64_t));
82     return ret;
83 }
84 
85 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
86                         bool exact_size)
87 {
88     BDRVQcow2State *s = bs->opaque;
89     int new_l1_size2, ret, i;
90     uint64_t *new_l1_table;
91     int64_t old_l1_table_offset, old_l1_size;
92     int64_t new_l1_table_offset, new_l1_size;
93     uint8_t data[12];
94 
    if (min_size <= s->l1_size) {
        return 0;
    }
97 
98     /* Do a sanity check on min_size before trying to calculate new_l1_size
99      * (this prevents overflows during the while loop for the calculation of
100      * new_l1_size) */
101     if (min_size > INT_MAX / sizeof(uint64_t)) {
102         return -EFBIG;
103     }
104 
105     if (exact_size) {
106         new_l1_size = min_size;
107     } else {
108         /* Bump size up to reduce the number of times we have to grow */
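        /* (e.g. 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> ..., i.e. growth by
         * roughly a factor of 1.5 per step) */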
109         new_l1_size = s->l1_size;
110         if (new_l1_size == 0) {
111             new_l1_size = 1;
112         }
113         while (min_size > new_l1_size) {
114             new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
115         }
116     }
117 
118     QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
119     if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
120         return -EFBIG;
121     }
122 
123 #ifdef DEBUG_ALLOC2
124     fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
125             s->l1_size, new_l1_size);
126 #endif
127 
128     new_l1_size2 = sizeof(uint64_t) * new_l1_size;
129     new_l1_table = qemu_try_blockalign(bs->file->bs,
130                                        ROUND_UP(new_l1_size2, 512));
131     if (new_l1_table == NULL) {
132         return -ENOMEM;
133     }
134     memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));
135 
136     if (s->l1_size) {
137         memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
138     }
139 
140     /* write new table (align to cluster) */
141     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
142     new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
143     if (new_l1_table_offset < 0) {
144         qemu_vfree(new_l1_table);
145         return new_l1_table_offset;
146     }
147 
148     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
149     if (ret < 0) {
150         goto fail;
151     }
152 
153     /* the L1 position has not yet been updated, so these clusters must
154      * indeed be completely free */
155     ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
156                                         new_l1_size2);
157     if (ret < 0) {
158         goto fail;
159     }
160 
161     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }
170 
171     /* set new table */
172     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
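    /* l1_size (4 bytes) and l1_table_offset (8 bytes) are adjacent in the
     * qcow2 header, so both fields can be activated with a single 12-byte
     * write. */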
173     stl_be_p(data, new_l1_size);
174     stq_be_p(data + 4, new_l1_table_offset);
175     ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
176                            data, sizeof(data));
177     if (ret < 0) {
178         goto fail;
179     }
180     qemu_vfree(s->l1_table);
181     old_l1_table_offset = s->l1_table_offset;
182     s->l1_table_offset = new_l1_table_offset;
183     s->l1_table = new_l1_table;
184     old_l1_size = s->l1_size;
185     s->l1_size = new_l1_size;
186     qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
187                         QCOW2_DISCARD_OTHER);
188     return 0;
fail:
190     qemu_vfree(new_l1_table);
191     qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
192                         QCOW2_DISCARD_OTHER);
193     return ret;
194 }
195 
196 /*
197  * l2_load
198  *
199  * @bs: The BlockDriverState
200  * @offset: A guest offset, used to calculate what slice of the L2
201  *          table to load.
202  * @l2_offset: Offset to the L2 table in the image file.
203  * @l2_slice: Location to store the pointer to the L2 slice.
204  *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
206  * that are loaded by the qcow2 cache). If the slice is in the cache,
207  * the cache is used; otherwise the L2 slice is loaded from the image
208  * file.
209  */
210 static int l2_load(BlockDriverState *bs, uint64_t offset,
211                    uint64_t l2_offset, uint64_t **l2_slice)
212 {
213     BDRVQcow2State *s = bs->opaque;
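    /* Byte offset of the slice within its L2 table: index of the first entry
     * of the slice (table-wide index minus index within the slice) times the
     * entry size. */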
214     int start_of_slice = sizeof(uint64_t) *
215         (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));
216 
217     return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
218                            (void **)l2_slice);
219 }
220 
221 /*
222  * Writes one sector of the L1 table to the disk (can't update single entries
223  * and we really don't want bdrv_pread to perform a read-modify-write)
224  */
225 #define L1_ENTRIES_PER_SECTOR (512 / 8)
226 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
227 {
228     BDRVQcow2State *s = bs->opaque;
229     uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
230     int l1_start_index;
231     int i, ret;
232 
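    /* Round l1_index down to the first entry of the 512-byte sector that
     * contains it; the whole sector is rewritten below. */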
233     l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
234     for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
235          i++)
236     {
237         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
238     }
239 
240     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
241             s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
242     if (ret < 0) {
243         return ret;
244     }
245 
246     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
247     ret = bdrv_pwrite_sync(bs->file,
248                            s->l1_table_offset + 8 * l1_start_index,
249                            buf, sizeof(buf));
250     if (ret < 0) {
251         return ret;
252     }
253 
254     return 0;
255 }
256 
257 /*
258  * l2_allocate
259  *
 * Allocate a new L2 table in the file. If the L1 entry at l1_index already
 * points to an L2 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
265  */
266 
267 static int l2_allocate(BlockDriverState *bs, int l1_index)
268 {
269     BDRVQcow2State *s = bs->opaque;
270     uint64_t old_l2_offset;
271     uint64_t *l2_slice = NULL;
272     unsigned slice, slice_size2, n_slices;
273     int64_t l2_offset;
274     int ret;
275 
276     old_l2_offset = s->l1_table[l1_index];
277 
278     trace_qcow2_l2_allocate(bs, l1_index);
279 
280     /* allocate a new l2 entry */
281 
282     l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
283     if (l2_offset < 0) {
284         ret = l2_offset;
285         goto fail;
286     }
287 
288     /* If we're allocating the table at offset 0 then something is wrong */
289     if (l2_offset == 0) {
290         qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
291                                 "allocation of L2 table at offset 0");
292         ret = -EIO;
293         goto fail;
294     }
295 
296     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
297     if (ret < 0) {
298         goto fail;
299     }
300 
301     /* allocate a new entry in the l2 cache */
302 
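    /* An L2 table occupies exactly one cluster, which the cache handles in
     * n_slices separate slices. */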
303     slice_size2 = s->l2_slice_size * sizeof(uint64_t);
304     n_slices = s->cluster_size / slice_size2;
305 
306     trace_qcow2_l2_allocate_get_empty(bs, l1_index);
307     for (slice = 0; slice < n_slices; slice++) {
308         ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
309                                     l2_offset + slice * slice_size2,
310                                     (void **) &l2_slice);
311         if (ret < 0) {
312             goto fail;
313         }
314 
315         if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
316             /* if there was no old l2 table, clear the new slice */
317             memset(l2_slice, 0, slice_size2);
318         } else {
319             uint64_t *old_slice;
320             uint64_t old_l2_slice_offset =
321                 (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;
322 
323             /* if there was an old l2 table, read a slice from the disk */
324             BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
325             ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
326                                   (void **) &old_slice);
327             if (ret < 0) {
328                 goto fail;
329             }
330 
331             memcpy(l2_slice, old_slice, slice_size2);
332 
333             qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
334         }
335 
336         /* write the l2 slice to the file */
337         BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
338 
339         trace_qcow2_l2_allocate_write_l2(bs, l1_index);
340         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
341         qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
342     }
343 
344     ret = qcow2_cache_flush(bs, s->l2_table_cache);
345     if (ret < 0) {
346         goto fail;
347     }
348 
349     /* update the L1 entry */
350     trace_qcow2_l2_allocate_write_l1(bs, l1_index);
351     s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
352     ret = qcow2_write_l1_entry(bs, l1_index);
353     if (ret < 0) {
354         goto fail;
355     }
356 
357     trace_qcow2_l2_allocate_done(bs, l1_index, 0);
358     return 0;
359 
360 fail:
361     trace_qcow2_l2_allocate_done(bs, l1_index, ret);
362     if (l2_slice != NULL) {
363         qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
364     }
365     s->l1_table[l1_index] = old_l2_offset;
366     if (l2_offset > 0) {
367         qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
368                             QCOW2_DISCARD_ALWAYS);
369     }
370     return ret;
371 }
372 
373 /*
374  * Checks how many clusters in a given L2 slice are contiguous in the image
375  * file. As soon as one of the flags in the bitmask stop_flags changes compared
376  * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows the caller, for example, to stop at the first
 * compressed cluster, which may require different handling.)
379  */
380 static int count_contiguous_clusters(int nb_clusters, int cluster_size,
381         uint64_t *l2_slice, uint64_t stop_flags)
382 {
383     int i;
384     QCow2ClusterType first_cluster_type;
385     uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
386     uint64_t first_entry = be64_to_cpu(l2_slice[0]);
387     uint64_t offset = first_entry & mask;
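    /* The mask covers the host offset as well as the stop flags and the
     * compressed flag, so the loop below stops as soon as any of them differs
     * from a linear progression in steps of cluster_size. */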
388 
389     if (!offset) {
390         return 0;
391     }
392 
393     /* must be allocated */
394     first_cluster_type = qcow2_get_cluster_type(first_entry);
395     assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
396            first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);
397 
398     for (i = 0; i < nb_clusters; i++) {
399         uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
400         if (offset + (uint64_t) i * cluster_size != l2_entry) {
401             break;
402         }
403     }
404 
    return i;
406 }
407 
408 /*
409  * Checks how many consecutive unallocated clusters in a given L2
410  * slice have the same cluster type.
411  */
412 static int count_contiguous_clusters_unallocated(int nb_clusters,
413                                                  uint64_t *l2_slice,
414                                                  QCow2ClusterType wanted_type)
415 {
416     int i;
417 
418     assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
419            wanted_type == QCOW2_CLUSTER_UNALLOCATED);
420     for (i = 0; i < nb_clusters; i++) {
421         uint64_t entry = be64_to_cpu(l2_slice[i]);
422         QCow2ClusterType type = qcow2_get_cluster_type(entry);
423 
424         if (type != wanted_type) {
425             break;
426         }
427     }
428 
429     return i;
430 }
431 
432 static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
433                                             uint64_t src_cluster_offset,
434                                             unsigned offset_in_cluster,
435                                             QEMUIOVector *qiov)
436 {
437     int ret;
438 
439     if (qiov->size == 0) {
440         return 0;
441     }
442 
443     BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
444 
445     if (!bs->drv) {
446         return -ENOMEDIUM;
447     }
448 
    /* Call .bdrv_co_preadv() directly instead of using the public block-layer
450      * interface.  This avoids double I/O throttling and request tracking,
451      * which can lead to deadlock when block layer copy-on-read is enabled.
452      */
453     ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
454                                   qiov->size, qiov, 0);
455     if (ret < 0) {
456         return ret;
457     }
458 
459     return 0;
460 }
461 
462 static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
463                                                 uint64_t src_cluster_offset,
464                                                 uint64_t cluster_offset,
465                                                 unsigned offset_in_cluster,
466                                                 uint8_t *buffer,
467                                                 unsigned bytes)
468 {
469     if (bytes && bs->encrypted) {
470         BDRVQcow2State *s = bs->opaque;
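        /* With crypt_physical_offset the IV is derived from the physical
         * (host) offset in the image file; otherwise it is derived from the
         * virtual (guest) offset, as used by the older encryption format. */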
471         int64_t offset = (s->crypt_physical_offset ?
472                           (cluster_offset + offset_in_cluster) :
473                           (src_cluster_offset + offset_in_cluster));
474         assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
475         assert((bytes & ~BDRV_SECTOR_MASK) == 0);
476         assert(s->crypto);
477         if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
478             return false;
479         }
480     }
481     return true;
482 }
483 
484 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
485                                              uint64_t cluster_offset,
486                                              unsigned offset_in_cluster,
487                                              QEMUIOVector *qiov)
488 {
489     int ret;
490 
491     if (qiov->size == 0) {
492         return 0;
493     }
494 
495     ret = qcow2_pre_write_overlap_check(bs, 0,
496             cluster_offset + offset_in_cluster, qiov->size);
497     if (ret < 0) {
498         return ret;
499     }
500 
501     BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
502     ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
503                           qiov->size, qiov, 0);
504     if (ret < 0) {
505         return ret;
506     }
507 
508     return 0;
509 }
510 
511 
512 /*
513  * get_cluster_offset
514  *
515  * For a given offset of the virtual disk, find the cluster type and offset in
516  * the qcow2 file. The offset is stored in *cluster_offset.
517  *
518  * On entry, *bytes is the maximum number of contiguous bytes starting at
519  * offset that we are interested in.
520  *
521  * On exit, *bytes is the number of bytes starting at offset that have the same
522  * cluster type and (if applicable) are stored contiguously in the image file.
523  * Compressed clusters are always returned one by one.
524  *
525  * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
526  * cases.
527  */
528 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
529                              unsigned int *bytes, uint64_t *cluster_offset)
530 {
531     BDRVQcow2State *s = bs->opaque;
532     unsigned int l2_index;
533     uint64_t l1_index, l2_offset, *l2_slice;
534     int c;
535     unsigned int offset_in_cluster;
536     uint64_t bytes_available, bytes_needed, nb_clusters;
537     QCow2ClusterType type;
538     int ret;
539 
540     offset_in_cluster = offset_into_cluster(s, offset);
541     bytes_needed = (uint64_t) *bytes + offset_in_cluster;
542 
543     /* compute how many bytes there are between the start of the cluster
544      * containing offset and the end of the l2 slice that contains
545      * the entry pointing to it */
546     bytes_available =
547         ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
548         << s->cluster_bits;
549 
550     if (bytes_needed > bytes_available) {
551         bytes_needed = bytes_available;
552     }
553 
554     *cluster_offset = 0;
555 
556     /* seek to the l2 offset in the l1 table */
557 
558     l1_index = offset_to_l1_index(s, offset);
559     if (l1_index >= s->l1_size) {
560         type = QCOW2_CLUSTER_UNALLOCATED;
561         goto out;
562     }
563 
564     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
565     if (!l2_offset) {
566         type = QCOW2_CLUSTER_UNALLOCATED;
567         goto out;
568     }
569 
570     if (offset_into_cluster(s, l2_offset)) {
571         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
572                                 " unaligned (L1 index: %#" PRIx64 ")",
573                                 l2_offset, l1_index);
574         return -EIO;
575     }
576 
577     /* load the l2 slice in memory */
578 
579     ret = l2_load(bs, offset, l2_offset, &l2_slice);
580     if (ret < 0) {
581         return ret;
582     }
583 
584     /* find the cluster offset for the given disk offset */
585 
586     l2_index = offset_to_l2_slice_index(s, offset);
587     *cluster_offset = be64_to_cpu(l2_slice[l2_index]);
588 
589     nb_clusters = size_to_clusters(s, bytes_needed);
590     /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
591      * integers; the minimum cluster size is 512, so this assertion is always
592      * true */
593     assert(nb_clusters <= INT_MAX);
594 
595     type = qcow2_get_cluster_type(*cluster_offset);
596     if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
597                                 type == QCOW2_CLUSTER_ZERO_ALLOC)) {
598         qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
599                                 " in pre-v3 image (L2 offset: %#" PRIx64
600                                 ", L2 index: %#x)", l2_offset, l2_index);
601         ret = -EIO;
602         goto fail;
603     }
604     switch (type) {
605     case QCOW2_CLUSTER_COMPRESSED:
606         /* Compressed clusters can only be processed one by one */
607         c = 1;
608         *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
609         break;
610     case QCOW2_CLUSTER_ZERO_PLAIN:
611     case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
613         c = count_contiguous_clusters_unallocated(nb_clusters,
614                                                   &l2_slice[l2_index], type);
615         *cluster_offset = 0;
616         break;
617     case QCOW2_CLUSTER_ZERO_ALLOC:
618     case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
620         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
621                                       &l2_slice[l2_index], QCOW_OFLAG_ZERO);
622         *cluster_offset &= L2E_OFFSET_MASK;
623         if (offset_into_cluster(s, *cluster_offset)) {
624             qcow2_signal_corruption(bs, true, -1, -1,
625                                     "Cluster allocation offset %#"
626                                     PRIx64 " unaligned (L2 offset: %#" PRIx64
627                                     ", L2 index: %#x)", *cluster_offset,
628                                     l2_offset, l2_index);
629             ret = -EIO;
630             goto fail;
631         }
632         break;
633     default:
634         abort();
635     }
636 
637     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
638 
639     bytes_available = (int64_t)c * s->cluster_size;
640 
641 out:
642     if (bytes_available > bytes_needed) {
643         bytes_available = bytes_needed;
644     }
645 
646     /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
647      * subtracting offset_in_cluster will therefore definitely yield something
648      * not exceeding UINT_MAX */
649     assert(bytes_available - offset_in_cluster <= UINT_MAX);
650     *bytes = bytes_available - offset_in_cluster;
651 
652     return type;
653 
654 fail:
655     qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
656     return ret;
657 }
658 
659 /*
660  * get_cluster_table
661  *
662  * for a given disk offset, load (and allocate if needed)
663  * the appropriate slice of its l2 table.
664  *
665  * the cluster index in the l2 slice is given to the caller.
666  *
667  * Returns 0 on success, -errno in failure case
668  */
669 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
670                              uint64_t **new_l2_slice,
671                              int *new_l2_index)
672 {
673     BDRVQcow2State *s = bs->opaque;
674     unsigned int l2_index;
675     uint64_t l1_index, l2_offset;
676     uint64_t *l2_slice = NULL;
677     int ret;
678 
679     /* seek to the l2 offset in the l1 table */
680 
681     l1_index = offset_to_l1_index(s, offset);
682     if (l1_index >= s->l1_size) {
683         ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
684         if (ret < 0) {
685             return ret;
686         }
687     }
688 
689     assert(l1_index < s->l1_size);
690     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
691     if (offset_into_cluster(s, l2_offset)) {
692         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
693                                 " unaligned (L1 index: %#" PRIx64 ")",
694                                 l2_offset, l1_index);
695         return -EIO;
696     }
697 
698     if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
699         /* First allocate a new L2 table (and do COW if needed) */
700         ret = l2_allocate(bs, l1_index);
701         if (ret < 0) {
702             return ret;
703         }
704 
705         /* Then decrease the refcount of the old table */
706         if (l2_offset) {
707             qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
708                                 QCOW2_DISCARD_OTHER);
709         }
710 
711         /* Get the offset of the newly-allocated l2 table */
712         l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
713         assert(offset_into_cluster(s, l2_offset) == 0);
714     }
715 
716     /* load the l2 slice in memory */
717     ret = l2_load(bs, offset, l2_offset, &l2_slice);
718     if (ret < 0) {
719         return ret;
720     }
721 
722     /* find the cluster offset for the given disk offset */
723 
724     l2_index = offset_to_l2_slice_index(s, offset);
725 
726     *new_l2_slice = l2_slice;
727     *new_l2_index = l2_index;
728 
729     return 0;
730 }
731 
732 /*
733  * alloc_compressed_cluster_offset
734  *
 * For a given offset on the virtual disk, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
742  *
743  */
744 
745 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
746                                                uint64_t offset,
747                                                int compressed_size)
748 {
749     BDRVQcow2State *s = bs->opaque;
750     int l2_index, ret;
751     uint64_t *l2_slice;
752     int64_t cluster_offset;
753     int nb_csectors;
754 
755     ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
756     if (ret < 0) {
757         return 0;
758     }
759 
760     /* Compression can't overwrite anything. Fail if the cluster was already
761      * allocated. */
762     cluster_offset = be64_to_cpu(l2_slice[l2_index]);
763     if (cluster_offset & L2E_OFFSET_MASK) {
764         qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
765         return 0;
766     }
767 
768     cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
769     if (cluster_offset < 0) {
770         qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
771         return 0;
772     }
773 
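    /* The descriptor stores the number of *additional* 512-byte sectors that
     * the compressed data spans beyond the first one; the read side adds the
     * first sector back. E.g. 700 bytes of compressed data starting at byte
     * 510 touch sectors 0..2, and nb_csectors is 2. */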
774     nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
775                   (cluster_offset >> 9);
776 
777     cluster_offset |= QCOW_OFLAG_COMPRESSED |
778                       ((uint64_t)nb_csectors << s->csize_shift);
779 
780     /* update L2 table */
781 
782     /* compressed clusters never have the copied flag */
783 
784     BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
785     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
786     l2_slice[l2_index] = cpu_to_be64(cluster_offset);
787     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
788 
789     return cluster_offset;
790 }
791 
792 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
793 {
794     BDRVQcow2State *s = bs->opaque;
795     Qcow2COWRegion *start = &m->cow_start;
796     Qcow2COWRegion *end = &m->cow_end;
797     unsigned buffer_size;
798     unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
799     bool merge_reads;
800     uint8_t *start_buffer, *end_buffer;
801     QEMUIOVector qiov;
802     int ret;
803 
804     assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
805     assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
806     assert(start->offset + start->nb_bytes <= end->offset);
807     assert(!m->data_qiov || m->data_qiov->size == data_bytes);
808 
809     if (start->nb_bytes == 0 && end->nb_bytes == 0) {
810         return 0;
811     }
812 
813     /* If we have to read both the start and end COW regions and the
814      * middle region is not too large then perform just one read
815      * operation */
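    /* (16384 bytes is a heuristic: reading up to 16 KiB of data that is not
     * actually needed is usually cheaper than paying the latency of a second
     * read request.) */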
816     merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
817     if (merge_reads) {
818         buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
819     } else {
820         /* If we have to do two reads, add some padding in the middle
821          * if necessary to make sure that the end region is optimally
822          * aligned. */
823         size_t align = bdrv_opt_mem_align(bs);
824         assert(align > 0 && align <= UINT_MAX);
825         assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
826                UINT_MAX - end->nb_bytes);
827         buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
828     }
829 
830     /* Reserve a buffer large enough to store all the data that we're
831      * going to read */
832     start_buffer = qemu_try_blockalign(bs, buffer_size);
833     if (start_buffer == NULL) {
834         return -ENOMEM;
835     }
836     /* The part of the buffer where the end region is located */
837     end_buffer = start_buffer + buffer_size - end->nb_bytes;
838 
839     qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));
840 
841     qemu_co_mutex_unlock(&s->lock);
842     /* First we read the existing data from both COW regions. We
843      * either read the whole region in one go, or the start and end
844      * regions separately. */
845     if (merge_reads) {
846         qemu_iovec_add(&qiov, start_buffer, buffer_size);
847         ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
848     } else {
849         qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
850         ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
851         if (ret < 0) {
852             goto fail;
853         }
854 
855         qemu_iovec_reset(&qiov);
856         qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
857         ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
858     }
859     if (ret < 0) {
860         goto fail;
861     }
862 
863     /* Encrypt the data if necessary before writing it */
864     if (bs->encrypted) {
865         if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
866                                     start->offset, start_buffer,
867                                     start->nb_bytes) ||
868             !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
869                                     end->offset, end_buffer, end->nb_bytes)) {
870             ret = -EIO;
871             goto fail;
872         }
873     }
874 
875     /* And now we can write everything. If we have the guest data we
876      * can write everything in one single operation */
877     if (m->data_qiov) {
878         qemu_iovec_reset(&qiov);
879         if (start->nb_bytes) {
880             qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
881         }
882         qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
883         if (end->nb_bytes) {
884             qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
885         }
886         /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only a
         * single I/O operation. */
889         BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
890         ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
891     } else {
892         /* If there's no guest data then write both COW regions separately */
893         qemu_iovec_reset(&qiov);
894         qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
895         ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
896         if (ret < 0) {
897             goto fail;
898         }
899 
900         qemu_iovec_reset(&qiov);
901         qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
902         ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
903     }
904 
905 fail:
906     qemu_co_mutex_lock(&s->lock);
907 
908     /*
909      * Before we update the L2 table to actually point to the new cluster, we
910      * need to be sure that the refcounts have been increased and COW was
911      * handled.
912      */
913     if (ret == 0) {
914         qcow2_cache_depends_on_flush(s->l2_table_cache);
915     }
916 
917     qemu_vfree(start_buffer);
918     qemu_iovec_destroy(&qiov);
919     return ret;
920 }
921 
922 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
923 {
924     BDRVQcow2State *s = bs->opaque;
925     int i, j = 0, l2_index, ret;
926     uint64_t *old_cluster, *l2_slice;
927     uint64_t cluster_offset = m->alloc_offset;
928 
929     trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
930     assert(m->nb_clusters > 0);
931 
932     old_cluster = g_try_new(uint64_t, m->nb_clusters);
933     if (old_cluster == NULL) {
934         ret = -ENOMEM;
935         goto err;
936     }
937 
938     /* copy content of unmodified sectors */
939     ret = perform_cow(bs, m);
940     if (ret < 0) {
941         goto err;
942     }
943 
944     /* Update L2 table. */
945     if (s->use_lazy_refcounts) {
946         qcow2_mark_dirty(bs);
947     }
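    /* Order the flushes: the L2 table cache is made dependent on the
     * refcount block cache, so refcount updates reach the disk before any
     * L2 entry that relies on them. */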
948     if (qcow2_need_accurate_refcounts(s)) {
949         qcow2_cache_set_dependency(bs, s->l2_table_cache,
950                                    s->refcount_block_cache);
951     }
952 
953     ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
954     if (ret < 0) {
955         goto err;
956     }
957     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
958 
959     assert(l2_index + m->nb_clusters <= s->l2_slice_size);
960     for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data concurrently.
         * The first one to complete updates the l2 table with a pointer to its
         * cluster; the second one has to do RMW (which is done above by
         * perform_cow()), update the l2 table with its cluster pointer and free
         * the old cluster. This is what this loop does. */
967         if (l2_slice[l2_index + i] != 0) {
968             old_cluster[j++] = l2_slice[l2_index + i];
969         }
970 
971         l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
972                     (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

976     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
977 
978     /*
979      * If this was a COW, we need to decrease the refcount of the old cluster.
980      *
981      * Don't discard clusters that reach a refcount of 0 (e.g. compressed
982      * clusters), the next write will reuse them anyway.
983      */
984     if (!m->keep_old_clusters && j != 0) {
985         for (i = 0; i < j; i++) {
986             qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
987                                     QCOW2_DISCARD_NEVER);
988         }
989     }
990 
991     ret = 0;
992 err:
993     g_free(old_cluster);
994     return ret;
}
996 
997 /**
998  * Frees the allocated clusters because the request failed and they won't
999  * actually be linked.
1000  */
1001 void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
1002 {
1003     BDRVQcow2State *s = bs->opaque;
1004     qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
1005                         QCOW2_DISCARD_NEVER);
1006 }
1007 
1008 /*
1009  * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes not yet allocated
 * space, which must be copied from the backing file)
1012  */
1013 static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
1014     uint64_t *l2_slice, int l2_index)
1015 {
1016     int i;
1017 
1018     for (i = 0; i < nb_clusters; i++) {
1019         uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
1020         QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
1021 
        switch (cluster_type) {
1023         case QCOW2_CLUSTER_NORMAL:
1024             if (l2_entry & QCOW_OFLAG_COPIED) {
1025                 goto out;
1026             }
1027             break;
1028         case QCOW2_CLUSTER_UNALLOCATED:
1029         case QCOW2_CLUSTER_COMPRESSED:
1030         case QCOW2_CLUSTER_ZERO_PLAIN:
1031         case QCOW2_CLUSTER_ZERO_ALLOC:
1032             break;
1033         default:
1034             abort();
1035         }
1036     }
1037 
1038 out:
1039     assert(i <= nb_clusters);
1040     return i;
1041 }
1042 
1043 /*
1044  * Check if there already is an AIO write request in flight which allocates
1045  * the same cluster. In this case we need to wait until the previous
1046  * request has completed and updated the L2 table accordingly.
1047  *
1048  * Returns:
1049  *   0       if there was no dependency. *cur_bytes indicates the number of
1050  *           bytes from guest_offset that can be read before the next
1051  *           dependency must be processed (or the request is complete)
1052  *
1053  *   -EAGAIN if we had to wait for another request, previously gathered
1054  *           information on cluster allocation may be invalid now. The caller
1055  *           must start over anyway, so consider *cur_bytes undefined.
1056  */
1057 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
1058     uint64_t *cur_bytes, QCowL2Meta **m)
1059 {
1060     BDRVQcow2State *s = bs->opaque;
1061     QCowL2Meta *old_alloc;
1062     uint64_t bytes = *cur_bytes;
1063 
1064     QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
1065 
1066         uint64_t start = guest_offset;
1067         uint64_t end = start + bytes;
1068         uint64_t old_start = l2meta_cow_start(old_alloc);
1069         uint64_t old_end = l2meta_cow_end(old_alloc);
1070 
1071         if (end <= old_start || start >= old_end) {
1072             /* No intersection */
1073         } else {
1074             if (start < old_start) {
1075                 /* Stop at the start of a running allocation */
1076                 bytes = old_start - start;
1077             } else {
1078                 bytes = 0;
1079             }
1080 
            /* Stop if an l2meta already exists. After yielding, it wouldn't
1082              * be valid any more, so we'd have to clean up the old L2Metas
1083              * and deal with requests depending on them before starting to
1084              * gather new ones. Not worth the trouble. */
1085             if (bytes == 0 && *m) {
1086                 *cur_bytes = 0;
1087                 return 0;
1088             }
1089 
1090             if (bytes == 0) {
1091                 /* Wait for the dependency to complete. We need to recheck
1092                  * the free/allocated clusters when we continue. */
1093                 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
1094                 return -EAGAIN;
1095             }
1096         }
1097     }
1098 
1099     /* Make sure that existing clusters and new allocations are only used up to
1100      * the next dependency if we shortened the request above */
1101     *cur_bytes = bytes;
1102 
1103     return 0;
1104 }
1105 
1106 /*
 * Checks how many clusters starting at guest_offset (up to *bytes) are
 * already allocated and don't require a copy on write. If *host_offset is
 * not zero, only physically contiguous clusters beginning at this host
 * offset are counted.
1111  *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
1115  *
1116  * Returns:
1117  *   0:     if no allocated clusters are available at the given offset.
1118  *          *bytes is normally unchanged. It is set to 0 if the cluster
1119  *          is allocated and doesn't need COW, but doesn't have the right
1120  *          physical offset.
1121  *
1122  *   1:     if allocated clusters that don't require a COW are available at
1123  *          the requested offset. *bytes may have decreased and describes
1124  *          the length of the area that can be written to.
1125  *
1126  *  -errno: in error cases
1127  */
1128 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
1129     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1130 {
1131     BDRVQcow2State *s = bs->opaque;
1132     int l2_index;
1133     uint64_t cluster_offset;
1134     uint64_t *l2_slice;
1135     uint64_t nb_clusters;
1136     unsigned int keep_clusters;
1137     int ret;
1138 
1139     trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
1140                               *bytes);
1141 
1142     assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
1143                                 == offset_into_cluster(s, *host_offset));
1144 
1145     /*
1146      * Calculate the number of clusters to look for. We stop at L2 slice
1147      * boundaries to keep things simple.
1148      */
1149     nb_clusters =
1150         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1151 
1152     l2_index = offset_to_l2_slice_index(s, guest_offset);
1153     nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
1154     assert(nb_clusters <= INT_MAX);
1155 
1156     /* Find L2 entry for the first involved cluster */
1157     ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
1158     if (ret < 0) {
1159         return ret;
1160     }
1161 
1162     cluster_offset = be64_to_cpu(l2_slice[l2_index]);
1163 
1164     /* Check how many clusters are already allocated and don't need COW */
1165     if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
1166         && (cluster_offset & QCOW_OFLAG_COPIED))
1167     {
1168         /* If a specific host_offset is required, check it */
1169         bool offset_matches =
1170             (cluster_offset & L2E_OFFSET_MASK) == *host_offset;
1171 
1172         if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
1173             qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
1174                                     "%#llx unaligned (guest offset: %#" PRIx64
1175                                     ")", cluster_offset & L2E_OFFSET_MASK,
1176                                     guest_offset);
1177             ret = -EIO;
1178             goto out;
1179         }
1180 
1181         if (*host_offset != 0 && !offset_matches) {
1182             *bytes = 0;
1183             ret = 0;
1184             goto out;
1185         }
1186 
1187         /* We keep all QCOW_OFLAG_COPIED clusters */
1188         keep_clusters =
1189             count_contiguous_clusters(nb_clusters, s->cluster_size,
1190                                       &l2_slice[l2_index],
1191                                       QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
1192         assert(keep_clusters <= nb_clusters);
1193 
1194         *bytes = MIN(*bytes,
1195                  keep_clusters * s->cluster_size
1196                  - offset_into_cluster(s, guest_offset));
1197 
1198         ret = 1;
1199     } else {
1200         ret = 0;
1201     }
1202 
1203     /* Cleanup */
1204 out:
1205     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
1206 
1207     /* Only return a host offset if we actually made progress. Otherwise we
1208      * would make requirements for handle_alloc() that it can't fulfill */
1209     if (ret > 0) {
1210         *host_offset = (cluster_offset & L2E_OFFSET_MASK)
1211                      + offset_into_cluster(s, guest_offset);
1212     }
1213 
1214     return ret;
1215 }
1216 
1217 /*
1218  * Allocates new clusters for the given guest_offset.
1219  *
1220  * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
1221  * contain the number of clusters that have been allocated and are contiguous
1222  * in the image file.
1223  *
1224  * If *host_offset is non-zero, it specifies the offset in the image file at
1225  * which the new clusters must start. *nb_clusters can be 0 on return in this
1226  * case if the cluster at host_offset is already in use. If *host_offset is
1227  * zero, the clusters can be allocated anywhere in the image file.
1228  *
1229  * *host_offset is updated to contain the offset into the image file at which
1230  * the first allocated cluster starts.
1231  *
1232  * Return 0 on success and -errno in error cases. -EAGAIN means that the
1233  * function has been waiting for another request and the allocation must be
1234  * restarted, but the whole request should not be failed.
1235  */
1236 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
1237                                    uint64_t *host_offset, uint64_t *nb_clusters)
1238 {
1239     BDRVQcow2State *s = bs->opaque;
1240 
1241     trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
1242                                          *host_offset, *nb_clusters);
1243 
1244     /* Allocate new clusters */
1245     trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
1246     if (*host_offset == 0) {
1247         int64_t cluster_offset =
1248             qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
1249         if (cluster_offset < 0) {
1250             return cluster_offset;
1251         }
1252         *host_offset = cluster_offset;
1253         return 0;
1254     } else {
1255         int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
1256         if (ret < 0) {
1257             return ret;
1258         }
1259         *nb_clusters = ret;
1260         return 0;
1261     }
1262 }
1263 
1264 /*
1265  * Allocates new clusters for an area that either is yet unallocated or needs a
1266  * copy on write. If *host_offset is non-zero, clusters are only allocated if
1267  * the new allocation can match the specified host offset.
1268  *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
1272  *
1273  * Returns:
1274  *   0:     if no clusters could be allocated. *bytes is set to 0,
1275  *          *host_offset is left unchanged.
1276  *
1277  *   1:     if new clusters were allocated. *bytes may be decreased if the
1278  *          new allocation doesn't cover all of the requested area.
1279  *          *host_offset is updated to contain the host offset of the first
1280  *          newly allocated cluster.
1281  *
1282  *  -errno: in error cases
1283  */
1284 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
1285     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1286 {
1287     BDRVQcow2State *s = bs->opaque;
1288     int l2_index;
1289     uint64_t *l2_slice;
1290     uint64_t entry;
1291     uint64_t nb_clusters;
1292     int ret;
1293     bool keep_old_clusters = false;
1294 
1295     uint64_t alloc_cluster_offset = 0;
1296 
1297     trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
1298                              *bytes);
1299     assert(*bytes > 0);
1300 
1301     /*
1302      * Calculate the number of clusters to look for. We stop at L2 slice
1303      * boundaries to keep things simple.
1304      */
1305     nb_clusters =
1306         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1307 
1308     l2_index = offset_to_l2_slice_index(s, guest_offset);
1309     nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
1310     assert(nb_clusters <= INT_MAX);
1311 
1312     /* Find L2 entry for the first involved cluster */
1313     ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
1314     if (ret < 0) {
1315         return ret;
1316     }
1317 
1318     entry = be64_to_cpu(l2_slice[l2_index]);
1319 
1320     /* For the moment, overwrite compressed clusters one by one */
1321     if (entry & QCOW_OFLAG_COMPRESSED) {
1322         nb_clusters = 1;
1323     } else {
1324         nb_clusters = count_cow_clusters(s, nb_clusters, l2_slice, l2_index);
1325     }
1326 
1327     /* This function is only called when there were no non-COW clusters, so if
1328      * we can't find any unallocated or COW clusters either, something is
1329      * wrong with our code. */
1330     assert(nb_clusters > 0);
1331 
1332     if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
1333         (entry & QCOW_OFLAG_COPIED) &&
1334         (!*host_offset ||
1335          start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
1336     {
1337         int preallocated_nb_clusters;
1338 
1339         if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
1340             qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
1341                                     "cluster offset %#llx unaligned (guest "
1342                                     "offset: %#" PRIx64 ")",
1343                                     entry & L2E_OFFSET_MASK, guest_offset);
1344             ret = -EIO;
1345             goto fail;
1346         }
1347 
1348         /* Try to reuse preallocated zero clusters; contiguous normal clusters
1349          * would be fine, too, but count_cow_clusters() above has limited
1350          * nb_clusters already to a range of COW clusters */
1351         preallocated_nb_clusters =
1352             count_contiguous_clusters(nb_clusters, s->cluster_size,
1353                                       &l2_slice[l2_index], QCOW_OFLAG_COPIED);
1354         assert(preallocated_nb_clusters > 0);
1355 
1356         nb_clusters = preallocated_nb_clusters;
1357         alloc_cluster_offset = entry & L2E_OFFSET_MASK;
1358 
1359         /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
1360          * should not free them. */
1361         keep_old_clusters = true;
1362     }
1363 
1364     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
1365 
1366     if (!alloc_cluster_offset) {
1367         /* Allocate, if necessary at a given offset in the image file */
1368         alloc_cluster_offset = start_of_cluster(s, *host_offset);
1369         ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
1370                                       &nb_clusters);
1371         if (ret < 0) {
1372             goto fail;
1373         }
1374 
1375         /* Can't extend contiguous allocation */
1376         if (nb_clusters == 0) {
1377             *bytes = 0;
1378             return 0;
1379         }
1380 
1381         /* !*host_offset would overwrite the image header and is reserved for
1382          * "no host offset preferred". If 0 was a valid host offset, it'd
1383          * trigger the following overlap check; do that now to avoid having an
1384          * invalid value in *host_offset. */
1385         if (!alloc_cluster_offset) {
1386             ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
1387                                                 nb_clusters * s->cluster_size);
1388             assert(ret < 0);
1389             goto fail;
1390         }
1391     }
1392 
1393     /*
1394      * Save info needed for meta data update.
1395      *
1396      * requested_bytes: Number of bytes from the start of the first
1397      * newly allocated cluster to the end of the (possibly shortened
1398      * before) write request.
1399      *
1400      * avail_bytes: Number of bytes from the start of the first
1401      * newly allocated to the end of the last newly allocated cluster.
1402      *
1403      * nb_bytes: The number of bytes from the start of the first
1404      * newly allocated cluster to the end of the area that the write
1405      * request actually writes to (excluding COW at the end)
1406      */
1407     uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
1408     int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
1409     int nb_bytes = MIN(requested_bytes, avail_bytes);
1410     QCowL2Meta *old_m = *m;
1411 
1412     *m = g_malloc0(sizeof(**m));
1413 
1414     **m = (QCowL2Meta) {
1415         .next           = old_m,
1416 
1417         .alloc_offset   = alloc_cluster_offset,
1418         .offset         = start_of_cluster(s, guest_offset),
1419         .nb_clusters    = nb_clusters,
1420 
1421         .keep_old_clusters  = keep_old_clusters,
1422 
1423         .cow_start = {
1424             .offset     = 0,
1425             .nb_bytes   = offset_into_cluster(s, guest_offset),
1426         },
1427         .cow_end = {
1428             .offset     = nb_bytes,
1429             .nb_bytes   = avail_bytes - nb_bytes,
1430         },
1431     };
1432     qemu_co_queue_init(&(*m)->dependent_requests);
1433     QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
1434 
1435     *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
1436     *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
1437     assert(*bytes != 0);
1438 
1439     return 1;
1440 
1441 fail:
1442     if (*m && (*m)->nb_clusters > 0) {
1443         QLIST_REMOVE(*m, next_in_flight);
1444     }
1445     return ret;
1446 }
1447 
1448 /*
1449  * alloc_cluster_offset
1450  *
1451  * For a given offset on the virtual disk, find the cluster offset in qcow2
1452  * file. If the offset is not found, allocate a new cluster.
1453  *
1454  * If the cluster was already allocated, m->nb_clusters is set to 0 and
1455  * other fields in m are meaningless.
1456  *
1457  * If the cluster is newly allocated, m->nb_clusters is set to the number of
1458  * contiguous clusters that have been allocated. In this case, the other
1459  * fields of m are valid and contain information about the first allocated
1460  * cluster.
1461  *
1462  * If the request conflicts with another write request in flight, the coroutine
1463  * is queued and will be reentered when the dependency has completed.
1464  *
1465  * Return 0 on success and -errno in error cases
1466  */
1467 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
1468                                unsigned int *bytes, uint64_t *host_offset,
1469                                QCowL2Meta **m)
1470 {
1471     BDRVQcow2State *s = bs->opaque;
1472     uint64_t start, remaining;
1473     uint64_t cluster_offset;
1474     uint64_t cur_bytes;
1475     int ret;
1476 
1477     trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);
1478 
1479 again:
1480     start = offset;
1481     remaining = *bytes;
1482     cluster_offset = 0;
1483     *host_offset = 0;
1484     cur_bytes = 0;
1485     *m = NULL;
1486 
1487     while (true) {
1488 
1489         if (!*host_offset) {
1490             *host_offset = start_of_cluster(s, cluster_offset);
1491         }
1492 
1493         assert(remaining >= cur_bytes);
1494 
1495         start           += cur_bytes;
1496         remaining       -= cur_bytes;
1497         cluster_offset  += cur_bytes;
1498 
1499         if (remaining == 0) {
1500             break;
1501         }
1502 
1503         cur_bytes = remaining;
1504 
1505         /*
1506          * Now start gathering as many contiguous clusters as possible:
1507          *
1508          * 1. Check for overlaps with in-flight allocations
1509          *
1510          *      a) Overlap not in the first cluster -> shorten this request and
1511          *         let the caller handle the rest in its next loop iteration.
1512          *
1513          *      b) Real overlaps of two requests. Yield and restart the search
1514          *         for contiguous clusters (the situation could have changed
1515          *         while we were sleeping)
1516          *
1517          *      c) TODO: Request starts in the same cluster as the in-flight
1518          *         allocation ends. Shorten the COW of the in-flight allocation,
1519          *         set cluster_offset to write to the same cluster and set up
1520          *         the right synchronisation between the in-flight request and
1521          *         the new one.
1522          */
1523         ret = handle_dependencies(bs, start, &cur_bytes, m);
1524         if (ret == -EAGAIN) {
1525             /* Currently handle_dependencies() doesn't yield if we already had
1526              * an allocation. If it did, we would have to clean up the L2Meta
1527              * structs before starting over. */
1528             assert(*m == NULL);
1529             goto again;
1530         } else if (ret < 0) {
1531             return ret;
1532         } else if (cur_bytes == 0) {
1533             break;
1534         } else {
1535             /* handle_dependencies() may have decreased cur_bytes (shortened
1536              * the allocations below) so that the next dependency is processed
1537              * correctly during the next loop iteration. */
1538         }
1539 
1540         /*
1541          * 2. Count contiguous COPIED clusters.
1542          */
1543         ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
1544         if (ret < 0) {
1545             return ret;
1546         } else if (ret) {
1547             continue;
1548         } else if (cur_bytes == 0) {
1549             break;
1550         }
1551 
1552         /*
1553          * 3. If the request still hasn't completed, allocate new clusters,
1554          *    considering any cluster_offset of steps 1c or 2.
1555          */
1556         ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
1557         if (ret < 0) {
1558             return ret;
1559         } else if (ret) {
1560             continue;
1561         } else {
1562             assert(cur_bytes == 0);
1563             break;
1564         }
1565     }
1566 
1567     *bytes -= remaining;
1568     assert(*bytes > 0);
1569     assert(*host_offset != 0);
1570 
1571     return 0;
1572 }
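
/*
 * A minimal caller sketch for qcow2_alloc_cluster_offset() (hypothetical
 * variable names; the real caller is the qcow2 write path, which must also
 * perform the COW described by the returned QCowL2Meta list before updating
 * the L2 tables):
 *
 *     unsigned int cur_bytes = bytes_to_write;
 *     uint64_t host_offset;
 *     QCowL2Meta *l2meta = NULL;
 *
 *     ret = qcow2_alloc_cluster_offset(bs, guest_offset, &cur_bytes,
 *                                      &host_offset, &l2meta);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ...write cur_bytes bytes of guest data at host_offset in bs->file,
 *     then run the COW and L2 updates for each element of the l2meta
 *     chain...
 */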
1573 
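/*
 * Decompresses a raw deflate stream of buf_size bytes from buf into out_buf.
 *
 * Returns 0 if exactly out_buf_size bytes could be produced, -1 on any zlib
 * error or output size mismatch.
 */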
1574 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1575                              const uint8_t *buf, int buf_size)
1576 {
1577     z_stream strm1, *strm = &strm1;
1578     int ret, out_len;
1579 
1580     memset(strm, 0, sizeof(*strm));
1581 
1582     strm->next_in = (uint8_t *)buf;
1583     strm->avail_in = buf_size;
1584     strm->next_out = out_buf;
1585     strm->avail_out = out_buf_size;
1586 
1587     ret = inflateInit2(strm, -12);
1588     if (ret != Z_OK)
1589         return -1;
1590     ret = inflate(strm, Z_FINISH);
1591     out_len = strm->next_out - out_buf;
1592     if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1593         out_len != out_buf_size) {
1594         inflateEnd(strm);
1595         return -1;
1596     }
1597     inflateEnd(strm);
1598     return 0;
1599 }
1600 
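/*
 * Reads the compressed cluster described by the L2 entry cluster_offset from
 * the image file and decompresses it into s->cluster_cache. The result is
 * cached, so decompressing the same cluster again costs no additional I/O.
 *
 * Returns 0 on success, -errno on failure.
 */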
1601 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
1602 {
1603     BDRVQcow2State *s = bs->opaque;
1604     int ret, csize, nb_csectors, sector_offset;
1605     uint64_t coffset;
1606 
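    /*
     * A compressed cluster descriptor packs two fields into the L2 entry:
     * the low bits (s->cluster_offset_mask) hold the host offset of the
     * compressed data, and the field above s->csize_shift holds the number
     * of 512-byte sectors it spans, minus one.
     */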
1607     coffset = cluster_offset & s->cluster_offset_mask;
1608     if (s->cluster_cache_offset != coffset) {
1609         nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1610         sector_offset = coffset & 511;
1611         csize = nb_csectors * 512 - sector_offset;
1612 
1613         /* Allocate buffers on the first decompress operation; most images are
1614          * uncompressed, so the memory overhead can be avoided.  The buffers
1615          * are freed in .bdrv_close().
1616          */
1617         if (!s->cluster_data) {
1618             /* one more sector for decompressed data alignment */
1619             s->cluster_data = qemu_try_blockalign(bs->file->bs,
1620                     QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
1621             if (!s->cluster_data) {
1622                 return -ENOMEM;
1623             }
1624         }
1625         if (!s->cluster_cache) {
1626             s->cluster_cache = g_malloc(s->cluster_size);
1627         }
1628 
1629         BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
1630         ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
1631                         nb_csectors);
1632         if (ret < 0) {
1633             return ret;
1634         }
1635         if (decompress_buffer(s->cluster_cache, s->cluster_size,
1636                               s->cluster_data + sector_offset, csize) < 0) {
1637             return -EIO;
1638         }
1639         s->cluster_cache_offset = coffset;
1640     }
1641     return 0;
1642 }
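
/*
 * A reader sketch (hypothetical; the real consumer is the qcow2 read path):
 *
 *     ret = qcow2_decompress_cluster(bs, cluster_offset);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     memcpy(buf, s->cluster_cache + offset_in_cluster, bytes);
 */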
1643 
1644 /*
1645  * This discards as many of the given nb_clusters as possible at once (i.e.
1646  * all clusters that fall into the same L2 slice) and returns the number of
1647  * discarded clusters.
1648  */
1649 static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
1650                                uint64_t nb_clusters,
1651                                enum qcow2_discard_type type, bool full_discard)
1652 {
1653     BDRVQcow2State *s = bs->opaque;
1654     uint64_t *l2_slice;
1655     int l2_index;
1656     int ret;
1657     int i;
1658 
1659     ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
1660     if (ret < 0) {
1661         return ret;
1662     }
1663 
1664     /* Limit nb_clusters to one L2 slice */
1665     nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
1666     assert(nb_clusters <= INT_MAX);
1667 
1668     for (i = 0; i < nb_clusters; i++) {
1669         uint64_t old_l2_entry;
1670 
1671         old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
1672 
1673         /*
1674          * If full_discard is false, make sure that a discarded area reads back
1675          * as zeroes for v3 images (we cannot do it for v2 without actually
1676          * writing a zero-filled buffer). We can skip the operation if the
1677          * cluster is already marked as zero, or if it's unallocated and we
1678          * don't have a backing file.
1679          *
1680          * TODO We might want to use bdrv_block_status(bs) here, but we're
1681          * holding s->lock, so that doesn't work today.
1682          *
1683          * If full_discard is true, the cluster should not read back as zeroes,
1684          * but rather fall through to the backing file.
1685          */
1686         switch (qcow2_get_cluster_type(old_l2_entry)) {
1687         case QCOW2_CLUSTER_UNALLOCATED:
1688             if (full_discard || !bs->backing) {
1689                 continue;
1690             }
1691             break;
1692 
1693         case QCOW2_CLUSTER_ZERO_PLAIN:
1694             if (!full_discard) {
1695                 continue;
1696             }
1697             break;
1698 
1699         case QCOW2_CLUSTER_ZERO_ALLOC:
1700         case QCOW2_CLUSTER_NORMAL:
1701         case QCOW2_CLUSTER_COMPRESSED:
1702             break;
1703 
1704         default:
1705             abort();
1706         }
1707 
1708         /* First remove L2 entries */
1709         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
1710         if (!full_discard && s->qcow_version >= 3) {
1711             l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1712         } else {
1713             l2_slice[l2_index + i] = cpu_to_be64(0);
1714         }
1715 
1716         /* Then decrease the refcount */
1717         qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
1718     }
1719 
1720     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
1721 
1722     return nb_clusters;
1723 }
1724 
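/*
 * Discards the cluster range [offset, offset + bytes) by clearing the
 * corresponding L2 entries, one L2 slice per loop iteration. See
 * discard_in_l2_slice() for the meaning of full_discard.
 *
 * Returns 0 on success, -errno on failure.
 */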
1725 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
1726                           uint64_t bytes, enum qcow2_discard_type type,
1727                           bool full_discard)
1728 {
1729     BDRVQcow2State *s = bs->opaque;
1730     uint64_t end_offset = offset + bytes;
1731     uint64_t nb_clusters;
1732     int64_t cleared;
1733     int ret;
1734 
1735     /* Caller must pass aligned values, except at image end */
1736     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1737     assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1738            end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1739 
1740     nb_clusters = size_to_clusters(s, bytes);
1741 
1742     s->cache_discards = true;
1743 
1744     /* Each L2 slice is handled by its own loop iteration */
1745     while (nb_clusters > 0) {
1746         cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
1747                                       full_discard);
1748         if (cleared < 0) {
1749             ret = cleared;
1750             goto fail;
1751         }
1752 
1753         nb_clusters -= cleared;
1754         offset += (cleared * s->cluster_size);
1755     }
1756 
1757     ret = 0;
1758 fail:
1759     s->cache_discards = false;
1760     qcow2_process_discards(bs, ret);
1761 
1762     return ret;
1763 }
1764 
1765 /*
1766  * This zeroes as many of the given nb_clusters as possible at once (i.e.
1767  * all clusters that fall into the same L2 slice) and returns the number of
1768  * zeroed clusters.
1769  */
1770 static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
1771                             uint64_t nb_clusters, int flags)
1772 {
1773     BDRVQcow2State *s = bs->opaque;
1774     uint64_t *l2_slice;
1775     int l2_index;
1776     int ret;
1777     int i;
1778     bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);
1779 
1780     ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
1781     if (ret < 0) {
1782         return ret;
1783     }
1784 
1785     /* Limit nb_clusters to one L2 slice */
1786     nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
1787     assert(nb_clusters <= INT_MAX);
1788 
1789     for (i = 0; i < nb_clusters; i++) {
1790         uint64_t old_offset;
1791         QCow2ClusterType cluster_type;
1792 
1793         old_offset = be64_to_cpu(l2_slice[l2_index + i]);
1794 
1795         /*
1796          * Minimize L2 changes if the cluster already reads back as
1797          * zeroes with correct allocation.
1798          */
1799         cluster_type = qcow2_get_cluster_type(old_offset);
1800         if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
1801             (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
1802             continue;
1803         }
1804 
1805         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
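        /* Compressed clusters and unmap requests are converted to a plain
         * zero cluster and the old allocation is freed; otherwise the zero
         * flag is set on the existing entry and the allocation is kept. */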
1806         if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
1807             l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1808             qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
1809         } else {
1810             l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
1811         }
1812     }
1813 
1814     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
1815 
1816     return nb_clusters;
1817 }
1818 
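/*
 * Marks the cluster range [offset, offset + bytes) so that it reads back as
 * zeroes, one L2 slice per loop iteration. With BDRV_REQ_MAY_UNMAP set in
 * flags, the underlying clusters may also be deallocated.
 *
 * Returns 0 on success, -ENOTSUP for version 2 images, other -errno on
 * failure.
 */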
1819 int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
1820                           uint64_t bytes, int flags)
1821 {
1822     BDRVQcow2State *s = bs->opaque;
1823     uint64_t end_offset = offset + bytes;
1824     uint64_t nb_clusters;
1825     int64_t cleared;
1826     int ret;
1827 
1828     /* Caller must pass aligned values, except at image end */
1829     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1830     assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1831            end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1832 
1833     /* The zero flag is only supported by version 3 and newer */
1834     if (s->qcow_version < 3) {
1835         return -ENOTSUP;
1836     }
1837 
1838     /* Each L2 slice is handled by its own loop iteration */
1839     nb_clusters = size_to_clusters(s, bytes);
1840 
1841     s->cache_discards = true;
1842 
1843     while (nb_clusters > 0) {
1844         cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
1845         if (cleared < 0) {
1846             ret = cleared;
1847             goto fail;
1848         }
1849 
1850         nb_clusters -= cleared;
1851         offset += (cleared * s->cluster_size);
1852     }
1853 
1854     ret = 0;
1855 fail:
1856     s->cache_discards = false;
1857     qcow2_process_discards(bs, ret);
1858 
1859     return ret;
1860 }
1861 
1862 /*
1863  * Expands all zero clusters in a specific L1 table (or deallocates them, for
1864  * non-backed non-pre-allocated zero clusters).
1865  *
1866  * l1_entries and *visited_l1_entries are used to keep track of progress for
1867  * status_cb(). l1_entries contains the total number of L1 entries and
1868  * *visited_l1_entries counts all visited L1 entries.
1869  */
1870 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
1871                                       int l1_size, int64_t *visited_l1_entries,
1872                                       int64_t l1_entries,
1873                                       BlockDriverAmendStatusCB *status_cb,
1874                                       void *cb_opaque)
1875 {
1876     BDRVQcow2State *s = bs->opaque;
1877     bool is_active_l1 = (l1_table == s->l1_table);
1878     uint64_t *l2_slice = NULL;
1879     unsigned slice, slice_size2, n_slices;
1880     int ret;
1881     int i, j;
1882 
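    /* An L2 table covers a full cluster and is processed in n_slices slices
     * of s->l2_slice_size entries (slice_size2 bytes) each. */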
1883     slice_size2 = s->l2_slice_size * sizeof(uint64_t);
1884     n_slices = s->cluster_size / slice_size2;
1885 
1886     if (!is_active_l1) {
1887         /* inactive L2 tables are not kept in the L2 table cache, so they need
1888          * a dedicated buffer to be loaded into from disk */
1889         l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
1890         if (l2_slice == NULL) {
1891             return -ENOMEM;
1892         }
1893     }
1894 
1895     for (i = 0; i < l1_size; i++) {
1896         uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
1897         uint64_t l2_refcount;
1898 
1899         if (!l2_offset) {
1900             /* unallocated */
1901             (*visited_l1_entries)++;
1902             if (status_cb) {
1903                 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
1904             }
1905             continue;
1906         }
1907 
1908         if (offset_into_cluster(s, l2_offset)) {
1909             qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
1910                                     PRIx64 " unaligned (L1 index: %#x)",
1911                                     l2_offset, i);
1912             ret = -EIO;
1913             goto fail;
1914         }
1915 
1916         ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
1917                                  &l2_refcount);
1918         if (ret < 0) {
1919             goto fail;
1920         }
1921 
1922         for (slice = 0; slice < n_slices; slice++) {
1923             uint64_t slice_offset = l2_offset + slice * slice_size2;
1924             bool l2_dirty = false;
1925             if (is_active_l1) {
1926                 /* get active L2 tables from cache */
1927                 ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
1928                                       (void **)&l2_slice);
1929             } else {
1930                 /* load inactive L2 tables from disk */
1931                 ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
1932             }
1933             if (ret < 0) {
1934                 goto fail;
1935             }
1936 
1937             for (j = 0; j < s->l2_slice_size; j++) {
1938                 uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
1939                 int64_t offset = l2_entry & L2E_OFFSET_MASK;
1940                 QCow2ClusterType cluster_type =
1941                     qcow2_get_cluster_type(l2_entry);
1942 
1943                 if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
1944                     cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
1945                     continue;
1946                 }
1947 
1948                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1949                     if (!bs->backing) {
1950                         /* not backed; therefore we can simply deallocate the
1951                          * cluster */
1952                         l2_slice[j] = 0;
1953                         l2_dirty = true;
1954                         continue;
1955                     }
1956 
1957                     offset = qcow2_alloc_clusters(bs, s->cluster_size);
1958                     if (offset < 0) {
1959                         ret = offset;
1960                         goto fail;
1961                     }
1962 
1963                     if (l2_refcount > 1) {
1964                         /* For shared L2 tables, set the refcount accordingly
1965                          * (it is already 1 and needs to be l2_refcount) */
1966                         ret = qcow2_update_cluster_refcount(
1967                             bs, offset >> s->cluster_bits,
1968                             refcount_diff(1, l2_refcount), false,
1969                             QCOW2_DISCARD_OTHER);
1970                         if (ret < 0) {
1971                             qcow2_free_clusters(bs, offset, s->cluster_size,
1972                                                 QCOW2_DISCARD_OTHER);
1973                             goto fail;
1974                         }
1975                     }
1976                 }
1977 
1978                 if (offset_into_cluster(s, offset)) {
1979                     int l2_index = slice * s->l2_slice_size + j;
1980                     qcow2_signal_corruption(
1981                         bs, true, -1, -1,
1982                         "Cluster allocation offset "
1983                         "%#" PRIx64 " unaligned (L2 offset: %#"
1984                         PRIx64 ", L2 index: %#x)", offset,
1985                         l2_offset, l2_index);
1986                     if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1987                         qcow2_free_clusters(bs, offset, s->cluster_size,
1988                                             QCOW2_DISCARD_ALWAYS);
1989                     }
1990                     ret = -EIO;
1991                     goto fail;
1992                 }
1993 
1994                 ret = qcow2_pre_write_overlap_check(bs, 0, offset,
1995                                                     s->cluster_size);
1996                 if (ret < 0) {
1997                     if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1998                         qcow2_free_clusters(bs, offset, s->cluster_size,
1999                                             QCOW2_DISCARD_ALWAYS);
2000                     }
2001                     goto fail;
2002                 }
2003 
2004                 ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
2005                 if (ret < 0) {
2006                     if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
2007                         qcow2_free_clusters(bs, offset, s->cluster_size,
2008                                             QCOW2_DISCARD_ALWAYS);
2009                     }
2010                     goto fail;
2011                 }
2012 
2013                 if (l2_refcount == 1) {
2014                     l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
2015                 } else {
2016                     l2_slice[j] = cpu_to_be64(offset);
2017                 }
2018                 l2_dirty = true;
2019             }
2020 
2021             if (is_active_l1) {
2022                 if (l2_dirty) {
2023                     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
2024                     qcow2_cache_depends_on_flush(s->l2_table_cache);
2025                 }
2026                 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
2027             } else {
2028                 if (l2_dirty) {
2029                     ret = qcow2_pre_write_overlap_check(
2030                         bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
2031                         slice_offset, slice_size2);
2032                     if (ret < 0) {
2033                         goto fail;
2034                     }
2035 
2036                     ret = bdrv_pwrite(bs->file, slice_offset,
2037                                       l2_slice, slice_size2);
2038                     if (ret < 0) {
2039                         goto fail;
2040                     }
2041                 }
2042             }
2043         }
2044 
2045         (*visited_l1_entries)++;
2046         if (status_cb) {
2047             status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
2048         }
2049     }
2050 
2051     ret = 0;
2052 
2053 fail:
2054     if (l2_slice) {
2055         if (!is_active_l1) {
2056             qemu_vfree(l2_slice);
2057         } else {
2058             qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
2059         }
2060     }
2061     return ret;
2062 }
2063 
2064 /*
2065  * For backed images, expands all zero clusters on the image. For non-backed
2066  * images, deallocates all non-pre-allocated zero clusters (and claims the
2067  * allocation for pre-allocated ones). This is important for downgrading to a
2068  * qcow2 version which doesn't yet support metadata zero clusters.
2069  */
2070 int qcow2_expand_zero_clusters(BlockDriverState *bs,
2071                                BlockDriverAmendStatusCB *status_cb,
2072                                void *cb_opaque)
2073 {
2074     BDRVQcow2State *s = bs->opaque;
2075     uint64_t *l1_table = NULL;
2076     int64_t l1_entries = 0, visited_l1_entries = 0;
2077     int ret;
2078     int i, j;
2079 
2080     if (status_cb) {
2081         l1_entries = s->l1_size;
2082         for (i = 0; i < s->nb_snapshots; i++) {
2083             l1_entries += s->snapshots[i].l1_size;
2084         }
2085     }
2086 
2087     ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
2088                                      &visited_l1_entries, l1_entries,
2089                                      status_cb, cb_opaque);
2090     if (ret < 0) {
2091         goto fail;
2092     }
2093 
2094     /* Inactive L1 tables may point to active L2 tables - therefore it is
2095      * necessary to flush the L2 table cache before trying to access the L2
2096      * tables pointed to by inactive L1 entries (else we might try to expand
2097      * zero clusters that have already been expanded); furthermore, it is also
2098      * necessary to empty the L2 table cache, since it may contain tables which
2099      * are now going to be modified directly on disk, bypassing the cache.
2100      * qcow2_cache_empty() does both for us. */
2101     ret = qcow2_cache_empty(bs, s->l2_table_cache);
2102     if (ret < 0) {
2103         goto fail;
2104     }
2105 
2106     for (i = 0; i < s->nb_snapshots; i++) {
2107         int l1_size2;
2108         uint64_t *new_l1_table;
2109         Error *local_err = NULL;
2110 
2111         ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
2112                                    s->snapshots[i].l1_size, sizeof(uint64_t),
2113                                    QCOW_MAX_L1_SIZE, "Snapshot L1 table",
2114                                    &local_err);
2115         if (ret < 0) {
2116             error_report_err(local_err);
2117             goto fail;
2118         }
2119 
2120         l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
2121         new_l1_table = g_try_realloc(l1_table, l1_size2);
2122 
2123         if (!new_l1_table) {
2124             ret = -ENOMEM;
2125             goto fail;
2126         }
2127 
2128         l1_table = new_l1_table;
2129 
2130         ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
2131                          l1_table, l1_size2);
2132         if (ret < 0) {
2133             goto fail;
2134         }
2135 
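        /* the on-disk L1 table is big-endian; convert entries to host order */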
2136         for (j = 0; j < s->snapshots[i].l1_size; j++) {
2137             be64_to_cpus(&l1_table[j]);
2138         }
2139 
2140         ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
2141                                          &visited_l1_entries, l1_entries,
2142                                          status_cb, cb_opaque);
2143         if (ret < 0) {
2144             goto fail;
2145         }
2146     }
2147 
2148     ret = 0;
2149 
2150 fail:
2151     g_free(l1_table);
2152     return ret;
2153 }
2154