xref: /qemu/block/qcow2-cluster.c (revision 7a4e543d)
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
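        /* Grow by roughly a factor of 1.5 per iteration so that repeated
         * growing needs only O(log n) steps. */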
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file->bs, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
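    /* l1_size (4 bytes, big-endian) and l1_table_offset (8 bytes, big-endian)
     * are adjacent in QCowHeader, so both fields are updated with this single
     * 12-byte write. */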
    cpu_to_be32w((uint32_t *)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno otherwise. On success, *l2_table points to
 * the loaded L2 table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void **)l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

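    /* Round l1_index down to the first L1 entry of the 512-byte sector that
     * contains it; the whole sector is written out below. */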
    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file->bs,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 table in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

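    /* Make sure the refcount update for the newly allocated cluster has hit
     * the disk before the L2 table can become reachable from the L1 table. */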
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
                                (void **)table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void **)&old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **)&old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **)table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes
 * compared to the first cluster, the search stops and the cluster is not
 * counted as contiguous. (This allows, for example, stopping at the first
 * compressed cluster, which may require different handling.)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
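    /* The run ends as soon as the offset bits, the compressed flag or one of
     * the caller's stop_flags differ from the first entry. */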
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) == QCOW2_CLUSTER_NORMAL);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_clusters_by_type(int nb_clusters,
                                             uint64_t *l2_table,
                                             int wanted_type)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

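    /* The IV is the little-endian sector number padded with zeroes to 16
     * bytes, matching the Linux cryptoloop scheme referenced above. */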
    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcow2State *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface.  This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (bs->encrypted) {
        Error *err = NULL;
        assert(s->cipher);
        if (qcow2_encrypt_sectors(s, start_sect + n_start,
                                  iov.iov_base, iov.iov_base, n,
                                  true, &err) < 0) {
            ret = -EIO;
            error_free(err);
            goto out;
        }
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file->bs, (cluster_offset >> 9) + n_start, n,
                         &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

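    /* Number of guest offset bits covered by a single L1 entry, i.e. by one
     * complete L2 table */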
    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }
    assert(nb_needed <= INT_MAX);

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* nb_needed <= INT_MAX, thus nb_clusters <= INT_MAX, too */
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_UNALLOCATED);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed) the L2 table.
 *
 * A pointer to the L2 table and the index of the cluster's entry within
 * it are returned to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

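    /* QCOW_OFLAG_COPIED set in the L1 entry means the L2 table has a refcount
     * of exactly one and may therefore be updated in place; otherwise it is
     * shared (e.g. with a snapshot) and must be copied first. */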
    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, or 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        return 0;
    }

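    /* Number of 512-byte sectors touched by the compressed data, counted from
     * the sector that contains cluster_offset; this is what gets encoded into
     * the L2 entry below. */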
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

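    /* copy_sectors() performs blocking I/O, so drop the qcow2 state lock to
     * let other coroutines run in the meantime. */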
    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an
 * allocating write, but require COW to be performed (this includes
 * yet-unallocated space, which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write are present at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#" PRIx64 " unaligned (guest offset: %#"
                                    PRIx64 ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = (uint64_t)*num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

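    /* Negative windowBits selects raw deflate data without a zlib header,
     * here with a 2^12 byte window; this is the format qcow2 uses for
     * compressed clusters. */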
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

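    /* A compressed cluster descriptor packs the host offset into the low
     * bits and the compressed size (in 512-byte sectors, minus one) above
     * csize_shift. */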
    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file->bs, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many of the given nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
            case QCOW2_CLUSTER_UNALLOCATED:
                if (full_discard || !bs->backing) {
                    continue;
                }
                break;

            case QCOW2_CLUSTER_ZERO:
                if (!full_discard) {
                    continue;
                }
                break;

            case QCOW2_CLUSTER_NORMAL:
            case QCOW2_CLUSTER_COMPRESSED:
                break;

            default:
                abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset;
    uint64_t nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many of the given nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

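    /* Compressed clusters cannot keep their allocation when zeroed: their
     * entry is replaced by a plain zero-flag entry and the data is freed.
     * Normal clusters stay allocated and merely get QCOW_OFLAG_ZERO set. */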
    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    return nb_clusters;
}
1599 
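/*
 * Marks the given guest range as reading back as zeroes by setting the zero
 * flag on all covering L2 entries. Only supported by version 3 and newer
 * images; callers are expected to pass a cluster-aligned range.
 */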
1600 int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
1601 {
1602     BDRVQcow2State *s = bs->opaque;
1603     uint64_t nb_clusters;
1604     int ret;
1605 
1606     /* The zero flag is only supported by version 3 and newer */
1607     if (s->qcow_version < 3) {
1608         return -ENOTSUP;
1609     }
1610 
1611     /* Each L2 table is handled by its own loop iteration */
1612     nb_clusters = size_to_clusters(s, (uint64_t)nb_sectors << BDRV_SECTOR_BITS);
1613 
1614     s->cache_discards = true;
1615 
1616     while (nb_clusters > 0) {
1617         ret = zero_single_l2(bs, offset, nb_clusters);
1618         if (ret < 0) {
1619             goto fail;
1620         }
1621 
1622         nb_clusters -= ret;
1623         offset += (uint64_t)ret * s->cluster_size;
1624     }
1625 
1626     ret = 0;
1627 fail:
1628     s->cache_discards = false;
1629     qcow2_process_discards(bs, ret);
1630 
1631     return ret;
1632 }
1633 
1634 /*
1635  * Expands all zero clusters in a specific L1 table (or deallocates them, for
1636  * non-backed non-pre-allocated zero clusters).
1637  *
1638  * l1_entries and *visited_l1_entries are used to keep track of progress for
1639  * status_cb(). l1_entries contains the total number of L1 entries and
1640  * *visited_l1_entries counts all visited L1 entries.
1641  */
1642 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
1643                                       int l1_size, int64_t *visited_l1_entries,
1644                                       int64_t l1_entries,
1645                                       BlockDriverAmendStatusCB *status_cb,
1646                                       void *cb_opaque)
1647 {
1648     BDRVQcow2State *s = bs->opaque;
1649     bool is_active_l1 = (l1_table == s->l1_table);
1650     uint64_t *l2_table = NULL;
1651     int ret;
1652     int i, j;
1653 
1654     if (!is_active_l1) {
1655         /* inactive L2 tables are not kept in the L2 table cache, so they
1656          * need a buffer to be read into when loading them from disk */
1657         l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
1658         if (l2_table == NULL) {
1659             return -ENOMEM;
1660         }
1661     }
1662 
1663     for (i = 0; i < l1_size; i++) {
1664         uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
1665         bool l2_dirty = false;
1666         uint64_t l2_refcount;
1667 
1668         if (!l2_offset) {
1669             /* unallocated */
1670             (*visited_l1_entries)++;
1671             if (status_cb) {
1672                 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
1673             }
1674             continue;
1675         }
1676 
1677         if (offset_into_cluster(s, l2_offset)) {
1678             qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
1679                                     PRIx64 " unaligned (L1 index: %#x)",
1680                                     l2_offset, i);
1681             ret = -EIO;
1682             goto fail;
1683         }
1684 
1685         if (is_active_l1) {
1686             /* get active L2 tables from cache */
1687             ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
1688                     (void **)&l2_table);
1689         } else {
1690             /* load inactive L2 tables from disk */
1691             ret = bdrv_read(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
1692                             (void *)l2_table, s->cluster_sectors);
1693         }
1694         if (ret < 0) {
1695             goto fail;
1696         }
1697 
1698         ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
1699                                  &l2_refcount);
1700         if (ret < 0) {
1701             goto fail;
1702         }
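        /*
         * An L2 refcount greater than one means the table is shared, e.g.
         * with an internal snapshot; clusters allocated for it below must
         * then inherit that refcount and must not be flagged as copied.
         */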
1703 
1704         for (j = 0; j < s->l2_size; j++) {
1705             uint64_t l2_entry = be64_to_cpu(l2_table[j]);
1706             int64_t offset = l2_entry & L2E_OFFSET_MASK;
1707             int cluster_type = qcow2_get_cluster_type(l2_entry);
1708             bool preallocated = offset != 0;
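            /*
             * A zero cluster with a non-zero offset is "preallocated": the
             * zero flag was set on top of an existing allocation. In that
             * case the existing cluster is reused and overwritten with
             * literal zeroes instead of allocating a new one.
             */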
1709 
1710             if (cluster_type != QCOW2_CLUSTER_ZERO) {
1711                 continue;
1712             }
1713 
1714             if (!preallocated) {
1715                 if (!bs->backing) {
1716                     /* not backed; therefore we can simply deallocate the
1717                      * cluster */
1718                     l2_table[j] = 0;
1719                     l2_dirty = true;
1720                     continue;
1721                 }
1722 
1723                 offset = qcow2_alloc_clusters(bs, s->cluster_size);
1724                 if (offset < 0) {
1725                     ret = offset;
1726                     goto fail;
1727                 }
1728 
1729                 if (l2_refcount > 1) {
1730                     /* For shared L2 tables, set the refcount accordingly (it is
1731                      * already 1 and needs to be l2_refcount) */
1732                     ret = qcow2_update_cluster_refcount(bs,
1733                             offset >> s->cluster_bits,
1734                             refcount_diff(1, l2_refcount), false,
1735                             QCOW2_DISCARD_OTHER);
1736                     if (ret < 0) {
1737                         qcow2_free_clusters(bs, offset, s->cluster_size,
1738                                             QCOW2_DISCARD_OTHER);
1739                         goto fail;
1740                     }
1741                 }
1742             }
1743 
1744             if (offset_into_cluster(s, offset)) {
1745                 qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
1746                                         "%#" PRIx64 " unaligned (L2 offset: %#"
1747                                         PRIx64 ", L2 index: %#x)", offset,
1748                                         l2_offset, j);
1749                 if (!preallocated) {
1750                     qcow2_free_clusters(bs, offset, s->cluster_size,
1751                                         QCOW2_DISCARD_ALWAYS);
1752                 }
1753                 ret = -EIO;
1754                 goto fail;
1755             }
1756 
1757             ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
1758             if (ret < 0) {
1759                 if (!preallocated) {
1760                     qcow2_free_clusters(bs, offset, s->cluster_size,
1761                                         QCOW2_DISCARD_ALWAYS);
1762                 }
1763                 goto fail;
1764             }
1765 
1766             ret = bdrv_write_zeroes(bs->file->bs, offset / BDRV_SECTOR_SIZE,
1767                                     s->cluster_sectors, 0);
1768             if (ret < 0) {
1769                 if (!preallocated) {
1770                     qcow2_free_clusters(bs, offset, s->cluster_size,
1771                                         QCOW2_DISCARD_ALWAYS);
1772                 }
1773                 goto fail;
1774             }
1775 
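            /*
             * QCOW_OFLAG_COPIED (the cluster may be written in place) is
             * only valid while nothing else references the cluster, so it
             * must be omitted when the L2 table is shared.
             */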
1776             if (l2_refcount == 1) {
1777                 l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
1778             } else {
1779                 l2_table[j] = cpu_to_be64(offset);
1780             }
1781             l2_dirty = true;
1782         }
1783 
1784         if (is_active_l1) {
1785             if (l2_dirty) {
1786                 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1787                 qcow2_cache_depends_on_flush(s->l2_table_cache);
1788             }
1789             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1790         } else {
1791             if (l2_dirty) {
1792                 ret = qcow2_pre_write_overlap_check(bs,
1793                         QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
1794                         s->cluster_size);
1795                 if (ret < 0) {
1796                     goto fail;
1797                 }
1798 
1799                 ret = bdrv_write(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
1800                                  (void *)l2_table, s->cluster_sectors);
1801                 if (ret < 0) {
1802                     goto fail;
1803                 }
1804             }
1805         }
1806 
1807         (*visited_l1_entries)++;
1808         if (status_cb) {
1809             status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
1810         }
1811     }
1812 
1813     ret = 0;
1814 
1815 fail:
1816     if (l2_table) {
1817         if (!is_active_l1) {
1818             qemu_vfree(l2_table);
1819         } else {
1820             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1821         }
1822     }
1823     return ret;
1824 }
1825 
1826 /*
1827  * For backed images, expands all zero clusters on the image. For non-backed
1828  * images, deallocates all non-pre-allocated zero clusters (and claims the
1829  * allocation for pre-allocated ones). This is important for downgrading to a
1830  * qcow2 version which doesn't yet support metadata zero clusters.
1831  */
1832 int qcow2_expand_zero_clusters(BlockDriverState *bs,
1833                                BlockDriverAmendStatusCB *status_cb,
1834                                void *cb_opaque)
1835 {
1836     BDRVQcow2State *s = bs->opaque;
1837     uint64_t *l1_table = NULL;
1838     int64_t l1_entries = 0, visited_l1_entries = 0;
1839     int ret;
1840     int i, j;
1841 
1842     if (status_cb) {
1843         l1_entries = s->l1_size;
1844         for (i = 0; i < s->nb_snapshots; i++) {
1845             l1_entries += s->snapshots[i].l1_size;
1846         }
1847     }
1848 
1849     ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
1850                                      &visited_l1_entries, l1_entries,
1851                                      status_cb, cb_opaque);
1852     if (ret < 0) {
1853         goto fail;
1854     }
1855 
1856     /* Inactive L1 tables may point to active L2 tables - therefore it is
1857      * necessary to flush the L2 table cache before trying to access the L2
1858      * tables pointed to by inactive L1 entries (else we might try to expand
1859      * zero clusters that have already been expanded); furthermore, it is also
1860      * necessary to empty the L2 table cache, since it may contain tables which
1861      * are now going to be modified directly on disk, bypassing the cache.
1862      * qcow2_cache_empty() does both for us. */
1863     ret = qcow2_cache_empty(bs, s->l2_table_cache);
1864     if (ret < 0) {
1865         goto fail;
1866     }
1867 
1868     for (i = 0; i < s->nb_snapshots; i++) {
1869         int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
1870                                       sizeof(uint64_t), BDRV_SECTOR_SIZE);
1871 
1872         l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
1873 
1874         ret = bdrv_read(bs->file->bs,
1875                         s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
1876                         (void *)l1_table, l1_sectors);
1877         if (ret < 0) {
1878             goto fail;
1879         }
1880 
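        /* snapshot L1 tables are stored in big-endian byte order on disk */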
1881         for (j = 0; j < s->snapshots[i].l1_size; j++) {
1882             be64_to_cpus(&l1_table[j]);
1883         }
1884 
1885         ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
1886                                          &visited_l1_entries, l1_entries,
1887                                          status_cb, cb_opaque);
1888         if (ret < 0) {
1889             goto fail;
1890         }
1891     }
1892 
1893     ret = 0;
1894 
1895 fail:
1896     g_free(l1_table);
1897     return ret;
1898 }
1899