/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
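
/* A probe score of 100 claims the image outright for this driver once the
 * QED magic matches; 0 declines it so other format probes can run.
 */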

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwritev(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
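
/* As a worked example, with the defaults used below (64 KiB cluster size and
 * a 4-cluster table size):
 *
 *   table_entries = (4 * 65536) / 8 = 32768 entries per table
 *   l2_size       = 32768 * 65536  = 2 GiB of data per L2 table
 *   maximum size  = 2 GiB * 32768  = 64 TiB
 */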

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
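
/* The returned clusters only become persistent once table or data writes
 * actually extend the image file.  A crash between allocation and the table
 * update can therefore leak clusters, which is why allocating writes first
 * mark the image with QED_F_NEED_CHECK (see qed_should_set_need_check()).
 */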

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;
    qemu_co_enter_next(&s->allocating_write_reqs);
}
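
/* Plugging is used by the need-check timer: while plugged, new allocating
 * write requests wait in s->allocating_write_reqs instead of starting, so the
 * image can be flushed and QED_F_NEED_CHECK cleared in the header without
 * racing against fresh allocations (see qed_aio_write_alloc()).
 */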

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    /* The timer should only fire when allocating writes have drained */
    assert(!s->allocating_acb);

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    qed_release(s);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    qemu_co_queue_init(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
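
    /* A sketch of how a virtual disk offset now decomposes, assuming the
     * defaults (cluster_size = 64 KiB, table_size = 4, so table_nelems =
     * 32768, l2_shift = 16, l1_shift = 31):
     *
     *   L1 index            = pos >> l1_shift          (pos >> 31)
     *   L2 index            = (pos >> l2_shift) & l2_mask
     *   offset into cluster = pos & (cluster_size - 1)
     *
     * This mirrors the qed_l1_index()/qed_l2_index() helpers used below.
     */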

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}
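
/* The image created above, with header_size = 1, is laid out as:
 *
 *   byte 0:               QED header structure
 *   byte sizeof(header):  backing filename string (if any)
 *   byte cluster_size:    zeroed L1 table (table_size clusters)
 *
 * L2 tables and data clusters are then allocated on demand past the L1 table.
 */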

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset,
                                size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    ret = qed_find_cluster(s, &request, cb.pos, &len, &offset);
    qed_is_allocated_cb(&cb, ret, offset, len);

    /* The callback was invoked immediately */
    assert(cb.status != BDRV_BLOCK_OFFSET_MASK);

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
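 *
 * For example, linking n = 3 data clusters starting at byte offset X stores
 * X, X + cluster_size and X + 2 * cluster_size in consecutive entries; the
 * special markers are stored verbatim rather than incremented.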
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_enter_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    int ret;

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    ret = bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                          &acb->cur_qiov, 0);
    if (ret < 0) {
        return ret;
    }

    if (acb->find_cluster_ret != QED_CLUSTER_FOUND) {
        if (s->bs->backing) {
            /*
             * Flush new data clusters before updating the L2 table
             *
             * This flush is necessary when a backing file is in use.  A crash
             * during an allocating write could result in empty clusters in the
             * image.  If the write only touched a subregion of the cluster,
             * then backing image sectors have been lost in the untouched
             * region.  The solution is to flush after writing a new data
             * cluster and before updating the L2 table.
             */
            ret = bdrv_co_flush(s->bs->file->bs);
            if (ret < 0) {
                return ret;
            }
        }
        ret = qed_aio_write_l2_update(acb, acb->cur_cluster);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

/**
 * Populate untouched regions of new data cluster
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        return ret;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        return ret;
    }

    return qed_aio_write_main(acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, NULL);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (acb->flags & QED_AIOCB_ZERO) {
        ret = qed_aio_write_l2_update(acb, 1);
    } else {
        ret = qed_aio_write_cow(acb);
    }
    if (ret < 0) {
        return ret;
    }
    return 0;
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                return -ENOMEM;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    return qed_aio_write_main(acb);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1 or
 *              QED_CLUSTER_ZERO
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1 or
 *              QED_CLUSTER_ZERO
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        return 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        return qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                     &acb->backing_qiov);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    ret = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                         &acb->cur_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

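        /* -EAGAIN means this request had to queue behind another allocating
         * write; cur_qiov was left empty, so the position is unchanged and
         * the loop simply retries the cluster lookup.
         */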
        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs         = bs,
        .cur_pos    = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos    = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov       = qiov,
        .flags      = flags,
    };
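
    /* The QEDAIOCB lives entirely on this coroutine's stack: qed_aio_next_io()
     * runs the request to completion before qed_co_request() returns, so no
     * heap-allocated AIOCB is needed.
     */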
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
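    /* Returning -ENOTSUP makes the generic block layer emulate the unaligned
     * zero write with a regular buffered write; only whole-cluster runs can
     * use the zero cluster marker.
     */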
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = bytes;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

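    /* Growing the image only requires a header update: clusters are allocated
     * on demand, so the new space reads as zeroes (or backing file data)
     * until it is written.
     */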
    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);