xref: /qemu/tests/qemu-iotests/124 (revision 6f0dd6c5)
1#!/usr/bin/env python
2#
3# Tests for incremental drive-backup
4#
5# Copyright (C) 2015 John Snow for Red Hat, Inc.
6#
7# Based on 056.
8#
9# This program is free software; you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation; either version 2 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program.  If not, see <http://www.gnu.org/licenses/>.
21#
22
23import os
24import iotests
25
26
27def io_write_patterns(img, patterns):
28    for pattern in patterns:
29        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
30
31
32def try_remove(img):
33    try:
34        os.remove(img)
35    except OSError:
36        pass
37
38
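# Build a single QMP 'transaction' action dict, converting snake_case keyword
# names to the hyphenated form QMP expects, e.g.
# transaction_action('drive-backup', job_id='d0') returns
# {'type': 'drive-backup', 'data': {'job-id': 'd0'}}.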
39def transaction_action(action, **kwargs):
40    return {
41        'type': action,
42        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
43    }
44
45
46def transaction_bitmap_clear(node, name, **kwargs):
47    return transaction_action('block-dirty-bitmap-clear',
48                              node=node, name=name, **kwargs)
49
50
51def transaction_drive_backup(device, target, **kwargs):
52    return transaction_action('drive-backup', job_id=device, device=device,
53                              target=target, **kwargs)
54
55
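# Tracks one dirty bitmap and the chain of (incremental, reference) backup
# image pairs created for it, so they can be compared and removed later.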
56class Bitmap:
57    def __init__(self, name, drive):
58        self.name = name
59        self.drive = drive
60        self.num = 0
61        self.backups = list()
62
63    def base_target(self):
64        return (self.drive['backup'], None)
65
66    def new_target(self, num=None):
67        if num is None:
68            num = self.num
69        self.num = num + 1
70        base = os.path.join(iotests.test_dir,
71                            "%s.%s." % (self.drive['id'], self.name))
72        suff = "%i.%s" % (num, self.drive['fmt'])
73        target = base + "inc" + suff
74        reference = base + "ref" + suff
75        self.backups.append((target, reference))
76        return (target, reference)
77
78    def last_target(self):
79        if self.backups:
80            return self.backups[-1]
81        return self.base_target()
82
83    def del_target(self):
84        for image in self.backups.pop():
85            try_remove(image)
86        self.num -= 1
87
88    def cleanup(self):
89        for backup in self.backups:
90            for image in backup:
91                try_remove(image)
92
93
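# Shared scaffolding for the incremental backup tests: VM and image setup,
# bitmap/backup bookkeeping, and helpers to run drive-backup jobs and verify
# their results.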
94class TestIncrementalBackupBase(iotests.QMPTestCase):
95    def __init__(self, *args):
96        super(TestIncrementalBackupBase, self).__init__(*args)
97        self.bitmaps = list()
98        self.files = list()
99        self.drives = list()
100        self.vm = iotests.VM()
101        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)
102
103
104    def setUp(self):
105        # Create a base image with a distinctive pattern
106        drive0 = self.add_node('drive0')
107        self.img_create(drive0['file'], drive0['fmt'])
108        self.vm.add_drive(drive0['file'])
109        self.write_default_pattern(drive0['file'])
110        self.vm.launch()
111
112
113    def write_default_pattern(self, target):
114        io_write_patterns(target, (('0x41', 0, 512),
115                                   ('0xd5', '1M', '32k'),
116                                   ('0xdc', '32M', '124k')))
117
118
119    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
120        if path is None:
121            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
122        if backup is None:
123            backup = os.path.join(iotests.test_dir,
124                                  '%s.full.backup.%s' % (node_id, fmt))
125
126        self.drives.append({
127            'id': node_id,
128            'file': path,
129            'backup': backup,
130            'fmt': fmt })
131        return self.drives[-1]
132
133
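    # Wrapper around 'qemu-img create': builds '-o key=value' options from
    # kwargs, optionally attaches a backing file, and records the new image
    # for removal in tearDown().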
134    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
135                   parent=None, parentFormat=None, **kwargs):
136        optargs = []
137        for k,v in kwargs.items():
138            optargs = optargs + ['-o', '%s=%s' % (k,v)]
139        args = ['create', '-f', fmt] + optargs + [img, size]
140        if parent:
141            if parentFormat is None:
142                parentFormat = fmt
143            args = args + ['-b', parent, '-F', parentFormat]
144        iotests.qemu_img(*args)
145        self.files.append(img)
146
147
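    # Issue a 'drive-backup' command and wait for the resulting block job.
    # Returns True if the job completed successfully, False if it ended with
    # the given error.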
148    def do_qmp_backup(self, error='Input/output error', **kwargs):
149        res = self.vm.qmp('drive-backup', **kwargs)
150        self.assert_qmp(res, 'return', {})
151        return self.wait_qmp_backup(kwargs['device'], error)
152
153
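    # Drain JOB_STATUS_CHANGE events until the job reaches 'null', so stale
    # events do not confuse later event_wait() calls.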
154    def ignore_job_status_change_events(self):
155        while True:
156            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
157            if e['data']['status'] == 'null':
158                break
159
160    def wait_qmp_backup(self, device, error='Input/output error'):
161        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
162                                   match={'data': {'device': device}})
163        self.assertNotEqual(event, None)
164        self.ignore_job_status_change_events()
165
166        try:
167            failure = self.dictpath(event, 'data/error')
168        except AssertionError:
169            # Backup succeeded.
170            self.assert_qmp(event, 'data/offset', event['data']['len'])
171            return True
172        else:
173            # Backup failed.
174            self.assert_qmp(event, 'data/error', error)
175            return False
176
177
178    def wait_qmp_backup_cancelled(self, device):
179        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
180                                   match={'data': {'device': device}})
181        self.assertNotEqual(event, None)
182        self.ignore_job_status_change_events()
183
184
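    # Create the full ("anchor") backup that subsequent incremental backups
    # will be layered on top of.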
185    def create_anchor_backup(self, drive=None):
186        if drive is None:
187            drive = self.drives[-1]
188        res = self.do_qmp_backup(job_id=drive['id'],
189                                 device=drive['id'], sync='full',
190                                 format=drive['fmt'], target=drive['backup'])
191        self.assertTrue(res)
192        self.files.append(drive['backup'])
193        return drive['backup']
194
195
196    def make_reference_backup(self, bitmap=None):
197        if bitmap is None:
198            bitmap = self.bitmaps[-1]
199        _, reference = bitmap.last_target()
200        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
201                                 device=bitmap.drive['id'], sync='full',
202                                 format=bitmap.drive['fmt'], target=reference)
203        self.assertTrue(res)
204
205
206    def add_bitmap(self, name, drive, **kwargs):
207        bitmap = Bitmap(name, drive)
208        self.bitmaps.append(bitmap)
209        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
210                             name=bitmap.name, **kwargs)
211        self.assert_qmp(result, 'return', {})
212        return bitmap
213
214
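    # Create the next incremental backup target image, backed by the previous
    # target (or by the anchor backup for the first increment).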
215    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
216        if bitmap is None:
217            bitmap = self.bitmaps[-1]
218        if parent is None:
219            parent, _ = bitmap.last_target()
220
221        target, _ = bitmap.new_target()
222        self.img_create(target, bitmap.drive['fmt'], parent=parent,
223                        **kwargs)
224        return target
225
226
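    # Run an incremental backup (sync='incremental' plus a bitmap) into an
    # existing target. On failure the target record is dropped; a retry can
    # still succeed later because the dirty bits are not lost (see
    # test_incremental_failure below).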
227    def create_incremental(self, bitmap=None, parent=None,
228                           parentFormat=None, validate=True,
229                           target=None):
230        if bitmap is None:
231            bitmap = self.bitmaps[-1]
232        if parent is None:
233            parent, _ = bitmap.last_target()
234
235        if target is None:
236            target = self.prepare_backup(bitmap, parent)
237        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
238                                 device=bitmap.drive['id'],
239                                 sync='incremental', bitmap=bitmap.name,
240                                 format=bitmap.drive['fmt'], target=target,
241                                 mode='existing')
242        if not res:
243            bitmap.del_target()
244            self.assertFalse(validate)
245        else:
246            self.make_reference_backup(bitmap)
247        return res
248
249
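    # Compare every incremental backup against its full reference backup, and
    # the newest backup against the final state of the drive image.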
250    def check_backups(self):
251        for bitmap in self.bitmaps:
252            for incremental, reference in bitmap.backups:
253                self.assertTrue(iotests.compare_images(incremental, reference))
254            last = bitmap.last_target()[0]
255            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))
256
257
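    # Issue writes through HMP qemu-io on the given drive (dirtying any
    # active bitmaps on it), then flush; in the blkdebug tests this flush is
    # what arms the injected error.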
258    def hmp_io_writes(self, drive, patterns):
259        for pattern in patterns:
260            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
261        self.vm.hmp_qemu_io(drive, 'flush')
262
263
264    def do_incremental_simple(self, **kwargs):
265        self.create_anchor_backup()
266        self.add_bitmap('bitmap0', self.drives[0], **kwargs)
267
268        # Sanity: Create a "hollow" incremental backup
269        self.create_incremental()
270        # Three writes: One complete overwrite, one new segment,
271        # and one partial overlap.
272        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
273                                                  ('0xfe', '16M', '256k'),
274                                                  ('0x64', '32736k', '64k')))
275        self.create_incremental()
276        # Three more writes, one of each kind, like above
277        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
278                                                  ('0x55', '8M', '352k'),
279                                                  ('0x78', '15872k', '1M')))
280        self.create_incremental()
281        self.vm.shutdown()
282        self.check_backups()
283
284
285    def tearDown(self):
286        self.vm.shutdown()
287        for bitmap in self.bitmaps:
288            bitmap.cleanup()
289        for filename in self.files:
290            try_remove(filename)
291
292
293
294class TestIncrementalBackup(TestIncrementalBackupBase):
295    def test_incremental_simple(self):
296        '''
297        Test: Create and verify three incremental backups.
298
299        Create a bitmap and a full backup before VM execution begins,
300        then create a series of three incremental backups "during execution,"
301        i.e., after IO requests begin modifying the drive.
302        '''
303        return self.do_incremental_simple()
304
305
306    def test_small_granularity(self):
307        '''
308        Test: Create and verify backups made with a small granularity bitmap.
309
310        Perform the same test as test_incremental_simple, but with a granularity
311        of only 32KiB instead of the present default of 64KiB.
312        '''
313        return self.do_incremental_simple(granularity=32768)
314
315
316    def test_large_granularity(self):
317        '''
318        Test: Create and verify backups made with a large granularity bitmap.
319
320        Perform the same test as test_incremental_simple, but with a granularity
321        of 128KiB instead of the present default of 64KiB.
322        '''
323        return self.do_incremental_simple(granularity=131072)
324
325
326    def test_larger_cluster_target(self):
327        '''
328        Test: Create and verify backups made to a larger cluster size target.
329
330        With a default granularity of 64KiB, verify that backups made to a
331        larger 128KiB cluster size target without a backing file work correctly.
332        '''
333        drive0 = self.drives[0]
334
335        # Create a cluster_size=128k full backup / "anchor" backup
336        self.img_create(drive0['backup'], cluster_size='128k')
337        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
338                                           format=drive0['fmt'],
339                                           target=drive0['backup'],
340                                           mode='existing'))
341
342        # Create bitmap and dirty it with some new writes.
343        # Overwrite the KiB range [32736, 32799], which dirties the bitmap
344        # clusters at 32M-64K and 32M; the cluster at 32M+64K stays clean.
345        bitmap0 = self.add_bitmap('bitmap0', drive0)
346        self.hmp_io_writes(drive0['id'],
347                           (('0xab', 0, 512),
348                            ('0xfe', '16M', '256k'),
349                            ('0x64', '32736k', '64k')))
350        # Check the dirty bitmap stats
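        # The three writes above dirty 1 + 4 + 2 = 7 bitmap clusters of 64KiB
        # each, so the expected dirty count is 7 * 65536 = 458752 bytes.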
351        result = self.vm.qmp('query-block')
352        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/name', 'bitmap0')
353        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/count', 458752)
354        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/granularity', 65536)
355        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/status', 'active')
356        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/persistent', False)
357
358        # Prepare a cluster_size=128k backup target without a backing file.
359        (target, _) = bitmap0.new_target()
360        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')
361
362        # Perform Incremental Backup
363        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
364                                           sync='incremental',
365                                           bitmap=bitmap0.name,
366                                           format=bitmap0.drive['fmt'],
367                                           target=target,
368                                           mode='existing'))
369        self.make_reference_backup(bitmap0)
370
371        # Add the backing file, then compare and exit.
372        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
373                         drive0['backup'], '-F', drive0['fmt'], target)
374        self.vm.shutdown()
375        self.check_backups()
376
377
378    def test_incremental_transaction(self):
379        '''Test: Verify backups made from transactionally created bitmaps.
380
381        Create a bitmap "before" VM execution begins, then create a second
382        bitmap AFTER writes have already occurred. Use transactions to create
383        a full backup and synchronize both bitmaps to this backup.
384        Create an incremental backup through both bitmaps and verify that
385        both backups match the current drive0 image.
386        '''
387
388        drive0 = self.drives[0]
389        bitmap0 = self.add_bitmap('bitmap0', drive0)
390        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
391                                          ('0xfe', '16M', '256k'),
392                                          ('0x64', '32736k', '64k')))
393        bitmap1 = self.add_bitmap('bitmap1', drive0)
394
395        result = self.vm.qmp('transaction', actions=[
396            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
397            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
398            transaction_drive_backup(drive0['id'], drive0['backup'],
399                                     sync='full', format=drive0['fmt'])
400        ])
401        self.assert_qmp(result, 'return', {})
402        self.wait_until_completed(drive0['id'])
403        self.files.append(drive0['backup'])
404
405        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
406                                          ('0x55', '8M', '352k'),
407                                          ('0x78', '15872k', '1M')))
408        # Both bitmaps should be correctly in sync.
409        self.create_incremental(bitmap0)
410        self.create_incremental(bitmap1)
411        self.vm.shutdown()
412        self.check_backups()
413
414
415    def do_transaction_failure_test(self, race=False):
416        # Create a second drive, with pattern:
417        drive1 = self.add_node('drive1')
418        self.img_create(drive1['file'], drive1['fmt'])
419        io_write_patterns(drive1['file'], (('0x14', 0, 512),
420                                           ('0x5d', '1M', '32k'),
421                                           ('0xcd', '32M', '124k')))
422
423        # Create a blkdebug interface to this img as 'drive1'
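        # The set-state rule arms the error after the first flush_to_disk
        # (state 1 -> 2); inject-error then fails the next read_aio once with
        # EIO (errno 5), so drive1's incremental backup will hit exactly one
        # read error.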
424        result = self.vm.qmp('blockdev-add',
425            node_name=drive1['id'],
426            driver=drive1['fmt'],
427            file={
428                'driver': 'blkdebug',
429                'image': {
430                    'driver': 'file',
431                    'filename': drive1['file']
432                },
433                'set-state': [{
434                    'event': 'flush_to_disk',
435                    'state': 1,
436                    'new_state': 2
437                }],
438                'inject-error': [{
439                    'event': 'read_aio',
440                    'errno': 5,
441                    'state': 2,
442                    'immediately': False,
443                    'once': True
444                }],
445            }
446        )
447        self.assert_qmp(result, 'return', {})
448
449        # Create bitmaps and full backups for both drives
450        drive0 = self.drives[0]
451        dr0bm0 = self.add_bitmap('bitmap0', drive0)
452        dr1bm0 = self.add_bitmap('bitmap0', drive1)
453        self.create_anchor_backup(drive0)
454        self.create_anchor_backup(drive1)
455        self.assert_no_active_block_jobs()
456        self.assertFalse(self.vm.get_qmp_events(wait=False))
457
458        # Emulate some writes
459        if not race:
460            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
461                                              ('0xfe', '16M', '256k'),
462                                              ('0x64', '32736k', '64k')))
463        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
464                                          ('0xef', '16M', '256k'),
465                                          ('0x46', '32736k', '64k')))
466
467        # Create incremental backup targets
468        target0 = self.prepare_backup(dr0bm0)
469        target1 = self.prepare_backup(dr1bm0)
470
471        # Ask for a new incremental backup for each drive,
472        # expecting drive1's backup to fail. In the 'race' test,
473        # we expect drive1 to attempt to cancel the empty drive0 job.
474        transaction = [
475            transaction_drive_backup(drive0['id'], target0, sync='incremental',
476                                     format=drive0['fmt'], mode='existing',
477                                     bitmap=dr0bm0.name),
478            transaction_drive_backup(drive1['id'], target1, sync='incremental',
479                                     format=drive1['fmt'], mode='existing',
480                                     bitmap=dr1bm0.name)
481        ]
482        result = self.vm.qmp('transaction', actions=transaction,
483                             properties={'completion-mode': 'grouped'} )
484        self.assert_qmp(result, 'return', {})
485
486        # Observe that drive0's backup is cancelled and drive1 completes with
487        # an error.
488        self.wait_qmp_backup_cancelled(drive0['id'])
489        self.assertFalse(self.wait_qmp_backup(drive1['id']))
490        error = self.vm.event_wait('BLOCK_JOB_ERROR')
491        self.assert_qmp(error, 'data', {'device': drive1['id'],
492                                        'action': 'report',
493                                        'operation': 'read'})
494        self.assertFalse(self.vm.get_qmp_events(wait=False))
495        self.assert_no_active_block_jobs()
496
497        # Neither backup completed: drive0's job was cancelled and drive1's
498        # failed. Delete both targets and drop our records of them.
499        dr0bm0.del_target()
500        dr1bm0.del_target()
501        if race:
502            # Don't re-run the transaction, we only wanted to test the race.
503            self.vm.shutdown()
504            return
505
506        # Prepare fresh target images:
507        target0 = self.prepare_backup(dr0bm0)
508        target1 = self.prepare_backup(dr1bm0)
509
510        # Re-run the exact same transaction.
511        result = self.vm.qmp('transaction', actions=transaction,
512                             properties={'completion-mode':'grouped'})
513        self.assert_qmp(result, 'return', {})
514
515        # Both should complete successfully this time.
516        self.assertTrue(self.wait_qmp_backup(drive0['id']))
517        self.assertTrue(self.wait_qmp_backup(drive1['id']))
518        self.make_reference_backup(dr0bm0)
519        self.make_reference_backup(dr1bm0)
520        self.assertFalse(self.vm.get_qmp_events(wait=False))
521        self.assert_no_active_block_jobs()
522
523        # And the images should of course validate.
524        self.vm.shutdown()
525        self.check_backups()
526
527    def test_transaction_failure(self):
528        '''Test: Verify backups made from a transaction that partially fails.
529
530        Add a second drive with its own unique pattern, and add a bitmap to each
531        drive. Use blkdebug to interfere with the backup on just one drive and
532        attempt to create a coherent incremental backup across both drives.
533
534        Verify a failure in one but not both, then delete the failed stubs and
535        re-run the same transaction.
536
537        Verify that both incrementals are created successfully.
538        '''
539        self.do_transaction_failure_test()
540
541    def test_transaction_failure_race(self):
542        '''Test: Verify that transactions with jobs that have no data to
543        transfer do not cause race conditions in the cancellation of the entire
544        transaction job group.
545        '''
546        self.do_transaction_failure_test(race=True)
547
548
549    def test_sync_dirty_bitmap_missing(self):
550        self.assert_no_active_block_jobs()
551        self.files.append(self.err_img)
552        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
553                             sync='incremental', format=self.drives[0]['fmt'],
554                             target=self.err_img)
555        self.assert_qmp(result, 'error/class', 'GenericError')
556
557
558    def test_sync_dirty_bitmap_not_found(self):
559        self.assert_no_active_block_jobs()
560        self.files.append(self.err_img)
561        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
562                             sync='incremental', bitmap='unknown',
563                             format=self.drives[0]['fmt'], target=self.err_img)
564        self.assert_qmp(result, 'error/class', 'GenericError')
565
566
567    def test_sync_dirty_bitmap_bad_granularity(self):
568        '''
569        Test: Verify that an improper granularity is rejected.
570
571        The granularity must always be a power of 2.
572        '''
573        self.assert_no_active_block_jobs()
574        self.assertRaises(AssertionError, self.add_bitmap,
575                          'bitmap0', self.drives[0],
576                          granularity=64000)
577
578    def test_growing_before_backup(self):
579        '''
580        Test: Add a bitmap, truncate the image, write past the old
581              end, do a backup.
582
583        Incremental backup should not ignore dirty bits past the old
584        image end.
585        '''
586        self.assert_no_active_block_jobs()
587
588        self.create_anchor_backup()
589
590        self.add_bitmap('bitmap0', self.drives[0])
591
592        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
593                          size=(65 * 1048576))
594        self.assert_qmp(res, 'return', {})
595
596        # Dirty the image past the old end
597        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')
598
599        target = self.prepare_backup(size='65M')
600        self.create_incremental(target=target)
601
602        self.vm.shutdown()
603        self.check_backups()
604
605
606class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
607    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''
608
609    def setUp(self):
610        drive0 = self.add_node('drive0')
611        self.img_create(drive0['file'], drive0['fmt'])
612        self.write_default_pattern(drive0['file'])
613        self.vm.launch()
614
615    def test_incremental_failure(self):
616        '''Test: Verify backups made after a failure are correct.
617
618        Simulate a failure during an incremental backup block job,
619        emulate additional writes, then create another incremental backup
620        afterwards and verify that the backup created is correct.
621        '''
622
623        drive0 = self.drives[0]
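        # Same blkdebug wiring as in do_transaction_failure_test: the first
        # flush arms a one-shot EIO on the next read, making the first
        # incremental backup job below fail.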
624        result = self.vm.qmp('blockdev-add',
625            node_name=drive0['id'],
626            driver=drive0['fmt'],
627            file={
628                'driver': 'blkdebug',
629                'image': {
630                    'driver': 'file',
631                    'filename': drive0['file']
632                },
633                'set-state': [{
634                    'event': 'flush_to_disk',
635                    'state': 1,
636                    'new_state': 2
637                }],
638                'inject-error': [{
639                    'event': 'read_aio',
640                    'errno': 5,
641                    'state': 2,
642                    'immediately': False,
643                    'once': True
644                }],
645            }
646        )
647        self.assert_qmp(result, 'return', {})
648
649        self.create_anchor_backup(drive0)
650        self.add_bitmap('bitmap0', drive0)
651        # Note: at this point in a normal execution, the VM would resume and
652        # begin issuing IO requests; the writes below emulate that activity.
653
654        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
655                                          ('0xfe', '16M', '256k'),
656                                          ('0x64', '32736k', '64k')))
657
658        result = self.create_incremental(validate=False)
659        self.assertFalse(result)
660        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
661                                          ('0x55', '8M', '352k'),
662                                          ('0x78', '15872k', '1M')))
663        self.create_incremental()
664        self.vm.shutdown()
665        self.check_backups()
666
667    def test_incremental_pause(self):
668        """
669        Test an incremental backup that errors into a pause and is resumed.
670        """
671
672        drive0 = self.drives[0]
673        # NB: The blkdebug script here looks for a "flush, read, read" pattern.
674        # The flush occurs in hmp_io_writes, the first read in device_add, and
675        # the last read during the block job.
676        result = self.vm.qmp('blockdev-add',
677                             node_name=drive0['id'],
678                             driver=drive0['fmt'],
679                             file={
680                                 'driver': 'blkdebug',
681                                 'image': {
682                                     'driver': 'file',
683                                     'filename': drive0['file']
684                                 },
685                                 'set-state': [{
686                                     'event': 'flush_to_disk',
687                                     'state': 1,
688                                     'new_state': 2
689                                 },{
690                                     'event': 'read_aio',
691                                     'state': 2,
692                                     'new_state': 3
693                                 }],
694                                 'inject-error': [{
695                                     'event': 'read_aio',
696                                     'errno': 5,
697                                     'state': 3,
698                                     'immediately': False,
699                                     'once': True
700                                 }],
701                             })
702        self.assert_qmp(result, 'return', {})
703        self.create_anchor_backup(drive0)
704        bitmap = self.add_bitmap('bitmap0', drive0)
705
706        # Emulate guest activity
707        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
708                                          ('0xfe', '16M', '256k'),
709                                          ('0x64', '32736k', '64k')))
710
711        # For the purposes of query-block visibility of bitmaps, add a drive
712        # frontend after we've written data; otherwise we can't use hmp-io
713        result = self.vm.qmp("device_add",
714                             id="device0",
715                             drive=drive0['id'],
716                             driver="virtio-blk")
717        self.assert_qmp(result, 'return', {})
718
719        # Bitmap Status Check
720        query = self.vm.qmp('query-block')
721        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
722               if bmap.get('name') == bitmap.name][0]
723        self.assert_qmp(ret, 'count', 458752)
724        self.assert_qmp(ret, 'granularity', 65536)
725        self.assert_qmp(ret, 'status', 'active')
726        self.assert_qmp(ret, 'busy', False)
727        self.assert_qmp(ret, 'recording', True)
728
729        # Start backup
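        # on_source_error='stop' makes the job pause on the injected read
        # error (BLOCK_JOB_ERROR with action 'stop') rather than fail, so it
        # can be resumed with block-job-resume below.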
730        parent, _ = bitmap.last_target()
731        target = self.prepare_backup(bitmap, parent)
732        res = self.vm.qmp('drive-backup',
733                          job_id=bitmap.drive['id'],
734                          device=bitmap.drive['id'],
735                          sync='incremental',
736                          bitmap=bitmap.name,
737                          format=bitmap.drive['fmt'],
738                          target=target,
739                          mode='existing',
740                          on_source_error='stop')
741        self.assert_qmp(res, 'return', {})
742
743        # Wait for the error
744        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
745                                   match={"data":{"device":bitmap.drive['id']}})
746        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
747                                        'action': 'stop',
748                                        'operation': 'read'})
749
750        # Bitmap Status Check
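        # While the job is stopped on the error it still holds the bitmap,
        # so the bitmap reports busy=True and status 'frozen'.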
751        query = self.vm.qmp('query-block')
752        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
753               if bmap.get('name') == bitmap.name][0]
754        self.assert_qmp(ret, 'count', 458752)
755        self.assert_qmp(ret, 'granularity', 65536)
756        self.assert_qmp(ret, 'status', 'frozen')
757        self.assert_qmp(ret, 'busy', True)
758        self.assert_qmp(ret, 'recording', True)
759
760        # Resume and check incremental backup for consistency
761        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
762        self.assert_qmp(res, 'return', {})
763        self.wait_qmp_backup(bitmap.drive['id'])
764
765        # Bitmap Status Check
766        query = self.vm.qmp('query-block')
767        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
768               if bmap.get('name') == bitmap.name][0]
769        self.assert_qmp(ret, 'count', 0)
770        self.assert_qmp(ret, 'granularity', 65536)
771        self.assert_qmp(ret, 'status', 'active')
772        self.assert_qmp(ret, 'busy', False)
773        self.assert_qmp(ret, 'recording', True)
774
775        # Finalize / Cleanup
776        self.make_reference_backup(bitmap)
777        self.vm.shutdown()
778        self.check_backups()
779
780
781if __name__ == '__main__':
782    iotests.main(supported_fmts=['qcow2'])
783