1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: number of bytes already transferred to the target VM
18#
19# @remaining: number of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total number of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @skipped: number of skipped zero pages. Always zero, only provided for
27#     compatibility (since 1.5)
28#
29# @normal: number of normal pages (since 1.2)
30#
31# @normal-bytes: number of normal bytes sent (since 1.2)
32#
33# @dirty-pages-rate: number of pages dirtied per second by the guest
34#     (since 1.3)
35#
36# @mbps: throughput in megabits/sec.  (since 1.6)
37#
38# @dirty-sync-count: number of times that dirty ram was synchronized
39#     (since 2.1)
40#
41# @postcopy-requests: The number of page requests received from the
42#     destination (since 2.7)
43#
44# @page-size: The number of bytes per page for the various page-based
45#     statistics (since 2.10)
46#
47# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
48#
49# @pages-per-second: the number of memory pages transferred per second
50#     (Since 4.0)
51#
52# @precopy-bytes: The number of bytes sent in the pre-copy phase
53#     (since 7.0).
54#
55# @downtime-bytes: The number of bytes sent while the guest is paused
56#     (since 7.0).
57#
58# @postcopy-bytes: The number of bytes sent during the post-copy phase
59#     (since 7.0).
60#
61# @dirty-sync-missed-zero-copy: Number of times dirty RAM
62#     synchronization could not avoid copying dirty pages.  This is
63#     between 0 and @dirty-sync-count * @multifd-channels.  (since
64#     7.1)
65#
66# Features:
67#
68# @deprecated: Member @skipped is always zero since 1.5.3
69#
70# Since: 0.14
71#
72##
73{ 'struct': 'MigrationStats',
74  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
75           'duplicate': 'int',
76           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
77           'normal': 'int',
78           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
79           'mbps': 'number', 'dirty-sync-count': 'int',
80           'postcopy-requests': 'int', 'page-size': 'int',
81           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
82           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
83           'postcopy-bytes': 'uint64',
84           'dirty-sync-missed-zero-copy': 'uint64' } }
85
86##
87# @XBZRLECacheStats:
88#
89# Detailed XBZRLE migration cache statistics
90#
91# @cache-size: XBZRLE cache size
92#
93# @bytes: number of bytes already transferred to the target VM
94#
95# @pages: number of pages transferred to the target VM
96#
97# @cache-miss: number of cache misses
98#
99# @cache-miss-rate: cache miss rate (since 2.1)
100#
101# @encoding-rate: rate of encoded bytes (since 5.1)
102#
103# @overflow: number of overflows
104#
105# Since: 1.2
106##
107{ 'struct': 'XBZRLECacheStats',
108  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
109           'cache-miss': 'int', 'cache-miss-rate': 'number',
110           'encoding-rate': 'number', 'overflow': 'int' } }
111
112##
113# @CompressionStats:
114#
115# Detailed migration compression statistics
116#
117# @pages: number of pages compressed and transferred to the target VM
118#
119# @busy: count of times that no free thread was available to compress
120#     data
121#
122# @busy-rate: rate at which no free compression thread was available
123#
124# @compressed-size: number of bytes after compression
125#
126# @compression-rate: rate of compressed size
127#
128# Since: 3.1
129##
130{ 'struct': 'CompressionStats',
131  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
132           'compressed-size': 'int', 'compression-rate': 'number' } }
133
134##
135# @MigrationStatus:
136#
137# An enumeration of migration status.
138#
139# @none: no migration has ever happened.
140#
141# @setup: migration process has been initiated.
142#
143# @cancelling: in the process of cancelling migration.
144#
145# @cancelled: cancelling migration is finished.
146#
147# @active: in the process of doing migration.
148#
149# @postcopy-active: like active, but now in postcopy mode.  (since
150#     2.5)
151#
152# @postcopy-paused: during postcopy but paused.  (since 3.0)
153#
154# @postcopy-recover: trying to recover from a paused postcopy.  (since
155#     3.0)
156#
157# @completed: migration is finished.
158#
159# @failed: some error occurred during migration process.
160#
161# @colo: VM is in the process of fault tolerance.  The VM cannot get
162#     into this state unless the colo capability is enabled for
163#     migration.  (since 2.8)
164#
165# @pre-switchover: Paused before device serialisation.  (since 2.11)
166#
167# @device: During device serialisation when pause-before-switchover is
168#     enabled (since 2.11)
169#
170# @wait-unplug: wait for device unplug request by guest OS to be
171#     completed.  (since 4.2)
172#
173# Since: 2.3
174##
175{ 'enum': 'MigrationStatus',
176  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
177            'active', 'postcopy-active', 'postcopy-paused',
178            'postcopy-recover', 'completed', 'failed', 'colo',
179            'pre-switchover', 'device', 'wait-unplug' ] }

180##
181# @VfioStats:
182#
183# Detailed VFIO devices migration statistics
184#
185# @transferred: number of bytes transferred to the target VM by VFIO
186#     devices
187#
188# Since: 5.2
189##
190{ 'struct': 'VfioStats',
191  'data': {'transferred': 'int' } }
192
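# Illustrative sketch (hypothetical, abridged reply): when VFIO devices
# take part in the migration, the query-migrate command documented
# below includes a @vfio member in its MigrationInfo result, e.g.:
#
# <- { "return": { "status": "active", "vfio": { "transferred": 4096 } } }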
193##
194# @MigrationInfo:
195#
196# Information about current migration process.
197#
198# @status: @MigrationStatus describing the current migration status.
199#     If this field is not returned, no migration process has been
200#     initiated
201#
202# @ram: @MigrationStats containing detailed migration status, only
203#     returned if status is 'active' or 'completed' (since 1.2)
204#
205# @disk: @MigrationStats containing detailed disk migration status,
206#     only returned if status is 'active' and it is a block migration
207#
208# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
209#     migration statistics, only returned if XBZRLE feature is on and
210#     status is 'active' or 'completed' (since 1.2)
211#
212# @total-time: total amount of milliseconds since migration started.
213#     If migration has ended, it returns the total migration time.
214#     (since 1.2)
215#
216# @downtime: only present when migration finishes correctly; total
217#     downtime in milliseconds for the guest.  (since 1.3)
218#
219# @expected-downtime: only present while migration is active; expected
220#     downtime in milliseconds for the guest in the last walk of the
221#     dirty bitmap.  (since 1.3)
222#
223# @setup-time: amount of setup time in milliseconds *before* the
224#     iterations begin but *after* the QMP command is issued.  This is
225#     designed to provide an accounting of any activities (such as
226#     RDMA pinning) which may be expensive, but do not actually occur
227#     during the iterative migration rounds themselves.  (since 1.6)
228#
229# @cpu-throttle-percentage: percentage of time guest cpus are being
230#     throttled during auto-converge.  This is only present when
231#     auto-converge has started throttling guest cpus.  (Since 2.7)
232#
233# @error-desc: the human readable error description string. Clients
234#     should not attempt to parse the error strings.  (Since 2.7)
235#
236# @postcopy-blocktime: total time when all vCPUs were blocked during
237#     postcopy live migration.  This is only present when the
238#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
239#
240# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
241#     This is only present when the postcopy-blocktime migration
242#     capability is enabled.  (Since 3.0)
243#
244# @compression: migration compression statistics, only returned if
245#     compression feature is on and status is 'active' or 'completed'
246#     (Since 3.1)
247#
248# @socket-address: Only present for tcp migration, to let the client
249#     know what the real port is.  (Since 4.0)
250#
251# @vfio: @VfioStats containing detailed VFIO devices migration
252#     statistics, only returned if VFIO device is present, migration
253#     is supported by all VFIO devices and status is 'active' or
254#     'completed' (since 5.2)
255#
256# @blocked-reasons: A list of reasons an outgoing migration is
257#     blocked.  Present and non-empty when migration is blocked.
258#     (since 6.0)
259#
260# @dirty-limit-throttle-time-per-round: Maximum throttle time
261#     (in microseconds) of virtual CPUs in each dirty ring full round,
262#     which shows how MigrationCapability dirty-limit affects the
263#     guest during live migration.  (Since 8.1)
264#
265# @dirty-limit-ring-full-time: Estimated average dirty ring full time
266#     (in microseconds) for each dirty ring full round.  The value
267#     equals the dirty ring memory size divided by the average dirty
268#     page rate of the virtual CPU, which can be used to observe the
269#     average memory load of the virtual CPU indirectly.  Note that
270#     zero means guest doesn't dirty memory.  (Since 8.1)
271#
272# Features:
273#
274# @deprecated: Member @disk is deprecated because block migration is.
275#     Member @compression is deprecated because it is unreliable and
276#     untested.  It is recommended to use multifd migration, which
277#     offers an alternative compression implementation that is
278#     reliable and tested.
279#
280# Since: 0.14
281##
282{ 'struct': 'MigrationInfo',
283  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
284           '*disk': { 'type': 'MigrationStats', 'features': [ 'deprecated' ] },
285           '*vfio': 'VfioStats',
286           '*xbzrle-cache': 'XBZRLECacheStats',
287           '*total-time': 'int',
288           '*expected-downtime': 'int',
289           '*downtime': 'int',
290           '*setup-time': 'int',
291           '*cpu-throttle-percentage': 'int',
292           '*error-desc': 'str',
293           '*blocked-reasons': ['str'],
294           '*postcopy-blocktime': 'uint32',
295           '*postcopy-vcpu-blocktime': ['uint32'],
296           '*compression': { 'type': 'CompressionStats', 'features': [ 'deprecated' ] },
297           '*socket-address': ['SocketAddress'],
298           '*dirty-limit-throttle-time-per-round': 'uint64',
299           '*dirty-limit-ring-full-time': 'uint64'} }
300
301##
302# @query-migrate:
303#
304# Returns information about the current migration process.  If
305# migration is active, there will be another json-object with RAM
306# migration status, and if block migration is active, another one with
307# block migration status.
308#
309# Returns: @MigrationInfo
310#
311# Since: 0.14
312#
313# Examples:
314#
315# 1. Before the first migration
316#
317# -> { "execute": "query-migrate" }
318# <- { "return": {} }
319#
320# 2. Migration is done and has succeeded
321#
322# -> { "execute": "query-migrate" }
323# <- { "return": {
324#         "status": "completed",
325#         "total-time":12345,
326#         "setup-time":12345,
327#         "downtime":12345,
328#         "ram":{
329#           "transferred":123,
330#           "remaining":123,
331#           "total":246,
332#           "duplicate":123,
333#           "normal":123,
334#           "normal-bytes":123456,
335#           "dirty-sync-count":15
336#         }
337#      }
338#    }
339#
340# 3. Migration is done and has failed
341#
342# -> { "execute": "query-migrate" }
343# <- { "return": { "status": "failed" } }
344#
345# 4. Migration is being performed and is not a block migration:
346#
347# -> { "execute": "query-migrate" }
348# <- {
349#       "return":{
350#          "status":"active",
351#          "total-time":12345,
352#          "setup-time":12345,
353#          "expected-downtime":12345,
354#          "ram":{
355#             "transferred":123,
356#             "remaining":123,
357#             "total":246,
358#             "duplicate":123,
359#             "normal":123,
360#             "normal-bytes":123456,
361#             "dirty-sync-count":15
362#          }
363#       }
364#    }
365#
366# 5. Migration is being performed and is a block migration:
367#
368# -> { "execute": "query-migrate" }
369# <- {
370#       "return":{
371#          "status":"active",
372#          "total-time":12345,
373#          "setup-time":12345,
374#          "expected-downtime":12345,
375#          "ram":{
376#             "total":1057024,
377#             "remaining":1053304,
378#             "transferred":3720,
379#             "duplicate":123,
380#             "normal":123,
381#             "normal-bytes":123456,
382#             "dirty-sync-count":15
383#          },
384#          "disk":{
385#             "total":20971520,
386#             "remaining":20880384,
387#             "transferred":91136
388#          }
389#       }
390#    }
391#
392# 6. Migration is being performed and XBZRLE is active:
393#
394# -> { "execute": "query-migrate" }
395# <- {
396#       "return":{
397#          "status":"active",
398#          "total-time":12345,
399#          "setup-time":12345,
400#          "expected-downtime":12345,
401#          "ram":{
402#             "total":1057024,
403#             "remaining":1053304,
404#             "transferred":3720,
405#             "duplicate":10,
406#             "normal":3333,
407#             "normal-bytes":3412992,
408#             "dirty-sync-count":15
409#          },
410#          "xbzrle-cache":{
411#             "cache-size":67108864,
412#             "bytes":20971520,
413#             "pages":2444343,
414#             "cache-miss":2244,
415#             "cache-miss-rate":0.123,
416#             "encoding-rate":80.1,
417#             "overflow":34434
418#          }
419#       }
420#    }
421##
422{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
423
424##
425# @MigrationCapability:
426#
427# Migration capabilities enumeration
428#
429# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
430#     Encoding).  This feature allows us to minimize migration traffic
431#     for certain workloads, by sending compressed differences of the
432#     pages
433#
434# @rdma-pin-all: Controls whether or not the entire VM memory
435#     footprint is mlock()'d on demand or all at once.  Refer to
436#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
437#
438# @zero-blocks: During storage migration encode blocks of zeroes
439#     efficiently.  This essentially saves 1MB of zeroes per block on
440#     the wire.  Enabling requires source and target VM to support
441#     this feature.  To enable it is sufficient to enable the
442#     capability on the source VM. The feature is disabled by default.
443#     (since 1.6)
444#
445# @compress: Use multiple compression threads to accelerate live
446#     migration.  This feature can help to reduce the migration
447#     traffic, by sending compressed pages.  Please note that if
448#     compress and xbzrle are both on, compress only takes effect in
449#     the ram bulk stage; after that, it will be disabled and only
450#     xbzrle takes effect, which can help to minimize migration
451#     traffic.  The feature is disabled by default.  (since 2.4)
452#
453# @events: generate events for each migration state change (since 2.4)
454#
455# @auto-converge: If enabled, QEMU will automatically throttle down
456#     the guest to speed up convergence of RAM migration.  (since 1.6)
457#
458# @postcopy-ram: Start executing on the migration target before all of
459#     RAM has been migrated, pulling the remaining pages along as
460#     needed.  The capability must have the same setting on both source
461#     and target or migration will not even start.  NOTE: If the
462#     migration fails during postcopy, the VM will fail.  (since 2.6)
463#
464# @x-colo: If enabled, migration will never end, and the state of the
465#     VM on the primary side will be migrated continuously to the VM
466#     on the secondary side.  This process is called COarse-Grain LOck
467#     Stepping (COLO) for Non-stop Service.  (since 2.8)
468#
469# @release-ram: if enabled, QEMU will free the migrated RAM pages on
470#     the source during postcopy-ram migration.  (since 2.9)
471#
472# @block: If enabled, QEMU will also migrate the contents of all block
473#     devices.  Default is disabled.  A possible alternative uses
474#     mirror jobs to a builtin NBD server on the destination, which
475#     offers more flexibility.  (Since 2.10)
476#
477# @return-path: If enabled, migration will use the return path even
478#     for precopy.  (since 2.10)
479#
480# @pause-before-switchover: Pause outgoing migration before
481#     serialising device state and before disabling block IO (since
482#     2.11)
483#
484# @multifd: Use more than one fd for migration (since 4.0)
485#
486# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
487#     (since 2.12)
488#
489# @postcopy-blocktime: Calculate downtime for postcopy live migration
490#     (since 3.0)
491#
492# @late-block-activate: If enabled, the destination will not activate
493#     block devices (and thus take locks) immediately at the end of
494#     migration.  (since 3.0)
495#
496# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
497#     that is accessible on the destination machine.  (since 4.0)
498#
499# @validate-uuid: Send the UUID of the source to allow the destination
500#     to ensure it is the same.  (since 4.2)
501#
502# @background-snapshot: If enabled, the migration stream will be a
503#     snapshot of the VM exactly at the point when the migration
504#     procedure starts.  The VM RAM is saved while the VM is running.
505#     (since 6.0)
506#
507# @zero-copy-send: Controls behavior on sending memory pages on
508#     migration.  When true, enables a zero-copy mechanism for sending
509#     memory pages, if host supports it.  Requires that QEMU be
510#     permitted to use locked memory for guest RAM pages.  (since 7.1)
511#
512# @postcopy-preempt: If enabled, the migration process will allow
513#     postcopy requests to preempt precopy stream, so postcopy
514#     requests will be handled faster.  This is a performance feature
515#     and should not affect the correctness of postcopy migration.
516#     (since 7.1)
517#
518# @switchover-ack: If enabled, migration will not stop the source VM
519#     and complete the migration until an ACK is received from the
520#     destination that it's OK to do so.  Exactly when this ACK is
521#     sent depends on the migrated devices that use this feature.  For
522#     example, a device can use it to make sure some of its data is
523#     sent and loaded in the destination before doing switchover.
524#     This can reduce downtime if devices that support this capability
525#     are present.  'return-path' capability must be enabled to use
526#     it.  (since 8.1)
527#
528# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
529#     keep their dirty page rate within @vcpu-dirty-limit.  This can
530#     improve responsiveness of large guests during live migration,
531#     and can result in more stable read performance.  Requires KVM
532#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
533#
534# Features:
535#
536# @deprecated: Member @block is deprecated.  Use blockdev-mirror with
537#     NBD instead.  Member @compress is deprecated because it is
538#     unreliable and untested.  It is recommended to use multifd
539#     migration, which offers an alternative compression
540#     implementation that is reliable and tested.
541#
542# @unstable: Members @x-colo and @x-ignore-shared are experimental.
543#
544# Since: 1.2
545##
546{ 'enum': 'MigrationCapability',
547  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
548           { 'name': 'compress', 'features': [ 'deprecated' ] },
549           'events', 'postcopy-ram',
550           { 'name': 'x-colo', 'features': [ 'unstable' ] },
551           'release-ram',
552           { 'name': 'block', 'features': [ 'deprecated' ] },
553           'return-path', 'pause-before-switchover', 'multifd',
554           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
555           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
556           'validate-uuid', 'background-snapshot',
557           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
558           'dirty-limit'] }
559
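# For illustration only: capabilities are toggled with
# migrate-set-capabilities (documented below).  Since @switchover-ack
# requires the @return-path capability, a client would typically enable
# both in one call, e.g. (hypothetical exchange):
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "return-path", "state": true },
#          { "capability": "switchover-ack", "state": true } ] } }
# <- { "return": {} }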
560##
561# @MigrationCapabilityStatus:
562#
563# Migration capability information
564#
565# @capability: capability enum
566#
567# @state: capability state bool
568#
569# Since: 1.2
570##
571{ 'struct': 'MigrationCapabilityStatus',
572  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
573
574##
575# @migrate-set-capabilities:
576#
577# Enable/Disable the following migration capabilities (like xbzrle)
578#
579# @capabilities: json array of capability modifications to make
580#
581# Since: 1.2
582#
583# Example:
584#
585# -> { "execute": "migrate-set-capabilities" , "arguments":
586#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
587# <- { "return": {} }
588##
589{ 'command': 'migrate-set-capabilities',
590  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
591
592##
593# @query-migrate-capabilities:
594#
595# Returns information about the current migration capabilities status
596#
597# Returns: a list of @MigrationCapabilityStatus
598#
599# Since: 1.2
600#
601# Example:
602#
603# -> { "execute": "query-migrate-capabilities" }
604# <- { "return": [
605#       {"state": false, "capability": "xbzrle"},
606#       {"state": false, "capability": "rdma-pin-all"},
607#       {"state": false, "capability": "auto-converge"},
608#       {"state": false, "capability": "zero-blocks"},
609#       {"state": false, "capability": "compress"},
610#       {"state": true, "capability": "events"},
611#       {"state": false, "capability": "postcopy-ram"},
612#       {"state": false, "capability": "x-colo"}
613#    ]}
614##
615{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}
616
617##
618# @MultiFDCompression:
619#
620# An enumeration of multifd compression methods.
621#
622# @none: no compression.
623#
624# @zlib: use zlib compression method.
625#
626# @zstd: use zstd compression method.
627#
628# Since: 5.0
629##
630{ 'enum': 'MultiFDCompression',
631  'data': [ 'none', 'zlib',
632            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
633
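# Illustrative sketch (hypothetical values): a multifd compression
# method is selected with the @multifd-compression parameter (see
# MigrationParameter below) after enabling the @multifd capability,
# e.g. picking zstd, which is only available when QEMU was built with
# CONFIG_ZSTD:
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-compression": "zstd",
#                     "multifd-zstd-level": 1 } }
# <- { "return": {} }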
634##
635# @MigMode:
636#
637# @normal: the original form of migration.  (since 8.2)
638#
# Since: 8.2
639##
640{ 'enum': 'MigMode',
641  'data': [ 'normal' ] }
642
643##
644# @BitmapMigrationBitmapAliasTransform:
645#
646# @persistent: If present, the bitmap will be made persistent or
647#     transient depending on this parameter.
648#
649# Since: 6.0
650##
651{ 'struct': 'BitmapMigrationBitmapAliasTransform',
652  'data': {
653      '*persistent': 'bool'
654  } }
655
656##
657# @BitmapMigrationBitmapAlias:
658#
659# @name: The name of the bitmap.
660#
661# @alias: An alias name for migration (for example the bitmap name on
662#     the opposite site).
663#
664# @transform: Allows the modification of the migrated bitmap.  (since
665#     6.0)
666#
667# Since: 5.2
668##
669{ 'struct': 'BitmapMigrationBitmapAlias',
670  'data': {
671      'name': 'str',
672      'alias': 'str',
673      '*transform': 'BitmapMigrationBitmapAliasTransform'
674  } }
675
676##
677# @BitmapMigrationNodeAlias:
678#
679# Maps a block node name and the bitmaps it has to aliases for dirty
680# bitmap migration.
681#
682# @node-name: A block node name.
683#
684# @alias: An alias block node name for migration (for example the node
685#     name on the opposite site).
686#
687# @bitmaps: Mappings for the bitmaps on this node.
688#
689# Since: 5.2
690##
691{ 'struct': 'BitmapMigrationNodeAlias',
692  'data': {
693      'node-name': 'str',
694      'alias': 'str',
695      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
696  } }
697
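# Illustrative sketch (hypothetical node and bitmap names): a list of
# BitmapMigrationNodeAlias is passed through the @block-bitmap-mapping
# migration parameter (documented below), mapping one bitmap on one
# node to the aliases used in the migration stream:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "block-bitmap-mapping": [
#          { "node-name": "node0", "alias": "libvirt-drive-0",
#            "bitmaps": [ { "name": "bitmap0", "alias": "bitmap-alias-0",
#                           "transform": { "persistent": true } } ] } ] } }
# <- { "return": {} }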
698##
699# @MigrationParameter:
700#
701# Migration parameters enumeration
702#
703# @announce-initial: Initial delay (in milliseconds) before sending
704#     the first announce (Since 4.0)
705#
706# @announce-max: Maximum delay (in milliseconds) between packets in
707#     the announcement (Since 4.0)
708#
709# @announce-rounds: Number of self-announce packets sent after
710#     migration (Since 4.0)
711#
712# @announce-step: Increase in delay (in milliseconds) between
713#     subsequent packets in the announcement (Since 4.0)
714#
715# @compress-level: Set the compression level to be used in live
716#     migration.  The compression level is an integer between 0 and 9,
717#     where 0 means no compression, 1 means the best compression
718#     speed, and 9 means the best compression ratio, which will consume
719#     more CPU.
720#
721# @compress-threads: Set the compression thread count to be used in
722#     live migration.  The compression thread count is an integer
723#     between 1 and 255.
724#
725# @compress-wait-thread: Controls behavior when all compression
726#     threads are currently busy.  If true (default), wait for a free
727#     compression thread to become available; otherwise, send the page
728#     uncompressed.  (Since 3.1)
729#
730# @decompress-threads: Set the decompression thread count to be used
731#     in live migration.  The decompression thread count is an integer
732#     between 1 and 255.  Usually, decompression is at least 4 times as
733#     fast as compression, so setting decompress-threads to about 1/4
734#     of compress-threads is adequate.
735#
736# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
737#     bytes_xfer_period to trigger throttling.  It is expressed as a
738#     percentage.  The default value is 50.  (Since 5.0)
739#
740# @cpu-throttle-initial: Initial percentage of time guest cpus are
741#     throttled when migration auto-converge is activated.  The
742#     default value is 20. (Since 2.7)
743#
744# @cpu-throttle-increment: throttle percentage increase each time
745#     auto-converge detects that migration is not making progress.
746#     The default value is 10. (Since 2.7)
747#
748# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
749#     stage.  At the tail stage of throttling, the guest is very
750#     sensitive to CPU percentage while @cpu-throttle-increment is
751#     usually excessive at the tail stage.  If this parameter is true,
752#     we will compute the ideal CPU percentage used by the guest, which
753#     may exactly make the dirty rate match the dirty rate threshold.
754#     Then we will choose the smaller throttle increment between the
755#     one specified by @cpu-throttle-increment and the one generated
756#     from the ideal CPU percentage.  Therefore, it is compatible with
757#     traditional throttling, while the throttle increment won't be
758#     excessive at the tail stage.  The default value is false.  (Since
759#     5.1)
760#
761# @tls-creds: ID of the 'tls-creds' object that provides credentials
762#     for establishing a TLS connection over the migration data
763#     channel.  On the outgoing side of the migration, the credentials
764#     must be for a 'client' endpoint, while for the incoming side the
765#     credentials must be for a 'server' endpoint.  Setting this will
766#     enable TLS for all migrations.  The default is unset, resulting
767#     in unsecured migration at the QEMU level.  (Since 2.7)
768#
769# @tls-hostname: hostname of the target host for the migration.  This
770#     is required when using x509 based TLS credentials and the
771#     migration URI does not already include a hostname.  For example
772#     if using fd: or exec: based migration, the hostname must be
773#     provided so that the server's x509 certificate identity can be
774#     validated.  (Since 2.7)
775#
776# @tls-authz: ID of the 'authz' object subclass that provides access
777#     control checking of the TLS x509 certificate distinguished name.
778#     This object is only resolved at time of use, so can be deleted
779#     and recreated on the fly while the migration server is active.
780#     If missing, it will default to denying access (Since 4.0)
781#
782# @max-bandwidth: to set maximum speed for migration.  maximum speed
783#     in bytes per second.  (Since 2.8)
784#
785# @avail-switchover-bandwidth: to set the available bandwidth that
786#     migration can use during switchover phase.  NOTE!  This does not
787#     limit the bandwidth during switchover, but is only used in calculations
788#     when deciding to switch over.  By default, this value is zero,
789#     which means QEMU will estimate the bandwidth automatically.  This can
790#     be set when the estimated value is not accurate, while the user is
791#     able to guarantee such bandwidth is available when switching over.
792#     When specified correctly, this can make the switchover decision much
793#     more accurate.  (Since 8.2)
794#
795# @downtime-limit: set maximum tolerated downtime for migration.
796#     maximum downtime in milliseconds (Since 2.8)
797#
798# @x-checkpoint-delay: The delay time (in ms) between two COLO
799#     checkpoints in periodic mode.  (Since 2.8)
800#
801# @block-incremental: Affects how much storage is migrated when the
802#     block migration capability is enabled.  When false, the entire
803#     storage backing chain is migrated into a flattened image at the
804#     destination; when true, only the active qcow2 layer is migrated
805#     and the destination must already have access to the same backing
806#     chain as was used on the source.  (since 2.10)
807#
808# @multifd-channels: Number of channels used to migrate data in
809#     parallel.  This is the same as the number of sockets used for
810#     migration.  The default value is 2.  (since 4.0)
811#
812# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
813#     needs to be a multiple of the target page size and a power of 2
814#     (Since 2.11)
815#
816# @max-postcopy-bandwidth: Background transfer bandwidth during
817#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
818#     (Since 3.0)
819#
820# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
821#     (Since 3.1)
822#
823# @multifd-compression: Which compression method to use.  Defaults to
824#     none.  (Since 5.0)
825#
826# @multifd-zlib-level: Set the compression level to be used in live
827#     migration.  The compression level is an integer between 0 and 9,
828#     where 0 means no compression, 1 means the best compression
829#     speed, and 9 means the best compression ratio, which will consume
830#     more CPU.  Defaults to 1.  (Since 5.0)
831#
832# @multifd-zstd-level: Set the compression level to be used in live
833#     migration.  The compression level is an integer between 0 and 20,
834#     where 0 means no compression, 1 means the best compression
835#     speed, and 20 means the best compression ratio, which will
836#     consume more CPU.  Defaults to 1.  (Since 5.0)
837#
838# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
839#     aliases for the purpose of dirty bitmap migration.  Such aliases
840#     may for example be the corresponding names on the opposite site.
841#     The mapping must be one-to-one, but not necessarily complete: On
842#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
843#     will be ignored.  On the destination, encountering an unmapped
844#     alias in the incoming migration stream will result in a report,
845#     and all further bitmap migration data will then be discarded.
846#     Note that the destination does not know about bitmaps it does
847#     not receive, so there is no limitation or requirement regarding
848#     the number of bitmaps received, or how they are named, or on
849#     which nodes they are placed.  By default (when this parameter
850#     has never been set), bitmap names are mapped to themselves.
851#     Nodes are mapped to their block device name if there is one, and
852#     to their node name otherwise.  (Since 5.2)
853#
854# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
855#     limit during live migration.  Should be in the range 1 to 1000ms.
856#     Defaults to 1000ms.  (Since 8.1)
857#
858# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
859#     migration.  Defaults to 1.  (Since 8.1)
860#
861# @mode: Migration mode. See description in @MigMode. Default is 'normal'.
862#        (Since 8.2)
863#
864# Features:
865#
866# @deprecated: Member @block-incremental is deprecated.  Use
867#     blockdev-mirror with NBD instead.  Members @compress-level,
868#     @compress-threads, @decompress-threads and @compress-wait-thread
869#     are deprecated because @compress is deprecated.
870#
871# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
872#     are experimental.
873#
874# Since: 2.4
875##
876{ 'enum': 'MigrationParameter',
877  'data': ['announce-initial', 'announce-max',
878           'announce-rounds', 'announce-step',
879           { 'name': 'compress-level', 'features': [ 'deprecated' ] },
880           { 'name': 'compress-threads', 'features': [ 'deprecated' ] },
881           { 'name': 'decompress-threads', 'features': [ 'deprecated' ] },
882           { 'name': 'compress-wait-thread', 'features': [ 'deprecated' ] },
883           'throttle-trigger-threshold',
884           'cpu-throttle-initial', 'cpu-throttle-increment',
885           'cpu-throttle-tailslow',
886           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
887           'avail-switchover-bandwidth', 'downtime-limit',
888           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
889           { 'name': 'block-incremental', 'features': [ 'deprecated' ] },
890           'multifd-channels',
891           'xbzrle-cache-size', 'max-postcopy-bandwidth',
892           'max-cpu-throttle', 'multifd-compression',
893           'multifd-zlib-level', 'multifd-zstd-level',
894           'block-bitmap-mapping',
895           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
896           'vcpu-dirty-limit',
897           'mode'] }
898
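# Illustrative sketch (hypothetical values): @vcpu-dirty-limit and
# @x-vcpu-dirty-limit-period govern the @dirty-limit capability
# described earlier, which requires KVM with the accelerator property
# "dirty-ring-size" set; a client might then do:
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "dirty-limit", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "vcpu-dirty-limit": 100 } }
# <- { "return": {} }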
899##
900# @MigrateSetParameters:
901#
902# @announce-initial: Initial delay (in milliseconds) before sending
903#     the first announce (Since 4.0)
904#
905# @announce-max: Maximum delay (in milliseconds) between packets in
906#     the announcement (Since 4.0)
907#
908# @announce-rounds: Number of self-announce packets sent after
909#     migration (Since 4.0)
910#
911# @announce-step: Increase in delay (in milliseconds) between
912#     subsequent packets in the announcement (Since 4.0)
913#
914# @compress-level: compression level
915#
916# @compress-threads: compression thread count
917#
918# @compress-wait-thread: Controls behavior when all compression
919#     threads are currently busy.  If true (default), wait for a free
920#     compression thread to become available; otherwise, send the page
921#     uncompressed.  (Since 3.1)
922#
923# @decompress-threads: decompression thread count
924#
925# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
926#     bytes_xfer_period to trigger throttling.  It is expressed as a
927#     percentage.  The default value is 50.  (Since 5.0)
928#
929# @cpu-throttle-initial: Initial percentage of time guest cpus are
930#     throttled when migration auto-converge is activated.  The
931#     default value is 20. (Since 2.7)
932#
933# @cpu-throttle-increment: throttle percentage increase each time
934#     auto-converge detects that migration is not making progress.
935#     The default value is 10. (Since 2.7)
936#
937# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
938#     stage.  At the tail stage of throttling, the guest is very
939#     sensitive to CPU percentage while @cpu-throttle-increment is
940#     usually excessive at the tail stage.  If this parameter is true,
941#     we will compute the ideal CPU percentage used by the guest, which
942#     may exactly make the dirty rate match the dirty rate threshold.
943#     Then we will choose the smaller throttle increment between the
944#     one specified by @cpu-throttle-increment and the one generated
945#     from the ideal CPU percentage.  Therefore, it is compatible with
946#     traditional throttling, while the throttle increment won't be
947#     excessive at the tail stage.  The default value is false.  (Since
948#     5.1)
949#
950# @tls-creds: ID of the 'tls-creds' object that provides credentials
951#     for establishing a TLS connection over the migration data
952#     channel.  On the outgoing side of the migration, the credentials
953#     must be for a 'client' endpoint, while for the incoming side the
954#     credentials must be for a 'server' endpoint.  Setting this to a
955#     non-empty string enables TLS for all migrations.  An empty
956#     string means that QEMU will use plain text mode for migration,
957#     rather than TLS (Since 2.9) Previously (since 2.7), this was
958#     reported by omitting tls-creds instead.
959#
960# @tls-hostname: hostname of the target host for the migration.  This
961#     is required when using x509 based TLS credentials and the
962#     migration URI does not already include a hostname.  For example
963#     if using fd: or exec: based migration, the hostname must be
964#     provided so that the server's x509 certificate identity can be
965#     validated.  (Since 2.7) An empty string means that QEMU will use
966#     the hostname associated with the migration URI, if any.  (Since
967#     2.9) Previously (since 2.7), this was reported by omitting
968#     tls-hostname instead.
969#
970# @max-bandwidth: to set maximum speed for migration.  maximum speed
971#     in bytes per second.  (Since 2.8)
972#
973# @avail-switchover-bandwidth: to set the available bandwidth that
974#     migration can use during switchover phase.  NOTE!  This does not
975#     limit the bandwidth during switchover, but is only used in calculations
976#     when deciding to switch over.  By default, this value is zero,
977#     which means QEMU will estimate the bandwidth automatically.  This can
978#     be set when the estimated value is not accurate, while the user is
979#     able to guarantee such bandwidth is available when switching over.
980#     When specified correctly, this can make the switchover decision much
981#     more accurate.  (Since 8.2)
982#
983# @downtime-limit: set maximum tolerated downtime for migration.
984#     maximum downtime in milliseconds (Since 2.8)
985#
986# @x-checkpoint-delay: the delay time between two COLO checkpoints.
987#     (Since 2.8)
988#
989# @block-incremental: Affects how much storage is migrated when the
990#     block migration capability is enabled.  When false, the entire
991#     storage backing chain is migrated into a flattened image at the
992#     destination; when true, only the active qcow2 layer is migrated
993#     and the destination must already have access to the same backing
994#     chain as was used on the source.  (since 2.10)
995#
996# @multifd-channels: Number of channels used to migrate data in
997#     parallel.  This is the same as the number of sockets used for
998#     migration.  The default value is 2.  (since 4.0)
999#
1000# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1001#     needs to be a multiple of the target page size and a power of 2
1002#     (Since 2.11)
1003#
1004# @max-postcopy-bandwidth: Background transfer bandwidth during
1005#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1006#     (Since 3.0)
1007#
1008# @max-cpu-throttle: maximum cpu throttle percentage.  The default
1009#     value is 99. (Since 3.1)
1010#
1011# @multifd-compression: Which compression method to use.  Defaults to
1012#     none.  (Since 5.0)
1013#
1014# @multifd-zlib-level: Set the compression level to be used in live
1015#     migration.  The compression level is an integer between 0 and 9,
1016#     where 0 means no compression, 1 means the best compression
1017#     speed, and 9 means the best compression ratio, which will consume
1018#     more CPU.  Defaults to 1.  (Since 5.0)
1019#
1020# @multifd-zstd-level: Set the compression level to be used in live
1021#     migration.  The compression level is an integer between 0 and 20,
1022#     where 0 means no compression, 1 means the best compression
1023#     speed, and 20 means the best compression ratio, which will
1024#     consume more CPU.  Defaults to 1.  (Since 5.0)
1025#
1026# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1027#     aliases for the purpose of dirty bitmap migration.  Such aliases
1028#     may for example be the corresponding names on the opposite site.
1029#     The mapping must be one-to-one, but not necessarily complete: On
1030#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1031#     will be ignored.  On the destination, encountering an unmapped
1032#     alias in the incoming migration stream will result in a report,
1033#     and all further bitmap migration data will then be discarded.
1034#     Note that the destination does not know about bitmaps it does
1035#     not receive, so there is no limitation or requirement regarding
1036#     the number of bitmaps received, or how they are named, or on
1037#     which nodes they are placed.  By default (when this parameter
1038#     has never been set), bitmap names are mapped to themselves.
1039#     Nodes are mapped to their block device name if there is one, and
1040#     to their node name otherwise.  (Since 5.2)
1041#
1042# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1043#     limit during live migration.  Should be in the range 1 to 1000ms.
1044#     Defaults to 1000ms.  (Since 8.1)
1045#
1046# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
1047#     migration.  Defaults to 1.  (Since 8.1)
1048#
1049# @mode: Migration mode. See description in @MigMode. Default is 'normal'.
1050#        (Since 8.2)
1051#
1052# Features:
1053#
1054# @deprecated: Member @block-incremental is deprecated.  Use
1055#     blockdev-mirror with NBD instead.  Members @compress-level,
1056#     @compress-threads, @decompress-threads and @compress-wait-thread
1057#     are deprecated because @compress is deprecated.
1058#
1059# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1060#     are experimental.
1061#
1062# TODO: either fuse back into MigrationParameters, or make
1063#     MigrationParameters members mandatory
1064#
1065# Since: 2.4
1066##
1067{ 'struct': 'MigrateSetParameters',
1068  'data': { '*announce-initial': 'size',
1069            '*announce-max': 'size',
1070            '*announce-rounds': 'size',
1071            '*announce-step': 'size',
1072            '*compress-level': { 'type': 'uint8',
1073                                 'features': [ 'deprecated' ] },
1074            '*compress-threads':  { 'type': 'uint8',
1075                                    'features': [ 'deprecated' ] },
1076            '*compress-wait-thread':  { 'type': 'bool',
1077                                        'features': [ 'deprecated' ] },
1078            '*decompress-threads':  { 'type': 'uint8',
1079                                      'features': [ 'deprecated' ] },
1080            '*throttle-trigger-threshold': 'uint8',
1081            '*cpu-throttle-initial': 'uint8',
1082            '*cpu-throttle-increment': 'uint8',
1083            '*cpu-throttle-tailslow': 'bool',
1084            '*tls-creds': 'StrOrNull',
1085            '*tls-hostname': 'StrOrNull',
1086            '*tls-authz': 'StrOrNull',
1087            '*max-bandwidth': 'size',
1088            '*avail-switchover-bandwidth': 'size',
1089            '*downtime-limit': 'uint64',
1090            '*x-checkpoint-delay': { 'type': 'uint32',
1091                                     'features': [ 'unstable' ] },
1092            '*block-incremental': { 'type': 'bool',
1093                                    'features': [ 'deprecated' ] },
1094            '*multifd-channels': 'uint8',
1095            '*xbzrle-cache-size': 'size',
1096            '*max-postcopy-bandwidth': 'size',
1097            '*max-cpu-throttle': 'uint8',
1098            '*multifd-compression': 'MultiFDCompression',
1099            '*multifd-zlib-level': 'uint8',
1100            '*multifd-zstd-level': 'uint8',
1101            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1102            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1103                                            'features': [ 'unstable' ] },
1104            '*vcpu-dirty-limit': 'uint64',
1105            '*mode': 'MigMode'} }
1106
1107##
1108# @migrate-set-parameters:
1109#
1110# Set various migration parameters.
1111#
1112# Since: 2.4
1113#
1114# Example:
1115#
1116# -> { "execute": "migrate-set-parameters" ,
1117#      "arguments": { "multifd-channels": 5 } }
1118# <- { "return": {} }
1119##
1120{ 'command': 'migrate-set-parameters', 'boxed': true,
1121  'data': 'MigrateSetParameters' }
1122
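# Illustrative sketch (hypothetical object ID and hostname): to migrate
# over TLS, as described for @tls-creds and @tls-hostname above, the
# source points @tls-creds at a previously created tls-creds object
# and, when the migration URI lacks a hostname, also sets
# @tls-hostname:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "tls0",
#                     "tls-hostname": "dest.example.com" } }
# <- { "return": {} }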
1123##
1124# @MigrationParameters:
1125#
1126# The optional members aren't actually optional.
1127#
1128# @announce-initial: Initial delay (in milliseconds) before sending
1129#     the first announce (Since 4.0)
1130#
1131# @announce-max: Maximum delay (in milliseconds) between packets in
1132#     the announcement (Since 4.0)
1133#
1134# @announce-rounds: Number of self-announce packets sent after
1135#     migration (Since 4.0)
1136#
1137# @announce-step: Increase in delay (in milliseconds) between
1138#     subsequent packets in the announcement (Since 4.0)
1139#
1140# @compress-level: compression level
1141#
1142# @compress-threads: compression thread count
1143#
1144# @compress-wait-thread: Controls behavior when all compression
1145#     threads are currently busy.  If true (default), wait for a free
1146#     compression thread to become available; otherwise, send the page
1147#     uncompressed.  (Since 3.1)
1148#
1149# @decompress-threads: decompression thread count
1150#
1151# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1152#     bytes_xfer_period to trigger throttling.  It is expressed as a
1153#     percentage.  The default value is 50.  (Since 5.0)
1154#
1155# @cpu-throttle-initial: Initial percentage of time guest cpus are
1156#     throttled when migration auto-converge is activated.  (Since
1157#     2.7)
1158#
1159# @cpu-throttle-increment: throttle percentage increase each time
1160#     auto-converge detects that migration is not making progress.
1161#     (Since 2.7)
1162#
1163# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
1164#     stage.  At the tail stage of throttling, the guest is very
1165#     sensitive to CPU percentage while @cpu-throttle-increment is
1166#     usually excessive at the tail stage.  If this parameter is true,
1167#     we will compute the ideal CPU percentage used by the guest, which
1168#     may exactly make the dirty rate match the dirty rate threshold.
1169#     Then we will choose the smaller throttle increment between the
1170#     one specified by @cpu-throttle-increment and the one generated
1171#     from the ideal CPU percentage.  Therefore, it is compatible with
1172#     traditional throttling, while the throttle increment won't be
1173#     excessive at the tail stage.  The default value is false.  (Since
1174#     5.1)
1175#
1176# @tls-creds: ID of the 'tls-creds' object that provides credentials
1177#     for establishing a TLS connection over the migration data
1178#     channel.  On the outgoing side of the migration, the credentials
1179#     must be for a 'client' endpoint, while for the incoming side the
1180#     credentials must be for a 'server' endpoint.  An empty string
1181#     means that QEMU will use plain text mode for migration, rather
1182#     than TLS (Since 2.7) Note: 2.8 reports this by omitting
1183#     tls-creds instead.
1184#
1185# @tls-hostname: hostname of the target host for the migration.  This
1186#     is required when using x509 based TLS credentials and the
1187#     migration URI does not already include a hostname.  For example
1188#     if using fd: or exec: based migration, the hostname must be
1189#     provided so that the server's x509 certificate identity can be
1190#     validated.  (Since 2.7) An empty string means that QEMU will use
1191#     the hostname associated with the migration URI, if any.  (Since
1192#     2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1193#
1194# @tls-authz: ID of the 'authz' object subclass that provides access
1195#     control checking of the TLS x509 certificate distinguished name.
1196#     (Since 4.0)
1197#
1198# @max-bandwidth: to set maximum speed for migration.  maximum speed
1199#     in bytes per second.  (Since 2.8)
1200#
1201# @avail-switchover-bandwidth: to set the available bandwidth that
1202#     migration can use during switchover phase.  NOTE!  This does not
1203#     limit the bandwidth during switchover, but is only used in calculations
1204#     when deciding to switch over.  By default, this value is zero,
1205#     which means QEMU will estimate the bandwidth automatically.  This can
1206#     be set when the estimated value is not accurate, while the user is
1207#     able to guarantee such bandwidth is available when switching over.
1208#     When specified correctly, this can make the switchover decision much
1209#     more accurate.  (Since 8.2)
1210#
1211# @downtime-limit: set maximum tolerated downtime for migration.
1212#     maximum downtime in milliseconds (Since 2.8)
1213#
1214# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1215#     (Since 2.8)
1216#
1217# @block-incremental: Affects how much storage is migrated when the
1218#     block migration capability is enabled.  When false, the entire
1219#     storage backing chain is migrated into a flattened image at the
1220#     destination; when true, only the active qcow2 layer is migrated
1221#     and the destination must already have access to the same backing
1222#     chain as was used on the source.  (since 2.10)
1223#
1224# @multifd-channels: Number of channels used to migrate data in
1225#     parallel.  This is the same as the number of sockets used for
1226#     migration.  The default value is 2.  (since 4.0)
1227#
1228# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1229#     needs to be a multiple of the target page size and a power of 2
1230#     (Since 2.11)
1231#
1232# @max-postcopy-bandwidth: Background transfer bandwidth during
1233#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1234#     (Since 3.0)
1235#
1236# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1237#     (Since 3.1)
1238#
1239# @multifd-compression: Which compression method to use.  Defaults to
1240#     none.  (Since 5.0)
1241#
1242# @multifd-zlib-level: Set the compression level to be used in live
1243#     migration.  The compression level is an integer between 0 and 9,
1244#     where 0 means no compression, 1 means the best compression
1245#     speed, and 9 means the best compression ratio, which will consume
1246#     more CPU.  Defaults to 1.  (Since 5.0)
1247#
1248# @multifd-zstd-level: Set the compression level to be used in live
1249#     migration.  The compression level is an integer between 0 and 20,
1250#     where 0 means no compression, 1 means the best compression
1251#     speed, and 20 means the best compression ratio, which will
1252#     consume more CPU.  Defaults to 1.  (Since 5.0)
1253#
1254# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1255#     aliases for the purpose of dirty bitmap migration.  Such aliases
1256#     may for example be the corresponding names on the opposite site.
1257#     The mapping must be one-to-one, but not necessarily complete: On
1258#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1259#     will be ignored.  On the destination, encountering an unmapped
1260#     alias in the incoming migration stream will result in a report,
1261#     and all further bitmap migration data will then be discarded.
1262#     Note that the destination does not know about bitmaps it does
1263#     not receive, so there is no limitation or requirement regarding
1264#     the number of bitmaps received, or how they are named, or on
1265#     which nodes they are placed.  By default (when this parameter
1266#     has never been set), bitmap names are mapped to themselves.
1267#     Nodes are mapped to their block device name if there is one, and
1268#     to their node name otherwise.  (Since 5.2)
1269#
1270# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1271#     limit during live migration.  Should be in the range 1 to 1000ms.
1272#     Defaults to 1000ms.  (Since 8.1)
1273#
1274# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
1275#     migration.  Defaults to 1.  (Since 8.1)
1276#
1277# @mode: Migration mode. See description in @MigMode. Default is 'normal'.
1278#        (Since 8.2)
1279#
1280# Features:
1281#
1282# @deprecated: Member @block-incremental is deprecated.  Use
1283#     blockdev-mirror with NBD instead.  Members @compress-level,
1284#     @compress-threads, @decompress-threads and @compress-wait-thread
1285#     are deprecated because @compress is deprecated.
1286#
1287# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1288#     are experimental.
1289#
1290# Since: 2.4
1291##
1292{ 'struct': 'MigrationParameters',
1293  'data': { '*announce-initial': 'size',
1294            '*announce-max': 'size',
1295            '*announce-rounds': 'size',
1296            '*announce-step': 'size',
1297            '*compress-level': { 'type': 'uint8',
1298                                 'features': [ 'deprecated' ] },
1299            '*compress-threads': { 'type': 'uint8',
1300                                   'features': [ 'deprecated' ] },
1301            '*compress-wait-thread': { 'type': 'bool',
1302                                       'features': [ 'deprecated' ] },
1303            '*decompress-threads': { 'type': 'uint8',
1304                                     'features': [ 'deprecated' ] },
1305            '*throttle-trigger-threshold': 'uint8',
1306            '*cpu-throttle-initial': 'uint8',
1307            '*cpu-throttle-increment': 'uint8',
1308            '*cpu-throttle-tailslow': 'bool',
1309            '*tls-creds': 'str',
1310            '*tls-hostname': 'str',
1311            '*tls-authz': 'str',
1312            '*max-bandwidth': 'size',
1313            '*avail-switchover-bandwidth': 'size',
1314            '*downtime-limit': 'uint64',
1315            '*x-checkpoint-delay': { 'type': 'uint32',
1316                                     'features': [ 'unstable' ] },
1317            '*block-incremental': { 'type': 'bool',
1318                                    'features': [ 'deprecated' ] },
1319            '*multifd-channels': 'uint8',
1320            '*xbzrle-cache-size': 'size',
1321            '*max-postcopy-bandwidth': 'size',
1322            '*max-cpu-throttle': 'uint8',
1323            '*multifd-compression': 'MultiFDCompression',
1324            '*multifd-zlib-level': 'uint8',
1325            '*multifd-zstd-level': 'uint8',
1326            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1327            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1328                                            'features': [ 'unstable' ] },
1329            '*vcpu-dirty-limit': 'uint64',
1330            '*mode': 'MigMode'} }
1331
1332##
1333# @query-migrate-parameters:
1334#
1335# Returns information about the current migration parameters
1336#
1337# Returns: @MigrationParameters
1338#
1339# Since: 2.4
1340#
1341# Example:
1342#
1343# -> { "execute": "query-migrate-parameters" }
1344# <- { "return": {
1345#          "multifd-channels": 2,
1346#          "cpu-throttle-increment": 10,
1347#          "cpu-throttle-initial": 20,
1348#          "max-bandwidth": 33554432,
1349#          "downtime-limit": 300
1350#       }
1351#    }
1352##
1353{ 'command': 'query-migrate-parameters',
1354  'returns': 'MigrationParameters' }
1355
1356##
1357# @migrate-start-postcopy:
1358#
1359# Followup to a migration command to switch the migration to postcopy
1360# mode.  The postcopy-ram capability must be set on both source and
1361# destination before the original migration command.
1362#
1363# Since: 2.5
1364#
1365# Example:
1366#
1367# -> { "execute": "migrate-start-postcopy" }
1368# <- { "return": {} }
1369##
1370{ 'command': 'migrate-start-postcopy' }
1371
1372##
1373# @MIGRATION:
1374#
1375# Emitted when a migration event happens
1376#
1377# @status: @MigrationStatus describing the current migration status.
1378#
1379# Since: 2.4
1380#
1381# Example:
1382#
1383# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1384#     "event": "MIGRATION",
1385#     "data": {"status": "completed"} }
1386##
1387{ 'event': 'MIGRATION',
1388  'data': {'status': 'MigrationStatus'}}
1389
1390##
1391# @MIGRATION_PASS:
1392#
1393# Emitted from the source side of a migration at the start of each
1394# pass (when it syncs the dirty bitmap)
1395#
1396# @pass: An incrementing count (starting at 1 on the first pass)
1397#
1398# Since: 2.6
1399#
1400# Example:
1401#
1402# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1403#       "event": "MIGRATION_PASS", "data": {"pass": 2} }
1404##
1405{ 'event': 'MIGRATION_PASS',
1406  'data': { 'pass': 'int' } }
1407
1408##
1409# @COLOMessage:
1410#
# The messages transmitted between the Primary side and the Secondary
# side.
1412#
1413# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1414#
1415# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1416#     checkpointing
1417#
1418# @checkpoint-reply: SVM gets PVM's checkpoint request
1419#
1420# @vmstate-send: VM's state will be sent by PVM.
1421#
1422# @vmstate-size: The total size of VMstate.
1423#
1424# @vmstate-received: VM's state has been received by SVM.
1425#
1426# @vmstate-loaded: VM's state has been loaded by SVM.
1427#
1428# Since: 2.8
1429##
1430{ 'enum': 'COLOMessage',
1431  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1432            'vmstate-send', 'vmstate-size', 'vmstate-received',
1433            'vmstate-loaded' ] }
1434
1435##
1436# @COLOMode:
1437#
# The current COLO mode.
1439#
1440# @none: COLO is disabled.
1441#
1442# @primary: COLO node in primary side.
1443#
# @secondary: COLO node in secondary side.
1445#
1446# Since: 2.8
1447##
1448{ 'enum': 'COLOMode',
1449  'data': [ 'none', 'primary', 'secondary'] }
1450
1451##
1452# @FailoverStatus:
1453#
1454# An enumeration of COLO failover status
1455#
1456# @none: no failover has ever happened
1457#
# @require: a failover request has been received but not yet handled
1459#
1460# @active: in the process of doing failover
1461#
# @completed: the failover process has completed
1463#
1464# @relaunch: restart the failover process, from 'none' -> 'completed'
1465#     (Since 2.9)
1466#
1467# Since: 2.8
1468##
1469{ 'enum': 'FailoverStatus',
1470  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1471
1472##
1473# @COLO_EXIT:
1474#
# Emitted when the VM exits COLO mode, either because of an error or
# at the request of the user.
1477#
1478# @mode: report COLO mode when COLO exited.
1479#
1480# @reason: describes the reason for the COLO exit.
1481#
1482# Since: 3.1
1483#
1484# Example:
1485#
1486# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1487#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1488##
1489{ 'event': 'COLO_EXIT',
1490  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1491
1492##
1493# @COLOExitReason:
1494#
1495# The reason for a COLO exit.
1496#
1497# @none: failover has never happened.  This state does not occur in
1498#     the COLO_EXIT event, and is only visible in the result of
1499#     query-colo-status.
1500#
1501# @request: COLO exit is due to an external request.
1502#
1503# @error: COLO exit is due to an internal error.
1504#
1505# @processing: COLO is currently handling a failover (since 4.0).
1506#
1507# Since: 3.1
1508##
1509{ 'enum': 'COLOExitReason',
1510  'data': [ 'none', 'request', 'error' , 'processing' ] }
1511
1512##
1513# @x-colo-lost-heartbeat:
1514#
# Tell QEMU that the heartbeat is lost, and request it to perform
# takeover procedures.  If this command is sent to the PVM, the
# Primary side will exit COLO mode.  If sent to the Secondary, the
# Secondary side will run failover work, then take over server
# operation to become the service VM.
1520#
1521# Features:
1522#
1523# @unstable: This command is experimental.
1524#
1525# Since: 2.8
1526#
1527# Example:
1528#
1529# -> { "execute": "x-colo-lost-heartbeat" }
1530# <- { "return": {} }
1531##
1532{ 'command': 'x-colo-lost-heartbeat',
1533  'features': [ 'unstable' ],
1534  'if': 'CONFIG_REPLICATION' }
1535
1536##
1537# @migrate_cancel:
1538#
# Cancel the currently executing migration process.
1540#
1541# Returns: nothing on success
1542#
1543# Notes: This command succeeds even if there is no migration process
1544#     running.
1545#
1546# Since: 0.14
1547#
1548# Example:
1549#
1550# -> { "execute": "migrate_cancel" }
1551# <- { "return": {} }
1552##
1553{ 'command': 'migrate_cancel' }
1554
1555##
1556# @migrate-continue:
1557#
1558# Continue migration when it's in a paused state.
1559#
1560# @state: The state the migration is currently expected to be in
1561#
1562# Returns: nothing on success
1563#
1564# Since: 2.11
1565#
1566# Example:
1567#
1568# -> { "execute": "migrate-continue" , "arguments":
1569#      { "state": "pre-switchover" } }
1570# <- { "return": {} }
1571##
1572{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1573
1574##
1575# @migrate:
1576#
# Migrates the currently running guest to another Virtual Machine.
1578#
1579# @uri: the Uniform Resource Identifier of the destination VM
1580#
1581# @blk: do block migration (full disk copy)
1582#
1583# @inc: incremental disk copy migration
1584#
1585# @detach: this argument exists only for compatibility reasons and is
1586#     ignored by QEMU
1587#
1588# @resume: resume one paused migration, default "off". (since 3.0)
1589#
1590# Features:
1591#
1592# @deprecated: Members @inc and @blk are deprecated.  Use
1593#     blockdev-mirror with NBD instead.
1594#
1595# Returns: nothing on success
1596#
1597# Since: 0.14
1598#
1599# Notes:
1600#
1601# 1. The 'query-migrate' command should be used to check migration's
1602#    progress and final result (this information is provided by the
1603#    'status' member)
1604#
1605# 2. All boolean arguments default to false
1606#
1607# 3. The user Monitor's "detach" argument is invalid in QMP and should
1608#    not be used
1609#
1610# Example:
1611#
1612# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1613# <- { "return": {} }
1614##
1615{ 'command': 'migrate',
1616  'data': {'uri': 'str',
1617           '*blk': { 'type': 'bool', 'features': [ 'deprecated' ] },
1618           '*inc': { 'type': 'bool', 'features': [ 'deprecated' ] },
1619           '*detach': 'bool', '*resume': 'bool' } }
1620
1621##
1622# @migrate-incoming:
1623#
# Start an incoming migration.  QEMU must have been started with
# -incoming defer.
1626#
1627# @uri: The Uniform Resource Identifier identifying the source or
1628#     address to listen on
1629#
1630# Returns: nothing on success
1631#
1632# Since: 2.3
1633#
1634# Notes:
1635#
1636# 1. It's a bad idea to use a string for the uri, but it needs
1637#    to stay compatible with -incoming and the format of the uri
1638#    is already exposed above libvirt.
1639#
1640# 2. QEMU must be started with -incoming defer to allow
1641#    migrate-incoming to be used.
1642#
1643# 3. The uri format is the same as for -incoming
1644#
1645# Example:
1646#
1647# -> { "execute": "migrate-incoming",
1648#      "arguments": { "uri": "tcp::4446" } }
1649# <- { "return": {} }
1650##
1651{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1652
1653##
1654# @xen-save-devices-state:
1655#
1656# Save the state of all devices to file.  The RAM and the block
1657# devices of the VM are not saved by this command.
1658#
1659# @filename: the file to save the state of the devices to as binary
1660#     data.  See xen-save-devices-state.txt for a description of the
1661#     binary format.
1662#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration.  Defaults to true.  (since 2.11)
1665#
1666# Returns: Nothing on success
1667#
1668# Since: 1.1
1669#
1670# Example:
1671#
1672# -> { "execute": "xen-save-devices-state",
1673#      "arguments": { "filename": "/tmp/save" } }
1674# <- { "return": {} }
1675##
1676{ 'command': 'xen-save-devices-state',
1677  'data': {'filename': 'str', '*live':'bool' } }
1678
1679##
1680# @xen-set-global-dirty-log:
1681#
1682# Enable or disable the global dirty log mode.
1683#
1684# @enable: true to enable, false to disable.
1685#
1686# Returns: nothing
1687#
1688# Since: 1.3
1689#
1690# Example:
1691#
1692# -> { "execute": "xen-set-global-dirty-log",
1693#      "arguments": { "enable": true } }
1694# <- { "return": {} }
1695##
1696{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1697
1698##
1699# @xen-load-devices-state:
1700#
1701# Load the state of all devices from file.  The RAM and the block
1702# devices of the VM are not loaded by this command.
1703#
1704# @filename: the file to load the state of the devices from as binary
1705#     data.  See xen-save-devices-state.txt for a description of the
1706#     binary format.
1707#
1708# Since: 2.7
1709#
1710# Example:
1711#
1712# -> { "execute": "xen-load-devices-state",
1713#      "arguments": { "filename": "/tmp/resume" } }
1714# <- { "return": {} }
1715##
1716{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1717
1718##
1719# @xen-set-replication:
1720#
1721# Enable or disable replication.
1722#
1723# @enable: true to enable, false to disable.
1724#
1725# @primary: true for primary or false for secondary.
1726#
# @failover: true to do failover, false to stop.  Cannot be specified
#     if 'enable' is true.  Default value is false.
1729#
1730# Returns: nothing.
1731#
1732# Example:
1733#
1734# -> { "execute": "xen-set-replication",
1735#      "arguments": {"enable": true, "primary": false} }
1736# <- { "return": {} }
1737#
1738# Since: 2.9
1739##
1740{ 'command': 'xen-set-replication',
1741  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1742  'if': 'CONFIG_REPLICATION' }
1743
1744##
1745# @ReplicationStatus:
1746#
1747# The result format for 'query-xen-replication-status'.
1748#
1749# @error: true if an error happened, false if replication is normal.
1750#
1751# @desc: the human readable error description string, when @error is
1752#     'true'.
1753#
1754# Since: 2.9
1755##
1756{ 'struct': 'ReplicationStatus',
1757  'data': { 'error': 'bool', '*desc': 'str' },
1758  'if': 'CONFIG_REPLICATION' }
1759
1760##
1761# @query-xen-replication-status:
1762#
1763# Query replication status while the vm is running.
1764#
1765# Returns: A @ReplicationStatus object showing the status.
1766#
1767# Example:
1768#
1769# -> { "execute": "query-xen-replication-status" }
1770# <- { "return": { "error": false } }
1771#
1772# Since: 2.9
1773##
1774{ 'command': 'query-xen-replication-status',
1775  'returns': 'ReplicationStatus',
1776  'if': 'CONFIG_REPLICATION' }
1777
1778##
1779# @xen-colo-do-checkpoint:
1780#
1781# Xen uses this command to notify replication to trigger a checkpoint.
1782#
1783# Returns: nothing.
1784#
1785# Example:
1786#
1787# -> { "execute": "xen-colo-do-checkpoint" }
1788# <- { "return": {} }
1789#
1790# Since: 2.9
1791##
1792{ 'command': 'xen-colo-do-checkpoint',
1793  'if': 'CONFIG_REPLICATION' }
1794
1795##
1796# @COLOStatus:
1797#
1798# The result format for 'query-colo-status'.
1799#
1800# @mode: COLO running mode.  If COLO is running, this field will
1801#     return 'primary' or 'secondary'.
1802#
# @last-mode: COLO last running mode.  If COLO is running, this field
#     will return the same value as @mode; after failover, this field
#     can be used to get the last COLO mode.  (since 4.0)
1806#
1807# @reason: describes the reason for the COLO exit.
1808#
1809# Since: 3.1
1810##
1811{ 'struct': 'COLOStatus',
1812  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1813            'reason': 'COLOExitReason' },
1814  'if': 'CONFIG_REPLICATION' }
1815
1816##
1817# @query-colo-status:
1818#
1819# Query COLO status while the vm is running.
1820#
1821# Returns: A @COLOStatus object showing the status.
1822#
1823# Example:
1824#
1825# -> { "execute": "query-colo-status" }
1826# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1827#
1828# Since: 3.1
1829##
1830{ 'command': 'query-colo-status',
1831  'returns': 'COLOStatus',
1832  'if': 'CONFIG_REPLICATION' }
1833
1834##
1835# @migrate-recover:
1836#
1837# Provide a recovery migration stream URI.
1838#
# @uri: the URI to be used for the recovery of the migration stream.
1840#
1841# Returns: nothing.
1842#
1843# Example:
1844#
1845# -> { "execute": "migrate-recover",
1846#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1847# <- { "return": {} }
1848#
1849# Since: 3.0
1850##
1851{ 'command': 'migrate-recover',
1852  'data': { 'uri': 'str' },
1853  'allow-oob': true }
1854
1855##
1856# @migrate-pause:
1857#
1858# Pause a migration.  Currently it only supports postcopy.
1859#
1860# Returns: nothing.
1861#
1862# Example:
1863#
1864# -> { "execute": "migrate-pause" }
1865# <- { "return": {} }
1866#
1867# Since: 3.0
1868##
1869{ 'command': 'migrate-pause', 'allow-oob': true }
1870
1871##
1872# @UNPLUG_PRIMARY:
1873#
# Emitted from the source side of a migration when the migration state
# is WAIT_UNPLUG.  The device was unplugged by the guest operating
# system.  The device's resources in QEMU are kept on standby so that
# it can be re-plugged in case of migration failure.
1878#
1879# @device-id: QEMU device id of the unplugged device
1880#
1881# Since: 4.2
1882#
1883# Example:
1884#
1885# <- { "event": "UNPLUG_PRIMARY",
1886#      "data": { "device-id": "hostdev0" },
1887#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1888##
1889{ 'event': 'UNPLUG_PRIMARY',
1890  'data': { 'device-id': 'str' } }
1891
1892##
1893# @DirtyRateVcpu:
1894#
1895# Dirty rate of vcpu.
1896#
1897# @id: vcpu index.
1898#
1899# @dirty-rate: dirty rate.
1900#
1901# Since: 6.2
1902##
1903{ 'struct': 'DirtyRateVcpu',
1904  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1905
1906##
1907# @DirtyRateStatus:
1908#
1909# Dirty page rate measurement status.
1910#
1911# @unstarted: measuring thread has not been started yet
1912#
1913# @measuring: measuring thread is running
1914#
1915# @measured: dirty page rate is measured and the results are available
1916#
1917# Since: 5.2
1918##
1919{ 'enum': 'DirtyRateStatus',
1920  'data': [ 'unstarted', 'measuring', 'measured'] }
1921
1922##
1923# @DirtyRateMeasureMode:
1924#
1925# Method used to measure dirty page rate.  Differences between
1926# available methods are explained in @calc-dirty-rate.
1927#
1928# @page-sampling: use page sampling
1929#
1930# @dirty-ring: use dirty ring
1931#
1932# @dirty-bitmap: use dirty bitmap
1933#
1934# Since: 6.2
1935##
1936{ 'enum': 'DirtyRateMeasureMode',
1937  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1938
1939##
1940# @TimeUnit:
1941#
# Specifies the unit in which a time-related value is expressed.
1943#
1944# @second: value is in seconds
1945#
1946# @millisecond: value is in milliseconds
1947#
# Since: 8.2
1950##
1951{ 'enum': 'TimeUnit',
1952  'data': ['second', 'millisecond'] }
1953
1954##
1955# @DirtyRateInfo:
1956#
1957# Information about measured dirty page rate.
1958#
1959# @dirty-rate: an estimate of the dirty page rate of the VM in units
1960#     of MiB/s.  Value is present only when @status is 'measured'.
1961#
1962# @status: current status of dirty page rate measurements
1963#
# @start-time: start time of the calculation, in seconds
1965#
1966# @calc-time: time period for which dirty page rate was measured,
1967#     expressed and rounded down to @calc-time-unit.
1968#
1969# @calc-time-unit: time unit of @calc-time  (Since 8.2)
1970#
1971# @sample-pages: number of sampled pages per GiB of guest memory.
1972#     Valid only in page-sampling mode (Since 6.1)
1973#
1974# @mode: mode that was used to measure dirty page rate (Since 6.2)
1975#
1976# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1977#     specified (Since 6.2)
1978#
1979# Since: 5.2
1980##
1981{ 'struct': 'DirtyRateInfo',
1982  'data': {'*dirty-rate': 'int64',
1983           'status': 'DirtyRateStatus',
1984           'start-time': 'int64',
1985           'calc-time': 'int64',
1986           'calc-time-unit': 'TimeUnit',
1987           'sample-pages': 'uint64',
1988           'mode': 'DirtyRateMeasureMode',
1989           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1990
1991##
1992# @calc-dirty-rate:
1993#
1994# Start measuring dirty page rate of the VM.  Results can be retrieved
1995# with @query-dirty-rate after measurements are completed.
1996#
1997# Dirty page rate is the number of pages changed in a given time
1998# period expressed in MiB/s.  The following methods of calculation are
1999# available:
2000#
2001# 1. In page sampling mode, a random subset of pages are selected and
2002#    hashed twice: once at the beginning of measurement time period,
2003#    and once again at the end.  If two hashes for some page are
2004#    different, the page is counted as changed.  Since this method
2005#    relies on sampling and hashing, calculated dirty page rate is
2006#    only an estimate of its true value.  Increasing @sample-pages
2007#    improves estimation quality at the cost of higher computational
2008#    overhead.
2009#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts page
#    faults.  Information about modified pages is collected into a
2013#    bitmap, where each bit corresponds to one guest page.  This mode
2014#    requires that KVM accelerator property "dirty-ring-size" is *not*
2015#    set.
2016#
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring buffer.
#    This mode tracks page modifications for each vCPU separately.  It
#    requires that KVM accelerator property "dirty-ring-size" is set.
2021#
2022# @calc-time: time period for which dirty page rate is calculated.
2023#     By default it is specified in seconds, but the unit can be set
2024#     explicitly with @calc-time-unit.  Note that larger @calc-time
2025#     values will typically result in smaller dirty page rates because
#     page dirtying is a one-time event.  Once some page is counted
#     as dirty during the @calc-time period, further writes to this
#     page will not increase the dirty page rate anymore.
2029#
2030# @calc-time-unit: time unit in which @calc-time is specified.
2031#     By default it is seconds. (Since 8.2)
2032#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Default value is 512.  For 4KiB guest pages this corresponds to
#     a sampling ratio of 0.2%.  This argument is used only in page
2036#     sampling mode.  (Since 6.1)
2037#
2038# @mode: mechanism for tracking dirty pages.  Default value is
2039#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
2040#     (Since 6.1)
2041#
2042# Since: 5.2
2043#
2044# Example:
2045#
2046# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
2047#                                                 'sample-pages': 512} }
2048# <- { "return": {} }
2049#
2050# Measure dirty rate using dirty bitmap for 500 milliseconds:
2051#
2052# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
2053#     "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
2054#
2055# <- { "return": {} }
2056##
2057{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
2058                                         '*calc-time-unit': 'TimeUnit',
2059                                         '*sample-pages': 'int',
2060                                         '*mode': 'DirtyRateMeasureMode'} }
2061
2062##
2063# @query-dirty-rate:
2064#
2065# Query results of the most recent invocation of @calc-dirty-rate.
2066#
2067# @calc-time-unit: time unit in which to report calculation time.
2068#     By default it is reported in seconds. (Since 8.2)
2069#
2070# Since: 5.2
2071#
2072# Examples:
2073#
2074# 1. Measurement is in progress:
2075#
2076# <- {"status": "measuring", "sample-pages": 512,
2077#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2078#     "calc-time-unit": "second"}
2079#
2080# 2. Measurement has been completed:
2081#
2082# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
2083#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2084#     "calc-time-unit": "second"}
2085##
2086{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
2087                                 'returns': 'DirtyRateInfo' }
2088
2089##
2090# @DirtyLimitInfo:
2091#
2092# Dirty page rate limit information of a virtual CPU.
2093#
2094# @cpu-index: index of a virtual CPU.
2095#
2096# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
2097#     CPU, 0 means unlimited.
2098#
2099# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
2100#
2101# Since: 7.1
2102##
2103{ 'struct': 'DirtyLimitInfo',
2104  'data': { 'cpu-index': 'int',
2105            'limit-rate': 'uint64',
2106            'current-rate': 'uint64' } }
2107
2108##
2109# @set-vcpu-dirty-limit:
2110#
2111# Set the upper limit of dirty page rate for virtual CPUs.
2112#
2113# Requires KVM with accelerator property "dirty-ring-size" set.  A
2114# virtual CPU's dirty page rate is a measure of its memory load.  To
2115# observe dirty page rates, use @calc-dirty-rate.
2116#
2117# @cpu-index: index of a virtual CPU, default is all.
2118#
2119# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
2120#
2121# Since: 7.1
2122#
2123# Example:
2124#
2125# -> {"execute": "set-vcpu-dirty-limit"}
2126#     "arguments": { "dirty-rate": 200,
2127#                    "cpu-index": 1 } }
2128# <- { "return": {} }
2129##
2130{ 'command': 'set-vcpu-dirty-limit',
2131  'data': { '*cpu-index': 'int',
2132            'dirty-rate': 'uint64' } }
2133
2134##
2135# @cancel-vcpu-dirty-limit:
2136#
2137# Cancel the upper limit of dirty page rate for virtual CPUs.
2138#
# Cancel the dirty page rate limit for the vCPU which has been set
# with the set-vcpu-dirty-limit command.  Note that this command
# requires dirty ring support, the same as "set-vcpu-dirty-limit".
2142#
2143# @cpu-index: index of a virtual CPU, default is all.
2144#
2145# Since: 7.1
2146#
2147# Example:
2148#
2149# -> {"execute": "cancel-vcpu-dirty-limit"},
2150#     "arguments": { "cpu-index": 1 } }
2151# <- { "return": {} }
2152##
2153{ 'command': 'cancel-vcpu-dirty-limit',
2154  'data': { '*cpu-index': 'int'} }
2155
2156##
2157# @query-vcpu-dirty-limit:
2158#
2159# Returns information about virtual CPU dirty page rate limits, if
2160# any.
2161#
2162# Since: 7.1
2163#
2164# Example:
2165#
2166# -> {"execute": "query-vcpu-dirty-limit"}
2167# <- {"return": [
2168#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2169#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2170##
2171{ 'command': 'query-vcpu-dirty-limit',
2172  'returns': [ 'DirtyLimitInfo' ] }
2173
2174##
2175# @MigrationThreadInfo:
2176#
# Information about migration threads
#
# @name: the name of the migration thread
2180#
2181# @thread-id: ID of the underlying host thread
2182#
2183# Since: 7.2
2184##
2185{ 'struct': 'MigrationThreadInfo',
2186  'data': {'name': 'str',
2187           'thread-id': 'int'} }
2188
2189##
2190# @query-migrationthreads:
2191#
# Returns information about the migration threads.
#
# Returns: a list of @MigrationThreadInfo
2197#
2198# Since: 7.2
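#
# Example (illustrative; thread names and IDs depend on the host and
# the migration in progress):
#
# -> { "execute": "query-migrationthreads" }
# <- { "return": [ { "name": "live_migration", "thread-id": 12345 } ] }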
2199##
2200{ 'command': 'query-migrationthreads',
2201  'returns': ['MigrationThreadInfo'] }
2202
2203##
2204# @snapshot-save:
2205#
2206# Save a VM snapshot
2207#
2208# @job-id: identifier for the newly created job
2209#
2210# @tag: name of the snapshot to create
2211#
2212# @vmstate: block device node name to save vmstate to
2213#
2214# @devices: list of block device node names to save a snapshot to
2215#
2216# Applications should not assume that the snapshot save is complete
2217# when this command returns.  The job commands / events must be used
2218# to determine completion and to fetch details of any errors that
2219# arise.
2220#
2221# Note that execution of the guest CPUs may be stopped during the time
2222# it takes to save the snapshot.  A future version of QEMU may ensure
2223# CPUs are executing continuously.
2224#
2225# It is strongly recommended that @devices contain all writable block
2226# device nodes if a consistent snapshot is required.
2227#
2228# If @tag already exists, an error will be reported
2229#
2230# Returns: nothing
2231#
2232# Example:
2233#
2234# -> { "execute": "snapshot-save",
2235#      "arguments": {
2236#         "job-id": "snapsave0",
2237#         "tag": "my-snap",
2238#         "vmstate": "disk0",
2239#         "devices": ["disk0", "disk1"]
2240#      }
2241#    }
2242# <- { "return": { } }
2243# <- {"event": "JOB_STATUS_CHANGE",
2244#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2245#     "data": {"status": "created", "id": "snapsave0"}}
2246# <- {"event": "JOB_STATUS_CHANGE",
2247#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2248#     "data": {"status": "running", "id": "snapsave0"}}
2249# <- {"event": "STOP",
2250#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2251# <- {"event": "RESUME",
2252#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2253# <- {"event": "JOB_STATUS_CHANGE",
2254#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2255#     "data": {"status": "waiting", "id": "snapsave0"}}
2256# <- {"event": "JOB_STATUS_CHANGE",
2257#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2258#     "data": {"status": "pending", "id": "snapsave0"}}
2259# <- {"event": "JOB_STATUS_CHANGE",
2260#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2261#     "data": {"status": "concluded", "id": "snapsave0"}}
2262# -> {"execute": "query-jobs"}
2263# <- {"return": [{"current-progress": 1,
2264#                 "status": "concluded",
2265#                 "total-progress": 1,
2266#                 "type": "snapshot-save",
2267#                 "id": "snapsave0"}]}
2268#
2269# Since: 6.0
2270##
2271{ 'command': 'snapshot-save',
2272  'data': { 'job-id': 'str',
2273            'tag': 'str',
2274            'vmstate': 'str',
2275            'devices': ['str'] } }
2276
2277##
2278# @snapshot-load:
2279#
2280# Load a VM snapshot
2281#
2282# @job-id: identifier for the newly created job
2283#
2284# @tag: name of the snapshot to load.
2285#
2286# @vmstate: block device node name to load vmstate from
2287#
2288# @devices: list of block device node names to load a snapshot from
2289#
2290# Applications should not assume that the snapshot load is complete
2291# when this command returns.  The job commands / events must be used
2292# to determine completion and to fetch details of any errors that
2293# arise.
2294#
2295# Note that execution of the guest CPUs will be stopped during the
2296# time it takes to load the snapshot.
2297#
2298# It is strongly recommended that @devices contain all writable block
2299# device nodes that can have changed since the original @snapshot-save
2300# command execution.
2301#
2302# Returns: nothing
2303#
2304# Example:
2305#
2306# -> { "execute": "snapshot-load",
2307#      "arguments": {
2308#         "job-id": "snapload0",
2309#         "tag": "my-snap",
2310#         "vmstate": "disk0",
2311#         "devices": ["disk0", "disk1"]
2312#      }
2313#    }
2314# <- { "return": { } }
2315# <- {"event": "JOB_STATUS_CHANGE",
2316#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2317#     "data": {"status": "created", "id": "snapload0"}}
2318# <- {"event": "JOB_STATUS_CHANGE",
2319#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2320#     "data": {"status": "running", "id": "snapload0"}}
2321# <- {"event": "STOP",
2322#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2323# <- {"event": "RESUME",
2324#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2325# <- {"event": "JOB_STATUS_CHANGE",
2326#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2327#     "data": {"status": "waiting", "id": "snapload0"}}
2328# <- {"event": "JOB_STATUS_CHANGE",
2329#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2330#     "data": {"status": "pending", "id": "snapload0"}}
2331# <- {"event": "JOB_STATUS_CHANGE",
2332#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2333#     "data": {"status": "concluded", "id": "snapload0"}}
2334# -> {"execute": "query-jobs"}
2335# <- {"return": [{"current-progress": 1,
2336#                 "status": "concluded",
2337#                 "total-progress": 1,
2338#                 "type": "snapshot-load",
2339#                 "id": "snapload0"}]}
2340#
2341# Since: 6.0
2342##
2343{ 'command': 'snapshot-load',
2344  'data': { 'job-id': 'str',
2345            'tag': 'str',
2346            'vmstate': 'str',
2347            'devices': ['str'] } }
2348
2349##
2350# @snapshot-delete:
2351#
2352# Delete a VM snapshot
2353#
2354# @job-id: identifier for the newly created job
2355#
2356# @tag: name of the snapshot to delete.
2357#
2358# @devices: list of block device node names to delete a snapshot from
2359#
2360# Applications should not assume that the snapshot delete is complete
2361# when this command returns.  The job commands / events must be used
2362# to determine completion and to fetch details of any errors that
2363# arise.
2364#
2365# Returns: nothing
2366#
2367# Example:
2368#
2369# -> { "execute": "snapshot-delete",
2370#      "arguments": {
2371#         "job-id": "snapdelete0",
2372#         "tag": "my-snap",
2373#         "devices": ["disk0", "disk1"]
2374#      }
2375#    }
2376# <- { "return": { } }
2377# <- {"event": "JOB_STATUS_CHANGE",
2378#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2379#     "data": {"status": "created", "id": "snapdelete0"}}
2380# <- {"event": "JOB_STATUS_CHANGE",
2381#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2382#     "data": {"status": "running", "id": "snapdelete0"}}
2383# <- {"event": "JOB_STATUS_CHANGE",
2384#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2385#     "data": {"status": "waiting", "id": "snapdelete0"}}
2386# <- {"event": "JOB_STATUS_CHANGE",
2387#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2388#     "data": {"status": "pending", "id": "snapdelete0"}}
2389# <- {"event": "JOB_STATUS_CHANGE",
2390#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2391#     "data": {"status": "concluded", "id": "snapdelete0"}}
2392# -> {"execute": "query-jobs"}
2393# <- {"return": [{"current-progress": 1,
2394#                 "status": "concluded",
2395#                 "total-progress": 1,
2396#                 "type": "snapshot-delete",
2397#                 "id": "snapdelete0"}]}
2398#
2399# Since: 6.0
2400##
2401{ 'command': 'snapshot-delete',
2402  'data': { 'job-id': 'str',
2403            'tag': 'str',
2404            'devices': ['str'] } }
2405