# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: number of bytes already transferred to the target VM
#
# @remaining: number of bytes remaining to be transferred to the
#     target VM
#
# @total: total number of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages.  Always zero, only provided
#     for compatibility (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec.  (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per
#     second (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy
#     phase (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages.  This is
#     between 0 and @dirty-sync-count * @multifd-channels.
#     (since 7.1)
#
# Features:
#
# @deprecated: Member @skipped is always zero since 1.5.3
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }
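
# Informal note (not part of the schema documentation above): the
# page-based counters are related by simple arithmetic.  @normal-bytes
# is expected to be @normal multiplied by @page-size; e.g. with
# @normal = 3333 and a 4096-byte page size, @normal-bytes would be
# 3333 * 4096 = 13651968.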

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: number of bytes already transferred to the target VM
#
# @pages: number of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: number of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: number of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode.
#     (since 2.5)
#
# @postcopy-paused: during postcopy but paused.  (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy.
#     (since 3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance; the VM cannot get
#     into this state unless the colo capability is enabled for
#     migration.  (since 2.8)
#
# @pre-switchover: Paused before device serialisation.  (since 2.11)
#
# @device: During device serialisation when pause-before-switchover
#     is enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed.  (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }
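
# As an informal illustration (not taken from the schema), a
# successful precopy migration with the "events" capability enabled
# typically reports a status sequence such as setup -> active ->
# completed via the MIGRATION event defined later in this file:
#
# <- { "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#      "event": "MIGRATION", "data": {"status": "setup"} }
# <- { "timestamp": {"seconds": 1432121973, "microseconds": 12345},
#      "event": "MIGRATION", "data": {"status": "active"} }
# <- { "timestamp": {"seconds": 1432121975, "microseconds": 67890},
#      "event": "MIGRATION", "data": {"status": "completed"} }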

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: number of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about the current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly; total
#     downtime in milliseconds for the guest.  (since 1.3)
#
# @expected-downtime: only present while migration is active;
#     expected downtime in milliseconds for the guest in last walk of
#     the dirty bitmap.  (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued.  This
#     is designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves.  (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge.  This is only present when
#     auto-converge has started throttling guest cpus.  (Since 2.7)
#
# @error-desc: the human readable error description string.  Clients
#     should not attempt to parse the error strings.  (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPUs were blocked during
#     postcopy live migration.  This is only present when the
#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled.  (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked.  Present and non-empty when migration is blocked.
#     (since 6.0)
#
# @dirty-limit-throttle-time-per-round: Maximum throttle time
#     (in microseconds) of virtual CPUs each dirty ring full round,
#     which shows how MigrationCapability dirty-limit affects the
#     guest during live migration.  (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) for each dirty ring full round.  The value
#     equals the dirty ring memory size divided by the average dirty
#     page rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly.  Note that
#     zero means guest doesn't dirty memory.  (Since 8.1)
#
# Features:
#
# @deprecated: Member @disk is deprecated because block migration is.
#     Member @compression is deprecated because it is unreliable and
#     untested.  It is recommended to use multifd migration, which
#     offers an alternative compression implementation that is
#     reliable and tested.
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': { 'type': 'MigrationStats', 'features': [ 'deprecated' ] },
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': { 'type': 'CompressionStats', 'features': [ 'deprecated' ] },
           '*socket-address': ['SocketAddress'],
           '*dirty-limit-throttle-time-per-round': 'uint64',
           '*dirty-limit-ring-full-time': 'uint64'} }

##
# @query-migrate:
#
# Returns information about the current migration process.  If
# migration is active there will be another json-object with RAM
# migration status and if block migration is active another one with
# block migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding).  This feature allows us to minimize migration
#     traffic for certain work loads, by sending compressed
#     differences of the pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once.  Refer to
#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently.  This essentially saves 1MB of zeroes per block on
#     the wire.  Enabling requires source and target VM to support
#     this feature.  To enable it is sufficient to enable the
#     capability on the source VM.  The feature is disabled by
#     default.  (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration.  This feature can help to reduce the migration
#     traffic, by sending compressed pages.  Please note that if
#     compress and xbzrle are both on, compress only takes effect in
#     the ram bulk stage; after that, it will be disabled and only
#     xbzrle takes effect, which can help to minimize migration
#     traffic.  The feature is disabled by default.  (since 2.4)
#
# @events: generate events for each migration state change
#     (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration.
#     (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all
#     of RAM has been migrated, pulling the remaining pages along as
#     needed.  The capability must have the same setting on both
#     source and target or migration will not even start.  NOTE: If
#     the migration fails during postcopy the VM will fail.
#     (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on the secondary side; this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service.  (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration.  (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all
#     block devices.  Default is disabled.  A possible alternative
#     uses mirror jobs to a builtin NBD server on the destination,
#     which offers more flexibility.  (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy.  (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO
#     (since 2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration.  (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine.  (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the
#     destination to ensure it is the same.  (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts.  The VM RAM is saved while the VM is running.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration.  When true, enables a zero-copy mechanism for
#     sending memory pages, if the host supports it.  Requires that
#     QEMU be permitted to use locked memory for guest RAM pages.
#     (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster.  This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# @switchover-ack: If enabled, migration will not stop the source VM
#     and complete the migration until an ACK is received from the
#     destination that it's OK to do so.  Exactly when this ACK is
#     sent depends on the migrated devices that use this feature.
#     For example, a device can use it to make sure some of its data
#     is sent and loaded in the destination before doing switchover.
#     This can reduce downtime if devices that support this
#     capability are present.  'return-path' capability must be
#     enabled to use it.  (since 8.1)
#
# @dirty-limit: If enabled, migration will throttle vCPUs as needed
#     to keep their dirty page rate within @vcpu-dirty-limit.  This
#     can improve responsiveness of large guests during live
#     migration, and can result in more stable read performance.
#     Requires KVM with accelerator property "dirty-ring-size" set.
#     (Since 8.1)
#
# @mapped-ram: Migrate using fixed offsets in the migration file for
#     each RAM page.  Requires a migration URI that supports seeking,
#     such as a file.  (since 9.0)
#
# Features:
#
# @deprecated: Member @block is deprecated.  Use blockdev-mirror with
#     NBD instead.  Member @compress is deprecated because it is
#     unreliable and untested.  It is recommended to use multifd
#     migration, which offers an alternative compression
#     implementation that is reliable and tested.
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           { 'name': 'compress', 'features': [ 'deprecated' ] },
           'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           { 'name': 'block', 'features': [ 'deprecated' ] },
           'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit', 'mapped-ram'] }
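
# The dirty-limit capability is configured together with the
# vcpu-dirty-limit migration parameter.  A possible flow, illustrative
# only and not taken from the schema (the 100 MB/s value is an
# arbitrary example):
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "dirty-limit", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "vcpu-dirty-limit": 100 } }
# <- { "return": {} }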

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus'] }

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
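
# A sketch of a multifd setup (illustrative, not part of the schema):
# enable the capability, then pick the number of channels and,
# optionally, a compression method.  "zstd" is only available when
# QEMU was built with CONFIG_ZSTD; the channel count of 4 is an
# arbitrary example:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-channels": 4,
#                     "multifd-compression": "zstd" } }
# <- { "return": {} }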

##
# @MigMode:
#
# @normal: the original form of migration.  (since 8.2)
#
# @cpr-reboot: The migrate command stops the VM and saves state to
#     the URI.  After quitting QEMU, the user resumes by running QEMU
#     -incoming.
#
#     This mode allows the user to quit QEMU, optionally update and
#     reboot the OS, and restart QEMU.  If the user reboots, the URI
#     must persist across the reboot, such as by using a file.
#
#     Unlike normal mode, the use of certain local storage options
#     does not block the migration, but the user must not modify the
#     contents of guest block devices between the quit and restart.
#
#     This mode supports VFIO devices provided the user first puts
#     the guest in the suspended runstate, such as by issuing
#     guest-suspend-ram to the QEMU guest agent.
#
#     Best performance is achieved when the memory backend is shared
#     and the @x-ignore-shared migration capability is set, but this
#     is not required.  Further, if the user reboots before
#     restarting such a configuration, the shared memory must persist
#     across the reboot, such as by backing it with a dax device.
#
#     @cpr-reboot may not be used with postcopy, background-snapshot,
#     or COLO.
#
#     (since 8.2)
##
{ 'enum': 'MigMode',
  'data': [ 'normal', 'cpr-reboot' ] }
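
# A minimal cpr-reboot sketch (illustrative; the file path is an
# arbitrary example): select the mode, then migrate to a file URI
# that will survive the reboot, and later restart QEMU with
# "-incoming" pointing at the same file.
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "mode": "cpr-reboot" } }
# <- { "return": {} }
# -> { "execute": "migrate",
#      "arguments": { "uri": "file:/var/lib/qemu/vm.cpr" } }
# <- { "return": {} }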

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.
#     (since 6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the
#     node name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }
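
# These structs are consumed by the block-bitmap-mapping migration
# parameter defined below.  A hypothetical mapping (the node and
# bitmap names are made up for illustration) could be set like this:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": {
#        "block-bitmap-mapping": [
#          { "node-name": "node0",
#            "alias": "node-alias0",
#            "bitmaps": [ { "name": "bitmap0", "alias": "bitmap-alias0" } ] } ] } }
# <- { "return": {} }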

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     9, where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration.  The compression thread count is an integer between
#     1 and 255.
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the
#     page uncompressed.  (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in
#     live migration.  The decompression thread count is an integer
#     between 1 and 255.  Usually, decompression is at least 4 times
#     as fast as compression, so setting decompress-threads to about
#     1/4 of compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as a
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage.  At the tail stage of throttling, the guest is very
#     sensitive to CPU percentage while the @cpu-throttle-increment
#     is usually excessive at that point.  If this parameter is true,
#     we will compute the ideal CPU percentage used by the guest,
#     which may exactly make the dirty rate match the dirty rate
#     threshold.  Then we will choose the smaller of the throttle
#     increment specified by @cpu-throttle-increment and the one
#     derived from the ideal CPU percentage.  Therefore, it is
#     compatible with traditional throttling, while the throttle
#     increment won't be excessive at the tail stage.  The default
#     value is false.  (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the
#     credentials must be for a 'client' endpoint, while for the
#     incoming side the credentials must be for a 'server' endpoint.
#     Setting this will enable TLS for all migrations.  The default
#     is unset, resulting in unsecured migration at the QEMU level.
#     (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished
#     name.  This object is only resolved at time of use, so can be
#     deleted and recreated on the fly while the migration server is
#     active.  If missing, it will default to denying access
#     (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: the available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, it is only used for
#     calculations when making decisions to switch over.  By default,
#     this value is zero, which means QEMU will estimate the
#     bandwidth automatically.  This can be set when the estimated
#     value is not accurate, while the user is able to guarantee such
#     bandwidth is available when switching over.  When specified
#     correctly, this can make the switchover decision much more
#     accurate.  (Since 8.2)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode.  (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same
#     backing chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to
#     99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     9, where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     20, where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such
#     aliases may for example be the corresponding names on the
#     opposite site.  The mapping must be one-to-one, but not
#     necessarily complete: On the source, unmapped bitmaps and all
#     bitmaps on unmapped nodes will be ignored.  On the destination,
#     encountering an unmapped alias in the incoming migration stream
#     will result in a report, and all further bitmap migration data
#     will then be discarded.  Note that the destination does not
#     know about bitmaps it does not receive, so there is no
#     limitation or requirement regarding the number of bitmaps
#     received, or how they are named, or on which nodes they are
#     placed.  By default (when this parameter has never been set),
#     bitmap names are mapped to themselves.  Nodes are mapped to
#     their block device name if there is one, and to their node name
#     otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated.  Use
#     blockdev-mirror with NBD instead.  Members @compress-level,
#     @compress-threads, @decompress-threads and
#     @compress-wait-thread are deprecated because @compression is
#     deprecated.
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           { 'name': 'compress-level', 'features': [ 'deprecated' ] },
           { 'name': 'compress-threads', 'features': [ 'deprecated' ] },
           { 'name': 'decompress-threads', 'features': [ 'deprecated' ] },
           { 'name': 'compress-wait-thread', 'features': [ 'deprecated' ] },
           'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           { 'name': 'block-incremental', 'features': [ 'deprecated' ] },
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit',
           'mode'] }
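
# One possible way to secure the migration channel with TLS
# (illustrative only; the object id, certificate directory and host
# name are assumptions, not taken from the schema): create a
# tls-creds-x509 object, point tls-creds at it, and set tls-hostname
# on the source if the URI does not carry a hostname.
#
# -> { "execute": "object-add",
#      "arguments": { "qom-type": "tls-creds-x509", "id": "tlsmig0",
#                     "endpoint": "client", "dir": "/etc/pki/qemu" } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "tlsmig0",
#                     "tls-hostname": "dst.example.org" } }
# <- { "return": {} }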
903# 904# Since: 2.4 905## 906{ 'enum': 'MigrationParameter', 907 'data': ['announce-initial', 'announce-max', 908 'announce-rounds', 'announce-step', 909 { 'name': 'compress-level', 'features': [ 'deprecated' ] }, 910 { 'name': 'compress-threads', 'features': [ 'deprecated' ] }, 911 { 'name': 'decompress-threads', 'features': [ 'deprecated' ] }, 912 { 'name': 'compress-wait-thread', 'features': [ 'deprecated' ] }, 913 'throttle-trigger-threshold', 914 'cpu-throttle-initial', 'cpu-throttle-increment', 915 'cpu-throttle-tailslow', 916 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', 917 'avail-switchover-bandwidth', 'downtime-limit', 918 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, 919 { 'name': 'block-incremental', 'features': [ 'deprecated' ] }, 920 'multifd-channels', 921 'xbzrle-cache-size', 'max-postcopy-bandwidth', 922 'max-cpu-throttle', 'multifd-compression', 923 'multifd-zlib-level', 'multifd-zstd-level', 924 'block-bitmap-mapping', 925 { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] }, 926 'vcpu-dirty-limit', 927 'mode'] } 928 929## 930# @MigrateSetParameters: 931# 932# @announce-initial: Initial delay (in milliseconds) before sending 933# the first announce (Since 4.0) 934# 935# @announce-max: Maximum delay (in milliseconds) between packets in 936# the announcement (Since 4.0) 937# 938# @announce-rounds: Number of self-announce packets sent after 939# migration (Since 4.0) 940# 941# @announce-step: Increase in delay (in milliseconds) between 942# subsequent packets in the announcement (Since 4.0) 943# 944# @compress-level: compression level 945# 946# @compress-threads: compression thread count 947# 948# @compress-wait-thread: Controls behavior when all compression 949# threads are currently busy. If true (default), wait for a free 950# compression thread to become available; otherwise, send the page 951# uncompressed. (Since 3.1) 952# 953# @decompress-threads: decompression thread count 954# 955# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 956# bytes_xfer_period to trigger throttling. It is expressed as 957# percentage. The default value is 50. (Since 5.0) 958# 959# @cpu-throttle-initial: Initial percentage of time guest cpus are 960# throttled when migration auto-converge is activated. The 961# default value is 20. (Since 2.7) 962# 963# @cpu-throttle-increment: throttle percentage increase each time 964# auto-converge detects that migration is not making progress. 965# The default value is 10. (Since 2.7) 966# 967# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 968# the tail stage of throttling, the Guest is very sensitive to CPU 969# percentage while the @cpu-throttle -increment is excessive 970# usually at tail stage. If this parameter is true, we will 971# compute the ideal CPU percentage used by the Guest, which may 972# exactly make the dirty rate match the dirty rate threshold. 973# Then we will choose a smaller throttle increment between the one 974# specified by @cpu-throttle-increment and the one generated by 975# ideal CPU percentage. Therefore, it is compatible to 976# traditional throttling, meanwhile the throttle increment won't 977# be excessive at tail stage. The default value is false. (Since 978# 5.1) 979# 980# @tls-creds: ID of the 'tls-creds' object that provides credentials 981# for establishing a TLS connection over the migration data 982# channel. 
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)  An empty string means that QEMU will
#     use the hostname associated with the migration URI, if any.
#     (Since 2.9)  Previously (since 2.7), this was reported by
#     omitting tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished
#     name.  (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: the available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, it is only used for
#     calculations when making decisions to switch over.  By default,
#     this value is zero, which means QEMU will estimate the
#     bandwidth automatically.  This can be set when the estimated
#     value is not accurate, while the user is able to guarantee such
#     bandwidth is available when switching over.  When specified
#     correctly, this can make the switchover decision much more
#     accurate.  (Since 8.2)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same
#     backing chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  The default
#     value is 99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     9, where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     20, where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such
#     aliases may for example be the corresponding names on the
#     opposite site.  The mapping must be one-to-one, but not
#     necessarily complete: On the source, unmapped bitmaps and all
#     bitmaps on unmapped nodes will be ignored.  On the destination,
#     encountering an unmapped alias in the incoming migration stream
#     will result in a report, and all further bitmap migration data
#     will then be discarded.  Note that the destination does not
#     know about bitmaps it does not receive, so there is no
#     limitation or requirement regarding the number of bitmaps
#     received, or how they are named, or on which nodes they are
#     placed.  By default (when this parameter has never been set),
#     bitmap names are mapped to themselves.  Nodes are mapped to
#     their block device name if there is one, and to their node name
#     otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated.  Use
#     blockdev-mirror with NBD instead.  Members @compress-level,
#     @compress-threads, @decompress-threads and
#     @compress-wait-thread are deprecated because @compression is
#     deprecated.
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# TODO: either fuse back into MigrationParameters, or make
#     MigrationParameters members mandatory
#
# Since: 2.4
##
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': { 'type': 'uint8',
                                 'features': [ 'deprecated' ] },
            '*compress-threads': { 'type': 'uint8',
                                   'features': [ 'deprecated' ] },
            '*compress-wait-thread': { 'type': 'bool',
                                       'features': [ 'deprecated' ] },
            '*decompress-threads': { 'type': 'uint8',
                                     'features': [ 'deprecated' ] },
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': { 'type': 'bool',
                                    'features': [ 'deprecated' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode'} }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-channels": 5 } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the
#     page uncompressed.  (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as a
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.
#     (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage.  At the tail stage of throttling, the guest is very
#     sensitive to CPU percentage while the @cpu-throttle-increment
#     is usually excessive at that point.  If this parameter is true,
#     we will compute the ideal CPU percentage used by the guest,
#     which may exactly make the dirty rate match the dirty rate
#     threshold.  Then we will choose the smaller of the throttle
#     increment specified by @cpu-throttle-increment and the one
#     derived from the ideal CPU percentage.  Therefore, it is
#     compatible with traditional throttling, while the throttle
#     increment won't be excessive at the tail stage.  The default
#     value is false.  (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the
#     credentials must be for a 'client' endpoint, while for the
#     incoming side the credentials must be for a 'server' endpoint.
#     An empty string means that QEMU will use plain text mode for
#     migration, rather than TLS.  (Since 2.7)  Note: 2.8 reports
#     this by omitting tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)  An empty string means that QEMU will
#     use the hostname associated with the migration URI, if any.
#     (Since 2.9)  Note: 2.8 reports this by omitting tls-hostname
#     instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished
#     name.  (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: the available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, it is only used for
#     calculations when making decisions to switch over.  By default,
#     this value is zero, which means QEMU will estimate the
#     bandwidth automatically.  This can be set when the estimated
#     value is not accurate, while the user is able to guarantee such
#     bandwidth is available when switching over.  When specified
#     correctly, this can make the switchover decision much more
#     accurate.  (Since 8.2)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same
#     backing chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to
#     99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     9, where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and
#     20, where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such
#     aliases may for example be the corresponding names on the
#     opposite site.  The mapping must be one-to-one, but not
#     necessarily complete: On the source, unmapped bitmaps and all
#     bitmaps on unmapped nodes will be ignored.  On the destination,
#     encountering an unmapped alias in the incoming migration stream
#     will result in a report, and all further bitmap migration data
#     will then be discarded.  Note that the destination does not
#     know about bitmaps it does not receive, so there is no
#     limitation or requirement regarding the number of bitmaps
#     received, or how they are named, or on which nodes they are
#     placed.  By default (when this parameter has never been set),
#     bitmap names are mapped to themselves.  Nodes are mapped to
#     their block device name if there is one, and to their node name
#     otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated.  Use
#     blockdev-mirror with NBD instead.  Members @compress-level,
#     @compress-threads, @decompress-threads and
#     @compress-wait-thread are deprecated because @compression is
#     deprecated.
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': { 'type': 'uint8',
                                 'features': [ 'deprecated' ] },
            '*compress-threads': { 'type': 'uint8',
                                   'features': [ 'deprecated' ] },
            '*compress-wait-thread': { 'type': 'bool',
                                       'features': [ 'deprecated' ] },
            '*decompress-threads': { 'type': 'uint8',
                                     'features': [ 'deprecated' ] },
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': { 'type': 'bool',
                                    'features': [ 'deprecated' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode'} }

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "query-migrate-parameters" }
# <- { "return": {
#          "multifd-channels": 2,
#          "cpu-throttle-increment": 10,
#          "cpu-throttle-initial": 20,
#          "max-bandwidth": 33554432,
#          "downtime-limit": 300
#       }
#    }
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @migrate-start-postcopy:
#
# Followup to a migration command to switch the migration to postcopy
# mode.  The postcopy-ram capability must be set on both source and
# destination before the original migration command.
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }
##
{ 'command': 'migrate-start-postcopy' }

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# Example:
#
# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "event": "MIGRATION",
#     "data": {"status": "completed"} }
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each
# pass (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# Example:
#
# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#      "event": "MIGRATION_PASS", "data": {"pass": 2} }
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }

##
# @COLOMessage:
#
# The message transmission between Primary side and Secondary side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
#     checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }

##
# @COLOMode:
#
# The COLO current mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: got failover requirement but not handled
#
# @active: in the process of doing failover
#
# @completed: finish the process of failover
#
# @relaunch: restart the failover process, from 'none' -> 'completed'
#     (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when the VM finishes COLO mode due to some errors happening
# or at the request of users.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# Example:
#
# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened.  This state does not occur in
#     the COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell QEMU that the heartbeat is lost, and request it to do takeover
# procedures.  If this command is sent to the PVM, the Primary side
# will exit COLO mode.  If sent to the Secondary, the Secondary side
# will run failover work, then take over server operation to become
# the service VM.
#
# Features:
#
# @unstable: This command is experimental.
#
# Since: 2.8
#
# Example:
#
# -> { "execute": "x-colo-lost-heartbeat" }
# <- { "return": {} }
##
{ 'command': 'x-colo-lost-heartbeat',
  'features': [ 'unstable' ],
  'if': 'CONFIG_REPLICATION' }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# Notes: This command succeeds even if there is no migration process
#     running.
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "migrate_cancel" }
# <- { "return": {} }
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Since: 2.11
#
# Example:
#
# -> { "execute": "migrate-continue", "arguments":
#      { "state": "pre-switchover" } }
# <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }

##
# @MigrationAddressType:
#
# The migration stream transport mechanisms.
#
# @socket: Migrate via socket.
#
# @exec: Direct the migration stream to another process.
#
# @rdma: Migrate via RDMA.
#
# @file: Direct the migration stream to a file.
#
# Since: 8.2
##
{ 'enum': 'MigrationAddressType',
  'data': [ 'socket', 'exec', 'rdma', 'file' ] }

##
# @FileMigrationArgs:
#
# @filename: The file to receive the migration stream
#
# @offset: The file offset where the migration stream will start
#
# Since: 8.2
##
{ 'struct': 'FileMigrationArgs',
  'data': { 'filename': 'str',
            'offset': 'uint64' } }

##
# @MigrationExecCommand:
#
# @args: command (list head) and arguments to execute.
#
# Since: 8.2
##
{ 'struct': 'MigrationExecCommand',
  'data': {'args': [ 'str' ] } }

##
# @MigrationAddress:
#
# Migration endpoint configuration.
#
# @transport: The migration stream transport mechanism
#
# Since: 8.2
##
{ 'union': 'MigrationAddress',
  'base': { 'transport' : 'MigrationAddressType'},
  'discriminator': 'transport',
  'data': {
    'socket': 'SocketAddress',
    'exec': 'MigrationExecCommand',
    'rdma': 'InetSocketAddress',
    'file': 'FileMigrationArgs' } }

##
# @MigrationChannelType:
#
# The migration channel-type request options.
#
# @main: Main outbound migration channel.
#
# Since: 8.1
##
{ 'enum': 'MigrationChannelType',
  'data': [ 'main' ] }

##
# @MigrationChannel:
#
# Migration stream channel parameters.
#
# @channel-type: Channel type for transferring packet information.
#
# @addr: Migration endpoint configuration on destination interface.
#
# Since: 8.1
##
{ 'struct': 'MigrationChannel',
  'data': {
    'channel-type': 'MigrationChannelType',
    'addr': 'MigrationAddress' } }

##
# @migrate:
#
# Migrates the currently running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# @blk: do block migration (full disk copy)
#
# @inc: incremental disk copy migration
#
# @detach: this argument exists only for compatibility reasons and is
#     ignored by QEMU
#
# @resume: resume one paused migration, default "off". (since 3.0)
#
# Features:
#
# @deprecated: Members @inc and @blk are deprecated.  Use
#     blockdev-mirror with NBD instead.
#
# Since: 0.14
#
# Notes:
#
# 1. The 'query-migrate' command should be used to check
#    migration's progress and final result (this information is
#    provided by the 'status' member)
#
# 2. All boolean arguments default to false
#
# 3. The user Monitor's "detach" argument is invalid in QMP and
#    should not be used
#
# 4. The uri argument should have the Uniform Resource Identifier
#    of the default destination VM.  This connection will be bound
#    to the default network.
#
# 5. For now, the number of migration streams is restricted to one,
#    i.e. the number of items in the 'channels' list is just 1.
#
# 6. The 'uri' and 'channels' arguments are mutually exclusive;
#    exactly one of the two should be present.
#
# Example:
#
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
#
# -> { "execute": "migrate",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "socket",
#                                    "type": "inet",
#                                    "host": "10.12.34.9",
#                                    "port": "1050" } } ] } }
# <- { "return": {} }
#
# -> { "execute": "migrate",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "exec",
#                                    "args": [ "/bin/nc", "-p", "6000",
#                                              "/some/sock" ] } } ] } }
# <- { "return": {} }
#
# -> { "execute": "migrate",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "rdma",
#                                    "host": "10.12.34.9",
#                                    "port": "1050" } } ] } }
# <- { "return": {} }
#
# -> { "execute": "migrate",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "file",
#                                    "filename": "/tmp/migfile",
#                                    "offset": "0x1000" } } ] } }
# <- { "return": {} }
#
##
{ 'command': 'migrate',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*blk': { 'type': 'bool', 'features': [ 'deprecated' ] },
           '*inc': { 'type': 'bool', 'features': [ 'deprecated' ] },
           '*detach': 'bool', '*resume': 'bool' } }

##
# @migrate-incoming:
#
# Start an incoming migration.  QEMU must have been started with
# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# Since: 2.3
#
# Notes:
#
# 1. It's a bad idea to use a string for the uri, but it needs to
#    stay compatible with -incoming and the format of the uri is
#    already exposed above libvirt.
#
# 2. QEMU must be started with -incoming defer to allow
#    migrate-incoming to be used.
#
# 3. The uri format is the same as for -incoming.
#
# 4. For now, the number of migration streams is restricted to one,
#    i.e. the number of items in the 'channels' list is just 1.
#
# 5. The 'uri' and 'channels' arguments are mutually exclusive;
#    exactly one of the two should be present.
#
# Example:
#
# -> { "execute": "migrate-incoming",
#      "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
#
# -> { "execute": "migrate-incoming",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "socket",
#                                    "type": "inet",
#                                    "host": "10.12.34.9",
#                                    "port": "1050" } } ] } }
# <- { "return": {} }
#
# -> { "execute": "migrate-incoming",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "exec",
#                                    "args": [ "/bin/nc", "-p", "6000",
#                                              "/some/sock" ] } } ] } }
# <- { "return": {} }
#
# -> { "execute": "migrate-incoming",
#      "arguments": {
#          "channels": [ { "channel-type": "main",
#                          "addr": { "transport": "rdma",
#                                    "host": "10.12.34.9",
#                                    "port": "1050" } } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-incoming',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ] } }

##
# @xen-save-devices-state:
#
# Save the state of all devices to file.  The RAM and the block
# devices of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration.  Defaults to true.  (since 2.11)
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "xen-save-devices-state",
#      "arguments": { "filename": "/tmp/save" } }
# <- { "return": {} }
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live':'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "xen-set-global-dirty-log",
#      "arguments": { "enable": true } }
# <- { "return": {} }
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file.  The RAM and the block
# devices of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# Example:
#
# -> { "execute": "xen-load-devices-state",
#      "arguments": { "filename": "/tmp/resume" } }
# <- { "return": {} }
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop.  Cannot be specified
#     if @enable is true.  The default value is false.
#
# Example:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": true, "primary": false} }
# <- { "return": {} }
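#
# A second, hypothetical exchange (the values are illustrative): since
# @failover cannot be combined with @enable being true, a failover on
# the secondary side would be requested with replication being
# disabled at the same time:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": false, "primary": false, "failover": true} }
# <- { "return": {} }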
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'CONFIG_REPLICATION' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human readable error description string, when @error is
#     'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-xen-replication-status:
#
# Query replication status while the VM is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-xen-replication-status" }
# <- { "return": { "error": false } }
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# Example:
#
# -> { "execute": "xen-colo-do-checkpoint" }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'CONFIG_REPLICATION' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode.  If COLO is running, this field will
#     return 'primary' or 'secondary'.
#
# @last-mode: COLO last running mode.  While COLO is running, this
#     field returns the same value as the @mode field; after a
#     failover it can be used to query the mode COLO was last running
#     in.  (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-colo-status:
#
# Query COLO status while the VM is running.
#
# Returns: A @COLOStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-colo-status" }
# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of the migration stream.
#
# Example:
#
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }
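
# A rough sketch of the postcopy recovery flow (illustrative only; the
# URIs below are made up): once the migration has entered the
# postcopy-paused state on both sides, the destination is given a new
# listening URI with migrate-recover, and the source then reconnects
# by re-issuing migrate with "resume": true:
#
# (destination)
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:0:5556" } }
# <- { "return": {} }
#
# (source)
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:5556", "resume": true } }
# <- { "return": {} }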

##
# @migrate-pause:
#
# Pause a migration.  Currently it only supports postcopy.
#
# Example:
#
# -> { "execute": "migrate-pause" }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }

##
# @UNPLUG_PRIMARY:
#
# Emitted from the source side of a migration when the migration state
# is WAIT_UNPLUG.  The device was unplugged by the guest operating
# system.  Device resources in QEMU are kept on standby to be able to
# re-plug it in case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# Example:
#
# <- { "event": "UNPLUG_PRIMARY",
#      "data": { "device-id": "hostdev0" },
#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }

##
# @DirtyRateVcpu:
#
# Dirty rate of a vCPU.
#
# @id: vcpu index.
#
# @dirty-rate: dirty rate.
#
# Since: 6.2
##
{ 'struct': 'DirtyRateVcpu',
  'data': { 'id': 'int', 'dirty-rate': 'int64' } }

##
# @DirtyRateStatus:
#
# Dirty page rate measurement status.
#
# @unstarted: measuring thread has not been started yet
#
# @measuring: measuring thread is running
#
# @measured: dirty page rate is measured and the results are available
#
# Since: 5.2
##
{ 'enum': 'DirtyRateStatus',
  'data': [ 'unstarted', 'measuring', 'measured'] }

##
# @DirtyRateMeasureMode:
#
# Method used to measure dirty page rate.  Differences between
# available methods are explained in @calc-dirty-rate.
#
# @page-sampling: use page sampling
#
# @dirty-ring: use dirty ring
#
# @dirty-bitmap: use dirty bitmap
#
# Since: 6.2
##
{ 'enum': 'DirtyRateMeasureMode',
  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }

##
# @TimeUnit:
#
# Specifies the unit in which a time-related value is expressed.
#
# @second: value is in seconds
#
# @millisecond: value is in milliseconds
#
# Since: 8.2
#
##
{ 'enum': 'TimeUnit',
  'data': ['second', 'millisecond'] }

##
# @DirtyRateInfo:
#
# Information about measured dirty page rate.
#
# @dirty-rate: an estimate of the dirty page rate of the VM in units
#     of MiB/s.  Value is present only when @status is 'measured'.
#
# @status: current status of dirty page rate measurements
#
# @start-time: start time in units of second for calculation
#
# @calc-time: time period for which dirty page rate was measured,
#     expressed and rounded down to @calc-time-unit.
#
# @calc-time-unit: time unit of @calc-time (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Valid only in page-sampling mode (Since 6.1)
#
# @mode: mode that was used to measure dirty page rate (Since 6.2)
#
# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
#     specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'calc-time-unit': 'TimeUnit',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start measuring the dirty page rate of the VM.  Results can be
# retrieved with @query-dirty-rate after measurements are completed.
#
# Dirty page rate is the number of pages changed in a given time
# period expressed in MiB/s.  The following methods of calculation are
# available:
#
# 1. In page sampling mode, a random subset of pages is selected and
#    hashed twice: once at the beginning of the measurement time
#    period, and once again at the end.  If the two hashes for some
#    page are different, the page is counted as changed.  Since this
#    method relies on sampling and hashing, the calculated dirty page
#    rate is only an estimate of its true value.  Increasing
#    @sample-pages improves estimation quality at the cost of higher
#    computational overhead (see the worked example after this list).
#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts the
#    resulting page faults.  Information about modified pages is
#    collected into a bitmap, where each bit corresponds to one guest
#    page.  This mode requires that the KVM accelerator property
#    "dirty-ring-size" is *not* set.
#
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring buffer.
#    This mode tracks page modifications for each vCPU separately.  It
#    requires that the KVM accelerator property "dirty-ring-size" is
#    set.
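#
# As a rough worked example of the sampling method (using the default
# @sample-pages value of 512 documented below, and assuming 4 KiB
# guest pages): one GiB of guest memory contains 1 GiB / 4 KiB =
# 262144 pages, so 512 sampled pages correspond to a sampling ratio
# of 512 / 262144, i.e. roughly 0.2% of guest memory per GiB.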
#
# @calc-time: time period for which the dirty page rate is calculated.
#     By default it is specified in seconds, but the unit can be set
#     explicitly with @calc-time-unit.  Note that larger @calc-time
#     values will typically result in smaller dirty page rates because
#     page dirtying is a one-time event.  Once some page is counted
#     as dirty during the @calc-time period, further writes to this
#     page will not increase the dirty page rate anymore.
#
# @calc-time-unit: time unit in which @calc-time is specified.
#     By default it is seconds.  (Since 8.2)
#
# @sample-pages: number of sampled pages per each GiB of guest memory.
#     Default value is 512.  For 4 KiB guest pages this corresponds to
#     a sampling ratio of 0.2%.  This argument is used only in page
#     sampling mode.  (Since 6.1)
#
# @mode: mechanism for tracking dirty pages.  Default value is
#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
#     (Since 6.1)
#
# Since: 5.2
#
# Example:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#     "sample-pages": 512} }
# <- { "return": {} }
#
# Measure the dirty rate using the dirty bitmap for 500 milliseconds:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
#     "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
#
# <- { "return": {} }
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*calc-time-unit': 'TimeUnit',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query the results of the most recent invocation of @calc-dirty-rate.
#
# @calc-time-unit: time unit in which to report calculation time.
#     By default it is reported in seconds.  (Since 8.2)
#
# Since: 5.2
#
# Examples:
#
# 1. Measurement is in progress:
#
# <- {"status": "measuring", "sample-pages": 512,
#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"}
#
# 2. Measurement has been completed:
#
# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"}
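#
# 3. Measurement has been completed in dirty-ring mode, which
#    additionally reports a per-vCPU rate (illustrative only; the
#    exact fields and figures below are hypothetical):
#
# <- {"status": "measured", "dirty-rate": 108, "mode": "dirty-ring",
#     "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second",
#     "vcpu-dirty-rate": [ {"id": 0, "dirty-rate": 56},
#                          {"id": 1, "dirty-rate": 52} ] }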
##
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
  'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of the dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set.  A
# virtual CPU's dirty page rate is a measure of its memory load.  To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# Example:
#
# -> { "execute": "set-vcpu-dirty-limit",
#      "arguments": { "dirty-rate": 200,
#                     "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of the dirty page rate for virtual CPUs.
#
# Cancel the dirty page limit for the vCPU which has been set with the
# set-vcpu-dirty-limit command.  Note that this command requires dirty
# ring support, the same as "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# Example:
#
# -> { "execute": "cancel-vcpu-dirty-limit",
#      "arguments": { "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "query-vcpu-dirty-limit"}
# <- {"return": [
#       { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#       { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of the migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Returns information about the migration threads.
#
# Returns: information about migration threads
#
# Since: 7.2
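#
# Example (illustrative; the thread name and id shown below are made
# up):
#
# -> { "execute": "query-migrationthreads" }
# <- { "return": [ { "name": "live_migration", "thread-id": 12345 } ] }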
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the time
# it takes to save the snapshot.  A future version of QEMU may ensure
# CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported.
#
# Example:
#
# -> { "execute": "snapshot-save",
#      "arguments": {
#         "job-id": "snapsave0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapsave0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapsave0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to load.
#
# @vmstate: block device node name to load vmstate from
#
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable block
# device nodes that can have changed since the original @snapshot-save
# command execution.
#
# Example:
#
# -> { "execute": "snapshot-load",
#      "arguments": {
#         "job-id": "snapload0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapload0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapload0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-load",
#                 "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to delete.
#
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Example:
#
# -> { "execute": "snapshot-delete",
#      "arguments": {
#         "job-id": "snapdelete0",
#         "tag": "my-snap",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapdelete0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }