# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages.  Always zero, only provided
#     for compatibility (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied by second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec.  (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages.  This is
#     between 0 and @dirty-sync-count * @multifd-channels.  (since
#     7.1)
#
# Features:
#
# @deprecated: Member @skipped is always zero since 1.5.3
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache miss
#
# @cache-miss-rate: rate of cache miss (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode.  (since
#     2.5)
#
# @postcopy-paused: during postcopy but paused.  (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy.  (since
#     3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance, VM can not get into
#     this state unless colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation.  (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed.  (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed'(since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly total
#     downtime in milliseconds for the guest.  (since 1.3)
#
# @expected-downtime: only present while migration is active expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap.  (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued.  This is
#     designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves.  (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge.  This is only present when
#     auto-converge has started throttling guest cpus.  (Since 2.7)
#
# @error-desc: the human readable error description string.  Clients
#     should not attempt to parse the error strings.  (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPU were blocked during
#     postcopy live migration.  This is only present when the
#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled.  (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked.  Present and non-empty when migration is blocked.
#     (since 6.0)
#
# @dirty-limit-throttle-time-per-round: Maximum throttle time
#     (in microseconds) of virtual CPUs each dirty ring full round,
#     which shows how MigrationCapability dirty-limit affects the
#     guest during live migration.  (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) for each dirty ring full round.  The value
#     equals the dirty ring memory size divided by the average dirty
#     page rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly.  Note that
#     zero means guest doesn't dirty memory.  (Since 8.1)
#
# Features:
#
# @deprecated: Member @disk is deprecated because block migration is.
#     Member @compression is deprecated because it is unreliable and
#     untested.  It is recommended to use multifd migration, which
#     offers an alternative compression implementation that is
#     reliable and tested.
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': { 'type': 'MigrationStats', 'features': [ 'deprecated' ] },
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': { 'type': 'CompressionStats', 'features': [ 'deprecated' ] },
           '*socket-address': ['SocketAddress'],
           '*dirty-limit-throttle-time-per-round': 'uint64',
           '*dirty-limit-ring-full-time': 'uint64'} }

##
# @query-migrate:
#
# Returns information about current migration process.  If migration
# is active there will be another json-object with RAM migration
# status and if block migration is active another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4.
#    Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding).
#     This feature allows us to minimize migration traffic
#     for certain work loads, by sending compressed difference of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once.  Refer to
#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently.  This essentially saves 1MB of zeroes per block on
#     the wire.  Enabling requires source and target VM to support
#     this feature.  To enable it is sufficient to enable the
#     capability on the source VM.  The feature is disabled by
#     default.  (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration.  This feature can help to reduce the migration
#     traffic, by sending compressed pages.  Please note that if
#     compress and xbzrle are both on, compress only takes effect in
#     the ram bulk stage, after that, it will be disabled and only
#     xbzrle takes effect, this can help to minimize migration
#     traffic.  The feature is disabled by default.  (since 2.4)
#
# @events: generate events for each migration state change (since
#     2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration.  (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed.  The capacity must have the same setting on both source
#     and target or migration will not even start.  NOTE: If the
#     migration fails during postcopy the VM will fail.  (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on secondary side, this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service.
#     (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration.  (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices.  Default is disabled.  A possible alternative uses
#     mirror jobs to a builtin NBD server on the destination, which
#     offers more flexibility.  (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy.  (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration.  (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine.  (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same.  (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts.  The VM RAM is saved with running VM.  (since
#     6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration.  When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it.  Requires that QEMU be
#     permitted to use locked memory for guest RAM pages.
#     (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster.  This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# @switchover-ack: If enabled, migration will not stop the source VM
#     and complete the migration until an ACK is received from the
#     destination that it's OK to do so.  Exactly when this ACK is
#     sent depends on the migrated devices that use this feature.  For
#     example, a device can use it to make sure some of its data is
#     sent and loaded in the destination before doing switchover.
#     This can reduce downtime if devices that support this capability
#     are present.  'return-path' capability must be enabled to use
#     it.  (since 8.1)
#
# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
#     keep their dirty page rate within @vcpu-dirty-limit.  This can
#     improve responsiveness of large guests during live migration,
#     and can result in more stable read performance.  Requires KVM
#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
#
# Features:
#
# @deprecated: Member @block is deprecated.  Use blockdev-mirror with
#     NBD instead.  Member @compress is deprecated because it is
#     unreliable and untested.  It is recommended to use multifd
#     migration, which offers an alternative compression
#     implementation that is reliable and tested.
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           { 'name': 'compress', 'features': [ 'deprecated' ] },
           'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           { 'name': 'block', 'features': [ 'deprecated' ] },
           'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit'] }

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false,
"capability": "compress"}, 610# {"state": true, "capability": "events"}, 611# {"state": false, "capability": "postcopy-ram"}, 612# {"state": false, "capability": "x-colo"} 613# ]} 614## 615{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} 616 617## 618# @MultiFDCompression: 619# 620# An enumeration of multifd compression methods. 621# 622# @none: no compression. 623# 624# @zlib: use zlib compression method. 625# 626# @zstd: use zstd compression method. 627# 628# Since: 5.0 629## 630{ 'enum': 'MultiFDCompression', 631 'data': [ 'none', 'zlib', 632 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } 633 634## 635# @MigMode: 636# 637# @normal: the original form of migration. (since 8.2) 638# 639# @cpr-reboot: The migrate command stops the VM and saves state to 640# the URI. After quitting QEMU, the user resumes by running 641# QEMU -incoming. 642# 643# This mode allows the user to quit QEMU, optionally update and 644# reboot the OS, and restart QEMU. If the user reboots, the URI 645# must persist across the reboot, such as by using a file. 646# 647# Unlike normal mode, the use of certain local storage options 648# does not block the migration, but the user must not modify the 649# contents of guest block devices between the quit and restart. 650# 651# This mode supports VFIO devices provided the user first puts 652# the guest in the suspended runstate, such as by issuing 653# guest-suspend-ram to the QEMU guest agent. 654# 655# Best performance is achieved when the memory backend is shared 656# and the @x-ignore-shared migration capability is set, but this 657# is not required. Further, if the user reboots before restarting 658# such a configuration, the shared memory must persist across the 659# reboot, such as by backing it with a dax device. 660# 661# @cpr-reboot may not be used with postcopy, background-snapshot, 662# or COLO. 
#
#     (since 8.2)
##
{ 'enum': 'MigMode',
  'data': [ 'normal', 'cpr-reboot' ] }

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.  (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the node
#     name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration, the compression thread count is an integer between 1
#     and 255.
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed.  (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in
#     live migration, the decompression thread count is an integer
#     between 1 and 255.  Usually, decompression is at least 4 times
#     as fast as compression, so set the decompress-threads to the
#     number about 1/4 of compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.
#     (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
#     the tail stage of throttling, the Guest is very sensitive to CPU
#     percentage while the @cpu-throttle -increment is excessive
#     usually at tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible to
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage.  The default value is false.  (Since
#     5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  Setting this will
#     enable TLS for all migrations.  The default is unset, resulting
#     in unsecured migration at the QEMU level.  (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.
#     (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: to set maximum speed for migration.  maximum speed
#     in bytes per second.  (Since 2.8)
#
# @avail-switchover-bandwidth: to set the available bandwidth that
#     migration can use during switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, but only for calculations
#     when making decisions to switchover.  By default, this value is
#     zero, which means QEMU will estimate the bandwidth
#     automatically.  This can be set when the estimated value is not
#     accurate, while the user is able to guarantee such bandwidth is
#     available when switching over.  When specified correctly, this
#     can make the switchover decision much more accurate.
#     (Since 8.2)
#
# @downtime-limit: set maximum tolerated downtime for migration.
#     maximum downtime in milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode.  (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same number that the number of sockets
#     used for migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.
#     It needs to be a multiple of the target page size and a power
#     of 2 (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated.  Use
#     blockdev-mirror with NBD instead.  Members @compress-level,
#     @compress-threads, @decompress-threads and @compress-wait-thread
#     are deprecated because @compression is deprecated.
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           { 'name': 'compress-level', 'features': [ 'deprecated' ] },
           { 'name': 'compress-threads', 'features': [ 'deprecated' ] },
           { 'name': 'decompress-threads', 'features': [ 'deprecated' ] },
           { 'name': 'compress-wait-thread', 'features': [ 'deprecated' ] },
           'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           { 'name': 'block-incremental', 'features': [ 'deprecated' ] },
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit',
           'mode'] }

##
@MigrateSetParameters: 927# 928# @announce-initial: Initial delay (in milliseconds) before sending 929# the first announce (Since 4.0) 930# 931# @announce-max: Maximum delay (in milliseconds) between packets in 932# the announcement (Since 4.0) 933# 934# @announce-rounds: Number of self-announce packets sent after 935# migration (Since 4.0) 936# 937# @announce-step: Increase in delay (in milliseconds) between 938# subsequent packets in the announcement (Since 4.0) 939# 940# @compress-level: compression level 941# 942# @compress-threads: compression thread count 943# 944# @compress-wait-thread: Controls behavior when all compression 945# threads are currently busy. If true (default), wait for a free 946# compression thread to become available; otherwise, send the page 947# uncompressed. (Since 3.1) 948# 949# @decompress-threads: decompression thread count 950# 951# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 952# bytes_xfer_period to trigger throttling. It is expressed as 953# percentage. The default value is 50. (Since 5.0) 954# 955# @cpu-throttle-initial: Initial percentage of time guest cpus are 956# throttled when migration auto-converge is activated. The 957# default value is 20. (Since 2.7) 958# 959# @cpu-throttle-increment: throttle percentage increase each time 960# auto-converge detects that migration is not making progress. 961# The default value is 10. (Since 2.7) 962# 963# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 964# the tail stage of throttling, the Guest is very sensitive to CPU 965# percentage while the @cpu-throttle -increment is excessive 966# usually at tail stage. If this parameter is true, we will 967# compute the ideal CPU percentage used by the Guest, which may 968# exactly make the dirty rate match the dirty rate threshold. 969# Then we will choose a smaller throttle increment between the one 970# specified by @cpu-throttle-increment and the one generated by 971# ideal CPU percentage. 
Therefore, it is compatible to 972# traditional throttling, meanwhile the throttle increment won't 973# be excessive at tail stage. The default value is false. (Since 974# 5.1) 975# 976# @tls-creds: ID of the 'tls-creds' object that provides credentials 977# for establishing a TLS connection over the migration data 978# channel. On the outgoing side of the migration, the credentials 979# must be for a 'client' endpoint, while for the incoming side the 980# credentials must be for a 'server' endpoint. Setting this to a 981# non-empty string enables TLS for all migrations. An empty 982# string means that QEMU will use plain text mode for migration, 983# rather than TLS (Since 2.9) Previously (since 2.7), this was 984# reported by omitting tls-creds instead. 985# 986# @tls-hostname: hostname of the target host for the migration. This 987# is required when using x509 based TLS credentials and the 988# migration URI does not already include a hostname. For example 989# if using fd: or exec: based migration, the hostname must be 990# provided so that the server's x509 certificate identity can be 991# validated. (Since 2.7) An empty string means that QEMU will use 992# the hostname associated with the migration URI, if any. (Since 993# 2.9) Previously (since 2.7), this was reported by omitting 994# tls-hostname instead. 995# 996# @tls-authz: ID of the 'authz' object subclass that provides access 997# control checking of the TLS x509 certificate distinguished name. 998# (Since 4.0) 999# 1000# @max-bandwidth: to set maximum speed for migration. maximum speed 1001# in bytes per second. (Since 2.8) 1002# 1003# @avail-switchover-bandwidth: to set the available bandwidth that 1004# migration can use during switchover phase. NOTE! This does not 1005# limit the bandwidth during switchover, but only for calculations when 1006# making decisions to switchover. By default, this value is zero, 1007# which means QEMU will estimate the bandwidth automatically. 
This can 1008# be set when the estimated value is not accurate, while the user is 1009# able to guarantee such bandwidth is available when switching over. 1010# When specified correctly, this can make the switchover decision much 1011# more accurate. (Since 8.2) 1012# 1013# @downtime-limit: set maximum tolerated downtime for migration. 1014# maximum downtime in milliseconds (Since 2.8) 1015# 1016# @x-checkpoint-delay: the delay time between two COLO checkpoints. 1017# (Since 2.8) 1018# 1019# @block-incremental: Affects how much storage is migrated when the 1020# block migration capability is enabled. When false, the entire 1021# storage backing chain is migrated into a flattened image at the 1022# destination; when true, only the active qcow2 layer is migrated 1023# and the destination must already have access to the same backing 1024# chain as was used on the source. (since 2.10) 1025# 1026# @multifd-channels: Number of channels used to migrate data in 1027# parallel. This is the same as the number of sockets 1028# used for migration. The default value is 2 (since 4.0) 1029# 1030# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1031# needs to be a multiple of the target page size and a power of 2 1032# (Since 2.11) 1033# 1034# @max-postcopy-bandwidth: Background transfer bandwidth during 1035# postcopy. Defaults to 0 (unlimited). In bytes per second. 1036# (Since 3.0) 1037# 1038# @max-cpu-throttle: maximum cpu throttle percentage. The default 1039# value is 99. (Since 3.1) 1040# 1041# @multifd-compression: Which compression method to use. Defaults to 1042# none. (Since 5.0) 1043# 1044# @multifd-zlib-level: Set the compression level to be used in live 1045# migration, the compression level is an integer between 0 and 9, 1046# where 0 means no compression, 1 means the best compression 1047# speed, and 9 means best compression ratio which will consume 1048# more CPU. Defaults to 1. 
(Since 5.0) 1049# 1050# @multifd-zstd-level: Set the compression level to be used in live 1051# migration, the compression level is an integer between 0 and 20, 1052# where 0 means no compression, 1 means the best compression 1053# speed, and 20 means best compression ratio which will consume 1054# more CPU. Defaults to 1. (Since 5.0) 1055# 1056# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1057# aliases for the purpose of dirty bitmap migration. Such aliases 1058# may for example be the corresponding names on the opposite site. 1059# The mapping must be one-to-one, but not necessarily complete: On 1060# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1061# will be ignored. On the destination, encountering an unmapped 1062# alias in the incoming migration stream will result in a report, 1063# and all further bitmap migration data will then be discarded. 1064# Note that the destination does not know about bitmaps it does 1065# not receive, so there is no limitation or requirement regarding 1066# the number of bitmaps received, or how they are named, or on 1067# which nodes they are placed. By default (when this parameter 1068# has never been set), bitmap names are mapped to themselves. 1069# Nodes are mapped to their block device name if there is one, and 1070# to their node name otherwise. (Since 5.2) 1071# 1072# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1073# limit during live migration. Should be in the range 1 to 1000ms. 1074# Defaults to 1000ms. (Since 8.1) 1075# 1076# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1077# Defaults to 1. (Since 8.1) 1078# 1079# @mode: Migration mode. See description in @MigMode. Default is 'normal'. 1080# (Since 8.2) 1081# 1082# Features: 1083# 1084# @deprecated: Member @block-incremental is deprecated. Use 1085# blockdev-mirror with NBD instead. 
Members @compress-level, 1086# @compress-threads, @decompress-threads and @compress-wait-thread 1087# are deprecated because @compression is deprecated. 1088# 1089# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period 1090# are experimental. 1091# 1092# TODO: either fuse back into MigrationParameters, or make 1093# MigrationParameters members mandatory 1094# 1095# Since: 2.4 1096## 1097{ 'struct': 'MigrateSetParameters', 1098 'data': { '*announce-initial': 'size', 1099 '*announce-max': 'size', 1100 '*announce-rounds': 'size', 1101 '*announce-step': 'size', 1102 '*compress-level': { 'type': 'uint8', 1103 'features': [ 'deprecated' ] }, 1104 '*compress-threads': { 'type': 'uint8', 1105 'features': [ 'deprecated' ] }, 1106 '*compress-wait-thread': { 'type': 'bool', 1107 'features': [ 'deprecated' ] }, 1108 '*decompress-threads': { 'type': 'uint8', 1109 'features': [ 'deprecated' ] }, 1110 '*throttle-trigger-threshold': 'uint8', 1111 '*cpu-throttle-initial': 'uint8', 1112 '*cpu-throttle-increment': 'uint8', 1113 '*cpu-throttle-tailslow': 'bool', 1114 '*tls-creds': 'StrOrNull', 1115 '*tls-hostname': 'StrOrNull', 1116 '*tls-authz': 'StrOrNull', 1117 '*max-bandwidth': 'size', 1118 '*avail-switchover-bandwidth': 'size', 1119 '*downtime-limit': 'uint64', 1120 '*x-checkpoint-delay': { 'type': 'uint32', 1121 'features': [ 'unstable' ] }, 1122 '*block-incremental': { 'type': 'bool', 1123 'features': [ 'deprecated' ] }, 1124 '*multifd-channels': 'uint8', 1125 '*xbzrle-cache-size': 'size', 1126 '*max-postcopy-bandwidth': 'size', 1127 '*max-cpu-throttle': 'uint8', 1128 '*multifd-compression': 'MultiFDCompression', 1129 '*multifd-zlib-level': 'uint8', 1130 '*multifd-zstd-level': 'uint8', 1131 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1132 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1133 'features': [ 'unstable' ] }, 1134 '*vcpu-dirty-limit': 'uint64', 1135 '*mode': 'MigMode'} } 1136 1137## 1138# @migrate-set-parameters: 1139# 1140# Set various 
migration parameters. 1141# 1142# Since: 2.4 1143# 1144# Example: 1145# 1146# -> { "execute": "migrate-set-parameters" , 1147# "arguments": { "multifd-channels": 5 } } 1148# <- { "return": {} } 1149## 1150{ 'command': 'migrate-set-parameters', 'boxed': true, 1151 'data': 'MigrateSetParameters' } 1152 1153## 1154# @MigrationParameters: 1155# 1156# The optional members aren't actually optional. 1157# 1158# @announce-initial: Initial delay (in milliseconds) before sending 1159# the first announce (Since 4.0) 1160# 1161# @announce-max: Maximum delay (in milliseconds) between packets in 1162# the announcement (Since 4.0) 1163# 1164# @announce-rounds: Number of self-announce packets sent after 1165# migration (Since 4.0) 1166# 1167# @announce-step: Increase in delay (in milliseconds) between 1168# subsequent packets in the announcement (Since 4.0) 1169# 1170# @compress-level: compression level 1171# 1172# @compress-threads: compression thread count 1173# 1174# @compress-wait-thread: Controls behavior when all compression 1175# threads are currently busy. If true (default), wait for a free 1176# compression thread to become available; otherwise, send the page 1177# uncompressed. (Since 3.1) 1178# 1179# @decompress-threads: decompression thread count 1180# 1181# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 1182# bytes_xfer_period to trigger throttling. It is expressed as 1183# percentage. The default value is 50. (Since 5.0) 1184# 1185# @cpu-throttle-initial: Initial percentage of time guest cpus are 1186# throttled when migration auto-converge is activated. (Since 1187# 2.7) 1188# 1189# @cpu-throttle-increment: throttle percentage increase each time 1190# auto-converge detects that migration is not making progress. 
1191# (Since 2.7) 1192# 1193# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 1194# the tail stage of throttling, the Guest is very sensitive to CPU 1195# percentage while the @cpu-throttle -increment is excessive 1196# usually at tail stage. If this parameter is true, we will 1197# compute the ideal CPU percentage used by the Guest, which may 1198# exactly make the dirty rate match the dirty rate threshold. 1199# Then we will choose a smaller throttle increment between the one 1200# specified by @cpu-throttle-increment and the one generated by 1201# ideal CPU percentage. Therefore, it is compatible to 1202# traditional throttling, meanwhile the throttle increment won't 1203# be excessive at tail stage. The default value is false. (Since 1204# 5.1) 1205# 1206# @tls-creds: ID of the 'tls-creds' object that provides credentials 1207# for establishing a TLS connection over the migration data 1208# channel. On the outgoing side of the migration, the credentials 1209# must be for a 'client' endpoint, while for the incoming side the 1210# credentials must be for a 'server' endpoint. An empty string 1211# means that QEMU will use plain text mode for migration, rather 1212# than TLS (Since 2.7) Note: 2.8 reports this by omitting 1213# tls-creds instead. 1214# 1215# @tls-hostname: hostname of the target host for the migration. This 1216# is required when using x509 based TLS credentials and the 1217# migration URI does not already include a hostname. For example 1218# if using fd: or exec: based migration, the hostname must be 1219# provided so that the server's x509 certificate identity can be 1220# validated. (Since 2.7) An empty string means that QEMU will use 1221# the hostname associated with the migration URI, if any. (Since 1222# 2.9) Note: 2.8 reports this by omitting tls-hostname instead. 1223# 1224# @tls-authz: ID of the 'authz' object subclass that provides access 1225# control checking of the TLS x509 certificate distinguished name. 
1226# (Since 4.0) 1227# 1228# @max-bandwidth: to set maximum speed for migration. maximum speed 1229# in bytes per second. (Since 2.8) 1230# 1231# @avail-switchover-bandwidth: to set the available bandwidth that 1232# migration can use during switchover phase. NOTE! This does not 1233# limit the bandwidth during switchover, but only for calculations when 1234# making decisions to switchover. By default, this value is zero, 1235# which means QEMU will estimate the bandwidth automatically. This can 1236# be set when the estimated value is not accurate, while the user is 1237# able to guarantee such bandwidth is available when switching over. 1238# When specified correctly, this can make the switchover decision much 1239# more accurate. (Since 8.2) 1240# 1241# @downtime-limit: set maximum tolerated downtime for migration. 1242# maximum downtime in milliseconds (Since 2.8) 1243# 1244# @x-checkpoint-delay: the delay time between two COLO checkpoints. 1245# (Since 2.8) 1246# 1247# @block-incremental: Affects how much storage is migrated when the 1248# block migration capability is enabled. When false, the entire 1249# storage backing chain is migrated into a flattened image at the 1250# destination; when true, only the active qcow2 layer is migrated 1251# and the destination must already have access to the same backing 1252# chain as was used on the source. (since 2.10) 1253# 1254# @multifd-channels: Number of channels used to migrate data in 1255# parallel. This is the same as the number of sockets 1256# used for migration. The default value is 2 (since 4.0) 1257# 1258# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1259# needs to be a multiple of the target page size and a power of 2 1260# (Since 2.11) 1261# 1262# @max-postcopy-bandwidth: Background transfer bandwidth during 1263# postcopy. Defaults to 0 (unlimited). In bytes per second. 1264# (Since 3.0) 1265# 1266# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 
1267# (Since 3.1) 1268# 1269# @multifd-compression: Which compression method to use. Defaults to 1270# none. (Since 5.0) 1271# 1272# @multifd-zlib-level: Set the compression level to be used in live 1273# migration, the compression level is an integer between 0 and 9, 1274# where 0 means no compression, 1 means the best compression 1275# speed, and 9 means best compression ratio which will consume 1276# more CPU. Defaults to 1. (Since 5.0) 1277# 1278# @multifd-zstd-level: Set the compression level to be used in live 1279# migration, the compression level is an integer between 0 and 20, 1280# where 0 means no compression, 1 means the best compression 1281# speed, and 20 means best compression ratio which will consume 1282# more CPU. Defaults to 1. (Since 5.0) 1283# 1284# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1285# aliases for the purpose of dirty bitmap migration. Such aliases 1286# may for example be the corresponding names on the opposite site. 1287# The mapping must be one-to-one, but not necessarily complete: On 1288# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1289# will be ignored. On the destination, encountering an unmapped 1290# alias in the incoming migration stream will result in a report, 1291# and all further bitmap migration data will then be discarded. 1292# Note that the destination does not know about bitmaps it does 1293# not receive, so there is no limitation or requirement regarding 1294# the number of bitmaps received, or how they are named, or on 1295# which nodes they are placed. By default (when this parameter 1296# has never been set), bitmap names are mapped to themselves. 1297# Nodes are mapped to their block device name if there is one, and 1298# to their node name otherwise. (Since 5.2) 1299# 1300# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1301# limit during live migration. Should be in the range 1 to 1000ms. 1302# Defaults to 1000ms. 
(Since 8.1) 1303# 1304# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1305# Defaults to 1. (Since 8.1) 1306# 1307# @mode: Migration mode. See description in @MigMode. Default is 'normal'. 1308# (Since 8.2) 1309# 1310# Features: 1311# 1312# @deprecated: Member @block-incremental is deprecated. Use 1313# blockdev-mirror with NBD instead. Members @compress-level, 1314# @compress-threads, @decompress-threads and @compress-wait-thread 1315# are deprecated because @compression is deprecated. 1316# 1317# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period 1318# are experimental. 1319# 1320# Since: 2.4 1321## 1322{ 'struct': 'MigrationParameters', 1323 'data': { '*announce-initial': 'size', 1324 '*announce-max': 'size', 1325 '*announce-rounds': 'size', 1326 '*announce-step': 'size', 1327 '*compress-level': { 'type': 'uint8', 1328 'features': [ 'deprecated' ] }, 1329 '*compress-threads': { 'type': 'uint8', 1330 'features': [ 'deprecated' ] }, 1331 '*compress-wait-thread': { 'type': 'bool', 1332 'features': [ 'deprecated' ] }, 1333 '*decompress-threads': { 'type': 'uint8', 1334 'features': [ 'deprecated' ] }, 1335 '*throttle-trigger-threshold': 'uint8', 1336 '*cpu-throttle-initial': 'uint8', 1337 '*cpu-throttle-increment': 'uint8', 1338 '*cpu-throttle-tailslow': 'bool', 1339 '*tls-creds': 'str', 1340 '*tls-hostname': 'str', 1341 '*tls-authz': 'str', 1342 '*max-bandwidth': 'size', 1343 '*avail-switchover-bandwidth': 'size', 1344 '*downtime-limit': 'uint64', 1345 '*x-checkpoint-delay': { 'type': 'uint32', 1346 'features': [ 'unstable' ] }, 1347 '*block-incremental': { 'type': 'bool', 1348 'features': [ 'deprecated' ] }, 1349 '*multifd-channels': 'uint8', 1350 '*xbzrle-cache-size': 'size', 1351 '*max-postcopy-bandwidth': 'size', 1352 '*max-cpu-throttle': 'uint8', 1353 '*multifd-compression': 'MultiFDCompression', 1354 '*multifd-zlib-level': 'uint8', 1355 '*multifd-zstd-level': 'uint8', 1356 '*block-bitmap-mapping': [ 
'BitmapMigrationNodeAlias' ], 1357 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1358 'features': [ 'unstable' ] }, 1359 '*vcpu-dirty-limit': 'uint64', 1360 '*mode': 'MigMode'} } 1361 1362## 1363# @query-migrate-parameters: 1364# 1365# Returns information about the current migration parameters 1366# 1367# Returns: @MigrationParameters 1368# 1369# Since: 2.4 1370# 1371# Example: 1372# 1373# -> { "execute": "query-migrate-parameters" } 1374# <- { "return": { 1375# "multifd-channels": 2, 1376# "cpu-throttle-increment": 10, 1377# "cpu-throttle-initial": 20, 1378# "max-bandwidth": 33554432, 1379# "downtime-limit": 300 1380# } 1381# } 1382## 1383{ 'command': 'query-migrate-parameters', 1384 'returns': 'MigrationParameters' } 1385 1386## 1387# @migrate-start-postcopy: 1388# 1389# Followup to a migration command to switch the migration to postcopy 1390# mode. The postcopy-ram capability must be set on both source and 1391# destination before the original migration command. 1392# 1393# Since: 2.5 1394# 1395# Example: 1396# 1397# -> { "execute": "migrate-start-postcopy" } 1398# <- { "return": {} } 1399## 1400{ 'command': 'migrate-start-postcopy' } 1401 1402## 1403# @MIGRATION: 1404# 1405# Emitted when a migration event happens 1406# 1407# @status: @MigrationStatus describing the current migration status. 
1408# 1409# Since: 2.4 1410# 1411# Example: 1412# 1413# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1414# "event": "MIGRATION", 1415# "data": {"status": "completed"} } 1416## 1417{ 'event': 'MIGRATION', 1418 'data': {'status': 'MigrationStatus'}} 1419 1420## 1421# @MIGRATION_PASS: 1422# 1423# Emitted from the source side of a migration at the start of each 1424# pass (when it syncs the dirty bitmap) 1425# 1426# @pass: An incrementing count (starting at 1 on the first pass) 1427# 1428# Since: 2.6 1429# 1430# Example: 1431# 1432# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1433# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1434## 1435{ 'event': 'MIGRATION_PASS', 1436 'data': { 'pass': 'int' } } 1437 1438## 1439# @COLOMessage: 1440# 1441# The message transmission between Primary side and Secondary side. 1442# 1443# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1444# 1445# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for 1446# checkpointing 1447# 1448# @checkpoint-reply: SVM gets PVM's checkpoint request 1449# 1450# @vmstate-send: VM's state will be sent by PVM. 1451# 1452# @vmstate-size: The total size of VMstate. 1453# 1454# @vmstate-received: VM's state has been received by SVM. 1455# 1456# @vmstate-loaded: VM's state has been loaded by SVM. 1457# 1458# Since: 2.8 1459## 1460{ 'enum': 'COLOMessage', 1461 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1462 'vmstate-send', 'vmstate-size', 'vmstate-received', 1463 'vmstate-loaded' ] } 1464 1465## 1466# @COLOMode: 1467# 1468# The COLO current mode. 1469# 1470# @none: COLO is disabled. 1471# 1472# @primary: COLO node in primary side. 1473# 1474# @secondary: COLO node in secondary side. 
1475# 1476# Since: 2.8 1477## 1478{ 'enum': 'COLOMode', 1479 'data': [ 'none', 'primary', 'secondary'] } 1480 1481## 1482# @FailoverStatus: 1483# 1484# An enumeration of COLO failover status 1485# 1486# @none: no failover has ever happened 1487# 1488# @require: got failover requirement but not handled 1489# 1490# @active: in the process of doing failover 1491# 1492# @completed: finish the process of failover 1493# 1494# @relaunch: restart the failover process, from 'none' -> 'completed' 1495# (Since 2.9) 1496# 1497# Since: 2.8 1498## 1499{ 'enum': 'FailoverStatus', 1500 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1501 1502## 1503# @COLO_EXIT: 1504# 1505# Emitted when VM finishes COLO mode due to some errors happening or 1506# at the request of users. 1507# 1508# @mode: report COLO mode when COLO exited. 1509# 1510# @reason: describes the reason for the COLO exit. 1511# 1512# Since: 3.1 1513# 1514# Example: 1515# 1516# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1517# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1518## 1519{ 'event': 'COLO_EXIT', 1520 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1521 1522## 1523# @COLOExitReason: 1524# 1525# The reason for a COLO exit. 1526# 1527# @none: failover has never happened. This state does not occur in 1528# the COLO_EXIT event, and is only visible in the result of 1529# query-colo-status. 1530# 1531# @request: COLO exit is due to an external request. 1532# 1533# @error: COLO exit is due to an internal error. 1534# 1535# @processing: COLO is currently handling a failover (since 4.0). 1536# 1537# Since: 3.1 1538## 1539{ 'enum': 'COLOExitReason', 1540 'data': [ 'none', 'request', 'error' , 'processing' ] } 1541 1542## 1543# @x-colo-lost-heartbeat: 1544# 1545# Tell qemu that heartbeat is lost, request it to do takeover 1546# procedures. If this command is sent to the PVM, the Primary side 1547# will exit COLO mode. 
If sent to the Secondary, the Secondary side 1548# will run failover work, then takes over server operation to become 1549# the service VM. 1550# 1551# Features: 1552# 1553# @unstable: This command is experimental. 1554# 1555# Since: 2.8 1556# 1557# Example: 1558# 1559# -> { "execute": "x-colo-lost-heartbeat" } 1560# <- { "return": {} } 1561## 1562{ 'command': 'x-colo-lost-heartbeat', 1563 'features': [ 'unstable' ], 1564 'if': 'CONFIG_REPLICATION' } 1565 1566## 1567# @migrate_cancel: 1568# 1569# Cancel the current executing migration process. 1570# 1571# Returns: nothing on success 1572# 1573# Notes: This command succeeds even if there is no migration process 1574# running. 1575# 1576# Since: 0.14 1577# 1578# Example: 1579# 1580# -> { "execute": "migrate_cancel" } 1581# <- { "return": {} } 1582## 1583{ 'command': 'migrate_cancel' } 1584 1585## 1586# @migrate-continue: 1587# 1588# Continue migration when it's in a paused state. 1589# 1590# @state: The state the migration is currently expected to be in 1591# 1592# Returns: nothing on success 1593# 1594# Since: 2.11 1595# 1596# Example: 1597# 1598# -> { "execute": "migrate-continue" , "arguments": 1599# { "state": "pre-switchover" } } 1600# <- { "return": {} } 1601## 1602{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1603 1604## 1605# @MigrationAddressType: 1606# 1607# The migration stream transport mechanisms. 1608# 1609# @socket: Migrate via socket. 1610# 1611# @exec: Direct the migration stream to another process. 1612# 1613# @rdma: Migrate via RDMA. 1614# 1615# @file: Direct the migration stream to a file. 
1616# 1617# Since: 8.2 1618## 1619{ 'enum': 'MigrationAddressType', 1620 'data': [ 'socket', 'exec', 'rdma', 'file' ] } 1621 1622## 1623# @FileMigrationArgs: 1624# 1625# @filename: The file to receive the migration stream 1626# 1627# @offset: The file offset where the migration stream will start 1628# 1629# Since: 8.2 1630## 1631{ 'struct': 'FileMigrationArgs', 1632 'data': { 'filename': 'str', 1633 'offset': 'uint64' } } 1634 1635## 1636# @MigrationExecCommand: 1637# 1638# @args: command (list head) and arguments to execute. 1639# 1640# Since: 8.2 1641## 1642{ 'struct': 'MigrationExecCommand', 1643 'data': {'args': [ 'str' ] } } 1644 1645## 1646# @MigrationAddress: 1647# 1648# Migration endpoint configuration. 1649# 1650# @transport: The migration stream transport mechanism 1651# 1652# Since: 8.2 1653## 1654{ 'union': 'MigrationAddress', 1655 'base': { 'transport' : 'MigrationAddressType'}, 1656 'discriminator': 'transport', 1657 'data': { 1658 'socket': 'SocketAddress', 1659 'exec': 'MigrationExecCommand', 1660 'rdma': 'InetSocketAddress', 1661 'file': 'FileMigrationArgs' } } 1662 1663## 1664# @MigrationChannelType: 1665# 1666# The migration channel-type request options. 1667# 1668# @main: Main outbound migration channel. 1669# 1670# Since: 8.1 1671## 1672{ 'enum': 'MigrationChannelType', 1673 'data': [ 'main' ] } 1674 1675## 1676# @MigrationChannel: 1677# 1678# Migration stream channel parameters. 1679# 1680# @channel-type: Channel type for transferring packet information. 1681# 1682# @addr: Migration endpoint configuration on destination interface. 1683# 1684# Since: 8.1 1685## 1686{ 'struct': 'MigrationChannel', 1687 'data': { 1688 'channel-type': 'MigrationChannelType', 1689 'addr': 'MigrationAddress' } } 1690 1691## 1692# @migrate: 1693# 1694# Migrates the current running guest to another Virtual Machine. 
1695# 1696# @uri: the Uniform Resource Identifier of the destination VM 1697# 1698# @channels: list of migration stream channels with each stream in the 1699# list connected to a destination interface endpoint. 1700# 1701# @blk: do block migration (full disk copy) 1702# 1703# @inc: incremental disk copy migration 1704# 1705# @detach: this argument exists only for compatibility reasons and is 1706# ignored by QEMU 1707# 1708# @resume: resume one paused migration, default "off". (since 3.0) 1709# 1710# Features: 1711# 1712# @deprecated: Members @inc and @blk are deprecated. Use 1713# blockdev-mirror with NBD instead. 1714# 1715# Returns: nothing on success 1716# 1717# Since: 0.14 1718# 1719# Notes: 1720# 1721# 1. The 'query-migrate' command should be used to check 1722# migration's progress and final result (this information is 1723# provided by the 'status' member) 1724# 1725# 2. All boolean arguments default to false 1726# 1727# 3. The user Monitor's "detach" argument is invalid in QMP and 1728# should not be used 1729# 1730# 4. The uri argument should have the Uniform Resource Identifier 1731# of default destination VM. This connection will be bound to 1732# default network. 1733# 1734# 5. For now, number of migration streams is restricted to one, 1735# i.e number of items in 'channels' list is just 1. 1736# 1737# 6. The 'uri' and 'channels' arguments are mutually exclusive; 1738# exactly one of the two should be present. 
1739# 1740# Example: 1741# 1742# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1743# <- { "return": {} } 1744# 1745# -> { "execute": "migrate", 1746# "arguments": { 1747# "channels": [ { "channel-type": "main", 1748# "addr": { "transport": "socket", 1749# "type": "inet", 1750# "host": "10.12.34.9", 1751# "port": "1050" } } ] } } 1752# <- { "return": {} } 1753# 1754# -> { "execute": "migrate", 1755# "arguments": { 1756# "channels": [ { "channel-type": "main", 1757# "addr": { "transport": "exec", 1758# "args": [ "/bin/nc", "-p", "6000", 1759# "/some/sock" ] } } ] } } 1760# <- { "return": {} } 1761# 1762# -> { "execute": "migrate", 1763# "arguments": { 1764# "channels": [ { "channel-type": "main", 1765# "addr": { "transport": "rdma", 1766# "host": "10.12.34.9", 1767# "port": "1050" } } ] } } 1768# <- { "return": {} } 1769# 1770# -> { "execute": "migrate", 1771# "arguments": { 1772# "channels": [ { "channel-type": "main", 1773# "addr": { "transport": "file", 1774# "filename": "/tmp/migfile", 1775# "offset": "0x1000" } } ] } } 1776# <- { "return": {} } 1777# 1778## 1779{ 'command': 'migrate', 1780 'data': {'*uri': 'str', 1781 '*channels': [ 'MigrationChannel' ], 1782 '*blk': { 'type': 'bool', 'features': [ 'deprecated' ] }, 1783 '*inc': { 'type': 'bool', 'features': [ 'deprecated' ] }, 1784 '*detach': 'bool', '*resume': 'bool' } } 1785 1786## 1787# @migrate-incoming: 1788# 1789# Start an incoming migration, the qemu must have been started with 1790# -incoming defer 1791# 1792# @uri: The Uniform Resource Identifier identifying the source or 1793# address to listen on 1794# 1795# @channels: list of migration stream channels with each stream in the 1796# list connected to a destination interface endpoint. 1797# 1798# Returns: nothing on success 1799# 1800# Since: 2.3 1801# 1802# Notes: 1803# 1804# 1. 
It's a bad idea to use a string for the uri, but it needs to 1805# stay compatible with -incoming and the format of the uri is 1806# already exposed above libvirt. 1807# 1808# 2. QEMU must be started with -incoming defer to allow 1809# migrate-incoming to be used. 1810# 1811# 3. The uri format is the same as for -incoming 1812# 1813# 4. For now, number of migration streams is restricted to one, 1814# i.e number of items in 'channels' list is just 1. 1815# 1816# 5. The 'uri' and 'channels' arguments are mutually exclusive; 1817# exactly one of the two should be present. 1818# 1819# Example: 1820# 1821# -> { "execute": "migrate-incoming", 1822# "arguments": { "uri": "tcp:0:4446" } } 1823# <- { "return": {} } 1824# 1825# -> { "execute": "migrate-incoming", 1826# "arguments": { 1827# "channels": [ { "channel-type": "main", 1828# "addr": { "transport": "socket", 1829# "type": "inet", 1830# "host": "10.12.34.9", 1831# "port": "1050" } } ] } } 1832# <- { "return": {} } 1833# 1834# -> { "execute": "migrate-incoming", 1835# "arguments": { 1836# "channels": [ { "channel-type": "main", 1837# "addr": { "transport": "exec", 1838# "args": [ "/bin/nc", "-p", "6000", 1839# "/some/sock" ] } } ] } } 1840# <- { "return": {} } 1841# 1842# -> { "execute": "migrate-incoming", 1843# "arguments": { 1844# "channels": [ { "channel-type": "main", 1845# "addr": { "transport": "rdma", 1846# "host": "10.12.34.9", 1847# "port": "1050" } } ] } } 1848# <- { "return": {} } 1849## 1850{ 'command': 'migrate-incoming', 1851 'data': {'*uri': 'str', 1852 '*channels': [ 'MigrationChannel' ] } } 1853 1854## 1855# @xen-save-devices-state: 1856# 1857# Save the state of all devices to file. The RAM and the block 1858# devices of the VM are not saved by this command. 1859# 1860# @filename: the file to save the state of the devices to as binary 1861# data. See xen-save-devices-state.txt for a description of the 1862# binary format. 
#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration.  Defaults to true.  (since 2.11)
#
# Returns: Nothing on success
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "xen-save-devices-state",
#      "arguments": { "filename": "/tmp/save" } }
# <- { "return": {} }
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live':'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Returns: nothing
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "xen-set-global-dirty-log",
#      "arguments": { "enable": true } }
# <- { "return": {} }
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file.  The RAM and the block
# devices of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# Example:
#
# -> { "execute": "xen-load-devices-state",
#      "arguments": { "filename": "/tmp/resume" } }
# <- { "return": {} }
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop.  It cannot be
#     specified if @enable is true.  The default value is false.
#
# Returns: nothing.
1932# 1933# Example: 1934# 1935# -> { "execute": "xen-set-replication", 1936# "arguments": {"enable": true, "primary": false} } 1937# <- { "return": {} } 1938# 1939# Since: 2.9 1940## 1941{ 'command': 'xen-set-replication', 1942 'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' }, 1943 'if': 'CONFIG_REPLICATION' } 1944 1945## 1946# @ReplicationStatus: 1947# 1948# The result format for 'query-xen-replication-status'. 1949# 1950# @error: true if an error happened, false if replication is normal. 1951# 1952# @desc: the human readable error description string, when @error is 1953# 'true'. 1954# 1955# Since: 2.9 1956## 1957{ 'struct': 'ReplicationStatus', 1958 'data': { 'error': 'bool', '*desc': 'str' }, 1959 'if': 'CONFIG_REPLICATION' } 1960 1961## 1962# @query-xen-replication-status: 1963# 1964# Query replication status while the vm is running. 1965# 1966# Returns: A @ReplicationStatus object showing the status. 1967# 1968# Example: 1969# 1970# -> { "execute": "query-xen-replication-status" } 1971# <- { "return": { "error": false } } 1972# 1973# Since: 2.9 1974## 1975{ 'command': 'query-xen-replication-status', 1976 'returns': 'ReplicationStatus', 1977 'if': 'CONFIG_REPLICATION' } 1978 1979## 1980# @xen-colo-do-checkpoint: 1981# 1982# Xen uses this command to notify replication to trigger a checkpoint. 1983# 1984# Returns: nothing. 1985# 1986# Example: 1987# 1988# -> { "execute": "xen-colo-do-checkpoint" } 1989# <- { "return": {} } 1990# 1991# Since: 2.9 1992## 1993{ 'command': 'xen-colo-do-checkpoint', 1994 'if': 'CONFIG_REPLICATION' } 1995 1996## 1997# @COLOStatus: 1998# 1999# The result format for 'query-colo-status'. 2000# 2001# @mode: COLO running mode. If COLO is running, this field will 2002# return 'primary' or 'secondary'. 2003# 2004# @last-mode: COLO last running mode. If COLO is running, this field 2005# will return same like mode field, after failover we can use this 2006# field to get last colo mode. 
(since 4.0) 2007# 2008# @reason: describes the reason for the COLO exit. 2009# 2010# Since: 3.1 2011## 2012{ 'struct': 'COLOStatus', 2013 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 2014 'reason': 'COLOExitReason' }, 2015 'if': 'CONFIG_REPLICATION' } 2016 2017## 2018# @query-colo-status: 2019# 2020# Query COLO status while the vm is running. 2021# 2022# Returns: A @COLOStatus object showing the status. 2023# 2024# Example: 2025# 2026# -> { "execute": "query-colo-status" } 2027# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 2028# 2029# Since: 3.1 2030## 2031{ 'command': 'query-colo-status', 2032 'returns': 'COLOStatus', 2033 'if': 'CONFIG_REPLICATION' } 2034 2035## 2036# @migrate-recover: 2037# 2038# Provide a recovery migration stream URI. 2039# 2040# @uri: the URI to be used for the recovery of migration stream. 2041# 2042# Returns: nothing. 2043# 2044# Example: 2045# 2046# -> { "execute": "migrate-recover", 2047# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 2048# <- { "return": {} } 2049# 2050# Since: 3.0 2051## 2052{ 'command': 'migrate-recover', 2053 'data': { 'uri': 'str' }, 2054 'allow-oob': true } 2055 2056## 2057# @migrate-pause: 2058# 2059# Pause a migration. Currently it only supports postcopy. 2060# 2061# Returns: nothing. 2062# 2063# Example: 2064# 2065# -> { "execute": "migrate-pause" } 2066# <- { "return": {} } 2067# 2068# Since: 3.0 2069## 2070{ 'command': 'migrate-pause', 'allow-oob': true } 2071 2072## 2073# @UNPLUG_PRIMARY: 2074# 2075# Emitted from source side of a migration when migration state is 2076# WAIT_UNPLUG. Device was unplugged by guest operating system. Device 2077# resources in QEMU are kept on standby to be able to re-plug it in 2078# case of migration failure. 
2079# 2080# @device-id: QEMU device id of the unplugged device 2081# 2082# Since: 4.2 2083# 2084# Example: 2085# 2086# <- { "event": "UNPLUG_PRIMARY", 2087# "data": { "device-id": "hostdev0" }, 2088# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 2089## 2090{ 'event': 'UNPLUG_PRIMARY', 2091 'data': { 'device-id': 'str' } } 2092 2093## 2094# @DirtyRateVcpu: 2095# 2096# Dirty rate of vcpu. 2097# 2098# @id: vcpu index. 2099# 2100# @dirty-rate: dirty rate. 2101# 2102# Since: 6.2 2103## 2104{ 'struct': 'DirtyRateVcpu', 2105 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 2106 2107## 2108# @DirtyRateStatus: 2109# 2110# Dirty page rate measurement status. 2111# 2112# @unstarted: measuring thread has not been started yet 2113# 2114# @measuring: measuring thread is running 2115# 2116# @measured: dirty page rate is measured and the results are available 2117# 2118# Since: 5.2 2119## 2120{ 'enum': 'DirtyRateStatus', 2121 'data': [ 'unstarted', 'measuring', 'measured'] } 2122 2123## 2124# @DirtyRateMeasureMode: 2125# 2126# Method used to measure dirty page rate. Differences between 2127# available methods are explained in @calc-dirty-rate. 2128# 2129# @page-sampling: use page sampling 2130# 2131# @dirty-ring: use dirty ring 2132# 2133# @dirty-bitmap: use dirty bitmap 2134# 2135# Since: 6.2 2136## 2137{ 'enum': 'DirtyRateMeasureMode', 2138 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 2139 2140## 2141# @TimeUnit: 2142# 2143# Specifies unit in which time-related value is specified. 2144# 2145# @second: value is in seconds 2146# 2147# @millisecond: value is in milliseconds 2148# 2149# Since: 8.2 2150# 2151## 2152{ 'enum': 'TimeUnit', 2153 'data': ['second', 'millisecond'] } 2154 2155## 2156# @DirtyRateInfo: 2157# 2158# Information about measured dirty page rate. 2159# 2160# @dirty-rate: an estimate of the dirty page rate of the VM in units 2161# of MiB/s. Value is present only when @status is 'measured'. 
2162# 2163# @status: current status of dirty page rate measurements 2164# 2165# @start-time: start time in units of second for calculation 2166# 2167# @calc-time: time period for which dirty page rate was measured, 2168# expressed and rounded down to @calc-time-unit. 2169# 2170# @calc-time-unit: time unit of @calc-time (Since 8.2) 2171# 2172# @sample-pages: number of sampled pages per GiB of guest memory. 2173# Valid only in page-sampling mode (Since 6.1) 2174# 2175# @mode: mode that was used to measure dirty page rate (Since 6.2) 2176# 2177# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was 2178# specified (Since 6.2) 2179# 2180# Since: 5.2 2181## 2182{ 'struct': 'DirtyRateInfo', 2183 'data': {'*dirty-rate': 'int64', 2184 'status': 'DirtyRateStatus', 2185 'start-time': 'int64', 2186 'calc-time': 'int64', 2187 'calc-time-unit': 'TimeUnit', 2188 'sample-pages': 'uint64', 2189 'mode': 'DirtyRateMeasureMode', 2190 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 2191 2192## 2193# @calc-dirty-rate: 2194# 2195# Start measuring dirty page rate of the VM. Results can be retrieved 2196# with @query-dirty-rate after measurements are completed. 2197# 2198# Dirty page rate is the number of pages changed in a given time 2199# period expressed in MiB/s. The following methods of calculation are 2200# available: 2201# 2202# 1. In page sampling mode, a random subset of pages are selected and 2203# hashed twice: once at the beginning of measurement time period, 2204# and once again at the end. If two hashes for some page are 2205# different, the page is counted as changed. Since this method 2206# relies on sampling and hashing, calculated dirty page rate is 2207# only an estimate of its true value. Increasing @sample-pages 2208# improves estimation quality at the cost of higher computational 2209# overhead. 2210# 2211# 2. Dirty bitmap mode captures writes to memory (for example by 2212# temporarily revoking write access to all pages) and counting page 2213# faults. 
Information about modified pages is collected into a 2214# bitmap, where each bit corresponds to one guest page. This mode 2215# requires that KVM accelerator property "dirty-ring-size" is *not* 2216# set. 2217# 2218# 3. Dirty ring mode is similar to dirty bitmap mode, but the 2219# information about modified pages is collected into ring buffer. 2220# This mode tracks page modification per each vCPU separately. It 2221# requires that KVM accelerator property "dirty-ring-size" is set. 2222# 2223# @calc-time: time period for which dirty page rate is calculated. 2224# By default it is specified in seconds, but the unit can be set 2225# explicitly with @calc-time-unit. Note that larger @calc-time 2226# values will typically result in smaller dirty page rates because 2227# page dirtying is a one-time event. Once some page is counted 2228# as dirty during @calc-time period, further writes to this page 2229# will not increase dirty page rate anymore. 2230# 2231# @calc-time-unit: time unit in which @calc-time is specified. 2232# By default it is seconds. (Since 8.2) 2233# 2234# @sample-pages: number of sampled pages per each GiB of guest memory. 2235# Default value is 512. For 4KiB guest pages this corresponds to 2236# sampling ratio of 0.2%. This argument is used only in page 2237# sampling mode. (Since 6.1) 2238# 2239# @mode: mechanism for tracking dirty pages. Default value is 2240# 'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'. 
#     (Since 6.1)
#
# Since: 5.2
#
# Example:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#     "sample-pages": 512} }
# <- { "return": {} }
#
# Measure dirty rate using dirty bitmap for 500 milliseconds:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
#     "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
#
# <- { "return": {} }
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*calc-time-unit': 'TimeUnit',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query results of the most recent invocation of @calc-dirty-rate.
#
# @calc-time-unit: time unit in which to report calculation time.
#     By default it is reported in seconds.  (Since 8.2)
#
# Since: 5.2
#
# Examples:
#
# 1. Measurement is in progress:
#
# <- {"status": "measuring", "sample-pages": 512,
#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"}
#
# 2. Measurement has been completed:
#
# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"}
##
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
  'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set.  A
# virtual CPU's dirty page rate is a measure of its memory load.  To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "set-vcpu-dirty-limit",
#     "arguments": { "dirty-rate": 200,
#                    "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page limit for the vCPU which has been set with
# set-vcpu-dirty-limit command.  Note that this command requires
# support from dirty ring, same as the "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "cancel-vcpu-dirty-limit",
#     "arguments": { "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "query-vcpu-dirty-limit"}
# <- {"return": [
#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Returns information about migration threads
#
# data: migration thread name
#
# Returns: information about migration threads
#
# Since: 7.2
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the time
# it takes to save the snapshot.  A future version of QEMU may ensure
# CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
2428# 2429# If @tag already exists, an error will be reported 2430# 2431# Returns: nothing 2432# 2433# Example: 2434# 2435# -> { "execute": "snapshot-save", 2436# "arguments": { 2437# "job-id": "snapsave0", 2438# "tag": "my-snap", 2439# "vmstate": "disk0", 2440# "devices": ["disk0", "disk1"] 2441# } 2442# } 2443# <- { "return": { } } 2444# <- {"event": "JOB_STATUS_CHANGE", 2445# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, 2446# "data": {"status": "created", "id": "snapsave0"}} 2447# <- {"event": "JOB_STATUS_CHANGE", 2448# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, 2449# "data": {"status": "running", "id": "snapsave0"}} 2450# <- {"event": "STOP", 2451# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } 2452# <- {"event": "RESUME", 2453# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } 2454# <- {"event": "JOB_STATUS_CHANGE", 2455# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, 2456# "data": {"status": "waiting", "id": "snapsave0"}} 2457# <- {"event": "JOB_STATUS_CHANGE", 2458# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, 2459# "data": {"status": "pending", "id": "snapsave0"}} 2460# <- {"event": "JOB_STATUS_CHANGE", 2461# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, 2462# "data": {"status": "concluded", "id": "snapsave0"}} 2463# -> {"execute": "query-jobs"} 2464# <- {"return": [{"current-progress": 1, 2465# "status": "concluded", 2466# "total-progress": 1, 2467# "type": "snapshot-save", 2468# "id": "snapsave0"}]} 2469# 2470# Since: 6.0 2471## 2472{ 'command': 'snapshot-save', 2473 'data': { 'job-id': 'str', 2474 'tag': 'str', 2475 'vmstate': 'str', 2476 'devices': ['str'] } } 2477 2478## 2479# @snapshot-load: 2480# 2481# Load a VM snapshot 2482# 2483# @job-id: identifier for the newly created job 2484# 2485# @tag: name of the snapshot to load. 
2486# 2487# @vmstate: block device node name to load vmstate from 2488# 2489# @devices: list of block device node names to load a snapshot from 2490# 2491# Applications should not assume that the snapshot load is complete 2492# when this command returns. The job commands / events must be used 2493# to determine completion and to fetch details of any errors that 2494# arise. 2495# 2496# Note that execution of the guest CPUs will be stopped during the 2497# time it takes to load the snapshot. 2498# 2499# It is strongly recommended that @devices contain all writable block 2500# device nodes that can have changed since the original @snapshot-save 2501# command execution. 2502# 2503# Returns: nothing 2504# 2505# Example: 2506# 2507# -> { "execute": "snapshot-load", 2508# "arguments": { 2509# "job-id": "snapload0", 2510# "tag": "my-snap", 2511# "vmstate": "disk0", 2512# "devices": ["disk0", "disk1"] 2513# } 2514# } 2515# <- { "return": { } } 2516# <- {"event": "JOB_STATUS_CHANGE", 2517# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, 2518# "data": {"status": "created", "id": "snapload0"}} 2519# <- {"event": "JOB_STATUS_CHANGE", 2520# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, 2521# "data": {"status": "running", "id": "snapload0"}} 2522# <- {"event": "STOP", 2523# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } 2524# <- {"event": "RESUME", 2525# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } 2526# <- {"event": "JOB_STATUS_CHANGE", 2527# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, 2528# "data": {"status": "waiting", "id": "snapload0"}} 2529# <- {"event": "JOB_STATUS_CHANGE", 2530# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, 2531# "data": {"status": "pending", "id": "snapload0"}} 2532# <- {"event": "JOB_STATUS_CHANGE", 2533# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, 2534# "data": {"status": "concluded", "id": "snapload0"}} 2535# -> {"execute": 
"query-jobs"} 2536# <- {"return": [{"current-progress": 1, 2537# "status": "concluded", 2538# "total-progress": 1, 2539# "type": "snapshot-load", 2540# "id": "snapload0"}]} 2541# 2542# Since: 6.0 2543## 2544{ 'command': 'snapshot-load', 2545 'data': { 'job-id': 'str', 2546 'tag': 'str', 2547 'vmstate': 'str', 2548 'devices': ['str'] } } 2549 2550## 2551# @snapshot-delete: 2552# 2553# Delete a VM snapshot 2554# 2555# @job-id: identifier for the newly created job 2556# 2557# @tag: name of the snapshot to delete. 2558# 2559# @devices: list of block device node names to delete a snapshot from 2560# 2561# Applications should not assume that the snapshot delete is complete 2562# when this command returns. The job commands / events must be used 2563# to determine completion and to fetch details of any errors that 2564# arise. 2565# 2566# Returns: nothing 2567# 2568# Example: 2569# 2570# -> { "execute": "snapshot-delete", 2571# "arguments": { 2572# "job-id": "snapdelete0", 2573# "tag": "my-snap", 2574# "devices": ["disk0", "disk1"] 2575# } 2576# } 2577# <- { "return": { } } 2578# <- {"event": "JOB_STATUS_CHANGE", 2579# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, 2580# "data": {"status": "created", "id": "snapdelete0"}} 2581# <- {"event": "JOB_STATUS_CHANGE", 2582# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 2583# "data": {"status": "running", "id": "snapdelete0"}} 2584# <- {"event": "JOB_STATUS_CHANGE", 2585# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, 2586# "data": {"status": "waiting", "id": "snapdelete0"}} 2587# <- {"event": "JOB_STATUS_CHANGE", 2588# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, 2589# "data": {"status": "pending", "id": "snapdelete0"}} 2590# <- {"event": "JOB_STATUS_CHANGE", 2591# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, 2592# "data": {"status": "concluded", "id": "snapdelete0"}} 2593# -> {"execute": "query-jobs"} 2594# <- {"return": [{"current-progress": 1, 
2595# "status": "concluded", 2596# "total-progress": 1, 2597# "type": "snapshot-delete", 2598# "id": "snapdelete0"}]} 2599# 2600# Since: 6.0 2601## 2602{ 'command': 'snapshot-delete', 2603 'data': { 'job-id': 'str', 2604 'tag': 'str', 2605 'devices': ['str'] } } 2606