1 # -*- Mode: Python -*-
2 # vim: filetype=python
3 #
4
5 ##
6 # = Migration
7 ##
8
9 { 'include': 'common.json' }
10 { 'include': 'sockets.json' }
11
12 ##
13 # @MigrationStats:
14 #
15 # Detailed migration status.
16 #
17 # @transferred: amount of bytes already transferred to the target VM
18 #
19 # @remaining: amount of bytes remaining to be transferred to the
20 # target VM
21 #
22 # @total: total amount of bytes involved in the migration process
23 #
24 # @duplicate: number of duplicate (zero) pages (since 1.2)
25 #
26 # @skipped: number of skipped zero pages (since 1.5)
27 #
28 # @normal: number of normal pages (since 1.2)
29 #
30 # @normal-bytes: number of normal bytes sent (since 1.2)
31 #
32 # @dirty-pages-rate: number of pages dirtied per second by the guest
33 # (since 1.3)
34 #
35 # @mbps: throughput in megabits/sec. (since 1.6)
36 #
37 # @dirty-sync-count: number of times that dirty ram was synchronized
38 # (since 2.1)
39 #
40 # @postcopy-requests: The number of page requests received from the
41 # destination (since 2.7)
42 #
43 # @page-size: The number of bytes per page for the various page-based
44 # statistics (since 2.10)
45 #
46 # @multifd-bytes: The number of bytes sent through multifd (since 3.0)
47 #
48 # @pages-per-second: the number of memory pages transferred per second
49 # (Since 4.0)
50 #
51 # @precopy-bytes: The number of bytes sent in the pre-copy phase
52 # (since 7.0).
53 #
54 # @downtime-bytes: The number of bytes sent while the guest is paused
55 # (since 7.0).
56 #
57 # @postcopy-bytes: The number of bytes sent during the post-copy phase
58 # (since 7.0).
59 #
60 # @dirty-sync-missed-zero-copy: Number of times dirty RAM
61 # synchronization could not avoid copying dirty pages. This is
62 # between 0 and @dirty-sync-count * @multifd-channels. (since
63 # 7.1)
64 #
65 # Since: 0.14
66 ##
67 { 'struct': 'MigrationStats',
68 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
69 'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
70 'normal-bytes': 'int', 'dirty-pages-rate': 'int',
71 'mbps': 'number', 'dirty-sync-count': 'int',
72 'postcopy-requests': 'int', 'page-size': 'int',
73 'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
74 'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
75 'postcopy-bytes': 'uint64',
76 'dirty-sync-missed-zero-copy': 'uint64' } }
77
78 ##
79 # @XBZRLECacheStats:
80 #
81 # Detailed XBZRLE migration cache statistics
82 #
83 # @cache-size: XBZRLE cache size
84 #
85 # @bytes: amount of bytes already transferred to the target VM
86 #
87 # @pages: amount of pages transferred to the target VM
88 #
89 # @cache-miss: number of cache misses
90 #
91 # @cache-miss-rate: rate of cache misses (since 2.1)
92 #
93 # @encoding-rate: rate of encoded bytes (since 5.1)
94 #
95 # @overflow: number of overflows
96 #
97 # Since: 1.2
98 ##
99 { 'struct': 'XBZRLECacheStats',
100 'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
101 'cache-miss': 'int', 'cache-miss-rate': 'number',
102 'encoding-rate': 'number', 'overflow': 'int' } }
103
104 ##
105 # @CompressionStats:
106 #
107 # Detailed migration compression statistics
108 #
109 # @pages: amount of pages compressed and transferred to the target VM
110 #
111 # @busy: count of times that no free thread was available to compress
112 # data
113 #
114 # @busy-rate: rate of thread busy
115 #
116 # @compressed-size: amount of bytes after compression
117 #
118 # @compression-rate: rate of compressed size
119 #
120 # Since: 3.1
121 ##
122 { 'struct': 'CompressionStats',
123 'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
124 'compressed-size': 'int', 'compression-rate': 'number' } }
125
126 ##
127 # @MigrationStatus:
128 #
129 # An enumeration of migration status.
130 #
131 # @none: no migration has ever happened.
132 #
133 # @setup: migration process has been initiated.
134 #
135 # @cancelling: in the process of cancelling migration.
136 #
137 # @cancelled: cancelling migration is finished.
138 #
139 # @active: in the process of doing migration.
140 #
141 # @postcopy-active: like active, but now in postcopy mode. (since
142 # 2.5)
143 #
144 # @postcopy-paused: during postcopy but paused. (since 3.0)
145 #
146 # @postcopy-recover: trying to recover from a paused postcopy. (since
147 # 3.0)
148 #
149 # @completed: migration is finished.
150 #
151 # @failed: some error occurred during migration process.
152 #
153 # @colo: VM is in the process of fault tolerance; VM cannot get into
154 #     this state unless the colo capability is enabled for migration.
155 #     (since 2.8)
156 #
157 # @pre-switchover: Paused before device serialisation. (since 2.11)
158 #
159 # @device: During device serialisation when pause-before-switchover is
160 # enabled (since 2.11)
161 #
162 # @wait-unplug: wait for device unplug request by guest OS to be
163 # completed. (since 4.2)
164 #
165 # Since: 2.3
166 ##
167 { 'enum': 'MigrationStatus',
168 'data': [ 'none', 'setup', 'cancelling', 'cancelled',
169 'active', 'postcopy-active', 'postcopy-paused',
170 'postcopy-recover', 'completed', 'failed', 'colo',
171 'pre-switchover', 'device', 'wait-unplug' ] }
172 ##
173 # @VfioStats:
174 #
175 # Detailed VFIO devices migration statistics
176 #
177 # @transferred: amount of bytes transferred to the target VM by VFIO
178 # devices
179 #
180 # Since: 5.2
181 ##
182 { 'struct': 'VfioStats',
183 'data': {'transferred': 'int' } }
184
185 ##
186 # @MigrationInfo:
187 #
188 # Information about current migration process.
189 #
190 # @status: @MigrationStatus describing the current migration status.
191 # If this field is not returned, no migration process has been
192 # initiated
193 #
194 # @ram: @MigrationStats containing detailed migration status, only
195 #     returned if status is 'active' or 'completed' (since 1.2)
196 #
197 # @disk: @MigrationStats containing detailed disk migration status,
198 # only returned if status is 'active' and it is a block migration
199 #
200 # @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
201 # migration statistics, only returned if XBZRLE feature is on and
202 # status is 'active' or 'completed' (since 1.2)
203 #
204 # @total-time: total amount of milliseconds since migration started.
205 # If migration has ended, it returns the total migration time.
206 # (since 1.2)
207 #
208 # @downtime: only present when migration finishes correctly; total
209 #     downtime in milliseconds for the guest. (since 1.3)
210 #
211 # @expected-downtime: only present while migration is active; expected
212 #     downtime in milliseconds for the guest in the last walk of the
213 #     dirty bitmap. (since 1.3)
214 #
215 # @setup-time: amount of setup time in milliseconds *before* the
216 # iterations begin but *after* the QMP command is issued. This is
217 # designed to provide an accounting of any activities (such as
218 # RDMA pinning) which may be expensive, but do not actually occur
219 # during the iterative migration rounds themselves. (since 1.6)
220 #
221 # @cpu-throttle-percentage: percentage of time guest cpus are being
222 # throttled during auto-converge. This is only present when
223 # auto-converge has started throttling guest cpus. (Since 2.7)
224 #
225 # @error-desc: the human readable error description string, when
226 # @status is 'failed'. Clients should not attempt to parse the
227 # error strings. (Since 2.7)
228 #
229 # @postcopy-blocktime: total time when all vCPUs were blocked during
230 # postcopy live migration. This is only present when the
231 # postcopy-blocktime migration capability is enabled. (Since 3.0)
232 #
233 # @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
234 # This is only present when the postcopy-blocktime migration
235 # capability is enabled. (Since 3.0)
236 #
237 # @compression: migration compression statistics, only returned if
238 # compression feature is on and status is 'active' or 'completed'
239 # (Since 3.1)
240 #
241 # @socket-address: Only used for tcp, to know what the real port is
242 # (Since 4.0)
243 #
244 # @vfio: @VfioStats containing detailed VFIO devices migration
245 # statistics, only returned if VFIO device is present, migration
246 # is supported by all VFIO devices and status is 'active' or
247 # 'completed' (since 5.2)
248 #
249 # @blocked-reasons: A list of reasons an outgoing migration is
250 # blocked. Present and non-empty when migration is blocked.
251 # (since 6.0)
252 #
253 # Since: 0.14
254 ##
255 { 'struct': 'MigrationInfo',
256 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
257 '*disk': 'MigrationStats',
258 '*vfio': 'VfioStats',
259 '*xbzrle-cache': 'XBZRLECacheStats',
260 '*total-time': 'int',
261 '*expected-downtime': 'int',
262 '*downtime': 'int',
263 '*setup-time': 'int',
264 '*cpu-throttle-percentage': 'int',
265 '*error-desc': 'str',
266 '*blocked-reasons': ['str'],
267 '*postcopy-blocktime': 'uint32',
268 '*postcopy-vcpu-blocktime': ['uint32'],
269 '*compression': 'CompressionStats',
270 '*socket-address': ['SocketAddress'] } }
271
272 ##
273 # @query-migrate:
274 #
275 # Returns information about the current migration process. If
276 # migration is active there will be another json-object with RAM
277 # migration status, and if block migration is active, another one
278 # with block migration status.
279 #
280 # Returns: @MigrationInfo
281 #
282 # Since: 0.14
283 #
284 # Examples:
285 #
286 # 1. Before the first migration
287 #
288 # -> { "execute": "query-migrate" }
289 # <- { "return": {} }
290 #
291 # 2. Migration is done and has succeeded
292 #
293 # -> { "execute": "query-migrate" }
294 # <- { "return": {
295 # "status": "completed",
296 # "total-time":12345,
297 # "setup-time":12345,
298 # "downtime":12345,
299 # "ram":{
300 # "transferred":123,
301 # "remaining":123,
302 # "total":246,
303 # "duplicate":123,
304 # "normal":123,
305 # "normal-bytes":123456,
306 # "dirty-sync-count":15
307 # }
308 # }
309 # }
310 #
311 # 3. Migration is done and has failed
312 #
313 # -> { "execute": "query-migrate" }
314 # <- { "return": { "status": "failed" } }
315 #
316 # 4. Migration is being performed and is not a block migration:
317 #
318 # -> { "execute": "query-migrate" }
319 # <- {
320 # "return":{
321 # "status":"active",
322 # "total-time":12345,
323 # "setup-time":12345,
324 # "expected-downtime":12345,
325 # "ram":{
326 # "transferred":123,
327 # "remaining":123,
328 # "total":246,
329 # "duplicate":123,
330 # "normal":123,
331 # "normal-bytes":123456,
332 # "dirty-sync-count":15
333 # }
334 # }
335 # }
336 #
337 # 5. Migration is being performed and is a block migration:
338 #
339 # -> { "execute": "query-migrate" }
340 # <- {
341 # "return":{
342 # "status":"active",
343 # "total-time":12345,
344 # "setup-time":12345,
345 # "expected-downtime":12345,
346 # "ram":{
347 # "total":1057024,
348 # "remaining":1053304,
349 # "transferred":3720,
350 # "duplicate":123,
351 # "normal":123,
352 # "normal-bytes":123456,
353 # "dirty-sync-count":15
354 # },
355 # "disk":{
356 # "total":20971520,
357 # "remaining":20880384,
358 # "transferred":91136
359 # }
360 # }
361 # }
362 #
363 # 6. Migration is being performed and XBZRLE is active:
364 #
365 # -> { "execute": "query-migrate" }
366 # <- {
367 # "return":{
368 # "status":"active",
369 # "total-time":12345,
370 # "setup-time":12345,
371 # "expected-downtime":12345,
372 # "ram":{
373 # "total":1057024,
374 # "remaining":1053304,
375 # "transferred":3720,
376 # "duplicate":10,
377 # "normal":3333,
378 # "normal-bytes":3412992,
379 # "dirty-sync-count":15
380 # },
381 # "xbzrle-cache":{
382 # "cache-size":67108864,
383 # "bytes":20971520,
384 # "pages":2444343,
385 # "cache-miss":2244,
386 # "cache-miss-rate":0.123,
387 # "encoding-rate":80.1,
388 # "overflow":34434
389 # }
390 # }
391 # }
392 ##
393 { 'command': 'query-migrate', 'returns': 'MigrationInfo' }
394
395 ##
396 # @MigrationCapability:
397 #
398 # Migration capabilities enumeration
399 #
400 # @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
401 # Encoding). This feature allows us to minimize migration traffic
402 #     for certain workloads, by sending compressed differences of the
403 # pages
404 #
405 # @rdma-pin-all: Controls whether or not the entire VM memory
406 # footprint is mlock()'d on demand or all at once. Refer to
407 # docs/rdma.txt for usage. Disabled by default. (since 2.0)
408 #
409 # @zero-blocks: During storage migration encode blocks of zeroes
410 # efficiently. This essentially saves 1MB of zeroes per block on
411 # the wire. Enabling requires source and target VM to support
412 # this feature. To enable it is sufficient to enable the
413 # capability on the source VM. The feature is disabled by default.
414 # (since 1.6)
415 #
416 # @compress: Use multiple compression threads to accelerate live
417 # migration. This feature can help to reduce the migration
418 # traffic, by sending compressed pages. Please note that if
419 # compress and xbzrle are both on, compress only takes effect in
420 #     the RAM bulk stage; after that, it is disabled and only
421 #     xbzrle takes effect. This can help to minimize migration
422 #     traffic. The feature is disabled by default. (since 2.4)
423 #
424 # @events: generate events for each migration state change.
425 #     (since 2.4)
426 #
427 # @auto-converge: If enabled, QEMU will automatically throttle down
428 # the guest to speed up convergence of RAM migration. (since 1.6)
429 #
430 # @postcopy-ram: Start executing on the migration target before all of
431 # RAM has been migrated, pulling the remaining pages along as
432 # needed. The capacity must have the same setting on both source
433 # and target or migration will not even start. NOTE: If the
434 # migration fails during postcopy the VM will fail. (since 2.6)
435 #
436 # @x-colo: If enabled, migration will never end, and the state of the
437 # VM on the primary side will be migrated continuously to the VM
438 # on secondary side, this process is called COarse-Grain LOck
439 # Stepping (COLO) for Non-stop Service. (since 2.8)
440 #
441 # @release-ram: if enabled, qemu will free the migrated ram pages on
442 # the source during postcopy-ram migration. (since 2.9)
443 #
444 # @block: If enabled, QEMU will also migrate the contents of all block
445 # devices. Default is disabled. A possible alternative uses
446 # mirror jobs to a builtin NBD server on the destination, which
447 # offers more flexibility. (Since 2.10)
448 #
449 # @return-path: If enabled, migration will use the return path even
450 # for precopy. (since 2.10)
451 #
452 # @pause-before-switchover: Pause outgoing migration before
453 # serialising device state and before disabling block IO (since
454 # 2.11)
455 #
456 # @multifd: Use more than one fd for migration (since 4.0)
457 #
458 # @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
459 # (since 2.12)
460 #
461 # @postcopy-blocktime: Calculate downtime for postcopy live migration
462 # (since 3.0)
463 #
464 # @late-block-activate: If enabled, the destination will not activate
465 # block devices (and thus take locks) immediately at the end of
466 # migration. (since 3.0)
467 #
468 # @x-ignore-shared: If enabled, QEMU will not migrate shared memory that is
469 # accessible on the destination machine. (since 4.0)
470 #
471 # @validate-uuid: Send the UUID of the source to allow the destination
472 # to ensure it is the same. (since 4.2)
473 #
474 # @background-snapshot: If enabled, the migration stream will be a
475 # snapshot of the VM exactly at the point when the migration
476 # procedure starts. The VM RAM is saved with running VM. (since
477 # 6.0)
478 #
479 # @zero-copy-send: Controls behavior on sending memory pages on
480 # migration. When true, enables a zero-copy mechanism for sending
481 # memory pages, if host supports it. Requires that QEMU be
482 # permitted to use locked memory for guest RAM pages. (since 7.1)
483 #
484 # @postcopy-preempt: If enabled, the migration process will allow
485 # postcopy requests to preempt precopy stream, so postcopy
486 # requests will be handled faster. This is a performance feature
487 # and should not affect the correctness of postcopy migration.
488 # (since 7.1)
489 #
490 # @switchover-ack: If enabled, migration will not stop the source VM
491 # and complete the migration until an ACK is received from the
492 # destination that it's OK to do so. Exactly when this ACK is
493 # sent depends on the migrated devices that use this feature.
494 # For example, a device can use it to make sure some of its data
495 # is sent and loaded in the destination before doing switchover.
496 # This can reduce downtime if devices that support this capability
497 # are present. 'return-path' capability must be enabled to use
498 # it. (since 8.1)
499 #
500 # Features:
501 #
502 # @unstable: Members @x-colo and @x-ignore-shared are experimental.
503 #
504 # Since: 1.2
505 ##
506 { 'enum': 'MigrationCapability',
507 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
508 'compress', 'events', 'postcopy-ram',
509 { 'name': 'x-colo', 'features': [ 'unstable' ] },
510 'release-ram',
511 'block', 'return-path', 'pause-before-switchover', 'multifd',
512 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
513 { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
514 'validate-uuid', 'background-snapshot',
515 'zero-copy-send', 'postcopy-preempt', 'switchover-ack'] }
516
517 ##
518 # @MigrationCapabilityStatus:
519 #
520 # Migration capability information
521 #
522 # @capability: capability enum
523 #
524 # @state: capability state bool
525 #
526 # Since: 1.2
527 ##
528 { 'struct': 'MigrationCapabilityStatus',
529 'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
530
531 ##
532 # @migrate-set-capabilities:
533 #
534 # Enable/Disable the following migration capabilities (like xbzrle)
535 #
536 # @capabilities: json array of capability modifications to make
537 #
538 # Since: 1.2
539 #
540 # Example:
541 #
542 # -> { "execute": "migrate-set-capabilities" , "arguments":
543 # { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
544 # <- { "return": {} }
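#
# A second, purely illustrative exchange (the capability names are
# taken from @MigrationCapability; enabling these two here is an
# arbitrary choice, not a recommendation), showing that several
# capabilities can be changed in one call:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "multifd", "state": true },
#                          { "capability": "events", "state": true } ] } }
# <- { "return": {} }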
545 ##
546 { 'command': 'migrate-set-capabilities',
547 'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
548
549 ##
550 # @query-migrate-capabilities:
551 #
552 # Returns information about the current migration capabilities status
553 #
554 # Returns: @MigrationCapabilityStatus
555 #
556 # Since: 1.2
557 #
558 # Example:
559 #
560 # -> { "execute": "query-migrate-capabilities" }
561 # <- { "return": [
562 # {"state": false, "capability": "xbzrle"},
563 # {"state": false, "capability": "rdma-pin-all"},
564 # {"state": false, "capability": "auto-converge"},
565 # {"state": false, "capability": "zero-blocks"},
566 # {"state": false, "capability": "compress"},
567 # {"state": true, "capability": "events"},
568 # {"state": false, "capability": "postcopy-ram"},
569 # {"state": false, "capability": "x-colo"}
570 # ]}
571 ##
572 { 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}
573
574 ##
575 # @MultiFDCompression:
576 #
577 # An enumeration of multifd compression methods.
578 #
579 # @none: no compression.
580 #
581 # @zlib: use zlib compression method.
582 #
583 # @zstd: use zstd compression method.
584 #
585 # Since: 5.0
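#
# As an illustrative sketch only (the channel count and compression
# level are arbitrary values, and the method presumably only takes
# effect when the multifd capability is enabled), a compression
# method is selected via migrate-set-parameters:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-channels": 4,
#                     "multifd-compression": "zstd",
#                     "multifd-zstd-level": 1 } }
# <- { "return": {} }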
586 ##
587 { 'enum': 'MultiFDCompression',
588 'data': [ 'none', 'zlib',
589 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
590
591 ##
592 # @BitmapMigrationBitmapAliasTransform:
593 #
594 # @persistent: If present, the bitmap will be made persistent or
595 # transient depending on this parameter.
596 #
597 # Since: 6.0
598 ##
599 { 'struct': 'BitmapMigrationBitmapAliasTransform',
600 'data': {
601 '*persistent': 'bool'
602 } }
603
604 ##
605 # @BitmapMigrationBitmapAlias:
606 #
607 # @name: The name of the bitmap.
608 #
609 # @alias: An alias name for migration (for example the bitmap name on
610 # the opposite site).
611 #
612 # @transform: Allows the modification of the migrated bitmap. (since
613 # 6.0)
614 #
615 # Since: 5.2
616 ##
617 { 'struct': 'BitmapMigrationBitmapAlias',
618 'data': {
619 'name': 'str',
620 'alias': 'str',
621 '*transform': 'BitmapMigrationBitmapAliasTransform'
622 } }
623
624 ##
625 # @BitmapMigrationNodeAlias:
626 #
627 # Maps a block node name and the bitmaps it has to aliases for dirty
628 # bitmap migration.
629 #
630 # @node-name: A block node name.
631 #
632 # @alias: An alias block node name for migration (for example the node
633 # name on the opposite site).
634 #
635 # @bitmaps: Mappings for the bitmaps on this node.
636 #
637 # Since: 5.2
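#
# For illustration only (the node, alias and bitmap names below are
# made up), a mapping built from these structures might be passed to
# migrate-set-parameters like this:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": {
#        "block-bitmap-mapping": [
#          { "node-name": "node0",
#            "alias": "block0",
#            "bitmaps": [
#              { "name": "bitmap0",
#                "alias": "bitmap0-alias",
#                "transform": { "persistent": true } } ] } ] } }
# <- { "return": {} }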
638 ##
639 { 'struct': 'BitmapMigrationNodeAlias',
640 'data': {
641 'node-name': 'str',
642 'alias': 'str',
643 'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
644 } }
645
646 ##
647 # @MigrationParameter:
648 #
649 # Migration parameters enumeration
650 #
651 # @announce-initial: Initial delay (in milliseconds) before sending
652 # the first announce (Since 4.0)
653 #
654 # @announce-max: Maximum delay (in milliseconds) between packets in
655 # the announcement (Since 4.0)
656 #
657 # @announce-rounds: Number of self-announce packets sent after
658 # migration (Since 4.0)
659 #
660 # @announce-step: Increase in delay (in milliseconds) between
661 # subsequent packets in the announcement (Since 4.0)
662 #
663 # @compress-level: Set the compression level to be used in live
664 # migration, the compression level is an integer between 0 and 9,
665 # where 0 means no compression, 1 means the best compression
666 # speed, and 9 means best compression ratio which will consume
667 # more CPU.
668 #
669 # @compress-threads: Set compression thread count to be used in live
670 # migration, the compression thread count is an integer between 1
671 # and 255.
672 #
673 # @compress-wait-thread: Controls behavior when all compression
674 # threads are currently busy. If true (default), wait for a free
675 # compression thread to become available; otherwise, send the page
676 # uncompressed. (Since 3.1)
677 #
678 # @decompress-threads: Set decompression thread count to be used in
679 # live migration, the decompression thread count is an integer
680 # between 1 and 255. Usually, decompression is at least 4 times as
681 #     fast as compression, so setting decompress-threads to about 1/4
682 #     of compress-threads is adequate.
683 #
684 # @throttle-trigger-threshold: The ratio of bytes_dirty_period and
685 #     bytes_xfer_period to trigger throttling. It is expressed as a
686 #     percentage. The default value is 50. (Since 5.0)
687 #
688 # @cpu-throttle-initial: Initial percentage of time guest cpus are
689 # throttled when migration auto-converge is activated. The
690 # default value is 20. (Since 2.7)
691 #
692 # @cpu-throttle-increment: throttle percentage increase each time
693 # auto-converge detects that migration is not making progress.
694 # The default value is 10. (Since 2.7)
695 #
696 # @cpu-throttle-tailslow: Make CPU throttling slower at the tail
697 #     stage. At the tail stage of throttling, the guest is very
698 #     sensitive to the CPU throttle percentage, while
699 #     @cpu-throttle-increment is usually excessive there. If this
700 #     parameter is true, we will compute the ideal CPU percentage used
701 #     by the guest, which may make the dirty rate exactly match the
702 #     dirty rate threshold. Then we will choose the smaller throttle
703 #     increment between the one specified by @cpu-throttle-increment
704 #     and the one derived from the ideal CPU percentage. Therefore,
705 #     it is compatible with traditional throttling, while the throttle
706 #     increment won't be excessive at the tail stage. The default
707 #     value is false. (Since 5.1)
708 #
709 # @tls-creds: ID of the 'tls-creds' object that provides credentials
710 # for establishing a TLS connection over the migration data
711 # channel. On the outgoing side of the migration, the credentials
712 # must be for a 'client' endpoint, while for the incoming side the
713 # credentials must be for a 'server' endpoint. Setting this will
714 # enable TLS for all migrations. The default is unset, resulting
715 # in unsecured migration at the QEMU level. (Since 2.7)
716 #
717 # @tls-hostname: hostname of the target host for the migration. This
718 # is required when using x509 based TLS credentials and the
719 # migration URI does not already include a hostname. For example
720 # if using fd: or exec: based migration, the hostname must be
721 # provided so that the server's x509 certificate identity can be
722 # validated. (Since 2.7)
723 #
724 # @tls-authz: ID of the 'authz' object subclass that provides access
725 # control checking of the TLS x509 certificate distinguished name.
726 # This object is only resolved at time of use, so can be deleted
727 # and recreated on the fly while the migration server is active.
728 # If missing, it will default to denying access (Since 4.0)
729 #
730 # @max-bandwidth: maximum speed for migration, in bytes per second.
731 #     (Since 2.8)
732 #
733 # @downtime-limit: maximum tolerated downtime for migration, in
734 #     milliseconds. (Since 2.8)
735 #
736 # @x-checkpoint-delay: The delay time (in ms) between two COLO
737 # checkpoints in periodic mode. (Since 2.8)
738 #
739 # @block-incremental: Affects how much storage is migrated when the
740 # block migration capability is enabled. When false, the entire
741 # storage backing chain is migrated into a flattened image at the
742 # destination; when true, only the active qcow2 layer is migrated
743 # and the destination must already have access to the same backing
744 # chain as was used on the source. (since 2.10)
745 #
746 # @multifd-channels: Number of channels used to migrate data in
747 #     parallel. This is the same as the number of sockets
748 # used for migration. The default value is 2 (since 4.0)
749 #
750 # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
751 # needs to be a multiple of the target page size and a power of 2
752 # (Since 2.11)
753 #
754 # @max-postcopy-bandwidth: Background transfer bandwidth during
755 # postcopy. Defaults to 0 (unlimited). In bytes per second.
756 # (Since 3.0)
757 #
758 # @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
759 # (Since 3.1)
760 #
761 # @multifd-compression: Which compression method to use. Defaults to
762 # none. (Since 5.0)
763 #
764 # @multifd-zlib-level: Set the compression level to be used in live
765 # migration, the compression level is an integer between 0 and 9,
766 # where 0 means no compression, 1 means the best compression
767 # speed, and 9 means best compression ratio which will consume
768 # more CPU. Defaults to 1. (Since 5.0)
769 #
770 # @multifd-zstd-level: Set the compression level to be used in live
771 # migration, the compression level is an integer between 0 and 20,
772 # where 0 means no compression, 1 means the best compression
773 # speed, and 20 means best compression ratio which will consume
774 # more CPU. Defaults to 1. (Since 5.0)
775 #
776 # @block-bitmap-mapping: Maps block nodes and bitmaps on them to
777 # aliases for the purpose of dirty bitmap migration. Such aliases
778 # may for example be the corresponding names on the opposite site.
779 # The mapping must be one-to-one, but not necessarily complete: On
780 # the source, unmapped bitmaps and all bitmaps on unmapped nodes
781 # will be ignored. On the destination, encountering an unmapped
782 # alias in the incoming migration stream will result in a report,
783 # and all further bitmap migration data will then be discarded.
784 # Note that the destination does not know about bitmaps it does
785 # not receive, so there is no limitation or requirement regarding
786 # the number of bitmaps received, or how they are named, or on
787 # which nodes they are placed. By default (when this parameter
788 # has never been set), bitmap names are mapped to themselves.
789 # Nodes are mapped to their block device name if there is one, and
790 # to their node name otherwise. (Since 5.2)
791 #
792 # @x-vcpu-dirty-limit-period: Period (in milliseconds) of the dirty
793 #     limit during live migration. Should be in the range 1 to 1000ms.
794 #     Defaults to 1000ms. (Since 8.1)
795 #
796 # @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
797 #     migration. Defaults to 1. (Since 8.1)
798 #
799 # Features:
800 #
801 # @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
802 # are experimental.
803 #
804 # Since: 2.4
805 ##
806 { 'enum': 'MigrationParameter',
807 'data': ['announce-initial', 'announce-max',
808 'announce-rounds', 'announce-step',
809 'compress-level', 'compress-threads', 'decompress-threads',
810 'compress-wait-thread', 'throttle-trigger-threshold',
811 'cpu-throttle-initial', 'cpu-throttle-increment',
812 'cpu-throttle-tailslow',
813 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
814 'downtime-limit',
815 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
816 'block-incremental',
817 'multifd-channels',
818 'xbzrle-cache-size', 'max-postcopy-bandwidth',
819 'max-cpu-throttle', 'multifd-compression',
820 'multifd-zlib-level', 'multifd-zstd-level',
821 'block-bitmap-mapping',
822 { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
823 'vcpu-dirty-limit'] }
824
825 ##
826 # @MigrateSetParameters:
827 #
828 # @announce-initial: Initial delay (in milliseconds) before sending
829 # the first announce (Since 4.0)
830 #
831 # @announce-max: Maximum delay (in milliseconds) between packets in
832 # the announcement (Since 4.0)
833 #
834 # @announce-rounds: Number of self-announce packets sent after
835 # migration (Since 4.0)
836 #
837 # @announce-step: Increase in delay (in milliseconds) between
838 # subsequent packets in the announcement (Since 4.0)
839 #
840 # @compress-level: compression level
841 #
842 # @compress-threads: compression thread count
843 #
844 # @compress-wait-thread: Controls behavior when all compression
845 # threads are currently busy. If true (default), wait for a free
846 # compression thread to become available; otherwise, send the page
847 # uncompressed. (Since 3.1)
848 #
849 # @decompress-threads: decompression thread count
850 #
851 # @throttle-trigger-threshold: The ratio of bytes_dirty_period and
852 #     bytes_xfer_period to trigger throttling. It is expressed as a
853 #     percentage. The default value is 50. (Since 5.0)
854 #
855 # @cpu-throttle-initial: Initial percentage of time guest cpus are
856 # throttled when migration auto-converge is activated. The
857 # default value is 20. (Since 2.7)
858 #
859 # @cpu-throttle-increment: throttle percentage increase each time
860 # auto-converge detects that migration is not making progress.
861 # The default value is 10. (Since 2.7)
862 #
863 # @cpu-throttle-tailslow: Make CPU throttling slower at the tail
864 #     stage. At the tail stage of throttling, the guest is very
865 #     sensitive to the CPU throttle percentage, while
866 #     @cpu-throttle-increment is usually excessive there. If this
867 #     parameter is true, we will compute the ideal CPU percentage used
868 #     by the guest, which may make the dirty rate exactly match the
869 #     dirty rate threshold. Then we will choose the smaller throttle
870 #     increment between the one specified by @cpu-throttle-increment
871 #     and the one derived from the ideal CPU percentage. Therefore,
872 #     it is compatible with traditional throttling, while the throttle
873 #     increment won't be excessive at the tail stage. The default
874 #     value is false. (Since 5.1)
875 #
876 # @tls-creds: ID of the 'tls-creds' object that provides credentials
877 # for establishing a TLS connection over the migration data
878 # channel. On the outgoing side of the migration, the credentials
879 # must be for a 'client' endpoint, while for the incoming side the
880 # credentials must be for a 'server' endpoint. Setting this to a
881 # non-empty string enables TLS for all migrations. An empty
882 # string means that QEMU will use plain text mode for migration,
883 # rather than TLS (Since 2.9) Previously (since 2.7), this was
884 # reported by omitting tls-creds instead.
885 #
886 # @tls-hostname: hostname of the target host for the migration. This
887 # is required when using x509 based TLS credentials and the
888 # migration URI does not already include a hostname. For example
889 # if using fd: or exec: based migration, the hostname must be
890 # provided so that the server's x509 certificate identity can be
891 # validated. (Since 2.7) An empty string means that QEMU will use
892 # the hostname associated with the migration URI, if any. (Since
893 # 2.9) Previously (since 2.7), this was reported by omitting
894 # tls-hostname instead.
895 #
896 # @max-bandwidth: maximum speed for migration, in bytes per second.
897 #     (Since 2.8)
898 #
899 # @downtime-limit: maximum tolerated downtime for migration, in
900 #     milliseconds. (Since 2.8)
901 #
902 # @x-checkpoint-delay: the delay time between two COLO checkpoints.
903 # (Since 2.8)
904 #
905 # @block-incremental: Affects how much storage is migrated when the
906 # block migration capability is enabled. When false, the entire
907 # storage backing chain is migrated into a flattened image at the
908 # destination; when true, only the active qcow2 layer is migrated
909 # and the destination must already have access to the same backing
910 # chain as was used on the source. (since 2.10)
911 #
912 # @multifd-channels: Number of channels used to migrate data in
913 #     parallel. This is the same as the number of sockets
914 # used for migration. The default value is 2 (since 4.0)
915 #
916 # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
917 # needs to be a multiple of the target page size and a power of 2
918 # (Since 2.11)
919 #
920 # @max-postcopy-bandwidth: Background transfer bandwidth during
921 # postcopy. Defaults to 0 (unlimited). In bytes per second.
922 # (Since 3.0)
923 #
924 # @max-cpu-throttle: maximum cpu throttle percentage. The default
925 # value is 99. (Since 3.1)
926 #
927 # @multifd-compression: Which compression method to use. Defaults to
928 # none. (Since 5.0)
929 #
930 # @multifd-zlib-level: Set the compression level to be used in live
931 # migration, the compression level is an integer between 0 and 9,
932 # where 0 means no compression, 1 means the best compression
933 # speed, and 9 means best compression ratio which will consume
934 # more CPU. Defaults to 1. (Since 5.0)
935 #
936 # @multifd-zstd-level: Set the compression level to be used in live
937 # migration, the compression level is an integer between 0 and 20,
938 # where 0 means no compression, 1 means the best compression
939 # speed, and 20 means best compression ratio which will consume
940 # more CPU. Defaults to 1. (Since 5.0)
941 #
942 # @block-bitmap-mapping: Maps block nodes and bitmaps on them to
943 # aliases for the purpose of dirty bitmap migration. Such aliases
944 # may for example be the corresponding names on the opposite site.
945 # The mapping must be one-to-one, but not necessarily complete: On
946 # the source, unmapped bitmaps and all bitmaps on unmapped nodes
947 # will be ignored. On the destination, encountering an unmapped
948 # alias in the incoming migration stream will result in a report,
949 # and all further bitmap migration data will then be discarded.
950 # Note that the destination does not know about bitmaps it does
951 # not receive, so there is no limitation or requirement regarding
952 # the number of bitmaps received, or how they are named, or on
953 # which nodes they are placed. By default (when this parameter
954 # has never been set), bitmap names are mapped to themselves.
955 # Nodes are mapped to their block device name if there is one, and
956 # to their node name otherwise. (Since 5.2)
957 #
958 # @x-vcpu-dirty-limit-period: Period (in milliseconds) of the dirty
959 #     limit during live migration. Should be in the range 1 to 1000ms.
960 #     Defaults to 1000ms. (Since 8.1)
961 #
962 # @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
963 #     migration. Defaults to 1. (Since 8.1)
964 #
965 # Features:
966 #
967 # @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
968 # are experimental.
969 #
970 # TODO: either fuse back into MigrationParameters, or make
971 # MigrationParameters members mandatory
972 #
973 # Since: 2.4
974 ##
975 { 'struct': 'MigrateSetParameters',
976 'data': { '*announce-initial': 'size',
977 '*announce-max': 'size',
978 '*announce-rounds': 'size',
979 '*announce-step': 'size',
980 '*compress-level': 'uint8',
981 '*compress-threads': 'uint8',
982 '*compress-wait-thread': 'bool',
983 '*decompress-threads': 'uint8',
984 '*throttle-trigger-threshold': 'uint8',
985 '*cpu-throttle-initial': 'uint8',
986 '*cpu-throttle-increment': 'uint8',
987 '*cpu-throttle-tailslow': 'bool',
988 '*tls-creds': 'StrOrNull',
989 '*tls-hostname': 'StrOrNull',
990 '*tls-authz': 'StrOrNull',
991 '*max-bandwidth': 'size',
992 '*downtime-limit': 'uint64',
993 '*x-checkpoint-delay': { 'type': 'uint32',
994 'features': [ 'unstable' ] },
995 '*block-incremental': 'bool',
996 '*multifd-channels': 'uint8',
997 '*xbzrle-cache-size': 'size',
998 '*max-postcopy-bandwidth': 'size',
999 '*max-cpu-throttle': 'uint8',
1000 '*multifd-compression': 'MultiFDCompression',
1001 '*multifd-zlib-level': 'uint8',
1002 '*multifd-zstd-level': 'uint8',
1003 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1004 '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1005 'features': [ 'unstable' ] },
1006 '*vcpu-dirty-limit': 'uint64'} }
1007
1008 ##
1009 # @migrate-set-parameters:
1010 #
1011 # Set various migration parameters.
1012 #
1013 # Since: 2.4
1014 #
1015 # Example:
1016 #
1017 # -> { "execute": "migrate-set-parameters" ,
1018 # "arguments": { "compress-level": 1 } }
1019 # <- { "return": {} }
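#
# As a further illustrative sketch (the values are arbitrary), the
# vcpu dirty limit parameters described above can be set the same
# way; note that @x-vcpu-dirty-limit-period is experimental:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "x-vcpu-dirty-limit-period": 500,
#                     "vcpu-dirty-limit": 10 } }
# <- { "return": {} }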
1020 ##
1021 { 'command': 'migrate-set-parameters', 'boxed': true,
1022 'data': 'MigrateSetParameters' }
1023
1024 ##
1025 # @MigrationParameters:
1026 #
1027 # The optional members aren't actually optional.
1028 #
1029 # @announce-initial: Initial delay (in milliseconds) before sending
1030 # the first announce (Since 4.0)
1031 #
1032 # @announce-max: Maximum delay (in milliseconds) between packets in
1033 # the announcement (Since 4.0)
1034 #
1035 # @announce-rounds: Number of self-announce packets sent after
1036 # migration (Since 4.0)
1037 #
1038 # @announce-step: Increase in delay (in milliseconds) between
1039 # subsequent packets in the announcement (Since 4.0)
1040 #
1041 # @compress-level: compression level
1042 #
1043 # @compress-threads: compression thread count
1044 #
1045 # @compress-wait-thread: Controls behavior when all compression
1046 # threads are currently busy. If true (default), wait for a free
1047 # compression thread to become available; otherwise, send the page
1048 # uncompressed. (Since 3.1)
1049 #
1050 # @decompress-threads: decompression thread count
1051 #
1052 # @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1053 #     bytes_xfer_period to trigger throttling. It is expressed as a
1054 #     percentage. The default value is 50. (Since 5.0)
1055 #
1056 # @cpu-throttle-initial: Initial percentage of time guest cpus are
1057 # throttled when migration auto-converge is activated. (Since
1058 # 2.7)
1059 #
1060 # @cpu-throttle-increment: throttle percentage increase each time
1061 # auto-converge detects that migration is not making progress.
1062 # (Since 2.7)
1063 #
1064 # @cpu-throttle-tailslow: Make CPU throttling slower at the tail
1065 #     stage. At the tail stage of throttling, the guest is very
1066 #     sensitive to the CPU throttle percentage, while
1067 #     @cpu-throttle-increment is usually excessive there. If this
1068 #     parameter is true, we will compute the ideal CPU percentage used
1069 #     by the guest, which may make the dirty rate exactly match the
1070 #     dirty rate threshold. Then we will choose the smaller throttle
1071 #     increment between the one specified by @cpu-throttle-increment
1072 #     and the one derived from the ideal CPU percentage. Therefore,
1073 #     it is compatible with traditional throttling, while the throttle
1074 #     increment won't be excessive at the tail stage. The default
1075 #     value is false. (Since 5.1)
1076 #
1077 # @tls-creds: ID of the 'tls-creds' object that provides credentials
1078 # for establishing a TLS connection over the migration data
1079 # channel. On the outgoing side of the migration, the credentials
1080 # must be for a 'client' endpoint, while for the incoming side the
1081 # credentials must be for a 'server' endpoint. An empty string
1082 # means that QEMU will use plain text mode for migration, rather
1083 # than TLS (Since 2.7) Note: 2.8 reports this by omitting
1084 # tls-creds instead.
1085 #
1086 # @tls-hostname: hostname of the target host for the migration. This
1087 # is required when using x509 based TLS credentials and the
1088 # migration URI does not already include a hostname. For example
1089 # if using fd: or exec: based migration, the hostname must be
1090 # provided so that the server's x509 certificate identity can be
1091 # validated. (Since 2.7) An empty string means that QEMU will use
1092 # the hostname associated with the migration URI, if any. (Since
1093 # 2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1094 #
1095 # @tls-authz: ID of the 'authz' object subclass that provides access
1096 # control checking of the TLS x509 certificate distinguished name.
1097 # (Since 4.0)
1098 #
1099 # @max-bandwidth: maximum speed for migration, in bytes per second.
1100 #     (Since 2.8)
1101 #
1102 # @downtime-limit: maximum tolerated downtime for migration, in
1103 #     milliseconds. (Since 2.8)
1104 #
1105 # @x-checkpoint-delay: the delay time between two COLO checkpoints.
1106 # (Since 2.8)
1107 #
1108 # @block-incremental: Affects how much storage is migrated when the
1109 # block migration capability is enabled. When false, the entire
1110 # storage backing chain is migrated into a flattened image at the
1111 # destination; when true, only the active qcow2 layer is migrated
1112 # and the destination must already have access to the same backing
1113 # chain as was used on the source. (since 2.10)
1114 #
1115 # @multifd-channels: Number of channels used to migrate data in
1116 #     parallel. This is the same as the number of sockets
1117 # used for migration. The default value is 2 (since 4.0)
1118 #
1119 # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
1120 # needs to be a multiple of the target page size and a power of 2
1121 # (Since 2.11)
1122 #
1123 # @max-postcopy-bandwidth: Background transfer bandwidth during
1124 # postcopy. Defaults to 0 (unlimited). In bytes per second.
1125 # (Since 3.0)
1126 #
1127 # @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
1128 # (Since 3.1)
1129 #
1130 # @multifd-compression: Which compression method to use. Defaults to
1131 # none. (Since 5.0)
1132 #
1133 # @multifd-zlib-level: Set the compression level to be used in live
1134 # migration, the compression level is an integer between 0 and 9,
1135 # where 0 means no compression, 1 means the best compression
1136 # speed, and 9 means best compression ratio which will consume
1137 # more CPU. Defaults to 1. (Since 5.0)
1138 #
1139 # @multifd-zstd-level: Set the compression level to be used in live
1140 # migration, the compression level is an integer between 0 and 20,
1141 # where 0 means no compression, 1 means the best compression
1142 # speed, and 20 means best compression ratio which will consume
1143 # more CPU. Defaults to 1. (Since 5.0)
1144 #
1145 # @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1146 # aliases for the purpose of dirty bitmap migration. Such aliases
1147 # may for example be the corresponding names on the opposite site.
1148 # The mapping must be one-to-one, but not necessarily complete: On
1149 # the source, unmapped bitmaps and all bitmaps on unmapped nodes
1150 # will be ignored. On the destination, encountering an unmapped
1151 # alias in the incoming migration stream will result in a report,
1152 # and all further bitmap migration data will then be discarded.
1153 # Note that the destination does not know about bitmaps it does
1154 # not receive, so there is no limitation or requirement regarding
1155 # the number of bitmaps received, or how they are named, or on
1156 # which nodes they are placed. By default (when this parameter
1157 # has never been set), bitmap names are mapped to themselves.
1158 # Nodes are mapped to their block device name if there is one, and
1159 # to their node name otherwise. (Since 5.2)
1160 #
1161 # @x-vcpu-dirty-limit-period: Period (in milliseconds) of the dirty
1162 #     limit during live migration. Should be in the range 1 to 1000ms.
1163 #     Defaults to 1000ms. (Since 8.1)
1164 #
1165 # @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
1166 #     migration. Defaults to 1. (Since 8.1)
1167 #
1168 # Features:
1169 #
1170 # @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1171 # are experimental.
1172 #
1173 # Since: 2.4
1174 ##
1175 { 'struct': 'MigrationParameters',
1176 'data': { '*announce-initial': 'size',
1177 '*announce-max': 'size',
1178 '*announce-rounds': 'size',
1179 '*announce-step': 'size',
1180 '*compress-level': 'uint8',
1181 '*compress-threads': 'uint8',
1182 '*compress-wait-thread': 'bool',
1183 '*decompress-threads': 'uint8',
1184 '*throttle-trigger-threshold': 'uint8',
1185 '*cpu-throttle-initial': 'uint8',
1186 '*cpu-throttle-increment': 'uint8',
1187 '*cpu-throttle-tailslow': 'bool',
1188 '*tls-creds': 'str',
1189 '*tls-hostname': 'str',
1190 '*tls-authz': 'str',
1191 '*max-bandwidth': 'size',
1192 '*downtime-limit': 'uint64',
1193 '*x-checkpoint-delay': { 'type': 'uint32',
1194 'features': [ 'unstable' ] },
1195 '*block-incremental': 'bool',
1196 '*multifd-channels': 'uint8',
1197 '*xbzrle-cache-size': 'size',
1198 '*max-postcopy-bandwidth': 'size',
1199 '*max-cpu-throttle': 'uint8',
1200 '*multifd-compression': 'MultiFDCompression',
1201 '*multifd-zlib-level': 'uint8',
1202 '*multifd-zstd-level': 'uint8',
1203 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1204 '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1205 'features': [ 'unstable' ] },
1206 '*vcpu-dirty-limit': 'uint64'} }
1207
1208 ##
1209 # @query-migrate-parameters:
1210 #
1211 # Returns information about the current migration parameters
1212 #
1213 # Returns: @MigrationParameters
1214 #
1215 # Since: 2.4
1216 #
1217 # Example:
1218 #
1219 # -> { "execute": "query-migrate-parameters" }
1220 # <- { "return": {
1221 # "decompress-threads": 2,
1222 # "cpu-throttle-increment": 10,
1223 # "compress-threads": 8,
1224 # "compress-level": 1,
1225 # "cpu-throttle-initial": 20,
1226 # "max-bandwidth": 33554432,
1227 # "downtime-limit": 300
1228 # }
1229 # }
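#
# The reply shown above is abbreviated; the real reply carries every
# member of @MigrationParameters. As an illustrative sketch, newer
# entries such as the vcpu dirty limit parameters would also appear,
# shown here in isolation with their documented defaults:
#
# <- { "return": {
#         "x-vcpu-dirty-limit-period": 1000,
#         "vcpu-dirty-limit": 1
#      }
#    }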
1230 ##
1231 { 'command': 'query-migrate-parameters',
1232 'returns': 'MigrationParameters' }
1233
1234 ##
1235 # @migrate-start-postcopy:
1236 #
1237 # Followup to a migration command to switch the migration to postcopy
1238 # mode. The postcopy-ram capability must be set on both source and
1239 # destination before the original migration command.
1240 #
1241 # Since: 2.5
1242 #
1243 # Example:
1244 #
1245 # -> { "execute": "migrate-start-postcopy" }
1246 # <- { "return": {} }
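#
# A typical sequence, sketched here purely for illustration (the URI
# is an example; the only documented requirement is that the
# postcopy-ram capability is enabled on both sides before the
# migration starts):
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [
#          { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }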
1247 ##
1248 { 'command': 'migrate-start-postcopy' }
1249
1250 ##
1251 # @MIGRATION:
1252 #
1253 # Emitted when a migration event happens
1254 #
1255 # @status: @MigrationStatus describing the current migration status.
1256 #
1257 # Since: 2.4
1258 #
1259 # Example:
1260 #
1261 # <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1262 # "event": "MIGRATION",
1263 # "data": {"status": "completed"} }
1264 ##
1265 { 'event': 'MIGRATION',
1266 'data': {'status': 'MigrationStatus'}}
1267
1268 ##
1269 # @MIGRATION_PASS:
1270 #
1271 # Emitted from the source side of a migration at the start of each
1272 # pass (when it syncs the dirty bitmap)
1273 #
1274 # @pass: An incrementing count (starting at 1 on the first pass)
1275 #
1276 # Since: 2.6
1277 #
1278 # Example:
1279 #
1280 # <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1281 # "event": "MIGRATION_PASS", "data": {"pass": 2} }
1282 ##
1283 { 'event': 'MIGRATION_PASS',
1284 'data': { 'pass': 'int' } }
1285
1286 ##
1287 # @COLOMessage:
1288 #
1289 # The message transmission between Primary side and Secondary side.
1290 #
1291 # @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1292 #
1293 # @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1294 # checkpointing
1295 #
1296 # @checkpoint-reply: SVM gets PVM's checkpoint request
1297 #
1298 # @vmstate-send: VM's state will be sent by PVM.
1299 #
1300 # @vmstate-size: The total size of VMstate.
1301 #
1302 # @vmstate-received: VM's state has been received by SVM.
1303 #
1304 # @vmstate-loaded: VM's state has been loaded by SVM.
1305 #
1306 # Since: 2.8
1307 ##
1308 { 'enum': 'COLOMessage',
1309 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1310 'vmstate-send', 'vmstate-size', 'vmstate-received',
1311 'vmstate-loaded' ] }
1312
1313 ##
1314 # @COLOMode:
1315 #
1316 # The COLO current mode.
1317 #
1318 # @none: COLO is disabled.
1319 #
1320 # @primary: COLO node in primary side.
1321 #
1322 # @secondary: COLO node in secondary side.
1323 #
1324 # Since: 2.8
1325 ##
1326 { 'enum': 'COLOMode',
1327 'data': [ 'none', 'primary', 'secondary'] }
1328
1329 ##
1330 # @FailoverStatus:
1331 #
1332 # An enumeration of COLO failover status
1333 #
1334 # @none: no failover has ever happened
1335 #
1336 # @require: got failover requirement but not handled
1337 #
1338 # @active: in the process of doing failover
1339 #
1340 # @completed: finish the process of failover
1341 #
1342 # @relaunch: restart the failover process, from 'none' -> 'completed'
1343 # (Since 2.9)
1344 #
1345 # Since: 2.8
1346 ##
1347 { 'enum': 'FailoverStatus',
1348 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1349
1350 ##
1351 # @COLO_EXIT:
1352 #
1353 # Emitted when VM finishes COLO mode due to some errors happening or
1354 # at the request of users.
1355 #
1356 # @mode: report COLO mode when COLO exited.
1357 #
1358 # @reason: describes the reason for the COLO exit.
1359 #
1360 # Since: 3.1
1361 #
1362 # Example:
1363 #
1364 # <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1365 # "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1366 ##
1367 { 'event': 'COLO_EXIT',
1368 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1369
1370 ##
1371 # @COLOExitReason:
1372 #
1373 # The reason for a COLO exit.
1374 #
1375 # @none: failover has never happened. This state does not occur in
1376 # the COLO_EXIT event, and is only visible in the result of
1377 # query-colo-status.
1378 #
1379 # @request: COLO exit is due to an external request.
1380 #
1381 # @error: COLO exit is due to an internal error.
1382 #
1383 # @processing: COLO is currently handling a failover (since 4.0).
1384 #
1385 # Since: 3.1
1386 ##
1387 { 'enum': 'COLOExitReason',
1388 'data': [ 'none', 'request', 'error' , 'processing' ] }
1389
1390 ##
1391 # @x-colo-lost-heartbeat:
1392 #
1393 # Tell QEMU that the heartbeat is lost, and request it to do takeover
1394 # procedures. If this command is sent to the PVM, the Primary side
1395 # will exit COLO mode. If sent to the Secondary, the Secondary side
1396 # will run failover work, then take over server operation to become
1397 # the service VM.
1398 #
1399 # Features:
1400 #
1401 # @unstable: This command is experimental.
1402 #
1403 # Since: 2.8
1404 #
1405 # Example:
1406 #
1407 # -> { "execute": "x-colo-lost-heartbeat" }
1408 # <- { "return": {} }
1409 ##
1410 { 'command': 'x-colo-lost-heartbeat',
1411 'features': [ 'unstable' ],
1412 'if': 'CONFIG_REPLICATION' }
1413
1414 ##
1415 # @migrate_cancel:
1416 #
1417 # Cancel the currently executing migration process.
1418 #
1419 # Returns: nothing on success
1420 #
1421 # Notes: This command succeeds even if there is no migration process
1422 # running.
1423 #
1424 # Since: 0.14
1425 #
1426 # Example:
1427 #
1428 # -> { "execute": "migrate_cancel" }
1429 # <- { "return": {} }
1430 ##
1431 { 'command': 'migrate_cancel' }
1432
1433 ##
1434 # @migrate-continue:
1435 #
1436 # Continue migration when it's in a paused state.
1437 #
1438 # @state: The state the migration is currently expected to be in
1439 #
1440 # Returns: nothing on success
1441 #
1442 # Since: 2.11
1443 #
1444 # Example:
1445 #
1446 # -> { "execute": "migrate-continue" , "arguments":
1447 # { "state": "pre-switchover" } }
1448 # <- { "return": {} }
1449 ##
1450 { 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1451
1452 ##
1453 # @migrate:
1454 #
1455 # Migrates the currently running guest to another Virtual Machine.
1456 #
1457 # @uri: the Uniform Resource Identifier of the destination VM
1458 #
1459 # @blk: do block migration (full disk copy)
1460 #
1461 # @inc: incremental disk copy migration
1462 #
1463 # @detach: this argument exists only for compatibility reasons and is
1464 # ignored by QEMU
1465 #
1466 # @resume: resume one paused migration, default "off". (since 3.0)
1467 #
1468 # Returns: nothing on success
1469 #
1470 # Since: 0.14
1471 #
1472 # Notes:
1473 #
1474 # 1. The 'query-migrate' command should be used to check migration's
1475 # progress and final result (this information is provided by the
1476 # 'status' member)
1477 #
1478 # 2. All boolean arguments default to false
1479 #
1480 # 3. The user Monitor's "detach" argument is invalid in QMP and should
1481 # not be used
1482 #
1483 # Example:
1484 #
1485 # -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1486 # <- { "return": {} }
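#
# As an illustrative sketch of resuming a paused postcopy migration
# (the URI is an example; see also migrate-recover, which provides
# the recovery migration stream URI), the command is re-issued with
# @resume enabled:
#
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:12345",
#                     "resume": true } }
# <- { "return": {} }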
1487 ##
1488 { 'command': 'migrate',
1489 'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
1490 '*detach': 'bool', '*resume': 'bool' } }
1491
1492 ##
1493 # @migrate-incoming:
1494 #
1495 # Start an incoming migration. QEMU must have been started with
1496 # -incoming defer.
1497 #
1498 # @uri: The Uniform Resource Identifier identifying the source or
1499 # address to listen on
1500 #
1501 # Returns: nothing on success
1502 #
1503 # Since: 2.3
1504 #
1505 # Notes:
1506 #
1507 # 1. It's a bad idea to use a string for the uri, but it needs
1508 # to stay compatible with -incoming and the format of the uri
1509 # is already exposed above libvirt.
1510 #
1511 # 2. QEMU must be started with -incoming defer to allow
1512 # migrate-incoming to be used.
1513 #
1514 # 3. The uri format is the same as for -incoming
1515 #
1516 # Example:
1517 #
1518 # -> { "execute": "migrate-incoming",
1519 # "arguments": { "uri": "tcp::4446" } }
1520 # <- { "return": {} }
1521 ##
1522 { 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1523
1524 ##
1525 # @xen-save-devices-state:
1526 #
1527 # Save the state of all devices to file. The RAM and the block
1528 # devices of the VM are not saved by this command.
1529 #
1530 # @filename: the file to save the state of the devices to as binary
1531 # data. See xen-save-devices-state.txt for a description of the
1532 # binary format.
1533 #
1534 # @live: Optional argument to ask QEMU to treat this command as part
1535 #     of a live migration. Defaults to true. (since 2.11)
1536 #
1537 # Returns: Nothing on success
1538 #
1539 # Since: 1.1
1540 #
1541 # Example:
1542 #
1543 # -> { "execute": "xen-save-devices-state",
1544 # "arguments": { "filename": "/tmp/save" } }
1545 # <- { "return": {} }
1546 ##
1547 { 'command': 'xen-save-devices-state',
1548 'data': {'filename': 'str', '*live':'bool' } }
1549
1550 ##
1551 # @xen-set-global-dirty-log:
1552 #
1553 # Enable or disable the global dirty log mode.
1554 #
1555 # @enable: true to enable, false to disable.
1556 #
1557 # Returns: nothing
1558 #
1559 # Since: 1.3
1560 #
1561 # Example:
1562 #
1563 # -> { "execute": "xen-set-global-dirty-log",
1564 # "arguments": { "enable": true } }
1565 # <- { "return": {} }
1566 ##
1567 { 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1568
1569 ##
1570 # @xen-load-devices-state:
1571 #
1572 # Load the state of all devices from file. The RAM and the block
1573 # devices of the VM are not loaded by this command.
1574 #
1575 # @filename: the file to load the state of the devices from as binary
1576 # data. See xen-save-devices-state.txt for a description of the
1577 # binary format.
1578 #
1579 # Since: 2.7
1580 #
1581 # Example:
1582 #
1583 # -> { "execute": "xen-load-devices-state",
1584 # "arguments": { "filename": "/tmp/resume" } }
1585 # <- { "return": {} }
1586 ##
1587 { 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1588
1589 ##
1590 # @xen-set-replication:
1591 #
1592 # Enable or disable replication.
1593 #
1594 # @enable: true to enable, false to disable.
1595 #
1596 # @primary: true for primary or false for secondary.
1597 #
1598 # @failover: true to do failover, false to stop. Cannot be specified
1599 # if 'enable' is true. Default value is false.
1600 #
1601 # Returns: nothing.
1602 #
1603 # Example:
1604 #
1605 # -> { "execute": "xen-set-replication",
1606 # "arguments": {"enable": true, "primary": false} }
1607 # <- { "return": {} }
1608 #
1609 # Since: 2.9
1610 ##
1611 { 'command': 'xen-set-replication',
1612 'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1613 'if': 'CONFIG_REPLICATION' }
1614
1615 ##
1616 # @ReplicationStatus:
1617 #
1618 # The result format for 'query-xen-replication-status'.
1619 #
1620 # @error: true if an error happened, false if replication is normal.
1621 #
1622 # @desc: the human readable error description string, when @error is
1623 # 'true'.
1624 #
1625 # Since: 2.9
1626 ##
1627 { 'struct': 'ReplicationStatus',
1628 'data': { 'error': 'bool', '*desc': 'str' },
1629 'if': 'CONFIG_REPLICATION' }
1630
1631 ##
1632 # @query-xen-replication-status:
1633 #
1634 # Query replication status while the vm is running.
1635 #
1636 # Returns: A @ReplicationStatus object showing the status.
1637 #
1638 # Example:
1639 #
1640 # -> { "execute": "query-xen-replication-status" }
1641 # <- { "return": { "error": false } }
1642 #
1643 # Since: 2.9
1644 ##
1645 { 'command': 'query-xen-replication-status',
1646 'returns': 'ReplicationStatus',
1647 'if': 'CONFIG_REPLICATION' }
1648
1649 ##
1650 # @xen-colo-do-checkpoint:
1651 #
1652 # Xen uses this command to notify replication to trigger a checkpoint.
1653 #
1654 # Returns: nothing.
1655 #
1656 # Example:
1657 #
1658 # -> { "execute": "xen-colo-do-checkpoint" }
1659 # <- { "return": {} }
1660 #
1661 # Since: 2.9
1662 ##
1663 { 'command': 'xen-colo-do-checkpoint',
1664 'if': 'CONFIG_REPLICATION' }
1665
1666 ##
1667 # @COLOStatus:
1668 #
1669 # The result format for 'query-colo-status'.
1670 #
1671 # @mode: COLO running mode. If COLO is running, this field will
1672 # return 'primary' or 'secondary'.
1673 #
1674 # @last-mode: COLO last running mode. If COLO is running, this field
1675 # returns the same value as the @mode field; after failover, this
1676 # field can be used to get the last COLO mode. (since 4.0)
1677 #
1678 # @reason: describes the reason for the COLO exit.
1679 #
1680 # Since: 3.1
1681 ##
1682 { 'struct': 'COLOStatus',
1683 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1684 'reason': 'COLOExitReason' },
1685 'if': 'CONFIG_REPLICATION' }
1686
1687 ##
1688 # @query-colo-status:
1689 #
1690 # Query COLO status while the vm is running.
1691 #
1692 # Returns: A @COLOStatus object showing the status.
1693 #
1694 # Example:
1695 #
1696 # -> { "execute": "query-colo-status" }
1697 # <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1698 #
1699 # Since: 3.1
1700 ##
1701 { 'command': 'query-colo-status',
1702 'returns': 'COLOStatus',
1703 'if': 'CONFIG_REPLICATION' }
1704
1705 ##
1706 # @migrate-recover:
1707 #
1708 # Provide a recovery migration stream URI.
1709 #
1710 # @uri: the URI to be used for the recovery of migration stream.
1711 #
1712 # Returns: nothing.
1713 #
1714 # Example:
1715 #
1716 # -> { "execute": "migrate-recover",
1717 # "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1718 # <- { "return": {} }
1719 #
1720 # Since: 3.0
1721 ##
1722 { 'command': 'migrate-recover',
1723 'data': { 'uri': 'str' },
1724 'allow-oob': true }
1725
1726 ##
1727 # @migrate-pause:
1728 #
1729 # Pause a migration. Currently it only supports postcopy.
1730 #
1731 # Returns: nothing.
1732 #
1733 # Example:
1734 #
1735 # -> { "execute": "migrate-pause" }
1736 # <- { "return": {} }
1737 #
1738 # Since: 3.0
1739 ##
1740 { 'command': 'migrate-pause', 'allow-oob': true }
1741
1742 ##
1743 # @UNPLUG_PRIMARY:
1744 #
1745 # Emitted from the source side of a migration when the migration
1746 # state is WAIT_UNPLUG. The device was unplugged by the guest
1747 # operating system. Device resources in QEMU are kept on standby to
1748 # be able to re-plug it in case of migration failure.
1749 #
1750 # @device-id: QEMU device id of the unplugged device
1751 #
1752 # Since: 4.2
1753 #
1754 # Example:
1755 #
1756 # <- { "event": "UNPLUG_PRIMARY",
1757 # "data": { "device-id": "hostdev0" },
1758 # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1759 ##
1760 { 'event': 'UNPLUG_PRIMARY',
1761 'data': { 'device-id': 'str' } }
1762
1763 ##
1764 # @DirtyRateVcpu:
1765 #
1766 # Dirty rate of vcpu.
1767 #
1768 # @id: vcpu index.
1769 #
1770 # @dirty-rate: dirty rate.
1771 #
1772 # Since: 6.2
1773 ##
1774 { 'struct': 'DirtyRateVcpu',
1775 'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1776
1777 ##
1778 # @DirtyRateStatus:
1779 #
1780 # Dirty page rate measurement status.
1781 #
1782 # @unstarted: measuring thread has not been started yet
1783 #
1784 # @measuring: measuring thread is running
1785 #
1786 # @measured: dirty page rate is measured and the results are available
1787 #
1788 # Since: 5.2
1789 ##
1790 { 'enum': 'DirtyRateStatus',
1791 'data': [ 'unstarted', 'measuring', 'measured'] }
1792
1793 ##
1794 # @DirtyRateMeasureMode:
1795 #
1796 # Method used to measure dirty page rate. Differences between
1797 # available methods are explained in @calc-dirty-rate.
1798 #
1799 # @page-sampling: use page sampling
1800 #
1801 # @dirty-ring: use dirty ring
1802 #
1803 # @dirty-bitmap: use dirty bitmap
1804 #
1805 # Since: 6.2
1806 ##
1807 { 'enum': 'DirtyRateMeasureMode',
1808 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1809
1810 ##
1811 # @DirtyRateInfo:
1812 #
1813 # Information about measured dirty page rate.
1814 #
1815 # @dirty-rate: an estimate of the dirty page rate of the VM in units
1816 # of MiB/s. Value is present only when @status is 'measured'.
1817 #
1818 # @status: current status of dirty page rate measurements
1819 #
1820 # @start-time: start time in units of seconds for calculation
1821 #
1822 # @calc-time: time period for which dirty page rate was measured
1823 # (in seconds)
1824 #
1825 # @sample-pages: number of sampled pages per GiB of guest memory.
1826 # Valid only in page-sampling mode (Since 6.1)
1827 #
1828 # @mode: mode that was used to measure dirty page rate (Since 6.2)
1829 #
1830 # @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1831 # specified (Since 6.2)
1832 #
1833 # Since: 5.2
1834 ##
1835 { 'struct': 'DirtyRateInfo',
1836 'data': {'*dirty-rate': 'int64',
1837 'status': 'DirtyRateStatus',
1838 'start-time': 'int64',
1839 'calc-time': 'int64',
1840 'sample-pages': 'uint64',
1841 'mode': 'DirtyRateMeasureMode',
1842 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1843
1844 ##
1845 # @calc-dirty-rate:
1846 #
1847 # Start measuring dirty page rate of the VM. Results can be retrieved
1848 # with @query-dirty-rate after measurements are completed.
1849 #
1850 # Dirty page rate is the number of pages changed in a given time
1851 # period expressed in MiB/s. The following methods of calculation are
1852 # available:
1853 #
1854 # 1. In page sampling mode, a random subset of pages is selected and
1855 # hashed twice: once at the beginning of measurement time period,
1856 # and once again at the end. If two hashes for some page are
1857 # different, the page is counted as changed. Since this method
1858 # relies on sampling and hashing, calculated dirty page rate is
1859 # only an estimate of its true value. Increasing @sample-pages
1860 # improves estimation quality at the cost of higher computational
1861 # overhead.
1862 #
1863 # 2. Dirty bitmap mode captures writes to memory (for example, by
1864 # temporarily revoking write access to all pages) and counts page
1865 # faults. Information about modified pages is collected into a
1866 # bitmap, where each bit corresponds to one guest page. This mode
1867 # requires that KVM accelerator property "dirty-ring-size" is *not*
1868 # set.
1869 #
1870 # 3. Dirty ring mode is similar to dirty bitmap mode, but the
1871 # information about modified pages is collected into a ring buffer.
1872 # This mode tracks page modification per each vCPU separately. It
1873 # requires that KVM accelerator property "dirty-ring-size" is set.
1874 #
1875 # @calc-time: time period in units of seconds for which dirty page rate
1876 # is calculated. Note that larger @calc-time values will
1877 # typically result in smaller dirty page rates because page
1878 # dirtying is a one-time event. Once a page is counted as dirty
1879 # during the @calc-time period, further writes to that page do not
1880 # increase the dirty page rate any further.
1881 #
1882 # @sample-pages: number of sampled pages per GiB of guest memory.
1883 # Default value is 512. For 4KiB guest pages this corresponds to
1884 # sampling ratio of 0.2%. This argument is used only in page
1885 # sampling mode. (Since 6.1)
1886 #
1887 # @mode: mechanism for tracking dirty pages. Default value is
1888 # 'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'.
1889 # (Since 6.1)
1890 #
1891 # Since: 5.2
1892 #
1893 # Example:
1894 #
1895 # -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
1896 # "sample-pages": 512} }
1897 # <- { "return": {} }
1898 ##
1899 { 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
1900 '*sample-pages': 'int',
1901 '*mode': 'DirtyRateMeasureMode'} }
1902
1903 ##
1904 # @query-dirty-rate:
1905 #
1906 # Query results of the most recent invocation of @calc-dirty-rate.
1907 #
1908 # Since: 5.2
1909 #
1910 # Examples:
1911 #
1912 # 1. Measurement is in progress:
1913 #
1914 # <- {"status": "measuring", "sample-pages": 512,
1915 # "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1916 #
1917 # 2. Measurement has been completed:
1918 #
1919 # <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
1920 # "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1921 ##
1922 { 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }
1923
1924 ##
1925 # @DirtyLimitInfo:
1926 #
1927 # Dirty page rate limit information of a virtual CPU.
1928 #
1929 # @cpu-index: index of a virtual CPU.
1930 #
1931 # @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
1932 # CPU, 0 means unlimited.
1933 #
1934 # @current-rate: current dirty page rate (MB/s) for a virtual CPU.
1935 #
1936 # Since: 7.1
1937 ##
1938 { 'struct': 'DirtyLimitInfo',
1939 'data': { 'cpu-index': 'int',
1940 'limit-rate': 'uint64',
1941 'current-rate': 'uint64' } }
1942
1943 ##
1944 # @set-vcpu-dirty-limit:
1945 #
1946 # Set the upper limit of dirty page rate for virtual CPUs.
1947 #
1948 # Requires KVM with accelerator property "dirty-ring-size" set. A
1949 # virtual CPU's dirty page rate is a measure of its memory load. To
1950 # observe dirty page rates, use @calc-dirty-rate.
1951 #
1952 # @cpu-index: index of a virtual CPU, default is all.
1953 #
1954 # @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
1955 #
1956 # Since: 7.1
1957 #
1958 # Example:
1959 #
1960 # -> {"execute": "set-vcpu-dirty-limit"}
1961 # "arguments": { "dirty-rate": 200,
1962 # "cpu-index": 1 } }
1963 # <- { "return": {} }
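#
# A second, illustrative invocation that limits all virtual CPUs
# (@cpu-index omitted, so the limit applies to every vCPU):
#
# -> {"execute": "set-vcpu-dirty-limit",
#     "arguments": { "dirty-rate": 200 } }
# <- { "return": {} }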
1964 ##
1965 { 'command': 'set-vcpu-dirty-limit',
1966 'data': { '*cpu-index': 'int',
1967 'dirty-rate': 'uint64' } }
1968
1969 ##
1970 # @cancel-vcpu-dirty-limit:
1971 #
1972 # Cancel the upper limit of dirty page rate for virtual CPUs.
1973 #
1974 # Cancel the dirty page rate limit that was previously set with the
1975 # set-vcpu-dirty-limit command. Note that this command requires dirty
1976 # ring support, the same as "set-vcpu-dirty-limit".
1977 #
1978 # @cpu-index: index of a virtual CPU, default is all.
1979 #
1980 # Since: 7.1
1981 #
1982 # Example:
1983 #
1984 # -> {"execute": "cancel-vcpu-dirty-limit"},
1985 # "arguments": { "cpu-index": 1 } }
1986 # <- { "return": {} }
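#
# A second, illustrative invocation that cancels the limit for all
# virtual CPUs (@cpu-index omitted):
#
# -> {"execute": "cancel-vcpu-dirty-limit"}
# <- { "return": {} }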
1987 ##
1988 { 'command': 'cancel-vcpu-dirty-limit',
1989 'data': { '*cpu-index': 'int'} }
1990
1991 ##
1992 # @query-vcpu-dirty-limit:
1993 #
1994 # Returns information about virtual CPU dirty page rate limits, if
1995 # any.
1996 #
1997 # Since: 7.1
1998 #
1999 # Example:
2000 #
2001 # -> {"execute": "query-vcpu-dirty-limit"}
2002 # <- {"return": [
2003 # { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2004 # { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2005 ##
2006 { 'command': 'query-vcpu-dirty-limit',
2007 'returns': [ 'DirtyLimitInfo' ] }
2008
2009 ##
2010 # @MigrationThreadInfo:
2011 #
2012 # Information about migration threads
2013 #
2014 # @name: the name of the migration thread
2015 #
2016 # @thread-id: ID of the underlying host thread
2017 #
2018 # Since: 7.2
2019 ##
2020 { 'struct': 'MigrationThreadInfo',
2021 'data': {'name': 'str',
2022 'thread-id': 'int'} }
2023
2024 ##
2025 # @query-migrationthreads:
2026 #
2027 # Returns information about the migration threads.
2028 #
2029 # The command takes no arguments.
2030 #
2031 # Returns: information about migration threads
2032 #
2033 # Since: 7.2
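#
# Example (illustrative; thread names and IDs depend on the setup):
#
# -> { "execute": "query-migrationthreads" }
# <- { "return": [
#        { "name": "live_migration", "thread-id": 166291 } ] }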
2034 ##
2035 { 'command': 'query-migrationthreads',
2036 'returns': ['MigrationThreadInfo'] }
2037
2038 ##
2039 # @snapshot-save:
2040 #
2041 # Save a VM snapshot
2042 #
2043 # @job-id: identifier for the newly created job
2044 #
2045 # @tag: name of the snapshot to create
2046 #
2047 # @vmstate: block device node name to save vmstate to
2048 #
2049 # @devices: list of block device node names to save a snapshot to
2050 #
2051 # Applications should not assume that the snapshot save is complete
2052 # when this command returns. The job commands / events must be used
2053 # to determine completion and to fetch details of any errors that
2054 # arise.
2055 #
2056 # Note that execution of the guest CPUs may be stopped during the time
2057 # it takes to save the snapshot. A future version of QEMU may ensure
2058 # CPUs are executing continuously.
2059 #
2060 # It is strongly recommended that @devices contain all writable block
2061 # device nodes if a consistent snapshot is required.
2062 #
2063 # If @tag already exists, an error will be reported
2064 #
2065 # Returns: nothing
2066 #
2067 # Example:
2068 #
2069 # -> { "execute": "snapshot-save",
2070 # "arguments": {
2071 # "job-id": "snapsave0",
2072 # "tag": "my-snap",
2073 # "vmstate": "disk0",
2074 # "devices": ["disk0", "disk1"]
2075 # }
2076 # }
2077 # <- { "return": { } }
2078 # <- {"event": "JOB_STATUS_CHANGE",
2079 # "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2080 # "data": {"status": "created", "id": "snapsave0"}}
2081 # <- {"event": "JOB_STATUS_CHANGE",
2082 # "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2083 # "data": {"status": "running", "id": "snapsave0"}}
2084 # <- {"event": "STOP",
2085 # "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2086 # <- {"event": "RESUME",
2087 # "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2088 # <- {"event": "JOB_STATUS_CHANGE",
2089 # "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2090 # "data": {"status": "waiting", "id": "snapsave0"}}
2091 # <- {"event": "JOB_STATUS_CHANGE",
2092 # "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2093 # "data": {"status": "pending", "id": "snapsave0"}}
2094 # <- {"event": "JOB_STATUS_CHANGE",
2095 # "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2096 # "data": {"status": "concluded", "id": "snapsave0"}}
2097 # -> {"execute": "query-jobs"}
2098 # <- {"return": [{"current-progress": 1,
2099 # "status": "concluded",
2100 # "total-progress": 1,
2101 # "type": "snapshot-save",
2102 # "id": "snapsave0"}]}
2103 #
2104 # Since: 6.0
2105 ##
2106 { 'command': 'snapshot-save',
2107 'data': { 'job-id': 'str',
2108 'tag': 'str',
2109 'vmstate': 'str',
2110 'devices': ['str'] } }
2111
2112 ##
2113 # @snapshot-load:
2114 #
2115 # Load a VM snapshot
2116 #
2117 # @job-id: identifier for the newly created job
2118 #
2119 # @tag: name of the snapshot to load.
2120 #
2121 # @vmstate: block device node name to load vmstate from
2122 #
2123 # @devices: list of block device node names to load a snapshot from
2124 #
2125 # Applications should not assume that the snapshot load is complete
2126 # when this command returns. The job commands / events must be used
2127 # to determine completion and to fetch details of any errors that
2128 # arise.
2129 #
2130 # Note that execution of the guest CPUs will be stopped during the
2131 # time it takes to load the snapshot.
2132 #
2133 # It is strongly recommended that @devices contain all writable block
2134 # device nodes that can have changed since the original @snapshot-save
2135 # command execution.
2136 #
2137 # Returns: nothing
2138 #
2139 # Example:
2140 #
2141 # -> { "execute": "snapshot-load",
2142 # "arguments": {
2143 # "job-id": "snapload0",
2144 # "tag": "my-snap",
2145 # "vmstate": "disk0",
2146 # "devices": ["disk0", "disk1"]
2147 # }
2148 # }
2149 # <- { "return": { } }
2150 # <- {"event": "JOB_STATUS_CHANGE",
2151 # "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2152 # "data": {"status": "created", "id": "snapload0"}}
2153 # <- {"event": "JOB_STATUS_CHANGE",
2154 # "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2155 # "data": {"status": "running", "id": "snapload0"}}
2156 # <- {"event": "STOP",
2157 # "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2158 # <- {"event": "RESUME",
2159 # "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2160 # <- {"event": "JOB_STATUS_CHANGE",
2161 # "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2162 # "data": {"status": "waiting", "id": "snapload0"}}
2163 # <- {"event": "JOB_STATUS_CHANGE",
2164 # "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2165 # "data": {"status": "pending", "id": "snapload0"}}
2166 # <- {"event": "JOB_STATUS_CHANGE",
2167 # "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2168 # "data": {"status": "concluded", "id": "snapload0"}}
2169 # -> {"execute": "query-jobs"}
2170 # <- {"return": [{"current-progress": 1,
2171 # "status": "concluded",
2172 # "total-progress": 1,
2173 # "type": "snapshot-load",
2174 # "id": "snapload0"}]}
2175 #
2176 # Since: 6.0
2177 ##
2178 { 'command': 'snapshot-load',
2179 'data': { 'job-id': 'str',
2180 'tag': 'str',
2181 'vmstate': 'str',
2182 'devices': ['str'] } }
2183
2184 ##
2185 # @snapshot-delete:
2186 #
2187 # Delete a VM snapshot
2188 #
2189 # @job-id: identifier for the newly created job
2190 #
2191 # @tag: name of the snapshot to delete.
2192 #
2193 # @devices: list of block device node names to delete a snapshot from
2194 #
2195 # Applications should not assume that the snapshot delete is complete
2196 # when this command returns. The job commands / events must be used
2197 # to determine completion and to fetch details of any errors that
2198 # arise.
2199 #
2200 # Returns: nothing
2201 #
2202 # Example:
2203 #
2204 # -> { "execute": "snapshot-delete",
2205 # "arguments": {
2206 # "job-id": "snapdelete0",
2207 # "tag": "my-snap",
2208 # "devices": ["disk0", "disk1"]
2209 # }
2210 # }
2211 # <- { "return": { } }
2212 # <- {"event": "JOB_STATUS_CHANGE",
2213 # "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2214 # "data": {"status": "created", "id": "snapdelete0"}}
2215 # <- {"event": "JOB_STATUS_CHANGE",
2216 # "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2217 # "data": {"status": "running", "id": "snapdelete0"}}
2218 # <- {"event": "JOB_STATUS_CHANGE",
2219 # "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2220 # "data": {"status": "waiting", "id": "snapdelete0"}}
2221 # <- {"event": "JOB_STATUS_CHANGE",
2222 # "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2223 # "data": {"status": "pending", "id": "snapdelete0"}}
2224 # <- {"event": "JOB_STATUS_CHANGE",
2225 # "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2226 # "data": {"status": "concluded", "id": "snapdelete0"}}
2227 # -> {"execute": "query-jobs"}
2228 # <- {"return": [{"current-progress": 1,
2229 # "status": "concluded",
2230 # "total-progress": 1,
2231 # "type": "snapshot-delete",
2232 # "id": "snapdelete0"}]}
2233 #
2234 # Since: 6.0
2235 ##
2236 { 'command': 'snapshot-delete',
2237 'data': { 'job-id': 'str',
2238 'tag': 'str',
2239 'devices': ['str'] } }