]> git.proxmox.com Git - mirror_qemu.git/blob - qapi/migration.json
net: add initial support for AF_XDP network backend
[mirror_qemu.git] / qapi / migration.json
1 # -*- Mode: Python -*-
2 # vim: filetype=python
3 #
4
5 ##
6 # = Migration
7 ##
8
9 { 'include': 'common.json' }
10 { 'include': 'sockets.json' }
11
12 ##
13 # @MigrationStats:
14 #
15 # Detailed migration status.
16 #
17 # @transferred: amount of bytes already transferred to the target VM
18 #
19 # @remaining: amount of bytes remaining to be transferred to the
20 # target VM
21 #
22 # @total: total amount of bytes involved in the migration process
23 #
24 # @duplicate: number of duplicate (zero) pages (since 1.2)
25 #
26 # @skipped: number of skipped zero pages. Always zero, only provided for
27 # compatibility (since 1.5)
28 #
29 # @normal: number of normal pages (since 1.2)
30 #
31 # @normal-bytes: number of normal bytes sent (since 1.2)
32 #
33 # @dirty-pages-rate: number of pages dirtied by second by the guest
34 # (since 1.3)
35 #
36 # @mbps: throughput in megabits/sec. (since 1.6)
37 #
38 # @dirty-sync-count: number of times that dirty ram was synchronized
39 # (since 2.1)
40 #
41 # @postcopy-requests: The number of page requests received from the
42 # destination (since 2.7)
43 #
44 # @page-size: The number of bytes per page for the various page-based
45 # statistics (since 2.10)
46 #
47 # @multifd-bytes: The number of bytes sent through multifd (since 3.0)
48 #
49 # @pages-per-second: the number of memory pages transferred per second
50 # (Since 4.0)
51 #
52 # @precopy-bytes: The number of bytes sent in the pre-copy phase
53 # (since 7.0).
54 #
55 # @downtime-bytes: The number of bytes sent while the guest is paused
56 # (since 7.0).
57 #
58 # @postcopy-bytes: The number of bytes sent during the post-copy phase
59 # (since 7.0).
60 #
61 # @dirty-sync-missed-zero-copy: Number of times dirty RAM
62 # synchronization could not avoid copying dirty pages. This is
63 # between 0 and @dirty-sync-count * @multifd-channels. (since
64 # 7.1)
65 #
66 # Features:
67 #
68 # @deprecated: Member @skipped is always zero since 1.5.3
69 #
70 # Since: 0.14
71 #
72 ##
73 { 'struct': 'MigrationStats',
74 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
75 'duplicate': 'int',
76 'skipped': { 'type': 'int', 'features': ['deprecated'] },
77 'normal': 'int',
78 'normal-bytes': 'int', 'dirty-pages-rate': 'int',
79 'mbps': 'number', 'dirty-sync-count': 'int',
80 'postcopy-requests': 'int', 'page-size': 'int',
81 'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
82 'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
83 'postcopy-bytes': 'uint64',
84 'dirty-sync-missed-zero-copy': 'uint64' } }
85
86 ##
87 # @XBZRLECacheStats:
88 #
89 # Detailed XBZRLE migration cache statistics
90 #
91 # @cache-size: XBZRLE cache size
92 #
93 # @bytes: amount of bytes already transferred to the target VM
94 #
95 # @pages: amount of pages transferred to the target VM
96 #
97 # @cache-miss: number of cache miss
98 #
99 # @cache-miss-rate: rate of cache miss (since 2.1)
100 #
101 # @encoding-rate: rate of encoded bytes (since 5.1)
102 #
103 # @overflow: number of overflows
104 #
105 # Since: 1.2
106 ##
107 { 'struct': 'XBZRLECacheStats',
108 'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
109 'cache-miss': 'int', 'cache-miss-rate': 'number',
110 'encoding-rate': 'number', 'overflow': 'int' } }
111
112 ##
113 # @CompressionStats:
114 #
115 # Detailed migration compression statistics
116 #
117 # @pages: amount of pages compressed and transferred to the target VM
118 #
119 # @busy: count of times that no free thread was available to compress
120 # data
121 #
122 # @busy-rate: rate of thread busy
123 #
124 # @compressed-size: amount of bytes after compression
125 #
126 # @compression-rate: rate of compressed size
127 #
128 # Since: 3.1
129 ##
130 { 'struct': 'CompressionStats',
131 'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
132 'compressed-size': 'int', 'compression-rate': 'number' } }
133
134 ##
135 # @MigrationStatus:
136 #
137 # An enumeration of migration status.
138 #
139 # @none: no migration has ever happened.
140 #
141 # @setup: migration process has been initiated.
142 #
143 # @cancelling: in the process of cancelling migration.
144 #
145 # @cancelled: cancelling migration is finished.
146 #
147 # @active: in the process of doing migration.
148 #
149 # @postcopy-active: like active, but now in postcopy mode. (since
150 # 2.5)
151 #
152 # @postcopy-paused: during postcopy but paused. (since 3.0)
153 #
154 # @postcopy-recover: trying to recover from a paused postcopy. (since
155 # 3.0)
156 #
157 # @completed: migration is finished.
158 #
159 # @failed: some error occurred during migration process.
160 #
161 # @colo: VM is in the process of fault tolerance, VM can not get into
162 # this state unless colo capability is enabled for migration.
163 # (since 2.8)
164 #
165 # @pre-switchover: Paused before device serialisation. (since 2.11)
166 #
167 # @device: During device serialisation when pause-before-switchover is
168 # enabled (since 2.11)
169 #
170 # @wait-unplug: wait for device unplug request by guest OS to be
171 # completed. (since 4.2)
172 #
173 # Since: 2.3
174 ##
175 { 'enum': 'MigrationStatus',
176 'data': [ 'none', 'setup', 'cancelling', 'cancelled',
177 'active', 'postcopy-active', 'postcopy-paused',
178 'postcopy-recover', 'completed', 'failed', 'colo',
179 'pre-switchover', 'device', 'wait-unplug' ] }

180 ##
181 # @VfioStats:
182 #
183 # Detailed VFIO devices migration statistics
184 #
185 # @transferred: amount of bytes transferred to the target VM by VFIO
186 # devices
187 #
188 # Since: 5.2
189 ##
190 { 'struct': 'VfioStats',
191 'data': {'transferred': 'int' } }
192
193 ##
194 # @MigrationInfo:
195 #
196 # Information about current migration process.
197 #
198 # @status: @MigrationStatus describing the current migration status.
199 # If this field is not returned, no migration process has been
200 # initiated
201 #
202 # @ram: @MigrationStats containing detailed migration status, only
203 # returned if status is 'active' or 'completed' (since 1.2)
204 #
205 # @disk: @MigrationStats containing detailed disk migration status,
206 # only returned if status is 'active' and it is a block migration
207 #
208 # @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
209 # migration statistics, only returned if XBZRLE feature is on and
210 # status is 'active' or 'completed' (since 1.2)
211 #
212 # @total-time: total amount of milliseconds since migration started.
213 # If migration has ended, it returns the total migration time.
214 # (since 1.2)
215 #
216 # @downtime: only present when migration finishes correctly; total
217 # downtime in milliseconds for the guest. (since 1.3)
218 #
219 # @expected-downtime: only present while migration is active;
220 # expected downtime in milliseconds for the guest in last walk of
221 # the dirty bitmap. (since 1.3)
222 #
223 # @setup-time: amount of setup time in milliseconds *before* the
224 # iterations begin but *after* the QMP command is issued. This is
225 # designed to provide an accounting of any activities (such as
226 # RDMA pinning) which may be expensive, but do not actually occur
227 # during the iterative migration rounds themselves. (since 1.6)
228 #
229 # @cpu-throttle-percentage: percentage of time guest cpus are being
230 # throttled during auto-converge. This is only present when
231 # auto-converge has started throttling guest cpus. (Since 2.7)
232 #
233 # @error-desc: the human readable error description string, when
234 # @status is 'failed'. Clients should not attempt to parse the
235 # error strings. (Since 2.7)
236 #
237 # @postcopy-blocktime: total time when all vCPU were blocked during
238 # postcopy live migration. This is only present when the
239 # postcopy-blocktime migration capability is enabled. (Since 3.0)
240 #
241 # @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
242 # This is only present when the postcopy-blocktime migration
243 # capability is enabled. (Since 3.0)
244 #
245 # @compression: migration compression statistics, only returned if
246 # compression feature is on and status is 'active' or 'completed'
247 # (Since 3.1)
248 #
249 # @socket-address: Only used for tcp, to know what the real port is
250 # (Since 4.0)
251 #
252 # @vfio: @VfioStats containing detailed VFIO devices migration
253 # statistics, only returned if VFIO device is present, migration
254 # is supported by all VFIO devices and status is 'active' or
255 # 'completed' (since 5.2)
256 #
257 # @blocked-reasons: A list of reasons an outgoing migration is
258 # blocked. Present and non-empty when migration is blocked.
259 # (since 6.0)
260 #
261 # @dirty-limit-throttle-time-per-round: Maximum throttle time
262 # (in microseconds) of virtual CPUs each dirty ring full round,
263 # which shows how MigrationCapability dirty-limit affects the
264 # guest during live migration. (Since 8.1)
265 #
266 # @dirty-limit-ring-full-time: Estimated average dirty ring full time
267 # (in microseconds) for each dirty ring full round. The value
268 # equals the dirty ring memory size divided by the average dirty
269 # page rate of the virtual CPU, which can be used to observe the
270 # average memory load of the virtual CPU indirectly. Note that
271 # zero means guest doesn't dirty memory. (Since 8.1)
272 #
273 # Since: 0.14
274 ##
275 { 'struct': 'MigrationInfo',
276 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
277 '*disk': 'MigrationStats',
278 '*vfio': 'VfioStats',
279 '*xbzrle-cache': 'XBZRLECacheStats',
280 '*total-time': 'int',
281 '*expected-downtime': 'int',
282 '*downtime': 'int',
283 '*setup-time': 'int',
284 '*cpu-throttle-percentage': 'int',
285 '*error-desc': 'str',
286 '*blocked-reasons': ['str'],
287 '*postcopy-blocktime': 'uint32',
288 '*postcopy-vcpu-blocktime': ['uint32'],
289 '*compression': 'CompressionStats',
290 '*socket-address': ['SocketAddress'],
291 '*dirty-limit-throttle-time-per-round': 'uint64',
292 '*dirty-limit-ring-full-time': 'uint64'} }
293
294 ##
295 # @query-migrate:
296 #
297 # Returns information about current migration process. If migration
298 # is active there will be another json-object with RAM migration
299 # status and if block migration is active another one with block
300 # migration status.
301 #
302 # Returns: @MigrationInfo
303 #
304 # Since: 0.14
305 #
306 # Examples:
307 #
308 # 1. Before the first migration
309 #
310 # -> { "execute": "query-migrate" }
311 # <- { "return": {} }
312 #
313 # 2. Migration is done and has succeeded
314 #
315 # -> { "execute": "query-migrate" }
316 # <- { "return": {
317 # "status": "completed",
318 # "total-time":12345,
319 # "setup-time":12345,
320 # "downtime":12345,
321 # "ram":{
322 # "transferred":123,
323 # "remaining":123,
324 # "total":246,
325 # "duplicate":123,
326 # "normal":123,
327 # "normal-bytes":123456,
328 # "dirty-sync-count":15
329 # }
330 # }
331 # }
332 #
333 # 3. Migration is done and has failed
334 #
335 # -> { "execute": "query-migrate" }
336 # <- { "return": { "status": "failed" } }
337 #
338 # 4. Migration is being performed and is not a block migration:
339 #
340 # -> { "execute": "query-migrate" }
341 # <- {
342 # "return":{
343 # "status":"active",
344 # "total-time":12345,
345 # "setup-time":12345,
346 # "expected-downtime":12345,
347 # "ram":{
348 # "transferred":123,
349 # "remaining":123,
350 # "total":246,
351 # "duplicate":123,
352 # "normal":123,
353 # "normal-bytes":123456,
354 # "dirty-sync-count":15
355 # }
356 # }
357 # }
358 #
359 # 5. Migration is being performed and is a block migration:
360 #
361 # -> { "execute": "query-migrate" }
362 # <- {
363 # "return":{
364 # "status":"active",
365 # "total-time":12345,
366 # "setup-time":12345,
367 # "expected-downtime":12345,
368 # "ram":{
369 # "total":1057024,
370 # "remaining":1053304,
371 # "transferred":3720,
372 # "duplicate":123,
373 # "normal":123,
374 # "normal-bytes":123456,
375 # "dirty-sync-count":15
376 # },
377 # "disk":{
378 # "total":20971520,
379 # "remaining":20880384,
380 # "transferred":91136
381 # }
382 # }
383 # }
384 #
385 # 6. Migration is being performed and XBZRLE is active:
386 #
387 # -> { "execute": "query-migrate" }
388 # <- {
389 # "return":{
390 # "status":"active",
391 # "total-time":12345,
392 # "setup-time":12345,
393 # "expected-downtime":12345,
394 # "ram":{
395 # "total":1057024,
396 # "remaining":1053304,
397 # "transferred":3720,
398 # "duplicate":10,
399 # "normal":3333,
400 # "normal-bytes":3412992,
401 # "dirty-sync-count":15
402 # },
403 # "xbzrle-cache":{
404 # "cache-size":67108864,
405 # "bytes":20971520,
406 # "pages":2444343,
407 # "cache-miss":2244,
408 # "cache-miss-rate":0.123,
409 # "encoding-rate":80.1,
410 # "overflow":34434
411 # }
412 # }
413 # }
414 ##
415 { 'command': 'query-migrate', 'returns': 'MigrationInfo' }
416
417 ##
418 # @MigrationCapability:
419 #
420 # Migration capabilities enumeration
421 #
422 # @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
423 # Encoding). This feature allows us to minimize migration traffic
424 # for certain work loads, by sending compressed difference of the
425 # pages
426 #
427 # @rdma-pin-all: Controls whether or not the entire VM memory
428 # footprint is mlock()'d on demand or all at once. Refer to
429 # docs/rdma.txt for usage. Disabled by default. (since 2.0)
430 #
431 # @zero-blocks: During storage migration encode blocks of zeroes
432 # efficiently. This essentially saves 1MB of zeroes per block on
433 # the wire. Enabling requires source and target VM to support
434 # this feature. To enable it is sufficient to enable the
435 # capability on the source VM. The feature is disabled by default.
436 # (since 1.6)
437 #
438 # @compress: Use multiple compression threads to accelerate live
439 # migration. This feature can help to reduce the migration
440 # traffic, by sending compressed pages. Please note that if
441 # compress and xbzrle are both on, compress only takes effect in
442 # the ram bulk stage, after that, it will be disabled and only
443 # xbzrle takes effect, this can help to minimize migration
444 # traffic. The feature is disabled by default. (since 2.4)
445 #
446 # @events: generate events for each migration state change
447 # (since 2.4)
448 #
449 # @auto-converge: If enabled, QEMU will automatically throttle down
450 # the guest to speed up convergence of RAM migration. (since 1.6)
451 #
452 # @postcopy-ram: Start executing on the migration target before all of
453 # RAM has been migrated, pulling the remaining pages along as
454 # needed. The capacity must have the same setting on both source
455 # and target or migration will not even start. NOTE: If the
456 # migration fails during postcopy the VM will fail. (since 2.6)
457 #
458 # @x-colo: If enabled, migration will never end, and the state of the
459 # VM on the primary side will be migrated continuously to the VM
460 # on secondary side, this process is called COarse-Grain LOck
461 # Stepping (COLO) for Non-stop Service. (since 2.8)
462 #
463 # @release-ram: if enabled, qemu will free the migrated ram pages on
464 # the source during postcopy-ram migration. (since 2.9)
465 #
466 # @block: If enabled, QEMU will also migrate the contents of all block
467 # devices. Default is disabled. A possible alternative uses
468 # mirror jobs to a builtin NBD server on the destination, which
469 # offers more flexibility. (Since 2.10)
470 #
471 # @return-path: If enabled, migration will use the return path even
472 # for precopy. (since 2.10)
473 #
474 # @pause-before-switchover: Pause outgoing migration before
475 # serialising device state and before disabling block IO (since
476 # 2.11)
477 #
478 # @multifd: Use more than one fd for migration (since 4.0)
479 #
480 # @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
481 # (since 2.12)
482 #
483 # @postcopy-blocktime: Calculate downtime for postcopy live migration
484 # (since 3.0)
485 #
486 # @late-block-activate: If enabled, the destination will not activate
487 # block devices (and thus take locks) immediately at the end of
488 # migration. (since 3.0)
489 #
490 # @x-ignore-shared: If enabled, QEMU will not migrate shared memory
491 # that is accessible on the destination machine. (since 4.0)
492 #
493 # @validate-uuid: Send the UUID of the source to allow the destination
494 # to ensure it is the same. (since 4.2)
495 #
496 # @background-snapshot: If enabled, the migration stream will be a
497 # snapshot of the VM exactly at the point when the migration
498 # procedure starts. The VM RAM is saved with running VM. (since
499 # 6.0)
500 #
501 # @zero-copy-send: Controls behavior on sending memory pages on
502 # migration. When true, enables a zero-copy mechanism for sending
503 # memory pages, if host supports it. Requires that QEMU be
504 # permitted to use locked memory for guest RAM pages. (since 7.1)
505 #
506 # @postcopy-preempt: If enabled, the migration process will allow
507 # postcopy requests to preempt precopy stream, so postcopy
508 # requests will be handled faster. This is a performance feature
509 # and should not affect the correctness of postcopy migration.
510 # (since 7.1)
511 #
512 # @switchover-ack: If enabled, migration will not stop the source VM
513 # and complete the migration until an ACK is received from the
514 # destination that it's OK to do so. Exactly when this ACK is
515 # sent depends on the migrated devices that use this feature. For
516 # example, a device can use it to make sure some of its data is
517 # sent and loaded in the destination before doing switchover.
518 # This can reduce downtime if devices that support this capability
519 # are present. 'return-path' capability must be enabled to use
520 # it. (since 8.1)
521 #
522 # @dirty-limit: If enabled, migration will throttle vCPUs as needed to
523 # keep their dirty page rate within @vcpu-dirty-limit. This can
524 # improve responsiveness of large guests during live migration,
525 # and can result in more stable read performance. Requires KVM
526 # with accelerator property "dirty-ring-size" set. (Since 8.1)
527 #
528 # Features:
529 #
530 # @unstable: Members @x-colo and @x-ignore-shared are experimental.
531 #
532 # Since: 1.2
533 ##
534 { 'enum': 'MigrationCapability',
535 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
536 'compress', 'events', 'postcopy-ram',
537 { 'name': 'x-colo', 'features': [ 'unstable' ] },
538 'release-ram',
539 'block', 'return-path', 'pause-before-switchover', 'multifd',
540 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
541 { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
542 'validate-uuid', 'background-snapshot',
543 'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
544 'dirty-limit'] }
545
546 ##
547 # @MigrationCapabilityStatus:
548 #
549 # Migration capability information
550 #
551 # @capability: capability enum
552 #
553 # @state: capability state bool
554 #
555 # Since: 1.2
556 ##
557 { 'struct': 'MigrationCapabilityStatus',
558 'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
559
560 ##
561 # @migrate-set-capabilities:
562 #
563 # Enable/Disable the following migration capabilities (like xbzrle)
564 #
565 # @capabilities: json array of capability modifications to make
566 #
567 # Since: 1.2
568 #
569 # Example:
570 #
571 # -> { "execute": "migrate-set-capabilities", "arguments":
572 # { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
573 # <- { "return": {} }
574 ##
575 { 'command': 'migrate-set-capabilities',
576 'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
577
578 ##
579 # @query-migrate-capabilities:
580 #
581 # Returns information about the current migration capabilities status
582 #
583 # Returns: a list of @MigrationCapabilityStatus
584 #
585 # Since: 1.2
586 #
587 # Example:
588 #
589 # -> { "execute": "query-migrate-capabilities" }
590 # <- { "return": [
591 # {"state": false, "capability": "xbzrle"},
592 # {"state": false, "capability": "rdma-pin-all"},
593 # {"state": false, "capability": "auto-converge"},
594 # {"state": false, "capability": "zero-blocks"},
595 # {"state": false, "capability": "compress"},
596 # {"state": true, "capability": "events"},
597 # {"state": false, "capability": "postcopy-ram"},
598 # {"state": false, "capability": "x-colo"}
599 # ]}
600 ##
601 { 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}
602
603 ##
604 # @MultiFDCompression:
605 #
606 # An enumeration of multifd compression methods.
607 #
608 # @none: no compression.
609 #
610 # @zlib: use zlib compression method.
611 #
612 # @zstd: use zstd compression method.
613 #
614 # Since: 5.0
615 ##
616 { 'enum': 'MultiFDCompression',
617 'data': [ 'none', 'zlib',
618 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
619
620 ##
621 # @BitmapMigrationBitmapAliasTransform:
622 #
623 # @persistent: If present, the bitmap will be made persistent or
624 # transient depending on this parameter.
625 #
626 # Since: 6.0
627 ##
628 { 'struct': 'BitmapMigrationBitmapAliasTransform',
629 'data': {
630 '*persistent': 'bool'
631 } }
632
633 ##
634 # @BitmapMigrationBitmapAlias:
635 #
636 # @name: The name of the bitmap.
637 #
638 # @alias: An alias name for migration (for example the bitmap name on
639 # the opposite site).
640 #
641 # @transform: Allows the modification of the migrated bitmap. (since
642 # 6.0)
643 #
644 # Since: 5.2
645 ##
646 { 'struct': 'BitmapMigrationBitmapAlias',
647 'data': {
648 'name': 'str',
649 'alias': 'str',
650 '*transform': 'BitmapMigrationBitmapAliasTransform'
651 } }
652
653 ##
654 # @BitmapMigrationNodeAlias:
655 #
656 # Maps a block node name and the bitmaps it has to aliases for dirty
657 # bitmap migration.
658 #
659 # @node-name: A block node name.
660 #
661 # @alias: An alias block node name for migration (for example the node
662 # name on the opposite site).
663 #
664 # @bitmaps: Mappings for the bitmaps on this node.
665 #
666 # Since: 5.2
667 ##
668 { 'struct': 'BitmapMigrationNodeAlias',
669 'data': {
670 'node-name': 'str',
671 'alias': 'str',
672 'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
673 } }
674
675 ##
676 # @MigrationParameter:
677 #
678 # Migration parameters enumeration
679 #
680 # @announce-initial: Initial delay (in milliseconds) before sending
681 # the first announce (Since 4.0)
682 #
683 # @announce-max: Maximum delay (in milliseconds) between packets in
684 # the announcement (Since 4.0)
685 #
686 # @announce-rounds: Number of self-announce packets sent after
687 # migration (Since 4.0)
688 #
689 # @announce-step: Increase in delay (in milliseconds) between
690 # subsequent packets in the announcement (Since 4.0)
691 #
692 # @compress-level: Set the compression level to be used in live
693 # migration, the compression level is an integer between 0 and 9,
694 # where 0 means no compression, 1 means the best compression
695 # speed, and 9 means best compression ratio which will consume
696 # more CPU.
697 #
698 # @compress-threads: Set compression thread count to be used in live
699 # migration, the compression thread count is an integer between 1
700 # and 255.
701 #
702 # @compress-wait-thread: Controls behavior when all compression
703 # threads are currently busy. If true (default), wait for a free
704 # compression thread to become available; otherwise, send the page
705 # uncompressed. (Since 3.1)
706 #
707 # @decompress-threads: Set decompression thread count to be used in
708 # live migration, the decompression thread count is an integer
709 # between 1 and 255. Usually, decompression is at least 4 times as
710 # fast as compression, so setting decompress-threads to about 1/4
711 # of compress-threads is adequate.
712 #
713 # @throttle-trigger-threshold: The ratio of bytes_dirty_period and
714 # bytes_xfer_period to trigger throttling. It is expressed as
715 # percentage. The default value is 50. (Since 5.0)
716 #
717 # @cpu-throttle-initial: Initial percentage of time guest cpus are
718 # throttled when migration auto-converge is activated. The
719 # default value is 20. (Since 2.7)
720 #
721 # @cpu-throttle-increment: throttle percentage increase each time
722 # auto-converge detects that migration is not making progress.
723 # The default value is 10. (Since 2.7)
724 #
725 # @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
726 # At the tail stage of throttling, the Guest is very sensitive to
727 # CPU percentage while the @cpu-throttle-increment is excessive
728 # usually at tail stage. If this parameter is true, we will
729 # compute the ideal CPU percentage used by the Guest, which may
730 # exactly make the dirty rate match the dirty rate threshold.
731 # Then we will choose a smaller throttle increment between the one
732 # specified by @cpu-throttle-increment and the one generated by
733 # ideal CPU percentage. Therefore, it is compatible to
734 # traditional throttling, meanwhile the throttle increment won't
735 # be excessive at tail stage. The default value is false. (Since
736 # 5.1)
737 #
738 # @tls-creds: ID of the 'tls-creds' object that provides credentials
739 # for establishing a TLS connection over the migration data
740 # channel. On the outgoing side of the migration, the credentials
741 # must be for a 'client' endpoint, while for the incoming side the
742 # credentials must be for a 'server' endpoint. Setting this will
743 # enable TLS for all migrations. The default is unset, resulting
744 # in unsecured migration at the QEMU level. (Since 2.7)
745 #
746 # @tls-hostname: hostname of the target host for the migration. This
747 # is required when using x509 based TLS credentials and the
748 # migration URI does not already include a hostname. For example
749 # if using fd: or exec: based migration, the hostname must be
750 # provided so that the server's x509 certificate identity can be
751 # validated. (Since 2.7)
752 #
753 # @tls-authz: ID of the 'authz' object subclass that provides access
754 # control checking of the TLS x509 certificate distinguished name.
755 # This object is only resolved at time of use, so can be deleted
756 # and recreated on the fly while the migration server is active.
757 # If missing, it will default to denying access (Since 4.0)
758 #
759 # @max-bandwidth: to set maximum speed for migration. maximum speed
760 # in bytes per second. (Since 2.8)
761 #
762 # @downtime-limit: set maximum tolerated downtime for migration.
763 # maximum downtime in milliseconds (Since 2.8)
764 #
765 # @x-checkpoint-delay: The delay time (in ms) between two COLO
766 # checkpoints in periodic mode. (Since 2.8)
767 #
768 # @block-incremental: Affects how much storage is migrated when the
769 # block migration capability is enabled. When false, the entire
770 # storage backing chain is migrated into a flattened image at the
771 # destination; when true, only the active qcow2 layer is migrated
772 # and the destination must already have access to the same backing
773 # chain as was used on the source. (since 2.10)
774 #
775 # @multifd-channels: Number of channels used to migrate data in
776 # parallel. This is the same number as the number of sockets
777 # used for migration. The default value is 2 (since 4.0)
778 #
779 # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
780 # needs to be a multiple of the target page size and a power of 2
781 # (Since 2.11)
782 #
783 # @max-postcopy-bandwidth: Background transfer bandwidth during
784 # postcopy. Defaults to 0 (unlimited). In bytes per second.
785 # (Since 3.0)
786 #
787 # @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
788 # (Since 3.1)
789 #
790 # @multifd-compression: Which compression method to use. Defaults to
791 # none. (Since 5.0)
792 #
793 # @multifd-zlib-level: Set the compression level to be used in live
794 # migration, the compression level is an integer between 0 and 9,
795 # where 0 means no compression, 1 means the best compression
796 # speed, and 9 means best compression ratio which will consume
797 # more CPU. Defaults to 1. (Since 5.0)
798 #
799 # @multifd-zstd-level: Set the compression level to be used in live
800 # migration, the compression level is an integer between 0 and 20,
801 # where 0 means no compression, 1 means the best compression
802 # speed, and 20 means best compression ratio which will consume
803 # more CPU. Defaults to 1. (Since 5.0)
804 #
805 # @block-bitmap-mapping: Maps block nodes and bitmaps on them to
806 # aliases for the purpose of dirty bitmap migration. Such aliases
807 # may for example be the corresponding names on the opposite site.
808 # The mapping must be one-to-one, but not necessarily complete: On
809 # the source, unmapped bitmaps and all bitmaps on unmapped nodes
810 # will be ignored. On the destination, encountering an unmapped
811 # alias in the incoming migration stream will result in a report,
812 # and all further bitmap migration data will then be discarded.
813 # Note that the destination does not know about bitmaps it does
814 # not receive, so there is no limitation or requirement regarding
815 # the number of bitmaps received, or how they are named, or on
816 # which nodes they are placed. By default (when this parameter
817 # has never been set), bitmap names are mapped to themselves.
818 # Nodes are mapped to their block device name if there is one, and
819 # to their node name otherwise. (Since 5.2)
820 #
821 # @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
822 # limit during live migration. Should be in the range 1 to 1000ms.
823 # Defaults to 1000ms. (Since 8.1)
824 #
825 # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
826 # Defaults to 1. (Since 8.1)
827 #
828 # Features:
829 #
830 # @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
831 # are experimental.
832 #
833 # Since: 2.4
834 ##
835 { 'enum': 'MigrationParameter',
836 'data': ['announce-initial', 'announce-max',
837 'announce-rounds', 'announce-step',
838 'compress-level', 'compress-threads', 'decompress-threads',
839 'compress-wait-thread', 'throttle-trigger-threshold',
840 'cpu-throttle-initial', 'cpu-throttle-increment',
841 'cpu-throttle-tailslow',
842 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
843 'downtime-limit',
844 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
845 'block-incremental',
846 'multifd-channels',
847 'xbzrle-cache-size', 'max-postcopy-bandwidth',
848 'max-cpu-throttle', 'multifd-compression',
849 'multifd-zlib-level', 'multifd-zstd-level',
850 'block-bitmap-mapping',
851 { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
852 'vcpu-dirty-limit'] }
853
854 ##
855 # @MigrateSetParameters:
856 #
857 # @announce-initial: Initial delay (in milliseconds) before sending
858 # the first announce (Since 4.0)
859 #
860 # @announce-max: Maximum delay (in milliseconds) between packets in
861 # the announcement (Since 4.0)
862 #
863 # @announce-rounds: Number of self-announce packets sent after
864 # migration (Since 4.0)
865 #
866 # @announce-step: Increase in delay (in milliseconds) between
867 # subsequent packets in the announcement (Since 4.0)
868 #
869 # @compress-level: compression level
870 #
871 # @compress-threads: compression thread count
872 #
873 # @compress-wait-thread: Controls behavior when all compression
874 # threads are currently busy. If true (default), wait for a free
875 # compression thread to become available; otherwise, send the page
876 # uncompressed. (Since 3.1)
877 #
878 # @decompress-threads: decompression thread count
879 #
880 # @throttle-trigger-threshold: The ratio of bytes_dirty_period and
881 # bytes_xfer_period to trigger throttling. It is expressed as
882 # percentage. The default value is 50. (Since 5.0)
883 #
884 # @cpu-throttle-initial: Initial percentage of time guest cpus are
885 # throttled when migration auto-converge is activated. The
886 # default value is 20. (Since 2.7)
887 #
888 # @cpu-throttle-increment: throttle percentage increase each time
889 # auto-converge detects that migration is not making progress.
890 # The default value is 10. (Since 2.7)
891 #
892 # @cpu-throttle-tailslow: Make CPU throttling slower at the tail
893 #     stage.  At the tail stage of throttling, the Guest is very
894 #     sensitive to CPU percentage while the @cpu-throttle-increment is
895 #     usually excessive at that stage.  If this parameter is true, we
896 #     will compute the ideal CPU percentage used by the Guest, which
897 #     may exactly make the dirty rate match the dirty rate threshold.
898 #     Then we will choose a smaller throttle increment between the one
899 #     specified by @cpu-throttle-increment and the one generated by
900 #     the ideal CPU percentage.  Therefore, it is compatible with
901 #     traditional throttling, while the throttle increment won't be
902 #     excessive at the tail stage.  The default value is false.
903 #     (Since 5.1)
904 #
905 # @tls-creds: ID of the 'tls-creds' object that provides credentials
906 # for establishing a TLS connection over the migration data
907 # channel. On the outgoing side of the migration, the credentials
908 # must be for a 'client' endpoint, while for the incoming side the
909 # credentials must be for a 'server' endpoint. Setting this to a
910 # non-empty string enables TLS for all migrations. An empty
911 # string means that QEMU will use plain text mode for migration,
912 # rather than TLS (Since 2.9) Previously (since 2.7), this was
913 # reported by omitting tls-creds instead.
914 #
915 # @tls-hostname: hostname of the target host for the migration. This
916 # is required when using x509 based TLS credentials and the
917 # migration URI does not already include a hostname. For example
918 # if using fd: or exec: based migration, the hostname must be
919 # provided so that the server's x509 certificate identity can be
920 # validated. (Since 2.7) An empty string means that QEMU will use
921 # the hostname associated with the migration URI, if any. (Since
922 # 2.9) Previously (since 2.7), this was reported by omitting
923 # tls-hostname instead.
924 #
925 # @max-bandwidth: to set maximum speed for migration. maximum speed
926 # in bytes per second. (Since 2.8)
927 #
928 # @downtime-limit: set maximum tolerated downtime for migration.
929 # maximum downtime in milliseconds (Since 2.8)
930 #
931 # @x-checkpoint-delay: the delay time between two COLO checkpoints.
932 # (Since 2.8)
933 #
934 # @block-incremental: Affects how much storage is migrated when the
935 # block migration capability is enabled. When false, the entire
936 # storage backing chain is migrated into a flattened image at the
937 # destination; when true, only the active qcow2 layer is migrated
938 # and the destination must already have access to the same backing
939 # chain as was used on the source. (since 2.10)
940 #
941 # @multifd-channels: Number of channels used to migrate data in
942 #     parallel.  This is the same as the number of sockets used for
943 #     migration.  The default value is 2 (since 4.0)
944 #
945 # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
946 # needs to be a multiple of the target page size and a power of 2
947 # (Since 2.11)
948 #
949 # @max-postcopy-bandwidth: Background transfer bandwidth during
950 # postcopy. Defaults to 0 (unlimited). In bytes per second.
951 # (Since 3.0)
952 #
953 # @max-cpu-throttle: maximum cpu throttle percentage. The default
954 # value is 99. (Since 3.1)
955 #
956 # @multifd-compression: Which compression method to use. Defaults to
957 # none. (Since 5.0)
958 #
959 # @multifd-zlib-level: Set the compression level to be used in live
960 # migration, the compression level is an integer between 0 and 9,
961 # where 0 means no compression, 1 means the best compression
962 # speed, and 9 means best compression ratio which will consume
963 # more CPU. Defaults to 1. (Since 5.0)
964 #
965 # @multifd-zstd-level: Set the compression level to be used in live
966 # migration, the compression level is an integer between 0 and 20,
967 # where 0 means no compression, 1 means the best compression
968 # speed, and 20 means best compression ratio which will consume
969 # more CPU. Defaults to 1. (Since 5.0)
970 #
971 # @block-bitmap-mapping: Maps block nodes and bitmaps on them to
972 # aliases for the purpose of dirty bitmap migration. Such aliases
973 # may for example be the corresponding names on the opposite site.
974 # The mapping must be one-to-one, but not necessarily complete: On
975 # the source, unmapped bitmaps and all bitmaps on unmapped nodes
976 # will be ignored. On the destination, encountering an unmapped
977 # alias in the incoming migration stream will result in a report,
978 # and all further bitmap migration data will then be discarded.
979 # Note that the destination does not know about bitmaps it does
980 # not receive, so there is no limitation or requirement regarding
981 # the number of bitmaps received, or how they are named, or on
982 # which nodes they are placed. By default (when this parameter
983 # has never been set), bitmap names are mapped to themselves.
984 # Nodes are mapped to their block device name if there is one, and
985 # to their node name otherwise. (Since 5.2)
986 #
987 # @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
988 # limit during live migration. Should be in the range 1 to 1000ms.
989 # Defaults to 1000ms. (Since 8.1)
990 #
991 # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
992 # Defaults to 1. (Since 8.1)
993 #
994 # Features:
995 #
996 # @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
997 # are experimental.
998 #
999 # TODO: either fuse back into MigrationParameters, or make
1000 # MigrationParameters members mandatory
1001 #
1002 # Since: 2.4
1003 ##
1004 { 'struct': 'MigrateSetParameters',
1005 'data': { '*announce-initial': 'size',
1006 '*announce-max': 'size',
1007 '*announce-rounds': 'size',
1008 '*announce-step': 'size',
1009 '*compress-level': 'uint8',
1010 '*compress-threads': 'uint8',
1011 '*compress-wait-thread': 'bool',
1012 '*decompress-threads': 'uint8',
1013 '*throttle-trigger-threshold': 'uint8',
1014 '*cpu-throttle-initial': 'uint8',
1015 '*cpu-throttle-increment': 'uint8',
1016 '*cpu-throttle-tailslow': 'bool',
# tls-creds/tls-hostname/tls-authz are StrOrNull here, whereas
# MigrationParameters declares them as plain 'str': clients may pass
# JSON null as well as a string when setting them.
# NOTE(review): presumably null behaves like "" (clears the setting) —
# confirm against the migration parameter handling code.
1017 '*tls-creds': 'StrOrNull',
1018 '*tls-hostname': 'StrOrNull',
1019 '*tls-authz': 'StrOrNull',
1020 '*max-bandwidth': 'size',
1021 '*downtime-limit': 'uint64',
1022 '*x-checkpoint-delay': { 'type': 'uint32',
1023 'features': [ 'unstable' ] },
1024 '*block-incremental': 'bool',
1025 '*multifd-channels': 'uint8',
1026 '*xbzrle-cache-size': 'size',
1027 '*max-postcopy-bandwidth': 'size',
1028 '*max-cpu-throttle': 'uint8',
1029 '*multifd-compression': 'MultiFDCompression',
1030 '*multifd-zlib-level': 'uint8',
1031 '*multifd-zstd-level': 'uint8',
1032 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1033 '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1034 'features': [ 'unstable' ] },
1035 '*vcpu-dirty-limit': 'uint64'} }
1036
1037 ##
1038 # @migrate-set-parameters:
1039 #
1040 # Set various migration parameters.
1041 #
1042 # Since: 2.4
1043 #
1044 # Example:
1045 #
1046 # -> { "execute": "migrate-set-parameters" ,
1047 # "arguments": { "compress-level": 1 } }
1048 # <- { "return": {} }
1049 ##
1050 { 'command': 'migrate-set-parameters', 'boxed': true,
1051 'data': 'MigrateSetParameters' }
1052
1053 ##
1054 # @MigrationParameters:
1055 #
1056 # The optional members aren't actually optional.
1057 #
1058 # @announce-initial: Initial delay (in milliseconds) before sending
1059 # the first announce (Since 4.0)
1060 #
1061 # @announce-max: Maximum delay (in milliseconds) between packets in
1062 # the announcement (Since 4.0)
1063 #
1064 # @announce-rounds: Number of self-announce packets sent after
1065 # migration (Since 4.0)
1066 #
1067 # @announce-step: Increase in delay (in milliseconds) between
1068 # subsequent packets in the announcement (Since 4.0)
1069 #
1070 # @compress-level: compression level
1071 #
1072 # @compress-threads: compression thread count
1073 #
1074 # @compress-wait-thread: Controls behavior when all compression
1075 # threads are currently busy. If true (default), wait for a free
1076 # compression thread to become available; otherwise, send the page
1077 # uncompressed. (Since 3.1)
1078 #
1079 # @decompress-threads: decompression thread count
1080 #
1081 # @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1082 # bytes_xfer_period to trigger throttling. It is expressed as
1083 # percentage. The default value is 50. (Since 5.0)
1084 #
1085 # @cpu-throttle-initial: Initial percentage of time guest cpus are
1086 # throttled when migration auto-converge is activated. (Since
1087 # 2.7)
1088 #
1089 # @cpu-throttle-increment: throttle percentage increase each time
1090 # auto-converge detects that migration is not making progress.
1091 # (Since 2.7)
1092 #
1093 # @cpu-throttle-tailslow: Make CPU throttling slower at the tail
1094 #     stage.  At the tail stage of throttling, the Guest is very
1095 #     sensitive to CPU percentage while the @cpu-throttle-increment is
1096 #     usually excessive at that stage.  If this parameter is true, we
1097 #     will compute the ideal CPU percentage used by the Guest, which
1098 #     may exactly make the dirty rate match the dirty rate threshold.
1099 #     Then we will choose a smaller throttle increment between the one
1100 #     specified by @cpu-throttle-increment and the one generated by
1101 #     the ideal CPU percentage.  Therefore, it is compatible with
1102 #     traditional throttling, while the throttle increment won't be
1103 #     excessive at the tail stage.  The default value is false.
1104 #     (Since 5.1)
1105 #
1106 # @tls-creds: ID of the 'tls-creds' object that provides credentials
1107 # for establishing a TLS connection over the migration data
1108 # channel. On the outgoing side of the migration, the credentials
1109 # must be for a 'client' endpoint, while for the incoming side the
1110 # credentials must be for a 'server' endpoint. An empty string
1111 # means that QEMU will use plain text mode for migration, rather
1112 # than TLS (Since 2.7) Note: 2.8 reports this by omitting
1113 # tls-creds instead.
1114 #
1115 # @tls-hostname: hostname of the target host for the migration. This
1116 # is required when using x509 based TLS credentials and the
1117 # migration URI does not already include a hostname. For example
1118 # if using fd: or exec: based migration, the hostname must be
1119 # provided so that the server's x509 certificate identity can be
1120 # validated. (Since 2.7) An empty string means that QEMU will use
1121 # the hostname associated with the migration URI, if any. (Since
1122 # 2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1123 #
1124 # @tls-authz: ID of the 'authz' object subclass that provides access
1125 # control checking of the TLS x509 certificate distinguished name.
1126 # (Since 4.0)
1127 #
1128 # @max-bandwidth: to set maximum speed for migration. maximum speed
1129 # in bytes per second. (Since 2.8)
1130 #
1131 # @downtime-limit: set maximum tolerated downtime for migration.
1132 # maximum downtime in milliseconds (Since 2.8)
1133 #
1134 # @x-checkpoint-delay: the delay time between two COLO checkpoints.
1135 # (Since 2.8)
1136 #
1137 # @block-incremental: Affects how much storage is migrated when the
1138 # block migration capability is enabled. When false, the entire
1139 # storage backing chain is migrated into a flattened image at the
1140 # destination; when true, only the active qcow2 layer is migrated
1141 # and the destination must already have access to the same backing
1142 # chain as was used on the source. (since 2.10)
1143 #
1144 # @multifd-channels: Number of channels used to migrate data in
1145 #     parallel.  This is the same as the number of sockets used for
1146 #     migration.  The default value is 2 (since 4.0)
1147 #
1148 # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
1149 # needs to be a multiple of the target page size and a power of 2
1150 # (Since 2.11)
1151 #
1152 # @max-postcopy-bandwidth: Background transfer bandwidth during
1153 # postcopy. Defaults to 0 (unlimited). In bytes per second.
1154 # (Since 3.0)
1155 #
1156 # @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
1157 # (Since 3.1)
1158 #
1159 # @multifd-compression: Which compression method to use. Defaults to
1160 # none. (Since 5.0)
1161 #
1162 # @multifd-zlib-level: Set the compression level to be used in live
1163 # migration, the compression level is an integer between 0 and 9,
1164 # where 0 means no compression, 1 means the best compression
1165 # speed, and 9 means best compression ratio which will consume
1166 # more CPU. Defaults to 1. (Since 5.0)
1167 #
1168 # @multifd-zstd-level: Set the compression level to be used in live
1169 # migration, the compression level is an integer between 0 and 20,
1170 # where 0 means no compression, 1 means the best compression
1171 # speed, and 20 means best compression ratio which will consume
1172 # more CPU. Defaults to 1. (Since 5.0)
1173 #
1174 # @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1175 # aliases for the purpose of dirty bitmap migration. Such aliases
1176 # may for example be the corresponding names on the opposite site.
1177 # The mapping must be one-to-one, but not necessarily complete: On
1178 # the source, unmapped bitmaps and all bitmaps on unmapped nodes
1179 # will be ignored. On the destination, encountering an unmapped
1180 # alias in the incoming migration stream will result in a report,
1181 # and all further bitmap migration data will then be discarded.
1182 # Note that the destination does not know about bitmaps it does
1183 # not receive, so there is no limitation or requirement regarding
1184 # the number of bitmaps received, or how they are named, or on
1185 # which nodes they are placed. By default (when this parameter
1186 # has never been set), bitmap names are mapped to themselves.
1187 # Nodes are mapped to their block device name if there is one, and
1188 # to their node name otherwise. (Since 5.2)
1189 #
1190 # @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1191 # limit during live migration. Should be in the range 1 to 1000ms.
1192 # Defaults to 1000ms. (Since 8.1)
1193 #
1194 # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
1195 # Defaults to 1. (Since 8.1)
1196 #
1197 # Features:
1198 #
1199 # @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1200 # are experimental.
1201 #
1202 # Since: 2.4
1203 ##
1204 { 'struct': 'MigrationParameters',
1205 'data': { '*announce-initial': 'size',
1206 '*announce-max': 'size',
1207 '*announce-rounds': 'size',
1208 '*announce-step': 'size',
1209 '*compress-level': 'uint8',
1210 '*compress-threads': 'uint8',
1211 '*compress-wait-thread': 'bool',
1212 '*decompress-threads': 'uint8',
1213 '*throttle-trigger-threshold': 'uint8',
1214 '*cpu-throttle-initial': 'uint8',
1215 '*cpu-throttle-increment': 'uint8',
1216 '*cpu-throttle-tailslow': 'bool',
# Unlike MigrateSetParameters, the tls-* members are plain 'str' here:
# query-migrate-parameters output reports a string (possibly empty),
# never a JSON null.
1217 '*tls-creds': 'str',
1218 '*tls-hostname': 'str',
1219 '*tls-authz': 'str',
1220 '*max-bandwidth': 'size',
1221 '*downtime-limit': 'uint64',
1222 '*x-checkpoint-delay': { 'type': 'uint32',
1223 'features': [ 'unstable' ] },
1224 '*block-incremental': 'bool',
1225 '*multifd-channels': 'uint8',
1226 '*xbzrle-cache-size': 'size',
1227 '*max-postcopy-bandwidth': 'size',
1228 '*max-cpu-throttle': 'uint8',
1229 '*multifd-compression': 'MultiFDCompression',
1230 '*multifd-zlib-level': 'uint8',
1231 '*multifd-zstd-level': 'uint8',
1232 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1233 '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1234 'features': [ 'unstable' ] },
1235 '*vcpu-dirty-limit': 'uint64'} }
1236
1237 ##
1238 # @query-migrate-parameters:
1239 #
1240 # Returns information about the current migration parameters
1241 #
1242 # Returns: @MigrationParameters
1243 #
1244 # Since: 2.4
1245 #
1246 # Example:
1247 #
1248 # -> { "execute": "query-migrate-parameters" }
1249 # <- { "return": {
1250 # "decompress-threads": 2,
1251 # "cpu-throttle-increment": 10,
1252 # "compress-threads": 8,
1253 # "compress-level": 1,
1254 # "cpu-throttle-initial": 20,
1255 # "max-bandwidth": 33554432,
1256 # "downtime-limit": 300
1257 # }
1258 # }
1259 ##
1260 { 'command': 'query-migrate-parameters',
1261 'returns': 'MigrationParameters' }
1262
1263 ##
1264 # @migrate-start-postcopy:
1265 #
1266 # Followup to a migration command to switch the migration to postcopy
1267 # mode. The postcopy-ram capability must be set on both source and
1268 # destination before the original migration command.
1269 #
1270 # Since: 2.5
1271 #
1272 # Example:
1273 #
1274 # -> { "execute": "migrate-start-postcopy" }
1275 # <- { "return": {} }
1276 ##
1277 { 'command': 'migrate-start-postcopy' }
1278
1279 ##
1280 # @MIGRATION:
1281 #
1282 # Emitted when a migration event happens
1283 #
1284 # @status: @MigrationStatus describing the current migration status.
1285 #
1286 # Since: 2.4
1287 #
1288 # Example:
1289 #
1290 # <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1291 # "event": "MIGRATION",
1292 # "data": {"status": "completed"} }
1293 ##
1294 { 'event': 'MIGRATION',
1295 'data': {'status': 'MigrationStatus'}}
1296
1297 ##
1298 # @MIGRATION_PASS:
1299 #
1300 # Emitted from the source side of a migration at the start of each
1301 # pass (when it syncs the dirty bitmap)
1302 #
1303 # @pass: An incrementing count (starting at 1 on the first pass)
1304 #
1305 # Since: 2.6
1306 #
1307 # Example:
1308 #
1309 # <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1310 # "event": "MIGRATION_PASS", "data": {"pass": 2} }
1311 ##
1312 { 'event': 'MIGRATION_PASS',
1313 'data': { 'pass': 'int' } }
1314
1315 ##
1316 # @COLOMessage:
1317 #
1318 # The message transmission between Primary side and Secondary side.
1319 #
1320 # @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1321 #
1322 # @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1323 # checkpointing
1324 #
1325 # @checkpoint-reply: SVM gets PVM's checkpoint request
1326 #
1327 # @vmstate-send: VM's state will be sent by PVM.
1328 #
1329 # @vmstate-size: The total size of VMstate.
1330 #
1331 # @vmstate-received: VM's state has been received by SVM.
1332 #
1333 # @vmstate-loaded: VM's state has been loaded by SVM.
1334 #
1335 # Since: 2.8
1336 ##
1337 { 'enum': 'COLOMessage',
1338 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1339 'vmstate-send', 'vmstate-size', 'vmstate-received',
1340 'vmstate-loaded' ] }
1341
1342 ##
1343 # @COLOMode:
1344 #
1345 # The COLO current mode.
1346 #
1347 # @none: COLO is disabled.
1348 #
1349 # @primary: COLO node in primary side.
1350 #
1351 # @secondary: COLO node in secondary side.
1352 #
1353 # Since: 2.8
1354 ##
1355 { 'enum': 'COLOMode',
1356 'data': [ 'none', 'primary', 'secondary'] }
1357
1358 ##
1359 # @FailoverStatus:
1360 #
1361 # An enumeration of COLO failover status
1362 #
1363 # @none: no failover has ever happened
1364 #
1365 # @require: got failover requirement but not handled
1366 #
1367 # @active: in the process of doing failover
1368 #
1369 # @completed: finish the process of failover
1370 #
1371 # @relaunch: restart the failover process, from 'none' -> 'completed'
1372 # (Since 2.9)
1373 #
1374 # Since: 2.8
1375 ##
1376 { 'enum': 'FailoverStatus',
1377 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1378
1379 ##
1380 # @COLO_EXIT:
1381 #
1382 # Emitted when VM finishes COLO mode due to some errors happening or
1383 # at the request of users.
1384 #
1385 # @mode: report COLO mode when COLO exited.
1386 #
1387 # @reason: describes the reason for the COLO exit.
1388 #
1389 # Since: 3.1
1390 #
1391 # Example:
1392 #
1393 # <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1394 # "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1395 ##
1396 { 'event': 'COLO_EXIT',
1397 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1398
1399 ##
1400 # @COLOExitReason:
1401 #
1402 # The reason for a COLO exit.
1403 #
1404 # @none: failover has never happened. This state does not occur in
1405 # the COLO_EXIT event, and is only visible in the result of
1406 # query-colo-status.
1407 #
1408 # @request: COLO exit is due to an external request.
1409 #
1410 # @error: COLO exit is due to an internal error.
1411 #
1412 # @processing: COLO is currently handling a failover (since 4.0).
1413 #
1414 # Since: 3.1
1415 ##
1416 { 'enum': 'COLOExitReason',
1417 'data': [ 'none', 'request', 'error' , 'processing' ] }
1418
1419 ##
1420 # @x-colo-lost-heartbeat:
1421 #
1422 # Tell qemu that heartbeat is lost, request it to do takeover
1423 # procedures. If this command is sent to the PVM, the Primary side
1424 # will exit COLO mode. If sent to the Secondary, the Secondary side
1425 # will run failover work, then takes over server operation to become
1426 # the service VM.
1427 #
1428 # Features:
1429 #
1430 # @unstable: This command is experimental.
1431 #
1432 # Since: 2.8
1433 #
1434 # Example:
1435 #
1436 # -> { "execute": "x-colo-lost-heartbeat" }
1437 # <- { "return": {} }
1438 ##
1439 { 'command': 'x-colo-lost-heartbeat',
1440 'features': [ 'unstable' ],
1441 'if': 'CONFIG_REPLICATION' }
1442
1443 ##
1444 # @migrate_cancel:
1445 #
1446 # Cancel the current executing migration process.
1447 #
1448 # Returns: nothing on success
1449 #
1450 # Notes: This command succeeds even if there is no migration process
1451 # running.
1452 #
1453 # Since: 0.14
1454 #
1455 # Example:
1456 #
1457 # -> { "execute": "migrate_cancel" }
1458 # <- { "return": {} }
1459 ##
1460 { 'command': 'migrate_cancel' }
1461
1462 ##
1463 # @migrate-continue:
1464 #
1465 # Continue migration when it's in a paused state.
1466 #
1467 # @state: The state the migration is currently expected to be in
1468 #
1469 # Returns: nothing on success
1470 #
1471 # Since: 2.11
1472 #
1473 # Example:
1474 #
1475 # -> { "execute": "migrate-continue" , "arguments":
1476 # { "state": "pre-switchover" } }
1477 # <- { "return": {} }
1478 ##
1479 { 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1480
1481 ##
1482 # @migrate:
1483 #
1484 # Migrates the currently running guest to another Virtual Machine.
1485 #
1486 # @uri: the Uniform Resource Identifier of the destination VM
1487 #
1488 # @blk: do block migration (full disk copy)
1489 #
1490 # @inc: incremental disk copy migration
1491 #
1492 # @detach: this argument exists only for compatibility reasons and is
1493 # ignored by QEMU
1494 #
1495 # @resume: resume one paused migration, default "off". (since 3.0)
1496 #
1497 # Returns: nothing on success
1498 #
1499 # Since: 0.14
1500 #
1501 # Notes:
1502 #
1503 # 1. The 'query-migrate' command should be used to check migration's
1504 # progress and final result (this information is provided by the
1505 # 'status' member)
1506 #
1507 # 2. All boolean arguments default to false
1508 #
1509 # 3. The user Monitor's "detach" argument is invalid in QMP and should
1510 # not be used
1511 #
1512 # Example:
1513 #
1514 # -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1515 # <- { "return": {} }
1516 ##
1517 { 'command': 'migrate',
1518 'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
1519 '*detach': 'bool', '*resume': 'bool' } }
1520
1521 ##
1522 # @migrate-incoming:
1523 #
1524 # Start an incoming migration, the qemu must have been started with
1525 # -incoming defer
1526 #
1527 # @uri: The Uniform Resource Identifier identifying the source or
1528 # address to listen on
1529 #
1530 # Returns: nothing on success
1531 #
1532 # Since: 2.3
1533 #
1534 # Notes:
1535 #
1536 # 1. It's a bad idea to use a string for the uri, but it needs
1537 # to stay compatible with -incoming and the format of the uri
1538 # is already exposed above libvirt.
1539 #
1540 # 2. QEMU must be started with -incoming defer to allow
1541 # migrate-incoming to be used.
1542 #
1543 # 3. The uri format is the same as for -incoming
1544 #
1545 # Example:
1546 #
1547 # -> { "execute": "migrate-incoming",
1548 # "arguments": { "uri": "tcp::4446" } }
1549 # <- { "return": {} }
1550 ##
1551 { 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1552
1553 ##
1554 # @xen-save-devices-state:
1555 #
1556 # Save the state of all devices to file. The RAM and the block
1557 # devices of the VM are not saved by this command.
1558 #
1559 # @filename: the file to save the state of the devices to as binary
1560 # data. See xen-save-devices-state.txt for a description of the
1561 # binary format.
1562 #
1563 # @live: Optional argument to ask QEMU to treat this command as part
1564 # of a live migration. Default to true. (since 2.11)
1565 #
1566 # Returns: Nothing on success
1567 #
1568 # Since: 1.1
1569 #
1570 # Example:
1571 #
1572 # -> { "execute": "xen-save-devices-state",
1573 # "arguments": { "filename": "/tmp/save" } }
1574 # <- { "return": {} }
1575 ##
1576 { 'command': 'xen-save-devices-state',
1577 'data': {'filename': 'str', '*live':'bool' } }
1578
1579 ##
1580 # @xen-set-global-dirty-log:
1581 #
1582 # Enable or disable the global dirty log mode.
1583 #
1584 # @enable: true to enable, false to disable.
1585 #
1586 # Returns: nothing
1587 #
1588 # Since: 1.3
1589 #
1590 # Example:
1591 #
1592 # -> { "execute": "xen-set-global-dirty-log",
1593 # "arguments": { "enable": true } }
1594 # <- { "return": {} }
1595 ##
1596 { 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1597
1598 ##
1599 # @xen-load-devices-state:
1600 #
1601 # Load the state of all devices from file. The RAM and the block
1602 # devices of the VM are not loaded by this command.
1603 #
1604 # @filename: the file to load the state of the devices from as binary
1605 # data. See xen-save-devices-state.txt for a description of the
1606 # binary format.
1607 #
1608 # Since: 2.7
1609 #
1610 # Example:
1611 #
1612 # -> { "execute": "xen-load-devices-state",
1613 # "arguments": { "filename": "/tmp/resume" } }
1614 # <- { "return": {} }
1615 ##
1616 { 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1617
1618 ##
1619 # @xen-set-replication:
1620 #
1621 # Enable or disable replication.
1622 #
1623 # @enable: true to enable, false to disable.
1624 #
1625 # @primary: true for primary or false for secondary.
1626 #
1627 # @failover: true to do failover, false to stop.  It cannot be
1628 #     specified if 'enable' is true.  The default value is false.
1629 #
1630 # Returns: nothing.
1631 #
1632 # Example:
1633 #
1634 # -> { "execute": "xen-set-replication",
1635 # "arguments": {"enable": true, "primary": false} }
1636 # <- { "return": {} }
1637 #
1638 # Since: 2.9
1639 ##
1640 { 'command': 'xen-set-replication',
1641 'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1642 'if': 'CONFIG_REPLICATION' }
1643
1644 ##
1645 # @ReplicationStatus:
1646 #
1647 # The result format for 'query-xen-replication-status'.
1648 #
1649 # @error: true if an error happened, false if replication is normal.
1650 #
1651 # @desc: the human readable error description string, when @error is
1652 # 'true'.
1653 #
1654 # Since: 2.9
1655 ##
1656 { 'struct': 'ReplicationStatus',
1657 'data': { 'error': 'bool', '*desc': 'str' },
1658 'if': 'CONFIG_REPLICATION' }
1659
1660 ##
1661 # @query-xen-replication-status:
1662 #
1663 # Query replication status while the vm is running.
1664 #
1665 # Returns: A @ReplicationStatus object showing the status.
1666 #
1667 # Example:
1668 #
1669 # -> { "execute": "query-xen-replication-status" }
1670 # <- { "return": { "error": false } }
1671 #
1672 # Since: 2.9
1673 ##
1674 { 'command': 'query-xen-replication-status',
1675 'returns': 'ReplicationStatus',
1676 'if': 'CONFIG_REPLICATION' }
1677
1678 ##
1679 # @xen-colo-do-checkpoint:
1680 #
1681 # Xen uses this command to notify replication to trigger a checkpoint.
1682 #
1683 # Returns: nothing.
1684 #
1685 # Example:
1686 #
1687 # -> { "execute": "xen-colo-do-checkpoint" }
1688 # <- { "return": {} }
1689 #
1690 # Since: 2.9
1691 ##
1692 { 'command': 'xen-colo-do-checkpoint',
1693 'if': 'CONFIG_REPLICATION' }
1694
1695 ##
1696 # @COLOStatus:
1697 #
1698 # The result format for 'query-colo-status'.
1699 #
1700 # @mode: COLO running mode. If COLO is running, this field will
1701 # return 'primary' or 'secondary'.
1702 #
1703 # @last-mode: COLO last running mode.  If COLO is running, this field
1704 #     will return the same value as the @mode field; after failover
1705 #     this field can be used to get the last COLO mode. (since 4.0)
1706 #
1707 # @reason: describes the reason for the COLO exit.
1708 #
1709 # Since: 3.1
1710 ##
1711 { 'struct': 'COLOStatus',
1712 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1713 'reason': 'COLOExitReason' },
1714 'if': 'CONFIG_REPLICATION' }
1715
1716 ##
1717 # @query-colo-status:
1718 #
1719 # Query COLO status while the vm is running.
1720 #
1721 # Returns: A @COLOStatus object showing the status.
1722 #
1723 # Example:
1724 #
1725 # -> { "execute": "query-colo-status" }
1726 # <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1727 #
1728 # Since: 3.1
1729 ##
1730 { 'command': 'query-colo-status',
1731 'returns': 'COLOStatus',
1732 'if': 'CONFIG_REPLICATION' }
1733
1734 ##
1735 # @migrate-recover:
1736 #
1737 # Provide a recovery migration stream URI.
1738 #
1739 # @uri: the URI to be used for the recovery of migration stream.
1740 #
1741 # Returns: nothing.
1742 #
1743 # Example:
1744 #
1745 # -> { "execute": "migrate-recover",
1746 # "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1747 # <- { "return": {} }
1748 #
1749 # Since: 3.0
1750 ##
1751 { 'command': 'migrate-recover',
1752 'data': { 'uri': 'str' },
1753 'allow-oob': true }
1754
1755 ##
1756 # @migrate-pause:
1757 #
1758 # Pause a migration. Currently it only supports postcopy.
1759 #
1760 # Returns: nothing.
1761 #
1762 # Example:
1763 #
1764 # -> { "execute": "migrate-pause" }
1765 # <- { "return": {} }
1766 #
1767 # Since: 3.0
1768 ##
1769 { 'command': 'migrate-pause', 'allow-oob': true }
1770
1771 ##
1772 # @UNPLUG_PRIMARY:
1773 #
1774 # Emitted from source side of a migration when migration state is
1775 # WAIT_UNPLUG. Device was unplugged by guest operating system. Device
1776 # resources in QEMU are kept on standby to be able to re-plug it in
1777 # case of migration failure.
1778 #
1779 # @device-id: QEMU device id of the unplugged device
1780 #
1781 # Since: 4.2
1782 #
1783 # Example:
1784 #
1785 # <- { "event": "UNPLUG_PRIMARY",
1786 # "data": { "device-id": "hostdev0" },
1787 # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1788 ##
1789 { 'event': 'UNPLUG_PRIMARY',
1790 'data': { 'device-id': 'str' } }
1791
1792 ##
1793 # @DirtyRateVcpu:
1794 #
1795 # Dirty rate of vcpu.
1796 #
1797 # @id: vcpu index.
1798 #
1799 # @dirty-rate: dirty rate.
1800 #
1801 # Since: 6.2
1802 ##
1803 { 'struct': 'DirtyRateVcpu',
1804 'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1805
1806 ##
1807 # @DirtyRateStatus:
1808 #
1809 # Dirty page rate measurement status.
1810 #
1811 # @unstarted: measuring thread has not been started yet
1812 #
1813 # @measuring: measuring thread is running
1814 #
1815 # @measured: dirty page rate is measured and the results are available
1816 #
1817 # Since: 5.2
1818 ##
1819 { 'enum': 'DirtyRateStatus',
1820 'data': [ 'unstarted', 'measuring', 'measured'] }
1821
1822 ##
1823 # @DirtyRateMeasureMode:
1824 #
1825 # Method used to measure dirty page rate. Differences between
1826 # available methods are explained in @calc-dirty-rate.
1827 #
1828 # @page-sampling: use page sampling
1829 #
1830 # @dirty-ring: use dirty ring
1831 #
1832 # @dirty-bitmap: use dirty bitmap
1833 #
1834 # Since: 6.2
1835 ##
1836 { 'enum': 'DirtyRateMeasureMode',
1837 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1838
1839 ##
1840 # @DirtyRateInfo:
1841 #
1842 # Information about measured dirty page rate.
1843 #
1844 # @dirty-rate: an estimate of the dirty page rate of the VM in units
1845 # of MiB/s. Value is present only when @status is 'measured'.
1846 #
1847 # @status: current status of dirty page rate measurements
1848 #
1849 # @start-time: time when the measurement started, in units of seconds
1850 #
1851 # @calc-time: time period for which dirty page rate was measured
1852 # (in seconds)
1853 #
1854 # @sample-pages: number of sampled pages per GiB of guest memory.
1855 # Valid only in page-sampling mode (Since 6.1)
1856 #
1857 # @mode: mode that was used to measure dirty page rate (Since 6.2)
1858 #
1859 # @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1860 # specified (Since 6.2)
1861 #
1862 # Since: 5.2
1863 ##
1864 { 'struct': 'DirtyRateInfo',
1865 'data': {'*dirty-rate': 'int64',
1866 'status': 'DirtyRateStatus',
1867 'start-time': 'int64',
1868 'calc-time': 'int64',
1869 'sample-pages': 'uint64',
1870 'mode': 'DirtyRateMeasureMode',
1871 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1872
1873 ##
1874 # @calc-dirty-rate:
1875 #
1876 # Start measuring dirty page rate of the VM. Results can be retrieved
1877 # with @query-dirty-rate after measurements are completed.
1878 #
1879 # Dirty page rate is the number of pages changed in a given time
1880 # period expressed in MiB/s. The following methods of calculation are
1881 # available:
1882 #
1883 # 1. In page sampling mode, a random subset of pages is selected and
1884 # hashed twice: once at the beginning of measurement time period,
1885 # and once again at the end. If two hashes for some page are
1886 # different, the page is counted as changed. Since this method
1887 # relies on sampling and hashing, calculated dirty page rate is
1888 # only an estimate of its true value. Increasing @sample-pages
1889 # improves estimation quality at the cost of higher computational
1890 # overhead.
1891 #
1892 # 2. Dirty bitmap mode captures writes to memory (for example by
1893 # temporarily revoking write access to all pages) and counts page
1894 # faults. Information about modified pages is collected into a
1895 # bitmap, where each bit corresponds to one guest page. This mode
1896 # requires that KVM accelerator property "dirty-ring-size" is *not*
1897 # set.
1898 #
1899 # 3. Dirty ring mode is similar to dirty bitmap mode, but the
1900 # information about modified pages is collected into ring buffer.
1901 # This mode tracks page modification per each vCPU separately. It
1902 # requires that KVM accelerator property "dirty-ring-size" is set.
1903 #
1904 # @calc-time: time period in units of second for which dirty page rate
1905 # is calculated. Note that larger @calc-time values will
1906 # typically result in smaller dirty page rates because page
1907 # dirtying is a one-time event. Once some page is counted as
1908 # dirty during @calc-time period, further writes to this page will
1909 # not increase dirty page rate anymore.
1910 #
1911 # @sample-pages: number of sampled pages per each GiB of guest memory.
1912 # Default value is 512. For 4KiB guest pages this corresponds to
1913 # sampling ratio of 0.2%. This argument is used only in page
1914 # sampling mode. (Since 6.1)
1915 #
1916 # @mode: mechanism for tracking dirty pages. Default value is
1917 # 'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'.
1918 # (Since 6.1)
1919 #
1920 # Since: 5.2
1921 #
1922 # Example:
1923 #
1924 # -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
1925 # "sample-pages": 512} }
1926 # <- { "return": {} }
1927 ##
1928 { 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
1929 '*sample-pages': 'int',
1930 '*mode': 'DirtyRateMeasureMode'} }
1931
1932 ##
1933 # @query-dirty-rate:
1934 #
1935 # Query results of the most recent invocation of @calc-dirty-rate.
1936 #
1937 # Since: 5.2
1938 #
1939 # Examples (only the value of the response's "return" member is shown):
1940 #
1941 # 1. Measurement is in progress:
1942 #
1943 # <- {"status": "measuring", "sample-pages": 512,
1944 # "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1945 #
1946 # 2. Measurement has been completed:
1947 #
1948 # <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
1949 # "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1950 ##
1951 { 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }
1952
1953 ##
1954 # @DirtyLimitInfo:
1955 #
1956 # Dirty page rate limit information for one virtual CPU.
1957 #
1958 # @cpu-index: index of a virtual CPU.
1959 #
1960 # @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
1961 # CPU, 0 means unlimited.
1962 #
1963 # @current-rate: current dirty page rate (MB/s) for a virtual CPU.
1964 #
1965 # Since: 7.1
1966 ##
1967 { 'struct': 'DirtyLimitInfo',
1968 'data': { 'cpu-index': 'int',
1969 'limit-rate': 'uint64',
1970 'current-rate': 'uint64' } }
1971
1972 ##
1973 # @set-vcpu-dirty-limit:
1974 #
1975 # Set the upper limit of dirty page rate for virtual CPUs.
1976 #
1977 # Requires KVM with accelerator property "dirty-ring-size" set. A
1978 # virtual CPU's dirty page rate is a measure of its memory load. To
1979 # observe dirty page rates, use @calc-dirty-rate.
1980 #
1981 # @cpu-index: index of a virtual CPU, default is all.
1982 #
1983 # @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
1984 #
1985 # Since: 7.1
1986 #
1987 # Example:
1988 #
1989 # -> {"execute": "set-vcpu-dirty-limit",
1990 # "arguments": { "dirty-rate": 200,
1991 # "cpu-index": 1 } }
1992 # <- { "return": {} }
1993 ##
1994 { 'command': 'set-vcpu-dirty-limit',
1995 'data': { '*cpu-index': 'int',
1996 'dirty-rate': 'uint64' } }
1997
1998 ##
1999 # @cancel-vcpu-dirty-limit:
2000 #
2001 # Cancel the upper limit of dirty page rate for virtual CPUs.
2002 #
2003 # Cancel the dirty page limit for the vCPU which has been set with the
2004 # set-vcpu-dirty-limit command. Note that this command requires
2005 # support from dirty ring, same as the "set-vcpu-dirty-limit".
2006 #
2007 # @cpu-index: index of a virtual CPU, default is all.
2008 #
2009 # Since: 7.1
2010 #
2011 # Example:
2012 #
2013 # -> {"execute": "cancel-vcpu-dirty-limit",
2014 # "arguments": { "cpu-index": 1 } }
2015 # <- { "return": {} }
2016 ##
2017 { 'command': 'cancel-vcpu-dirty-limit',
2018 'data': { '*cpu-index': 'int'} }
2019
2020 ##
2021 # @query-vcpu-dirty-limit:
2022 #
2023 # Returns a list of @DirtyLimitInfo describing the virtual CPU dirty
2024 # page rate limits, if any.
2025 #
2026 # Since: 7.1
2027 #
2028 # Example:
2029 #
2030 # -> {"execute": "query-vcpu-dirty-limit"}
2031 # <- {"return": [
2032 # { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2033 # { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2034 ##
2035 { 'command': 'query-vcpu-dirty-limit',
2036 'returns': [ 'DirtyLimitInfo' ] }
2037
2038 ##
2039 # @MigrationThreadInfo:
2040 #
2041 # Information about a migration thread
2042 #
2043 # @name: the name of the migration thread
2044 #
2045 # @thread-id: ID of the underlying host thread
2046 #
2047 # Since: 7.2
2048 ##
2049 { 'struct': 'MigrationThreadInfo',
2050 'data': {'name': 'str',
2051 'thread-id': 'int'} }
2052
2053 ##
2054 # @query-migrationthreads:
2055 #
2056 # Returns information about the migration threads.
2057 #
2058 # This command takes no arguments.
2059 #
2060 # Returns: a list of @MigrationThreadInfo, one per migration thread
2061 #
2062 # Since: 7.2
2063 ##
2064 { 'command': 'query-migrationthreads',
2065 'returns': ['MigrationThreadInfo'] }
2066
2067 ##
2068 # @snapshot-save:
2069 #
2070 # Save a VM snapshot. The save runs asynchronously as a job; see the
2071 # example below for the job life cycle.
2072 # @job-id: identifier for the newly created job
2073 #
2074 # @tag: name of the snapshot to create
2075 #
2076 # @vmstate: block device node name to save vmstate to
2077 #
2078 # @devices: list of block device node names to save a snapshot to
2079 #
2080 # Applications should not assume that the snapshot save is complete
2081 # when this command returns. The job commands / events must be used
2082 # to determine completion and to fetch details of any errors that
2083 # arise.
2084 #
2085 # Note that execution of the guest CPUs may be stopped during the time
2086 # it takes to save the snapshot. A future version of QEMU may ensure
2087 # CPUs are executing continuously.
2088 #
2089 # It is strongly recommended that @devices contain all writable block
2090 # device nodes if a consistent snapshot is required.
2091 #
2092 # If @tag already exists, an error will be reported.
2093 #
2094 # Returns: nothing
2095 #
2096 # Example:
2097 #
2098 # -> { "execute": "snapshot-save",
2099 # "arguments": {
2100 # "job-id": "snapsave0",
2101 # "tag": "my-snap",
2102 # "vmstate": "disk0",
2103 # "devices": ["disk0", "disk1"]
2104 # }
2105 # }
2106 # <- { "return": { } }
2107 # <- {"event": "JOB_STATUS_CHANGE",
2108 # "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2109 # "data": {"status": "created", "id": "snapsave0"}}
2110 # <- {"event": "JOB_STATUS_CHANGE",
2111 # "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2112 # "data": {"status": "running", "id": "snapsave0"}}
2113 # <- {"event": "STOP",
2114 # "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2115 # <- {"event": "RESUME",
2116 # "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2117 # <- {"event": "JOB_STATUS_CHANGE",
2118 # "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2119 # "data": {"status": "waiting", "id": "snapsave0"}}
2120 # <- {"event": "JOB_STATUS_CHANGE",
2121 # "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2122 # "data": {"status": "pending", "id": "snapsave0"}}
2123 # <- {"event": "JOB_STATUS_CHANGE",
2124 # "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2125 # "data": {"status": "concluded", "id": "snapsave0"}}
2126 # -> {"execute": "query-jobs"}
2127 # <- {"return": [{"current-progress": 1,
2128 # "status": "concluded",
2129 # "total-progress": 1,
2130 # "type": "snapshot-save",
2131 # "id": "snapsave0"}]}
2132 #
2133 # Since: 6.0
2134 ##
2135 { 'command': 'snapshot-save',
2136 'data': { 'job-id': 'str',
2137 'tag': 'str',
2138 'vmstate': 'str',
2139 'devices': ['str'] } }
2140
2141 ##
2142 # @snapshot-load:
2143 #
2144 # Load a VM snapshot. The load runs asynchronously as a job; see the
2145 # example below for the job life cycle.
2146 # @job-id: identifier for the newly created job
2147 #
2148 # @tag: name of the snapshot to load.
2149 #
2150 # @vmstate: block device node name to load vmstate from
2151 #
2152 # @devices: list of block device node names to load a snapshot from
2153 #
2154 # Applications should not assume that the snapshot load is complete
2155 # when this command returns. The job commands / events must be used
2156 # to determine completion and to fetch details of any errors that
2157 # arise.
2158 #
2159 # Note that execution of the guest CPUs will be stopped during the
2160 # time it takes to load the snapshot.
2161 #
2162 # It is strongly recommended that @devices contain all writable block
2163 # device nodes that may have changed since the original @snapshot-save
2164 # command execution.
2165 #
2166 # Returns: nothing
2167 #
2168 # Example:
2169 #
2170 # -> { "execute": "snapshot-load",
2171 # "arguments": {
2172 # "job-id": "snapload0",
2173 # "tag": "my-snap",
2174 # "vmstate": "disk0",
2175 # "devices": ["disk0", "disk1"]
2176 # }
2177 # }
2178 # <- { "return": { } }
2179 # <- {"event": "JOB_STATUS_CHANGE",
2180 # "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2181 # "data": {"status": "created", "id": "snapload0"}}
2182 # <- {"event": "JOB_STATUS_CHANGE",
2183 # "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2184 # "data": {"status": "running", "id": "snapload0"}}
2185 # <- {"event": "STOP",
2186 # "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2187 # <- {"event": "RESUME",
2188 # "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2189 # <- {"event": "JOB_STATUS_CHANGE",
2190 # "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2191 # "data": {"status": "waiting", "id": "snapload0"}}
2192 # <- {"event": "JOB_STATUS_CHANGE",
2193 # "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2194 # "data": {"status": "pending", "id": "snapload0"}}
2195 # <- {"event": "JOB_STATUS_CHANGE",
2196 # "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2197 # "data": {"status": "concluded", "id": "snapload0"}}
2198 # -> {"execute": "query-jobs"}
2199 # <- {"return": [{"current-progress": 1,
2200 # "status": "concluded",
2201 # "total-progress": 1,
2202 # "type": "snapshot-load",
2203 # "id": "snapload0"}]}
2204 #
2205 # Since: 6.0
2206 ##
2207 { 'command': 'snapshot-load',
2208 'data': { 'job-id': 'str',
2209 'tag': 'str',
2210 'vmstate': 'str',
2211 'devices': ['str'] } }
2212
2213 ##
2214 # @snapshot-delete:
2215 #
2216 # Delete a VM snapshot. The deletion runs asynchronously as a job; see
2217 # the example below for the job life cycle.
2218 # @job-id: identifier for the newly created job
2219 #
2220 # @tag: name of the snapshot to delete.
2221 #
2222 # @devices: list of block device node names to delete a snapshot from
2223 #
2224 # Applications should not assume that the snapshot delete is complete
2225 # when this command returns. The job commands / events must be used
2226 # to determine completion and to fetch details of any errors that
2227 # arise.
2228 #
2229 # Returns: nothing
2230 #
2231 # Example:
2232 #
2233 # -> { "execute": "snapshot-delete",
2234 # "arguments": {
2235 # "job-id": "snapdelete0",
2236 # "tag": "my-snap",
2237 # "devices": ["disk0", "disk1"]
2238 # }
2239 # }
2240 # <- { "return": { } }
2241 # <- {"event": "JOB_STATUS_CHANGE",
2242 # "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2243 # "data": {"status": "created", "id": "snapdelete0"}}
2244 # <- {"event": "JOB_STATUS_CHANGE",
2245 # "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2246 # "data": {"status": "running", "id": "snapdelete0"}}
2247 # <- {"event": "JOB_STATUS_CHANGE",
2248 # "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2249 # "data": {"status": "waiting", "id": "snapdelete0"}}
2250 # <- {"event": "JOB_STATUS_CHANGE",
2251 # "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2252 # "data": {"status": "pending", "id": "snapdelete0"}}
2253 # <- {"event": "JOB_STATUS_CHANGE",
2254 # "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2255 # "data": {"status": "concluded", "id": "snapdelete0"}}
2256 # -> {"execute": "query-jobs"}
2257 # <- {"return": [{"current-progress": 1,
2258 # "status": "concluded",
2259 # "total-progress": 1,
2260 # "type": "snapshot-delete",
2261 # "id": "snapdelete0"}]}
2262 #
2263 # Since: 6.0
2264 ##
2265 { 'command': 'snapshot-delete',
2266 'data': { 'job-id': 'str',
2267 'tag': 'str',
2268 'devices': ['str'] } }