]> git.proxmox.com Git - mirror_qemu.git/blob - migration/migration-stats.h
migration: Plug memory leak on HMP migrate error path
[mirror_qemu.git] / migration / migration-stats.h
1 /*
2 * Migration stats
3 *
4 * Copyright (c) 2012-2023 Red Hat Inc
5 *
6 * Authors:
7 * Juan Quintela <quintela@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 */
12
13 #ifndef QEMU_MIGRATION_STATS_H
14 #define QEMU_MIGRATION_STATS_H
15
16 #include "qemu/stats64.h"
17
18 /*
19 * Amount of time to allocate to each "chunk" of bandwidth-throttled
20 * data.
21 */
22 #define BUFFER_DELAY 100
23
24 /*
25 * If rate_limit_max is 0, there is special code to remove the rate
26 * limit.
27 */
28 #define RATE_LIMIT_DISABLED 0
29
30 /*
31 * These are the ram migration statistic counters. It is loosely
32 * based on MigrationStats. We change to Stat64 any counter that
33 * needs to be updated using atomic ops (can be accessed by more than
34 * one thread).
35 */
/*
 * Per-migration counters that may be updated concurrently from
 * multiple threads, hence every field is a Stat64 (atomic 64-bit
 * counter) rather than a plain integer.
 */
typedef struct {
    /*
     * Number of bytes that were dirty last time that we synced with
     * the guest memory.  We use that to calculate the downtime.  As
     * the remaining dirty amounts to what we know that is still dirty
     * since last iteration, not counting what the guest has dirtied
     * since we synchronized bitmaps.
     */
    Stat64 dirty_bytes_last_sync;
    /*
     * Number of pages dirtied per second.
     */
    Stat64 dirty_pages_rate;
    /*
     * Number of times we have synchronized guest bitmaps.
     */
    Stat64 dirty_sync_count;
    /*
     * Number of times zero copy failed to send any page using zero
     * copy.
     */
    Stat64 dirty_sync_missed_zero_copy;
    /*
     * Number of bytes sent at migration completion stage while the
     * guest is stopped.
     */
    Stat64 downtime_bytes;
    /*
     * Number of bytes sent through multifd channels.
     */
    Stat64 multifd_bytes;
    /*
     * Number of pages transferred that were not full of zeros.
     */
    Stat64 normal_pages;
    /*
     * Number of bytes sent during postcopy.
     */
    Stat64 postcopy_bytes;
    /*
     * Number of postcopy page faults that we have handled during
     * postcopy stage.
     */
    Stat64 postcopy_requests;
    /*
     * Number of bytes sent during precopy stage.
     */
    Stat64 precopy_bytes;
    /*
     * Number of bytes transferred with QEMUFile.
     */
    Stat64 qemu_file_transferred;
    /*
     * Amount of transferred data at the start of current cycle.
     */
    Stat64 rate_limit_start;
    /*
     * Maximum amount of data we can send in a cycle.
     * RATE_LIMIT_DISABLED (0) means no limit is applied.
     */
    Stat64 rate_limit_max;
    /*
     * Number of bytes sent through RDMA.
     */
    Stat64 rdma_bytes;
    /*
     * Number of pages transferred that were full of zeros.
     */
    Stat64 zero_pages;
} MigrationAtomicStats;
105
106 extern MigrationAtomicStats mig_stats;
107
108 /**
109 * migration_rate_get: Get the maximum amount that can be transferred.
110 *
111 * Returns the maximum number of bytes that can be transferred in a cycle.
112 */
113 uint64_t migration_rate_get(void);
114
115 /**
116 * migration_rate_reset: Reset the rate limit counter.
117 *
118 * This is called when we know we start a new transfer cycle.
119 */
120 void migration_rate_reset(void);
121
122 /**
123 * migration_rate_set: Set the maximum amount that can be transferred.
124 *
125 * Sets the maximum amount of bytes that can be transferred in one cycle.
126 *
127 * @new_rate: new maximum amount
128 */
129 void migration_rate_set(uint64_t new_rate);
130
/**
 * migration_transferred_bytes: Return the number of bytes transferred
 *
 * Returns how many bytes we have transferred since the beginning of
 * the migration.  It accounts for bytes sent through any migration
 * channel: multifd, qemu_file, rdma, ...
 */
138 uint64_t migration_transferred_bytes(void);
139 #endif