/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"
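
/*
 * Fallback for hosts without native 64-bit atomics: the value is kept as
 * two 32-bit halves (s->low, s->high) guarded by a tiny spinlock in
 * s->lock.  Bit 0 of the lock marks an active writer; each concurrent
 * reader adds 2, so the lock value is (writer ? 1 : 0) + 2 * readers.
 * A writer can only get in when the lock reads exactly 0.
 */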

#ifndef CONFIG_ATOMIC64
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers so they cannot starve us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}
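
/*
 * Note that stat64_wrunlock() drops the writer bit with a decrement
 * rather than by storing 0: readers may already have registered
 * themselves (added 2) while the writer held the lock, and those
 * reader counts must survive the release.
 */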

uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}
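
/*
 * A minimal usage sketch (hypothetical caller; the public wrappers such
 * as stat64_init(), stat64_add() and stat64_get() are declared in
 * "qemu/stats64.h"):
 *
 *     Stat64 bytes_in_flight;
 *     stat64_init(&bytes_in_flight, 0);
 *     stat64_add(&bytes_in_flight, len);
 *     uint64_t now = stat64_get(&bytes_in_flight);
 */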

void stat64_set(Stat64 *s, uint64_t val)
{
    while (!stat64_wrtrylock(s)) {
        cpu_relax();
    }

    qatomic_set(&s->high, val >> 32);
    qatomic_set(&s->low, val);
    stat64_wrunlock(s);
}
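
/*
 * Unlike the _slow helpers below, stat64_set() returns void and has no
 * fast path to fall back to, so it spins here until it acquires the
 * write lock rather than reporting failure to the caller.
 */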

bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update. By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
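
/*
 * Worked example of the carry detection above: if s->low was 0xFFFFFFFE
 * and we add low = 3, the 32-bit sum wraps around to 1, so
 * (old + low) < old is true and exactly 1 is carried into s->high.
 */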

bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low. The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
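
/*
 * All three _slow helpers return false instead of spinning when the
 * write lock is contended; the expectation is that the caller
 * (presumably the inline fast path in "qemu/stats64.h") retries the
 * whole operation after observing the failure.
 */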

bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low. The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif
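
/*
 * On hosts that do define CONFIG_ATOMIC64 none of the code above is
 * compiled: the operations are expected to reduce to single 64-bit
 * atomics supplied inline by the header, along the lines of (a sketch,
 * not the verbatim header code):
 *
 *     static inline uint64_t stat64_get(const Stat64 *s)
 *     {
 *         return qatomic_read__nocheck(&s->value);
 *     }
 */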