/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_STATS64_H
#define QEMU_STATS64_H

#include "qemu/atomic.h"

/* This provides atomic operations on a 64-bit type, using a reader-writer
 * spinlock on architectures that lack 64-bit atomic accesses. Even on
 * those architectures, it tries hard not to take the lock.
 */

typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
    aligned_uint64_t value;
#else
    uint32_t low, high;
    uint32_t lock;
#endif
} Stat64;
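
/*
 * A minimal usage sketch (the counter name "rx_bytes" is illustrative,
 * not part of QEMU):
 *
 *     Stat64 rx_bytes;
 *
 *     stat64_init(&rx_bytes, 0);
 *     stat64_add(&rx_bytes, 4096);    (* may be called from any thread *)
 *     uint64_t total = stat64_get(&rx_bytes);
 */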

#ifdef CONFIG_ATOMIC64
static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { value };
}

static inline uint64_t stat64_get(const Stat64 *s)
{
    return qatomic_read__nocheck(&s->value);
}

static inline void stat64_set(Stat64 *s, uint64_t value)
{
    qatomic_set__nocheck(&s->value, value);
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    qatomic_add(&s->value, value);
}

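/*
 * stat64_min/stat64_max are lock-free compare-and-swap loops:
 * qatomic_cmpxchg__nocheck returns the previously stored value, so each
 * iteration either installs the new bound or refreshes "orig", and the
 * loop exits once the stored value already satisfies the bound.
 */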
static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
    while (orig > value) {
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
    while (orig < value) {
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}
#else
uint64_t stat64_get(const Stat64 *s);
void stat64_set(Stat64 *s, uint64_t value);
bool stat64_min_slow(Stat64 *s, uint64_t value);
bool stat64_max_slow(Stat64 *s, uint64_t value);
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);

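/*
 * The helpers above are the out-of-line slow paths that take the
 * spinlock; the inline fast paths below fall back to them only when a
 * 32-bit read or cmpxchg cannot complete the update on its own.
 */
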
static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { .low = value, .high = value >> 32, .lock = 0 };
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    high = value >> 32;
    low = (uint32_t) value;
    if (!low) {
        if (high) {
            qatomic_add(&s->high, high);
        }
        return;
    }

    for (;;) {
        uint32_t orig = s->low;
        uint32_t result = orig + low;
        uint32_t old;

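        /*
         * "result < low" detects unsigned wraparound of the low word,
         * e.g. orig == 0xffffff00 and low == 0x200 give result == 0x100;
         * the carry into the high word must then propagate under the lock.
         */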
        if (result < low || high) {
            /* If the high part is affected, take the lock. */
            if (stat64_add32_carry(s, low, high)) {
                return;
            }
            continue;
        }

        /* No carry, try with a 32-bit cmpxchg. The result is independent of
         * the high 32 bits, so it can race just fine with stat64_add32_carry
         * and even stat64_get!
         */
        old = qatomic_cmpxchg(&s->low, orig, result);
        if (orig == old) {
            return;
        }
    }
}

static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
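    /*
     * Fast path: try to prove, with 32-bit reads only, that the stored
     * value is already <= value, in which case there is nothing to do.
     */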
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high < high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal. Read low after high, otherwise we
             * can get a false positive (e.g. 0x1235,0x0000 changes to
             * 0x1234,0x8000 and we read it as 0x1234,0x0000). Pairs with
             * the write barrier in stat64_min_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low <= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us. The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high < high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock. */
    } while (!stat64_min_slow(s, value));
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
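    /*
     * Fast path, mirroring stat64_min: prove with 32-bit reads only that
     * the stored value is already >= value, so no update is needed.
     */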
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high > high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal. Read low after high, otherwise we
             * can get a false positive (e.g. 0x1234,0x8000 changes to
             * 0x1235,0x0000 and we read it as 0x1235,0x8000). Pairs with
             * the write barrier in stat64_max_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low >= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us. The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high > high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock. */
    } while (!stat64_max_slow(s, value));
}

#endif

#endif