]>
Commit | Line | Data |
---|---|---|
ae2d489c PB |
1 | /* |
2 | * Atomic operations on 64-bit quantities. | |
3 | * | |
4 | * Copyright (C) 2017 Red Hat, Inc. | |
5 | * | |
6 | * Author: Paolo Bonzini <pbonzini@redhat.com> | |
7 | * | |
8 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
9 | * See the COPYING file in the top-level directory. | |
10 | */ | |
11 | ||
12 | #include "qemu/osdep.h" | |
13 | #include "qemu/atomic.h" | |
14 | #include "qemu/stats64.h" | |
15 | #include "qemu/processor.h" | |
16 | ||
17 | #ifndef CONFIG_ATOMIC64 | |
/*
 * Acquire the read side of s->lock.
 *
 * Lock protocol: bit 0 of s->lock is the writer bit (see
 * stat64_wrtrylock, which cmpxchg's 0 -> 1); readers count in units
 * of two, so any value >= 2 with bit 0 clear means "readers present,
 * no writer".
 */
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us.
     * Registering as a reader first (before spinning) means a writer
     * that arrives after us sees lock != 0 and its trylock fails.
     */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}
28 | ||
/*
 * Release the read side of s->lock: undo the +2 reader registration
 * done in stat64_rdlock.
 */
static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}
33 | ||
34 | static inline bool stat64_wrtrylock(Stat64 *s) | |
35 | { | |
d73415a3 | 36 | return qatomic_cmpxchg(&s->lock, 0, 1) == 0; |
ae2d489c PB |
37 | } |
38 | ||
/*
 * Release the write side of s->lock: clear the writer bit (bit 0)
 * that stat64_wrtrylock set.
 */
static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}
43 | ||
44 | uint64_t stat64_get(const Stat64 *s) | |
45 | { | |
46 | uint32_t high, low; | |
47 | ||
48 | stat64_rdlock((Stat64 *)s); | |
49 | ||
50 | /* 64-bit writes always take the lock, so we can read in | |
51 | * any order. | |
52 | */ | |
d73415a3 SH |
53 | high = qatomic_read(&s->high); |
54 | low = qatomic_read(&s->low); | |
ae2d489c PB |
55 | stat64_rdunlock((Stat64 *)s); |
56 | ||
57 | return ((uint64_t)high << 32) | low; | |
58 | } | |
59 | ||
7757b55e PB |
60 | void stat64_set(Stat64 *s, uint64_t val) |
61 | { | |
62 | while (!stat64_wrtrylock(s)) { | |
63 | cpu_relax(); | |
64 | } | |
65 | ||
66 | qatomic_set(&s->high, val >> 32); | |
67 | qatomic_set(&s->low, val); | |
68 | stat64_wrunlock(s); | |
69 | } | |
70 | ||
/*
 * Slow path for 64-bit addition: add the 64-bit quantity
 * ((uint64_t)high << 32 | low) to *s under the write lock.
 *
 * Returns false without doing the addition if the lock could not be
 * taken; the caller is expected to retry.
 */
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update. By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    /* Unsigned wraparound detection: if old + low wrapped below old,
     * the low word overflowed and one carry propagates into high.
     */
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
90 | ||
/*
 * Slow path for stat64_min: under the write lock, replace *s with
 * value if value is smaller than the current contents.
 *
 * Returns false without updating if the lock could not be taken; the
 * caller is expected to retry.
 */
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* We hold the write lock, so both halves are stable. */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low. The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
119 | ||
/*
 * Slow path for stat64_max: under the write lock, replace *s with
 * value if value is larger than the current contents.
 *
 * Returns false without updating if the lock could not be taken; the
 * caller is expected to retry.  Mirror image of stat64_min_slow.
 */
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* We hold the write lock, so both halves are stable. */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low. The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
148 | #endif |