/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/plugin.h"
#if DATA_SIZE == 16
# define DATA_TYPE  Int128
# define BSWAP      bswap128
#elif DATA_SIZE == 8
# define DATA_TYPE  aligned_uint64_t
# define SDATA_TYPE aligned_int64_t
# define BSWAP      bswap64
#elif DATA_SIZE == 4
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
#elif DATA_SIZE == 2
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
#elif DATA_SIZE == 1
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
#else
# error unsupported data size
#endif
#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif
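
/*
 * Illustrative sketch (an assumption, not shown in this excerpt): the
 * including file is expected to define DATA_SIZE plus the ATOMIC_NAME,
 * atomic_mmu_lookup, and ATOMIC_MMU_CLEANUP macros before including
 * this template, once per operand size, along the lines of:
 *
 *     #define DATA_SIZE 4
 *     #include "atomic_template.h"
 */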
/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                              ABI_TYPE cmpv, ABI_TYPE newv,
                              MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
    DATA_TYPE ret;

#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return ret;
}
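
/*
 * Example for orientation (hedged: the exact spelling of ATOMIC_NAME is
 * owned by the including file, not by this template). With DATA_SIZE == 4
 * on a little-endian host, the helper generated above would typically be
 * named something like:
 *
 *     helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, retaddr)
 *
 * i.e. one host-endian compare-and-swap helper per instantiation.
 */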
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                           MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
    DATA_TYPE ret;

    ret = qatomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return ret;
}
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    DATA_TYPE *haddr, ret;                                          \
    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);   \
    ret = qatomic_##X(haddr, val);                                  \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return ret;                                                     \
}
GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
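
/*
 * For reference, a hand-expanded instance of the macro above (a sketch,
 * assuming X = fetch_add and DATA_SIZE == 4, so DATA_TYPE is uint32_t):
 */
#if 0
ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, abi_ptr addr,
                                ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr)
{
    uint32_t *haddr, ret;
    haddr = atomic_mmu_lookup(env, addr, oi, 4, retaddr);
    ret = qatomic_fetch_add(haddr, val);    /* token-pasted from ##X */
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return ret;
}
#endif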
/*
 * These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    XDATA_TYPE *haddr, cmp, old, new, val = xval;                   \
    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);   \
    smp_mb();                                                       \
    cmp = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);            \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return RET;                                                     \
}
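
/*
 * The loop above is the classic load + compare-and-swap retry pattern.
 * A minimal standalone sketch of the same idea using the GCC/Clang
 * __atomic builtins (an illustration, not code used by QEMU):
 */
#if 0
static int32_t example_fetch_smin(int32_t *p, int32_t val)
{
    int32_t old = __atomic_load_n(p, __ATOMIC_RELAXED);
    int32_t new;
    do {
        new = val < old ? val : old;
        /* On failure, the builtin reloads the current value into old. */
    } while (!__atomic_compare_exchange_n(p, &old, new, false,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST));
    return old; /* "fetch_" flavors return the value before the update */
}
#endif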
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE < 16 */

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                              ABI_TYPE cmpv, ABI_TYPE newv,
                              MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
    DATA_TYPE ret;

#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return BSWAP(ret);
}
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                           MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
    DATA_TYPE ret;

    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return BSWAP(ret);
}
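
/*
 * Worked example (illustrative): with DATA_SIZE == 2 on a little-endian
 * host, a big-endian guest value 0x1234 sits in host memory as the
 * host-endian value 0x3412. The helper byte-swaps val on the way in and
 * the loaded result on the way out, so callers see guest-order values
 * while memory is only ever touched by a single host-endian atomic op.
 */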
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    DATA_TYPE *haddr, ret;                                          \
    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);   \
    ret = qatomic_##X(haddr, BSWAP(val));                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return BSWAP(ret);                                              \
}
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
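
/*
 * Only the bitwise operations can reuse the plain host atomics above,
 * because byte-swapping distributes over them: for any values a and b,
 *
 *     BSWAP(a) & BSWAP(b) == BSWAP(a & b)
 *
 * and likewise for | and ^. Arithmetic ops (add, min, max) do not
 * commute with BSWAP, so they go through the cmpxchg loop below instead.
 */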
/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval;              \
    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);   \
    smp_mb();                                                       \
    ldn = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));     \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return RET;                                                     \
}
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
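
/*
 * Concrete illustration of why ADD cannot be done as a byte-swapped host
 * atomic add: take DATA_SIZE == 2 and a guest value 0x00FF stored
 * reverse-host-endian as 0xFF00. Adding 1 should give guest 0x0100
 * (stored as 0x0001), but a host-side 0xFF00 + BSWAP(0x0001) =
 * 0xFF00 + 0x0100 = 0x0000 is wrong: the carry propagates across the
 * wrong byte boundary. Hence the swap-compute-swap cmpxchg loop above.
 */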
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE < 16 */

#endif /* DATA_SIZE > 1 */