 * Routines common to user and system emulation of load/store.
 * Copyright (c) 2022 Linaro, Ltd.
 * SPDX-License-Identifier: GPL-2.0-or-later
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
#include "host/load-extract-al16-al8.h"
#include "host/store-insert-al16.h"
#ifdef CONFIG_ATOMIC64
# define HAVE_al8 true
#else
# define HAVE_al8 false
#endif
#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8)
 * Return the lg2 bytes of atomicity required by @memop for @p.
 * If the operation must be split into two operations to be
 * examined separately for atomicity, return -lg2.
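 *
 * For example, an 8-byte MO_ATOM_WITHIN16_PAIR access at p % 16 == 12
 * straddles the 16-byte boundary exactly: each 4-byte half is naturally
 * aligned and atomic, so the result is MO_32.  At p % 16 == 10 one half
 * crosses the boundary, so the result is -MO_32 and each half must be
 * examined separately.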
static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
    MemOp atom = memop & MO_ATOM_MASK;
    MemOp size = memop & MO_SIZE;
    MemOp half = size ? size - 1 : 0;
    case MO_ATOM_IFALIGN_PAIR:
        tmp = (1 << size) - 1;
        atmax = p & tmp ? MO_8 : size;

    case MO_ATOM_WITHIN16:
        atmax = (tmp + (1 << size) <= 16 ? size : MO_8);

    case MO_ATOM_WITHIN16_PAIR:
        if (tmp + (1 << size) <= 16) {
        } else if (tmp + (1 << half) == 16) {
             * The pair exactly straddles the boundary.
             * Both halves are naturally aligned and atomic.

             * One of the pair crosses the boundary, and is non-atomic.
             * The other of the pair does not cross, and is atomic.
    case MO_ATOM_SUBALIGN:
         * Examine the alignment of p to determine if there are subobjects
         * that must be aligned. Note that we only really need ctz4() --
         * any more significant bits are discarded by the immediately
         * following comparison.
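         * For example, a 4-byte access at p % 4 == 2 is only 2-aligned, so
         * each aligned 2-byte subobject must be stored atomically and the
         * result is MO_16.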
        atmax = MIN(size, tmp);

        g_assert_not_reached();

     * Here we have the architectural atomicity of the operation.
     * However, when executing in a serial context, we need no extra
     * host atomicity in order to avoid racing. This reduction
     * avoids looping with cpu_loop_exit_atomic.
    if (cpu_in_serial_context(env_cpu(env))) {
 * Atomically load 2 aligned bytes from @pv.
static inline uint16_t load_atomic2(void *pv)
    uint16_t *p = __builtin_assume_aligned(pv, 2);
    return qatomic_read(p);

 * Atomically load 4 aligned bytes from @pv.
static inline uint32_t load_atomic4(void *pv)
    uint32_t *p = __builtin_assume_aligned(pv, 4);
    return qatomic_read(p);

 * Atomically load 8 aligned bytes from @pv.
static inline uint64_t load_atomic8(void *pv)
    uint64_t *p = __builtin_assume_aligned(pv, 8);

    qemu_build_assert(HAVE_al8);
    return qatomic_read__nocheck(p);
 * load_atomic8_or_exit:
 * @ra: host unwind address
 * Atomically load 8 aligned bytes from @pv.
 * If this is not possible, longjmp out to restart serially.
static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
        return load_atomic8(pv);

#ifdef CONFIG_USER_ONLY
     * If the page is not writable, then assume the value is immutable
     * and requires no locking. This ignores the case of MAP_SHARED with
     * another process, because the fallback start_exclusive solution
     * provides no protection across processes.
    if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
        uint64_t *p = __builtin_assume_aligned(pv, 8);

    /* Ultimate fallback: re-execute in serial context. */
    cpu_loop_exit_atomic(env_cpu(env), ra);
 * load_atomic16_or_exit:
 * @ra: host unwind address
 * Atomically load 16 aligned bytes from @pv.
 * If this is not possible, longjmp out to restart serially.
static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
    Int128 *p = __builtin_assume_aligned(pv, 16);

    if (HAVE_ATOMIC128_RO) {
        return atomic16_read_ro(p);

#ifdef CONFIG_USER_ONLY
     * We can only use cmpxchg to emulate a load if the page is writable.
     * If the page is not writable, then assume the value is immutable
     * and requires no locking. This ignores the case of MAP_SHARED with
     * another process, because the fallback start_exclusive solution
     * provides no protection across processes.
    if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {

     * In system mode all guest pages are writable, and for user-only
     * we have just checked writability. Try cmpxchg.
    if (HAVE_ATOMIC128_RW) {
        return atomic16_read_rw(p);

    /* Ultimate fallback: re-execute in serial context. */
    cpu_loop_exit_atomic(env_cpu(env), ra);
 * load_atom_extract_al4x2:
 * Load 4 bytes from @p, from two sequential atomic 4-byte loads.
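 *
 * For example, with p % 4 == 1 on a little-endian host, sh == 8 and the
 * result is (a >> 8) | (b << 24): three bytes from the first aligned word
 * and one byte from the second.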
static uint32_t load_atom_extract_al4x2(void *pv)
    uintptr_t pi = (uintptr_t)pv;
    int sh = (pi & 3) * 8;

    pv = (void *)(pi & ~3);
    a = load_atomic4(pv);
    b = load_atomic4(pv + 4);

    if (HOST_BIG_ENDIAN) {
        return (a << sh) | (b >> (-sh & 31));
        return (a >> sh) | (b << (-sh & 31));

 * load_atom_extract_al8x2:
 * Load 8 bytes from @p, from two sequential atomic 8-byte loads.
static uint64_t load_atom_extract_al8x2(void *pv)
    uintptr_t pi = (uintptr_t)pv;
    int sh = (pi & 7) * 8;

    pv = (void *)(pi & ~7);
    a = load_atomic8(pv);
    b = load_atomic8(pv + 8);

    if (HOST_BIG_ENDIAN) {
        return (a << sh) | (b >> (-sh & 63));
        return (a >> sh) | (b << (-sh & 63));
 * load_atom_extract_al8_or_exit:
 * @ra: host unwind address
 * @s: object size in bytes, @s <= 4.
 * Atomically load @s bytes from @p, when p % s != 0, and [p, p+s-1] does
 * not cross an 8-byte boundary. This means that we can perform an atomic
 * 8-byte load and extract.
 * The value is returned in the low bits of a uint32_t.
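 *
 * For example, a 2-byte load at p % 8 == 5 reads the aligned 8 bytes at
 * p - 5 and, on a little-endian host, shifts right by 40 bits to leave
 * the two requested bytes in the low bits of the result.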
static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
    uintptr_t pi = (uintptr_t)pv;
    int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;

    pv = (void *)(pi & ~7);
    return load_atomic8_or_exit(env, ra, pv) >> shr;
 * load_atom_extract_al16_or_exit:
 * @ra: host unwind address
 * @s: object size in bytes, @s <= 8.
 * Atomically load @s bytes from @p, when p % 16 < 8
 * and p % 16 + s > 8. I.e. does not cross a 16-byte
 * boundary, but *does* cross an 8-byte boundary.
 * This is the slow version, so we must have eliminated
 * any faster load_atom_extract_al8_or_exit case.
 * If this is not possible, longjmp out to restart serially.
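 *
 * For example, an 8-byte load at p % 16 == 4 covers offsets 4-11 of the
 * 16-byte block: load the aligned 16 bytes at p - 4 and, on a
 * little-endian host, shift right by 32 bits before taking the low
 * 64 bits.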
static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
    uintptr_t pi = (uintptr_t)pv;
    int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;

     * Note constraints above: p & 8 must be clear.
     * Provoke SIGBUS if possible otherwise.
    pv = (void *)(pi & ~7);
    r = load_atomic16_or_exit(env, ra, pv);

    r = int128_urshift(r, shr);
    return int128_getlo(r);
 * Load 4 bytes from @pv, with two 2-byte atomic loads.
static inline uint32_t load_atom_4_by_2(void *pv)
    uint32_t a = load_atomic2(pv);
    uint32_t b = load_atomic2(pv + 2);

    if (HOST_BIG_ENDIAN) {
        return (a << 16) | b;
        return (b << 16) | a;

 * Load 8 bytes from @pv, with four 2-byte atomic loads.
static inline uint64_t load_atom_8_by_2(void *pv)
    uint32_t a = load_atom_4_by_2(pv);
    uint32_t b = load_atom_4_by_2(pv + 4);

    if (HOST_BIG_ENDIAN) {
        return ((uint64_t)a << 32) | b;
        return ((uint64_t)b << 32) | a;

 * Load 8 bytes from @pv, with two 4-byte atomic loads.
static inline uint64_t load_atom_8_by_4(void *pv)
    uint32_t a = load_atomic4(pv);
    uint32_t b = load_atomic4(pv + 4);

    if (HOST_BIG_ENDIAN) {
        return ((uint64_t)a << 32) | b;
        return ((uint64_t)b << 32) | a;

 * load_atom_8_by_8_or_4:
 * Load 8 bytes from aligned @pv, with at least 4-byte atomicity.
static inline uint64_t load_atom_8_by_8_or_4(void *pv)
        return load_atomic8(pv);
        return load_atom_8_by_4(pv);
 * @memop: the full memory op
 * Load 2 bytes from @p, honoring the atomicity of @memop.
static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
                            void *pv, MemOp memop)
    uintptr_t pi = (uintptr_t)pv;

    if (likely((pi & 1) == 0)) {
        return load_atomic2(pv);
    if (HAVE_ATOMIC128_RO) {
        return load_atom_extract_al16_or_al8(pv, 2);

    atmax = required_atomicity(env, pi, memop);
        return lduw_he_p(pv);
        /* The only case remaining is MO_ATOM_WITHIN16. */
        if (!HAVE_al8_fast && (pi & 3) == 1) {
            /* Big or little endian, we want the middle two bytes. */
            return load_atomic4(pv - 1) >> 8;
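        /*
         * Only p % 16 == 7 places the two bytes astride an 8-byte boundary
         * while still inside the 16-byte block; any other remaining
         * misalignment is covered by an atomic 8-byte load.
         */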
        if ((pi & 15) != 7) {
            return load_atom_extract_al8_or_exit(env, ra, pv, 2);
        return load_atom_extract_al16_or_exit(env, ra, pv, 2);
        g_assert_not_reached();
 * @memop: the full memory op
 * Load 4 bytes from @p, honoring the atomicity of @memop.
static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
                            void *pv, MemOp memop)
    uintptr_t pi = (uintptr_t)pv;

    if (likely((pi & 3) == 0)) {
        return load_atomic4(pv);
    if (HAVE_ATOMIC128_RO) {
        return load_atom_extract_al16_or_al8(pv, 4);

    atmax = required_atomicity(env, pi, memop);
        /*
         * For MO_ATOM_IFALIGN, this is more atomicity than required,
         * but it's trivially supported on all hosts, better than 4
         * individual byte loads (when the host requires alignment),
         * and overlaps with the MO_ATOM_SUBALIGN case of p % 2 == 0.
         */
        return load_atom_extract_al4x2(pv);
            return load_atom_extract_al8_or_exit(env, ra, pv, 4);
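        /*
         * Here the four bytes cross an 8-byte boundary, so only an atomic
         * 16-byte load can cover them.
         */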
        return load_atom_extract_al16_or_exit(env, ra, pv, 4);
        g_assert_not_reached();
 * @memop: the full memory op
 * Load 8 bytes from @p, honoring the atomicity of @memop.
static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
                            void *pv, MemOp memop)
    uintptr_t pi = (uintptr_t)pv;

     * If the host does not support 8-byte atomics, wait until we have
     * examined the atomicity parameters below.
    if (HAVE_al8 && likely((pi & 7) == 0)) {
        return load_atomic8(pv);
    if (HAVE_ATOMIC128_RO) {
        return load_atom_extract_al16_or_al8(pv, 8);

    atmax = required_atomicity(env, pi, memop);
    if (atmax == MO_64) {
        if (!HAVE_al8 && (pi & 7) == 0) {
            return load_atomic8_or_exit(env, ra, pv);
        return load_atom_extract_al16_or_exit(env, ra, pv, 8);
        return load_atom_extract_al8x2(pv);

        return load_atom_8_by_2(pv);
        return load_atom_8_by_4(pv);
            return load_atom_extract_al8x2(pv);
        cpu_loop_exit_atomic(env_cpu(env), ra);
        g_assert_not_reached();

 * @memop: the full memory op
 * Load 16 bytes from @p, honoring the atomicity of @memop.
static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
                           void *pv, MemOp memop)
    uintptr_t pi = (uintptr_t)pv;

     * If the host does not support 16-byte atomics, wait until we have
     * examined the atomicity parameters below.
    if (HAVE_ATOMIC128_RO && likely((pi & 15) == 0)) {
        return atomic16_read_ro(pv);

    atmax = required_atomicity(env, pi, memop);
        a = load_atom_8_by_2(pv);
        b = load_atom_8_by_2(pv + 8);

        a = load_atom_8_by_4(pv);
        b = load_atom_8_by_4(pv + 8);

            cpu_loop_exit_atomic(env_cpu(env), ra);
        a = load_atomic8(pv);
        b = load_atomic8(pv + 8);

            cpu_loop_exit_atomic(env_cpu(env), ra);
        a = load_atom_extract_al8x2(pv);
        b = load_atom_extract_al8x2(pv + 8);

        return load_atomic16_or_exit(env, ra, pv);
        g_assert_not_reached();
    return int128_make128(HOST_BIG_ENDIAN ? b : a, HOST_BIG_ENDIAN ? a : b);
 * @val: value to store
 * Atomically store 2 aligned bytes to @pv.
static inline void store_atomic2(void *pv, uint16_t val)
    uint16_t *p = __builtin_assume_aligned(pv, 2);

 * @val: value to store
 * Atomically store 4 aligned bytes to @pv.
static inline void store_atomic4(void *pv, uint32_t val)
    uint32_t *p = __builtin_assume_aligned(pv, 4);

 * @val: value to store
 * Atomically store 8 aligned bytes to @pv.
static inline void store_atomic8(void *pv, uint64_t val)
    uint64_t *p = __builtin_assume_aligned(pv, 8);

    qemu_build_assert(HAVE_al8);
    qatomic_set__nocheck(p, val);

static inline void store_atom_4_by_2(void *pv, uint32_t val)
    store_atomic2(pv, val >> (HOST_BIG_ENDIAN ? 16 : 0));
    store_atomic2(pv + 2, val >> (HOST_BIG_ENDIAN ? 0 : 16));

static inline void store_atom_8_by_2(void *pv, uint64_t val)
    store_atom_4_by_2(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0));
    store_atom_4_by_2(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32));

static inline void store_atom_8_by_4(void *pv, uint64_t val)
    store_atomic4(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0));
    store_atomic4(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32));
 * store_atom_insert_al4:
 * @val: shifted value to store
 * @msk: mask for value to store
 * Atomically store @val to @p, masked by @msk.
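 *
 * For example, to replace the middle two bytes of the aligned word, @msk
 * is 0x00ffff00 and @val holds the new bytes already shifted into
 * bits 8-23.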
static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk)
    p = __builtin_assume_aligned(p, 4);
    old = qatomic_read(p);
        new = (old & ~msk) | val;
    } while (!__atomic_compare_exchange_n(p, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));

 * store_atom_insert_al8:
 * @val: shifted value to store
 * @msk: mask for value to store
 * Atomically store @val to @p masked by @msk.
static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
    qemu_build_assert(HAVE_al8);
    p = __builtin_assume_aligned(p, 8);
    old = qatomic_read__nocheck(p);
        new = (old & ~msk) | val;
    } while (!__atomic_compare_exchange_n(p, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
 * @size: number of bytes to store
 * @val_le: data to store
 * Store @size bytes at @p.  The bytes to store are extracted in
 * little-endian order from @val_le; return the bytes of @val_le beyond
 * @size that have not been stored.
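 *
 * For example, store_bytes_leN(p, 2, 0x11223344) writes 0x44 to p[0] and
 * 0x33 to p[1], then returns 0x1122.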
static uint64_t store_bytes_leN(void *pv, int size, uint64_t val_le)
    for (int i = 0; i < size; i++, val_le >>= 8) {
 * @size: number of bytes to store
 * @val_le: data to store
 * As store_bytes_leN, but atomically on each aligned part.
static uint64_t store_parts_leN(void *pv, int size, uint64_t val_le)
        /* Find minimum of alignment and size */
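        /*
         * The low set bit of (pv | size) selects the chunk: e.g. pv % 8 == 2
         * with size == 6 gives ((2 | 6) & 7) == 6, so a 2-byte atomic store
         * is used for this step.
         */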
        switch (((uintptr_t)pv | size) & 7) {
            store_atomic4(pv, le32_to_cpu(val_le));
            store_atomic2(pv, le16_to_cpu(val_le));
            *(uint8_t *)pv = val_le;
            g_assert_not_reached();
 * @size: number of bytes to store
 * @val_le: data to store
 * As store_bytes_leN, but atomically as a whole.
 * Four aligned bytes are guaranteed to cover the store.
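 *
 * For example, storing 2 bytes at pv % 4 == 1 on a little-endian host
 * inserts the bytes into the aligned word at pv - 1 under mask 0x00ffff00
 * and returns the remaining bytes of @val_le.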
static uint64_t store_whole_le4(void *pv, int size, uint64_t val_le)
    int o = (uintptr_t)pv & 3;
    uint32_t m = MAKE_64BIT_MASK(0, sz);

    if (HOST_BIG_ENDIAN) {
        v = bswap32(val_le) >> sh;
        m = bswap32(m) >> sh;
    store_atom_insert_al4(pv - o, v, m);

 * @size: number of bytes to store
 * @val_le: data to store
 * As store_bytes_leN, but atomically as a whole.
 * Eight aligned bytes are guaranteed to cover the store.
static uint64_t store_whole_le8(void *pv, int size, uint64_t val_le)
    int o = (uintptr_t)pv & 7;
    uint64_t m = MAKE_64BIT_MASK(0, sz);

    qemu_build_assert(HAVE_al8);
    if (HOST_BIG_ENDIAN) {
        v = bswap64(val_le) >> sh;
        m = bswap64(m) >> sh;
    store_atom_insert_al8(pv - o, v, m);

 * @size: number of bytes to store
 * @val_le: data to store
 * As store_bytes_leN, but atomically as a whole.
 * 16 aligned bytes are guaranteed to cover the store.
static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
    int o = (uintptr_t)pv & 15;

    qemu_build_assert(HAVE_ATOMIC128_RW);

    /* Like MAKE_64BIT_MASK(0, sz), but larger. */
        m = int128_make64(MAKE_64BIT_MASK(0, sz));
        m = int128_make128(-1, MAKE_64BIT_MASK(0, sz - 64));

    if (HOST_BIG_ENDIAN) {
        v = int128_urshift(bswap128(val_le), sh);
        m = int128_urshift(bswap128(m), sh);
        v = int128_lshift(val_le, sh);
        m = int128_lshift(m, sh);
    store_atom_insert_al16(pv - o, v, m);

    /* Unused if sz <= 64. */
    return int128_gethi(val_le) >> (sz - 64);
 * @val: the value to store
 * @memop: the full memory op
 * Store 2 bytes to @p, honoring the atomicity of @memop.
static void store_atom_2(CPUArchState *env, uintptr_t ra,
                         void *pv, MemOp memop, uint16_t val)
    uintptr_t pi = (uintptr_t)pv;

    if (likely((pi & 1) == 0)) {
        store_atomic2(pv, val);

    atmax = required_atomicity(env, pi, memop);

     * The only case remaining is MO_ATOM_WITHIN16.
     * Big or little endian, we want the middle two bytes in each test.
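     * With p % 4 == 1 both bytes sit inside one aligned 4-byte word; with
     * p % 8 == 3 they straddle a 4-byte boundary but fit in an 8-byte word;
     * with p % 16 == 7 they straddle an 8-byte boundary and need a 16-byte
     * insert.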
        store_atom_insert_al4(pv - 1, (uint32_t)val << 8, MAKE_64BIT_MASK(8, 16));
    } else if ((pi & 7) == 3) {
            store_atom_insert_al8(pv - 3, (uint64_t)val << 24, MAKE_64BIT_MASK(24, 16));
    } else if ((pi & 15) == 7) {
        if (HAVE_ATOMIC128_RW) {
            Int128 v = int128_lshift(int128_make64(val), 56);
            Int128 m = int128_lshift(int128_make64(0xffff), 56);
            store_atom_insert_al16(pv - 7, v, m);
        g_assert_not_reached();
    cpu_loop_exit_atomic(env_cpu(env), ra);
 * @val: the value to store
 * @memop: the full memory op
 * Store 4 bytes to @p, honoring the atomicity of @memop.
static void store_atom_4(CPUArchState *env, uintptr_t ra,
                         void *pv, MemOp memop, uint32_t val)
    uintptr_t pi = (uintptr_t)pv;

    if (likely((pi & 3) == 0)) {
        store_atomic4(pv, val);

    atmax = required_atomicity(env, pi, memop);
        store_atom_4_by_2(pv, val);
            uint32_t val_le = cpu_to_le32(val);
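            /*
             * Store the three bytes that share an aligned 4-byte word with
             * one atomic insert (covering the half that must be atomic);
             * the byte that spills into the neighbouring word needs no
             * atomicity and is stored on its own.
             */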
                val_le = store_whole_le4(pv, s1, val_le);
                *(uint8_t *)(pv + 3) = val_le;
                *(uint8_t *)pv = val_le;
                store_whole_le4(pv + 1, s2, val_le >> 8);
            case 0: /* aligned */
            case 2: /* atmax MO_16 */
                g_assert_not_reached();

                store_whole_le8(pv, 4, cpu_to_le32(val));
            if (HAVE_ATOMIC128_RW) {
                store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
        cpu_loop_exit_atomic(env_cpu(env), ra);
        g_assert_not_reached();
 * @val: the value to store
 * @memop: the full memory op
 * Store 8 bytes to @p, honoring the atomicity of @memop.
static void store_atom_8(CPUArchState *env, uintptr_t ra,
                         void *pv, MemOp memop, uint64_t val)
    uintptr_t pi = (uintptr_t)pv;

    if (HAVE_al8 && likely((pi & 7) == 0)) {
        store_atomic8(pv, val);

    atmax = required_atomicity(env, pi, memop);
        store_atom_8_by_2(pv, val);
        store_atom_8_by_4(pv, val);
            uint64_t val_le = cpu_to_le64(val);

                val_le = store_whole_le8(pv, s1, val_le);
                store_bytes_leN(pv + s1, s2, val_le);
                val_le = store_bytes_leN(pv, s1, val_le);
                store_whole_le8(pv + s1, s2, val_le);
            case 0: /* aligned */
            case 4: /* atmax MO_32 */
                g_assert_not_reached();

        if (HAVE_ATOMIC128_RW) {
            store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
        g_assert_not_reached();
    cpu_loop_exit_atomic(env_cpu(env), ra);
 * @val: the value to store
 * @memop: the full memory op
 * Store 16 bytes to @p, honoring the atomicity of @memop.
static void store_atom_16(CPUArchState *env, uintptr_t ra,
                          void *pv, MemOp memop, Int128 val)
    uintptr_t pi = (uintptr_t)pv;

    if (HAVE_ATOMIC128_RW && likely((pi & 15) == 0)) {
        atomic16_set(pv, val);

    atmax = required_atomicity(env, pi, memop);
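    /*
     * Split the value so that a is the 8-byte half to be stored at pv and
     * b the half to be stored at pv + 8, for either host endianness.
     */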
    a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
    b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);

        memcpy(pv, &val, 16);

        store_atom_8_by_2(pv, a);
        store_atom_8_by_2(pv + 8, b);

        store_atom_8_by_4(pv, a);
        store_atom_8_by_4(pv + 8, b);

        store_atomic8(pv, a);
        store_atomic8(pv + 8, b);

        if (HAVE_ATOMIC128_RW) {
            if (HOST_BIG_ENDIAN) {
                val = bswap128(val);

                val_le = store_whole_le16(pv, s1, val);
                store_bytes_leN(pv + s1, s2, val_le);

                store_bytes_leN(pv, s1, int128_getlo(val));
                val = int128_urshift(val, s1 * 8);
                store_whole_le16(pv + s1, s2, val);
            case 0: /* aligned */
            case 8: /* atmax MO_64 */
                g_assert_not_reached();

        if (HAVE_ATOMIC128_RW) {
            atomic16_set(pv, val);
        g_assert_not_reached();
    cpu_loop_exit_atomic(env_cpu(env), ra);