/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "hw/core/tcg-cpu-ops.h"
21 #include "disas/disas.h"
22 #include "exec/exec-all.h"
24 #include "qemu/bitops.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/translate-all.h"
28 #include "exec/helper-proto.h"
29 #include "qemu/atomic128.h"
30 #include "trace/trace-root.h"
31 #include "tcg/tcg-ldst.h"
/*
 * Per-thread host return address of the helper currently performing a
 * guest memory access; consulted by adjust_signal_pc() when classifying
 * a host fault.  Set/cleared via set_helper_retaddr()/clear_helper_retaddr().
 */
__thread uintptr_t helper_retaddr;
36 //#define DEBUG_SIGNAL
39 * Adjust the pc to pass to cpu_restore_state; return the memop type.
41 MMUAccessType
adjust_signal_pc(uintptr_t *pc
, bool is_write
)
43 switch (helper_retaddr
) {
46 * Fault during host memory operation within a helper function.
47 * The helper's host return address, saved here, gives us a
48 * pointer into the generated code that will unwind to the
56 * Fault during host memory operation within generated code.
57 * (Or, a unrelated bug within qemu, but we can't tell from here).
59 * We take the host pc from the signal frame. However, we cannot
60 * use that value directly. Within cpu_restore_state_from_tb, we
61 * assume PC comes from GETPC(), as used by the helper functions,
62 * so we adjust the address by -GETPC_ADJ to form an address that
63 * is within the call insn, so that the address does not accidentally
64 * match the beginning of the next guest insn. However, when the
65 * pc comes from the signal frame it points to the actual faulting
66 * host memory insn and not the return from a call insn.
68 * Therefore, adjust to compensate for what will be done later
69 * by cpu_restore_state_from_tb.
76 * Fault during host read for translation, or loosely, "execution".
78 * The guest pc is already pointing to the start of the TB for which
79 * code is being generated. If the guest translator manages the
80 * page crossings correctly, this is exactly the correct address
81 * (and if the translator doesn't handle page boundaries correctly
82 * there's little we can do about that here). Therefore, do not
83 * trigger the unwinder.
86 return MMU_INST_FETCH
;
89 return is_write
? MMU_DATA_STORE
: MMU_DATA_LOAD
;
93 * handle_sigsegv_accerr_write:
94 * @cpu: the cpu context
95 * @old_set: the sigset_t from the signal ucontext_t
96 * @host_pc: the host pc, adjusted for the signal
97 * @guest_addr: the guest address of the fault
99 * Return true if the write fault has been handled, and should be re-tried.
101 * Note that it is important that we don't call page_unprotect() unless
102 * this is really a "write to nonwritable page" fault, because
103 * page_unprotect() assumes that if it is called for an access to
104 * a page that's writable this means we had two threads racing and
105 * another thread got there first and already made the page writable;
106 * so we will retry the access. If we were to call page_unprotect()
107 * for some other kind of fault that should really be passed to the
108 * guest, we'd end up in an infinite loop of retrying the faulting access.
110 bool handle_sigsegv_accerr_write(CPUState
*cpu
, sigset_t
*old_set
,
111 uintptr_t host_pc
, abi_ptr guest_addr
)
113 switch (page_unprotect(guest_addr
, host_pc
)) {
116 * Fault not caused by a page marked unwritable to protect
117 * cached translations, must be the guest binary's problem.
122 * Fault caused by protection of cached translation; TBs
123 * invalidated, so resume execution.
128 * Fault caused by protection of cached translation, and the
129 * currently executing TB was modified and must be exited immediately.
131 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
132 cpu_loop_exit_noexc(cpu
);
135 g_assert_not_reached();
139 typedef struct PageFlagsNode
{
141 IntervalTreeNode itree
;
145 static IntervalTreeRoot pageflags_root
;
147 static PageFlagsNode
*pageflags_find(target_ulong start
, target_long last
)
151 n
= interval_tree_iter_first(&pageflags_root
, start
, last
);
152 return n
? container_of(n
, PageFlagsNode
, itree
) : NULL
;
155 static PageFlagsNode
*pageflags_next(PageFlagsNode
*p
, target_ulong start
,
160 n
= interval_tree_iter_next(&p
->itree
, start
, last
);
161 return n
? container_of(n
, PageFlagsNode
, itree
) : NULL
;
164 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
170 for (n
= interval_tree_iter_first(&pageflags_root
, 0, -1);
172 n
= interval_tree_iter_next(n
, 0, -1)) {
173 PageFlagsNode
*p
= container_of(n
, PageFlagsNode
, itree
);
175 rc
= fn(priv
, n
->start
, n
->last
+ 1, p
->flags
);
185 static int dump_region(void *priv
, target_ulong start
,
186 target_ulong end
, unsigned long prot
)
188 FILE *f
= (FILE *)priv
;
190 fprintf(f
, TARGET_FMT_lx
"-"TARGET_FMT_lx
" "TARGET_FMT_lx
" %c%c%c\n",
191 start
, end
, end
- start
,
192 ((prot
& PAGE_READ
) ? 'r' : '-'),
193 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
194 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
198 /* dump memory mappings */
199 void page_dump(FILE *f
)
201 const int length
= sizeof(target_ulong
) * 2;
203 fprintf(f
, "%-*s %-*s %-*s %s\n",
204 length
, "start", length
, "end", length
, "size", "prot");
205 walk_memory_regions(f
, dump_region
);
208 int page_get_flags(target_ulong address
)
210 PageFlagsNode
*p
= pageflags_find(address
, address
);
213 * See util/interval-tree.c re lockless lookups: no false positives but
214 * there are false negatives. If we find nothing, retry with the mmap
220 if (have_mmap_lock()) {
225 p
= pageflags_find(address
, address
);
227 return p
? p
->flags
: 0;
230 /* A subroutine of page_set_flags: insert a new node for [start,last]. */
231 static void pageflags_create(target_ulong start
, target_ulong last
, int flags
)
233 PageFlagsNode
*p
= g_new(PageFlagsNode
, 1);
235 p
->itree
.start
= start
;
236 p
->itree
.last
= last
;
238 interval_tree_insert(&p
->itree
, &pageflags_root
);
241 /* A subroutine of page_set_flags: remove everything in [start,last]. */
242 static bool pageflags_unset(target_ulong start
, target_ulong last
)
244 bool inval_tb
= false;
247 PageFlagsNode
*p
= pageflags_find(start
, last
);
254 if (p
->flags
& PAGE_EXEC
) {
258 interval_tree_remove(&p
->itree
, &pageflags_root
);
259 p_last
= p
->itree
.last
;
261 if (p
->itree
.start
< start
) {
262 /* Truncate the node from the end, or split out the middle. */
263 p
->itree
.last
= start
- 1;
264 interval_tree_insert(&p
->itree
, &pageflags_root
);
266 pageflags_create(last
+ 1, p_last
, p
->flags
);
269 } else if (p_last
<= last
) {
270 /* Range completely covers node -- remove it. */
273 /* Truncate the node from the start. */
274 p
->itree
.start
= last
+ 1;
275 interval_tree_insert(&p
->itree
, &pageflags_root
);
284 * A subroutine of page_set_flags: nothing overlaps [start,last],
285 * but check adjacent mappings and maybe merge into a single range.
287 static void pageflags_create_merge(target_ulong start
, target_ulong last
,
290 PageFlagsNode
*next
= NULL
, *prev
= NULL
;
293 prev
= pageflags_find(start
- 1, start
- 1);
295 if (prev
->flags
== flags
) {
296 interval_tree_remove(&prev
->itree
, &pageflags_root
);
303 next
= pageflags_find(last
+ 1, last
+ 1);
305 if (next
->flags
== flags
) {
306 interval_tree_remove(&next
->itree
, &pageflags_root
);
315 prev
->itree
.last
= next
->itree
.last
;
316 g_free_rcu(next
, rcu
);
318 prev
->itree
.last
= last
;
320 interval_tree_insert(&prev
->itree
, &pageflags_root
);
322 next
->itree
.start
= start
;
323 interval_tree_insert(&next
->itree
, &pageflags_root
);
325 pageflags_create(start
, last
, flags
);
330 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
331 * By default, they are not kept.
333 #ifndef PAGE_TARGET_STICKY
334 #define PAGE_TARGET_STICKY 0
336 #define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
338 /* A subroutine of page_set_flags: add flags to [start,last]. */
339 static bool pageflags_set_clear(target_ulong start
, target_ulong last
,
340 int set_flags
, int clear_flags
)
343 target_ulong p_start
, p_last
;
344 int p_flags
, merge_flags
;
345 bool inval_tb
= false;
348 p
= pageflags_find(start
, last
);
351 pageflags_create_merge(start
, last
, set_flags
);
356 p_start
= p
->itree
.start
;
357 p_last
= p
->itree
.last
;
359 /* Using mprotect on a page does not change sticky bits. */
360 merge_flags
= (p_flags
& ~clear_flags
) | set_flags
;
363 * Need to flush if an overlapping executable region
364 * removes exec, or adds write.
366 if ((p_flags
& PAGE_EXEC
)
367 && (!(merge_flags
& PAGE_EXEC
)
368 || (merge_flags
& ~p_flags
& PAGE_WRITE
))) {
373 * If there is an exact range match, update and return without
374 * attempting to merge with adjacent regions.
376 if (start
== p_start
&& last
== p_last
) {
378 p
->flags
= merge_flags
;
380 interval_tree_remove(&p
->itree
, &pageflags_root
);
387 * If sticky bits affect the original mapping, then we must be more
388 * careful about the existing intervals and the separate flags.
390 if (set_flags
!= merge_flags
) {
391 if (p_start
< start
) {
392 interval_tree_remove(&p
->itree
, &pageflags_root
);
393 p
->itree
.last
= start
- 1;
394 interval_tree_insert(&p
->itree
, &pageflags_root
);
398 pageflags_create(start
, last
, merge_flags
);
400 pageflags_create(last
+ 1, p_last
, p_flags
);
403 pageflags_create(start
, p_last
, merge_flags
);
411 if (start
< p_start
&& set_flags
) {
412 pageflags_create(start
, p_start
- 1, set_flags
);
415 interval_tree_remove(&p
->itree
, &pageflags_root
);
416 p
->itree
.start
= last
+ 1;
417 interval_tree_insert(&p
->itree
, &pageflags_root
);
419 pageflags_create(start
, last
, merge_flags
);
423 p
->flags
= merge_flags
;
425 interval_tree_remove(&p
->itree
, &pageflags_root
);
437 /* If flags are not changing for this range, incorporate it. */
438 if (set_flags
== p_flags
) {
439 if (start
< p_start
) {
440 interval_tree_remove(&p
->itree
, &pageflags_root
);
441 p
->itree
.start
= start
;
442 interval_tree_insert(&p
->itree
, &pageflags_root
);
451 /* Maybe split out head and/or tail ranges with the original flags. */
452 interval_tree_remove(&p
->itree
, &pageflags_root
);
453 if (p_start
< start
) {
454 p
->itree
.last
= start
- 1;
455 interval_tree_insert(&p
->itree
, &pageflags_root
);
461 pageflags_create(last
+ 1, p_last
, p_flags
);
463 } else if (last
< p_last
) {
464 p
->itree
.start
= last
+ 1;
465 interval_tree_insert(&p
->itree
, &pageflags_root
);
471 pageflags_create(start
, last
, set_flags
);
479 * Modify the flags of a page and invalidate the code if necessary.
480 * The flag PAGE_WRITE_ORG is positioned automatically depending
481 * on PAGE_WRITE. The mmap_lock should already be held.
483 void page_set_flags(target_ulong start
, target_ulong last
, int flags
)
486 bool inval_tb
= false;
488 /* This function should never be called with addresses outside the
489 guest address space. If this assert fires, it probably indicates
490 a missing call to h2g_valid. */
491 assert(start
<= last
);
492 assert(last
<= GUEST_ADDR_MAX
);
493 /* Only set PAGE_ANON with new mappings. */
494 assert(!(flags
& PAGE_ANON
) || (flags
& PAGE_RESET
));
495 assert_memory_lock();
497 start
&= TARGET_PAGE_MASK
;
498 last
|= ~TARGET_PAGE_MASK
;
500 if (!(flags
& PAGE_VALID
)) {
503 reset
= flags
& PAGE_RESET
;
504 flags
&= ~PAGE_RESET
;
505 if (flags
& PAGE_WRITE
) {
506 flags
|= PAGE_WRITE_ORG
;
510 if (!flags
|| reset
) {
511 page_reset_target_data(start
, last
);
512 inval_tb
|= pageflags_unset(start
, last
);
515 inval_tb
|= pageflags_set_clear(start
, last
, flags
,
516 ~(reset
? 0 : PAGE_STICKY
));
519 tb_invalidate_phys_range(start
, last
);
523 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
526 int locked
; /* tri-state: =0: unlocked, +1: global, -1: local */
530 return 0; /* trivial length */
533 last
= start
+ len
- 1;
535 return -1; /* wrap around */
538 locked
= have_mmap_lock();
540 PageFlagsNode
*p
= pageflags_find(start
, last
);
546 * Lockless lookups have false negatives.
547 * Retry with the lock held.
551 p
= pageflags_find(start
, last
);
554 ret
= -1; /* entire region invalid */
558 if (start
< p
->itree
.start
) {
559 ret
= -1; /* initial bytes invalid */
563 missing
= flags
& ~p
->flags
;
564 if (missing
& PAGE_READ
) {
565 ret
= -1; /* page not readable */
568 if (missing
& PAGE_WRITE
) {
569 if (!(p
->flags
& PAGE_WRITE_ORG
)) {
570 ret
= -1; /* page not writable */
573 /* Asking about writable, but has been protected: undo. */
574 if (!page_unprotect(start
, 0)) {
578 /* TODO: page_unprotect should take a range, not a single page. */
579 if (last
- start
< TARGET_PAGE_SIZE
) {
583 start
+= TARGET_PAGE_SIZE
;
587 if (last
<= p
->itree
.last
) {
591 start
= p
->itree
.last
+ 1;
594 /* Release the lock if acquired locally. */
601 void page_protect(tb_page_addr_t address
)
604 target_ulong start
, last
;
607 assert_memory_lock();
609 if (qemu_host_page_size
<= TARGET_PAGE_SIZE
) {
610 start
= address
& TARGET_PAGE_MASK
;
611 last
= start
+ TARGET_PAGE_SIZE
- 1;
613 start
= address
& qemu_host_page_mask
;
614 last
= start
+ qemu_host_page_size
- 1;
617 p
= pageflags_find(start
, last
);
623 if (unlikely(p
->itree
.last
< last
)) {
624 /* More than one protection region covers the one host page. */
625 assert(TARGET_PAGE_SIZE
< qemu_host_page_size
);
626 while ((p
= pageflags_next(p
, start
, last
)) != NULL
) {
631 if (prot
& PAGE_WRITE
) {
632 pageflags_set_clear(start
, last
, 0, PAGE_WRITE
);
633 mprotect(g2h_untagged(start
), qemu_host_page_size
,
634 prot
& (PAGE_READ
| PAGE_EXEC
) ? PROT_READ
: PROT_NONE
);
639 * Called from signal handler: invalidate the code and unprotect the
640 * page. Return 0 if the fault was not handled, 1 if it was handled,
641 * and 2 if it was handled but the caller must cause the TB to be
642 * immediately exited. (We can only return 2 if the 'pc' argument is
645 int page_unprotect(target_ulong address
, uintptr_t pc
)
648 bool current_tb_invalidated
;
651 * Technically this isn't safe inside a signal handler. However we
652 * know this only ever happens in a synchronous SEGV handler, so in
653 * practice it seems to be ok.
657 p
= pageflags_find(address
, address
);
659 /* If this address was not really writable, nothing to do. */
660 if (!p
|| !(p
->flags
& PAGE_WRITE_ORG
)) {
665 current_tb_invalidated
= false;
666 if (p
->flags
& PAGE_WRITE
) {
668 * If the page is actually marked WRITE then assume this is because
669 * this thread raced with another one which got here first and
670 * set the page to PAGE_WRITE and did the TB invalidate for us.
672 #ifdef TARGET_HAS_PRECISE_SMC
673 TranslationBlock
*current_tb
= tcg_tb_lookup(pc
);
675 current_tb_invalidated
= tb_cflags(current_tb
) & CF_INVALID
;
679 target_ulong start
, len
, i
;
682 if (qemu_host_page_size
<= TARGET_PAGE_SIZE
) {
683 start
= address
& TARGET_PAGE_MASK
;
684 len
= TARGET_PAGE_SIZE
;
685 prot
= p
->flags
| PAGE_WRITE
;
686 pageflags_set_clear(start
, start
+ len
- 1, PAGE_WRITE
, 0);
687 current_tb_invalidated
= tb_invalidate_phys_page_unwind(start
, pc
);
689 start
= address
& qemu_host_page_mask
;
690 len
= qemu_host_page_size
;
693 for (i
= 0; i
< len
; i
+= TARGET_PAGE_SIZE
) {
694 target_ulong addr
= start
+ i
;
696 p
= pageflags_find(addr
, addr
);
699 if (p
->flags
& PAGE_WRITE_ORG
) {
701 pageflags_set_clear(addr
, addr
+ TARGET_PAGE_SIZE
- 1,
706 * Since the content will be modified, we must invalidate
707 * the corresponding translated code.
709 current_tb_invalidated
|=
710 tb_invalidate_phys_page_unwind(addr
, pc
);
713 if (prot
& PAGE_EXEC
) {
714 prot
= (prot
& ~PAGE_EXEC
) | PAGE_READ
;
716 mprotect((void *)g2h_untagged(start
), len
, prot
& PAGE_BITS
);
720 /* If current TB was invalidated return to main loop */
721 return current_tb_invalidated
? 2 : 1;
724 static int probe_access_internal(CPUArchState
*env
, target_ulong addr
,
725 int fault_size
, MMUAccessType access_type
,
726 bool nonfault
, uintptr_t ra
)
731 switch (access_type
) {
733 acc_flag
= PAGE_WRITE_ORG
;
736 acc_flag
= PAGE_READ
;
739 acc_flag
= PAGE_EXEC
;
742 g_assert_not_reached();
745 if (guest_addr_valid_untagged(addr
)) {
746 int page_flags
= page_get_flags(addr
);
747 if (page_flags
& acc_flag
) {
748 return 0; /* success */
750 maperr
= !(page_flags
& PAGE_VALID
);
756 return TLB_INVALID_MASK
;
759 cpu_loop_exit_sigsegv(env_cpu(env
), addr
, access_type
, maperr
, ra
);
762 int probe_access_flags(CPUArchState
*env
, target_ulong addr
, int size
,
763 MMUAccessType access_type
, int mmu_idx
,
764 bool nonfault
, void **phost
, uintptr_t ra
)
768 g_assert(-(addr
| TARGET_PAGE_MASK
) >= size
);
769 flags
= probe_access_internal(env
, addr
, size
, access_type
, nonfault
, ra
);
770 *phost
= flags
? NULL
: g2h(env_cpu(env
), addr
);
774 void *probe_access(CPUArchState
*env
, target_ulong addr
, int size
,
775 MMUAccessType access_type
, int mmu_idx
, uintptr_t ra
)
779 g_assert(-(addr
| TARGET_PAGE_MASK
) >= size
);
780 flags
= probe_access_internal(env
, addr
, size
, access_type
, false, ra
);
781 g_assert(flags
== 0);
783 return size
? g2h(env_cpu(env
), addr
) : NULL
;
786 tb_page_addr_t
get_page_addr_code_hostp(CPUArchState
*env
, target_ulong addr
,
791 flags
= probe_access_internal(env
, addr
, 1, MMU_INST_FETCH
, false, 0);
792 g_assert(flags
== 0);
795 *hostp
= g2h_untagged(addr
);
800 #ifdef TARGET_PAGE_DATA_SIZE
802 * Allocate chunks of target data together. For the only current user,
803 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
804 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
807 #define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)
809 typedef struct TargetPageDataNode
{
811 IntervalTreeNode itree
;
812 char data
[TPD_PAGES
][TARGET_PAGE_DATA_SIZE
] __attribute__((aligned
));
813 } TargetPageDataNode
;
815 static IntervalTreeRoot targetdata_root
;
817 void page_reset_target_data(target_ulong start
, target_ulong last
)
819 IntervalTreeNode
*n
, *next
;
821 assert_memory_lock();
823 start
&= TARGET_PAGE_MASK
;
824 last
|= ~TARGET_PAGE_MASK
;
826 for (n
= interval_tree_iter_first(&targetdata_root
, start
, last
),
827 next
= n
? interval_tree_iter_next(n
, start
, last
) : NULL
;
830 next
= next
? interval_tree_iter_next(n
, start
, last
) : NULL
) {
831 target_ulong n_start
, n_last
, p_ofs
, p_len
;
832 TargetPageDataNode
*t
= container_of(n
, TargetPageDataNode
, itree
);
834 if (n
->start
>= start
&& n
->last
<= last
) {
835 interval_tree_remove(n
, &targetdata_root
);
840 if (n
->start
< start
) {
842 p_ofs
= (start
- n
->start
) >> TARGET_PAGE_BITS
;
847 n_last
= MIN(last
, n
->last
);
848 p_len
= (n_last
+ 1 - n_start
) >> TARGET_PAGE_BITS
;
850 memset(t
->data
[p_ofs
], 0, p_len
* TARGET_PAGE_DATA_SIZE
);
854 void *page_get_target_data(target_ulong address
)
857 TargetPageDataNode
*t
;
858 target_ulong page
, region
;
860 page
= address
& TARGET_PAGE_MASK
;
861 region
= address
& TBD_MASK
;
863 n
= interval_tree_iter_first(&targetdata_root
, page
, page
);
866 * See util/interval-tree.c re lockless lookups: no false positives
867 * but there are false negatives. If we find nothing, retry with
868 * the mmap lock acquired. We also need the lock for the
869 * allocation + insert.
872 n
= interval_tree_iter_first(&targetdata_root
, page
, page
);
874 t
= g_new0(TargetPageDataNode
, 1);
877 n
->last
= region
| ~TBD_MASK
;
878 interval_tree_insert(n
, &targetdata_root
);
883 t
= container_of(n
, TargetPageDataNode
, itree
);
884 return t
->data
[(page
- region
) >> TARGET_PAGE_BITS
];
887 void page_reset_target_data(target_ulong start
, target_ulong last
) { }
888 #endif /* TARGET_PAGE_DATA_SIZE */
890 /* The softmmu versions of these helpers are in cputlb.c. */
892 static void *cpu_mmu_lookup(CPUArchState
*env
, abi_ptr addr
,
893 MemOp mop
, uintptr_t ra
, MMUAccessType type
)
895 int a_bits
= get_alignment_bits(mop
);
898 /* Enforce guest required alignment. */
899 if (unlikely(addr
& ((1 << a_bits
) - 1))) {
900 cpu_loop_exit_sigbus(env_cpu(env
), addr
, type
, ra
);
903 ret
= g2h(env_cpu(env
), addr
);
904 set_helper_retaddr(ra
);
908 #include "ldst_atomicity.c.inc"
910 static uint8_t do_ld1_mmu(CPUArchState
*env
, abi_ptr addr
,
911 MemOp mop
, uintptr_t ra
)
916 tcg_debug_assert((mop
& MO_SIZE
) == MO_8
);
917 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_LOAD
);
919 clear_helper_retaddr();
923 tcg_target_ulong
helper_ldub_mmu(CPUArchState
*env
, uint64_t addr
,
924 MemOpIdx oi
, uintptr_t ra
)
926 return do_ld1_mmu(env
, addr
, get_memop(oi
), ra
);
929 tcg_target_ulong
helper_ldsb_mmu(CPUArchState
*env
, uint64_t addr
,
930 MemOpIdx oi
, uintptr_t ra
)
932 return (int8_t)do_ld1_mmu(env
, addr
, get_memop(oi
), ra
);
935 uint8_t cpu_ldb_mmu(CPUArchState
*env
, abi_ptr addr
,
936 MemOpIdx oi
, uintptr_t ra
)
938 uint8_t ret
= do_ld1_mmu(env
, addr
, get_memop(oi
), ra
);
939 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
943 static uint16_t do_ld2_he_mmu(CPUArchState
*env
, abi_ptr addr
,
944 MemOp mop
, uintptr_t ra
)
949 tcg_debug_assert((mop
& MO_SIZE
) == MO_16
);
950 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_LOAD
);
951 ret
= load_atom_2(env
, ra
, haddr
, mop
);
952 clear_helper_retaddr();
956 tcg_target_ulong
helper_lduw_mmu(CPUArchState
*env
, uint64_t addr
,
957 MemOpIdx oi
, uintptr_t ra
)
959 MemOp mop
= get_memop(oi
);
960 uint16_t ret
= do_ld2_he_mmu(env
, addr
, mop
, ra
);
962 if (mop
& MO_BSWAP
) {
968 tcg_target_ulong
helper_ldsw_mmu(CPUArchState
*env
, uint64_t addr
,
969 MemOpIdx oi
, uintptr_t ra
)
971 MemOp mop
= get_memop(oi
);
972 int16_t ret
= do_ld2_he_mmu(env
, addr
, mop
, ra
);
974 if (mop
& MO_BSWAP
) {
980 uint16_t cpu_ldw_be_mmu(CPUArchState
*env
, abi_ptr addr
,
981 MemOpIdx oi
, uintptr_t ra
)
983 MemOp mop
= get_memop(oi
);
986 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
987 ret
= do_ld2_he_mmu(env
, addr
, mop
, ra
);
988 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
989 return cpu_to_be16(ret
);
992 uint16_t cpu_ldw_le_mmu(CPUArchState
*env
, abi_ptr addr
,
993 MemOpIdx oi
, uintptr_t ra
)
995 MemOp mop
= get_memop(oi
);
998 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
999 ret
= do_ld2_he_mmu(env
, addr
, mop
, ra
);
1000 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1001 return cpu_to_le16(ret
);
1004 static uint32_t do_ld4_he_mmu(CPUArchState
*env
, abi_ptr addr
,
1005 MemOp mop
, uintptr_t ra
)
1010 tcg_debug_assert((mop
& MO_SIZE
) == MO_32
);
1011 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_LOAD
);
1012 ret
= load_atom_4(env
, ra
, haddr
, mop
);
1013 clear_helper_retaddr();
1017 tcg_target_ulong
helper_ldul_mmu(CPUArchState
*env
, uint64_t addr
,
1018 MemOpIdx oi
, uintptr_t ra
)
1020 MemOp mop
= get_memop(oi
);
1021 uint32_t ret
= do_ld4_he_mmu(env
, addr
, mop
, ra
);
1023 if (mop
& MO_BSWAP
) {
1029 tcg_target_ulong
helper_ldsl_mmu(CPUArchState
*env
, uint64_t addr
,
1030 MemOpIdx oi
, uintptr_t ra
)
1032 MemOp mop
= get_memop(oi
);
1033 int32_t ret
= do_ld4_he_mmu(env
, addr
, mop
, ra
);
1035 if (mop
& MO_BSWAP
) {
1041 uint32_t cpu_ldl_be_mmu(CPUArchState
*env
, abi_ptr addr
,
1042 MemOpIdx oi
, uintptr_t ra
)
1044 MemOp mop
= get_memop(oi
);
1047 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1048 ret
= do_ld4_he_mmu(env
, addr
, mop
, ra
);
1049 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1050 return cpu_to_be32(ret
);
1053 uint32_t cpu_ldl_le_mmu(CPUArchState
*env
, abi_ptr addr
,
1054 MemOpIdx oi
, uintptr_t ra
)
1056 MemOp mop
= get_memop(oi
);
1059 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1060 ret
= do_ld4_he_mmu(env
, addr
, mop
, ra
);
1061 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1062 return cpu_to_le32(ret
);
1065 static uint64_t do_ld8_he_mmu(CPUArchState
*env
, abi_ptr addr
,
1066 MemOp mop
, uintptr_t ra
)
1071 tcg_debug_assert((mop
& MO_SIZE
) == MO_64
);
1072 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_LOAD
);
1073 ret
= load_atom_8(env
, ra
, haddr
, mop
);
1074 clear_helper_retaddr();
1078 uint64_t helper_ldq_mmu(CPUArchState
*env
, uint64_t addr
,
1079 MemOpIdx oi
, uintptr_t ra
)
1081 MemOp mop
= get_memop(oi
);
1082 uint64_t ret
= do_ld8_he_mmu(env
, addr
, mop
, ra
);
1084 if (mop
& MO_BSWAP
) {
1090 uint64_t cpu_ldq_be_mmu(CPUArchState
*env
, abi_ptr addr
,
1091 MemOpIdx oi
, uintptr_t ra
)
1093 MemOp mop
= get_memop(oi
);
1096 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1097 ret
= do_ld8_he_mmu(env
, addr
, mop
, ra
);
1098 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1099 return cpu_to_be64(ret
);
1102 uint64_t cpu_ldq_le_mmu(CPUArchState
*env
, abi_ptr addr
,
1103 MemOpIdx oi
, uintptr_t ra
)
1105 MemOp mop
= get_memop(oi
);
1108 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1109 ret
= do_ld8_he_mmu(env
, addr
, mop
, ra
);
1110 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1111 return cpu_to_le64(ret
);
1114 static Int128
do_ld16_he_mmu(CPUArchState
*env
, abi_ptr addr
,
1115 MemOp mop
, uintptr_t ra
)
1120 tcg_debug_assert((mop
& MO_SIZE
) == MO_128
);
1121 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_LOAD
);
1122 ret
= load_atom_16(env
, ra
, haddr
, mop
);
1123 clear_helper_retaddr();
1127 Int128
helper_ld16_mmu(CPUArchState
*env
, uint64_t addr
,
1128 MemOpIdx oi
, uintptr_t ra
)
1130 MemOp mop
= get_memop(oi
);
1131 Int128 ret
= do_ld16_he_mmu(env
, addr
, mop
, ra
);
1133 if (mop
& MO_BSWAP
) {
1134 ret
= bswap128(ret
);
1139 Int128
helper_ld_i128(CPUArchState
*env
, target_ulong addr
, MemOpIdx oi
)
1141 return helper_ld16_mmu(env
, addr
, oi
, GETPC());
1144 Int128
cpu_ld16_be_mmu(CPUArchState
*env
, abi_ptr addr
,
1145 MemOpIdx oi
, uintptr_t ra
)
1147 MemOp mop
= get_memop(oi
);
1150 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1151 ret
= do_ld16_he_mmu(env
, addr
, mop
, ra
);
1152 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1153 if (!HOST_BIG_ENDIAN
) {
1154 ret
= bswap128(ret
);
1159 Int128
cpu_ld16_le_mmu(CPUArchState
*env
, abi_ptr addr
,
1160 MemOpIdx oi
, uintptr_t ra
)
1162 MemOp mop
= get_memop(oi
);
1165 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1166 ret
= do_ld16_he_mmu(env
, addr
, mop
, ra
);
1167 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_R
);
1168 if (HOST_BIG_ENDIAN
) {
1169 ret
= bswap128(ret
);
1174 static void do_st1_mmu(CPUArchState
*env
, abi_ptr addr
, uint8_t val
,
1175 MemOp mop
, uintptr_t ra
)
1179 tcg_debug_assert((mop
& MO_SIZE
) == MO_8
);
1180 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_STORE
);
1182 clear_helper_retaddr();
1185 void helper_stb_mmu(CPUArchState
*env
, uint64_t addr
, uint32_t val
,
1186 MemOpIdx oi
, uintptr_t ra
)
1188 do_st1_mmu(env
, addr
, val
, get_memop(oi
), ra
);
1191 void cpu_stb_mmu(CPUArchState
*env
, abi_ptr addr
, uint8_t val
,
1192 MemOpIdx oi
, uintptr_t ra
)
1194 do_st1_mmu(env
, addr
, val
, get_memop(oi
), ra
);
1195 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1198 static void do_st2_he_mmu(CPUArchState
*env
, abi_ptr addr
, uint16_t val
,
1199 MemOp mop
, uintptr_t ra
)
1203 tcg_debug_assert((mop
& MO_SIZE
) == MO_16
);
1204 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_STORE
);
1205 store_atom_2(env
, ra
, haddr
, mop
, val
);
1206 clear_helper_retaddr();
1209 void helper_stw_mmu(CPUArchState
*env
, uint64_t addr
, uint32_t val
,
1210 MemOpIdx oi
, uintptr_t ra
)
1212 MemOp mop
= get_memop(oi
);
1214 if (mop
& MO_BSWAP
) {
1217 do_st2_he_mmu(env
, addr
, val
, mop
, ra
);
1220 void cpu_stw_be_mmu(CPUArchState
*env
, abi_ptr addr
, uint16_t val
,
1221 MemOpIdx oi
, uintptr_t ra
)
1223 MemOp mop
= get_memop(oi
);
1225 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1226 do_st2_he_mmu(env
, addr
, be16_to_cpu(val
), mop
, ra
);
1227 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1230 void cpu_stw_le_mmu(CPUArchState
*env
, abi_ptr addr
, uint16_t val
,
1231 MemOpIdx oi
, uintptr_t ra
)
1233 MemOp mop
= get_memop(oi
);
1235 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1236 do_st2_he_mmu(env
, addr
, le16_to_cpu(val
), mop
, ra
);
1237 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1240 static void do_st4_he_mmu(CPUArchState
*env
, abi_ptr addr
, uint32_t val
,
1241 MemOp mop
, uintptr_t ra
)
1245 tcg_debug_assert((mop
& MO_SIZE
) == MO_32
);
1246 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_STORE
);
1247 store_atom_4(env
, ra
, haddr
, mop
, val
);
1248 clear_helper_retaddr();
1251 void helper_stl_mmu(CPUArchState
*env
, uint64_t addr
, uint32_t val
,
1252 MemOpIdx oi
, uintptr_t ra
)
1254 MemOp mop
= get_memop(oi
);
1256 if (mop
& MO_BSWAP
) {
1259 do_st4_he_mmu(env
, addr
, val
, mop
, ra
);
1262 void cpu_stl_be_mmu(CPUArchState
*env
, abi_ptr addr
, uint32_t val
,
1263 MemOpIdx oi
, uintptr_t ra
)
1265 MemOp mop
= get_memop(oi
);
1267 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1268 do_st4_he_mmu(env
, addr
, be32_to_cpu(val
), mop
, ra
);
1269 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1272 void cpu_stl_le_mmu(CPUArchState
*env
, abi_ptr addr
, uint32_t val
,
1273 MemOpIdx oi
, uintptr_t ra
)
1275 MemOp mop
= get_memop(oi
);
1277 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1278 do_st4_he_mmu(env
, addr
, le32_to_cpu(val
), mop
, ra
);
1279 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1282 static void do_st8_he_mmu(CPUArchState
*env
, abi_ptr addr
, uint64_t val
,
1283 MemOp mop
, uintptr_t ra
)
1287 tcg_debug_assert((mop
& MO_SIZE
) == MO_64
);
1288 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_STORE
);
1289 store_atom_8(env
, ra
, haddr
, mop
, val
);
1290 clear_helper_retaddr();
1293 void helper_stq_mmu(CPUArchState
*env
, uint64_t addr
, uint64_t val
,
1294 MemOpIdx oi
, uintptr_t ra
)
1296 MemOp mop
= get_memop(oi
);
1298 if (mop
& MO_BSWAP
) {
1301 do_st8_he_mmu(env
, addr
, val
, mop
, ra
);
1304 void cpu_stq_be_mmu(CPUArchState
*env
, abi_ptr addr
, uint64_t val
,
1305 MemOpIdx oi
, uintptr_t ra
)
1307 MemOp mop
= get_memop(oi
);
1309 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1310 do_st8_he_mmu(env
, addr
, cpu_to_be64(val
), mop
, ra
);
1311 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1314 void cpu_stq_le_mmu(CPUArchState
*env
, abi_ptr addr
, uint64_t val
,
1315 MemOpIdx oi
, uintptr_t ra
)
1317 MemOp mop
= get_memop(oi
);
1319 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1320 do_st8_he_mmu(env
, addr
, cpu_to_le64(val
), mop
, ra
);
1321 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1324 static void do_st16_he_mmu(CPUArchState
*env
, abi_ptr addr
, Int128 val
,
1325 MemOp mop
, uintptr_t ra
)
1329 tcg_debug_assert((mop
& MO_SIZE
) == MO_128
);
1330 haddr
= cpu_mmu_lookup(env
, addr
, mop
, ra
, MMU_DATA_STORE
);
1331 store_atom_16(env
, ra
, haddr
, mop
, val
);
1332 clear_helper_retaddr();
1335 void helper_st16_mmu(CPUArchState
*env
, uint64_t addr
, Int128 val
,
1336 MemOpIdx oi
, uintptr_t ra
)
1338 MemOp mop
= get_memop(oi
);
1340 if (mop
& MO_BSWAP
) {
1341 val
= bswap128(val
);
1343 do_st16_he_mmu(env
, addr
, val
, mop
, ra
);
1346 void helper_st_i128(CPUArchState
*env
, target_ulong addr
,
1347 Int128 val
, MemOpIdx oi
)
1349 helper_st16_mmu(env
, addr
, val
, oi
, GETPC());
1352 void cpu_st16_be_mmu(CPUArchState
*env
, abi_ptr addr
,
1353 Int128 val
, MemOpIdx oi
, uintptr_t ra
)
1355 MemOp mop
= get_memop(oi
);
1357 tcg_debug_assert((mop
& MO_BSWAP
) == MO_BE
);
1358 if (!HOST_BIG_ENDIAN
) {
1359 val
= bswap128(val
);
1361 do_st16_he_mmu(env
, addr
, val
, mop
, ra
);
1362 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1365 void cpu_st16_le_mmu(CPUArchState
*env
, abi_ptr addr
,
1366 Int128 val
, MemOpIdx oi
, uintptr_t ra
)
1368 MemOp mop
= get_memop(oi
);
1370 tcg_debug_assert((mop
& MO_BSWAP
) == MO_LE
);
1371 if (HOST_BIG_ENDIAN
) {
1372 val
= bswap128(val
);
1374 do_st16_he_mmu(env
, addr
, val
, mop
, ra
);
1375 qemu_plugin_vcpu_mem_cb(env_cpu(env
), addr
, oi
, QEMU_PLUGIN_MEM_W
);
1378 uint32_t cpu_ldub_code(CPUArchState
*env
, abi_ptr ptr
)
1382 set_helper_retaddr(1);
1383 ret
= ldub_p(g2h_untagged(ptr
));
1384 clear_helper_retaddr();
1388 uint32_t cpu_lduw_code(CPUArchState
*env
, abi_ptr ptr
)
1392 set_helper_retaddr(1);
1393 ret
= lduw_p(g2h_untagged(ptr
));
1394 clear_helper_retaddr();
1398 uint32_t cpu_ldl_code(CPUArchState
*env
, abi_ptr ptr
)
1402 set_helper_retaddr(1);
1403 ret
= ldl_p(g2h_untagged(ptr
));
1404 clear_helper_retaddr();
1408 uint64_t cpu_ldq_code(CPUArchState
*env
, abi_ptr ptr
)
1412 set_helper_retaddr(1);
1413 ret
= ldq_p(g2h_untagged(ptr
));
1414 clear_helper_retaddr();
1418 uint8_t cpu_ldb_code_mmu(CPUArchState
*env
, abi_ptr addr
,
1419 MemOpIdx oi
, uintptr_t ra
)
1424 haddr
= cpu_mmu_lookup(env
, addr
, oi
, ra
, MMU_INST_FETCH
);
1425 ret
= ldub_p(haddr
);
1426 clear_helper_retaddr();
1430 uint16_t cpu_ldw_code_mmu(CPUArchState
*env
, abi_ptr addr
,
1431 MemOpIdx oi
, uintptr_t ra
)
1436 haddr
= cpu_mmu_lookup(env
, addr
, oi
, ra
, MMU_INST_FETCH
);
1437 ret
= lduw_p(haddr
);
1438 clear_helper_retaddr();
1439 if (get_memop(oi
) & MO_BSWAP
) {
1445 uint32_t cpu_ldl_code_mmu(CPUArchState
*env
, abi_ptr addr
,
1446 MemOpIdx oi
, uintptr_t ra
)
1451 haddr
= cpu_mmu_lookup(env
, addr
, oi
, ra
, MMU_INST_FETCH
);
1453 clear_helper_retaddr();
1454 if (get_memop(oi
) & MO_BSWAP
) {
1460 uint64_t cpu_ldq_code_mmu(CPUArchState
*env
, abi_ptr addr
,
1461 MemOpIdx oi
, uintptr_t ra
)
1466 haddr
= cpu_mmu_lookup(env
, addr
, oi
, ra
, MMU_DATA_LOAD
);
1468 clear_helper_retaddr();
1469 if (get_memop(oi
) & MO_BSWAP
) {
1475 #include "ldst_common.c.inc"
1478 * Do not allow unaligned operations to proceed. Return the host address.
1480 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
1482 static void *atomic_mmu_lookup(CPUArchState
*env
, target_ulong addr
,
1483 MemOpIdx oi
, int size
, int prot
,
1486 MemOp mop
= get_memop(oi
);
1487 int a_bits
= get_alignment_bits(mop
);
1490 /* Enforce guest required alignment. */
1491 if (unlikely(addr
& ((1 << a_bits
) - 1))) {
1492 MMUAccessType t
= prot
== PAGE_READ
? MMU_DATA_LOAD
: MMU_DATA_STORE
;
1493 cpu_loop_exit_sigbus(env_cpu(env
), addr
, t
, retaddr
);
1496 /* Enforce qemu required alignment. */
1497 if (unlikely(addr
& (size
- 1))) {
1498 cpu_loop_exit_atomic(env_cpu(env
), retaddr
);
1501 ret
= g2h(env_cpu(env
), addr
);
1502 set_helper_retaddr(retaddr
);
1506 #include "atomic_common.c.inc"
1509 * First set of functions passes in OI and RETADDR.
1510 * This makes them callable from other helpers.
1513 #define ATOMIC_NAME(X) \
1514 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
1515 #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
1518 #include "atomic_template.h"
1521 #include "atomic_template.h"
1524 #include "atomic_template.h"
1526 #ifdef CONFIG_ATOMIC64
1528 #include "atomic_template.h"
1531 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128
1532 #define DATA_SIZE 16
1533 #include "atomic_template.h"