/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
/*
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
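
/*
 * An illustrative sketch (disabled): roughly how the per-host SIGSEGV
 * code ties adjust_signal_pc() and handle_sigsegv_accerr_write()
 * together.  host_signal_pc() and host_signal_write() are assumed
 * host-specific helpers here; the real dispatch lives in the host
 * signal code, not in this file.
 */
#if 0
static void example_sigsegv_flow(CPUState *cpu, siginfo_t *info,
                                 ucontext_t *uc, sigset_t *old_set)
{
    uintptr_t host_pc = host_signal_pc(uc);        /* assumed helper */
    bool is_write = host_signal_write(info, uc);   /* assumed helper */
    abi_ptr guest_addr = h2g_nocheck(info->si_addr);
    MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);

    /* Only retry if this was a write fault on a TB-protected page. */
    if (access_type == MMU_DATA_STORE
        && handle_sigsegv_accerr_write(cpu, old_set, host_pc, guest_addr)) {
        return;
    }
    /* Otherwise deliver the fault to the guest. */
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, true, host_pc);
}
#endif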
typedef struct PageFlagsNode {
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
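
/*
 * An illustrative sketch (disabled): besides page_dump(stderr), callers
 * can pass their own callback to walk_memory_regions(), e.g. to count
 * executable bytes; "count_exec" is a hypothetical callback.
 */
#if 0
static int count_exec(void *priv, target_ulong start,
                      target_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(target_ulong *)priv += end - start;
    }
    return 0; /* a nonzero return would stop the walk */
}

static target_ulong example_total_exec_bytes(void)
{
    target_ulong total = 0;
    walk_memory_regions(&total, count_exec);
    return total;
}
#endif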
int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}
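
/*
 * An illustrative sketch (disabled): callers normally test individual
 * bits of the returned mask; 0 means the address is unmapped in the
 * guest view.
 */
#if 0
static bool example_page_is_readable(target_ulong addr)
{
    return (page_get_flags(addr) & PAGE_READ) != 0;
}
#endif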
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free(p);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free(next);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free(p);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free(p);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free(p);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong last;
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start < end);
    assert(end - 1 <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    last = end - 1;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, end);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, end);
    }
}
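
/*
 * An illustrative sketch (disabled) of the typical caller: the mmap
 * emulation registers a fresh anonymous mapping with PAGE_RESET so that
 * sticky bits from any previous mapping are discarded rather than
 * merged.  The exact flag combinations live in mmap.c.
 */
#if 0
static void example_register_anon_mapping(target_ulong start,
                                          target_ulong len, int prot)
{
    /* prot is a combination of PAGE_READ, PAGE_WRITE and PAGE_EXEC. */
    page_set_flags(start, start + len,
                   prot | PAGE_VALID | PAGE_ANON | PAGE_RESET);
}
#endif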
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;

    if (len == 0) {
        return 0; /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1; /* wrap around */
    }

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            return -1; /* entire region invalid */
        }
        if (start < p->itree.start) {
            return -1; /* initial bytes invalid */
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            return -1; /* page not readable */
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1; /* page not writable */
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                return -1;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                return 0; /* ok */
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            return 0; /* ok */
        }
        start = p->itree.last + 1;
    }
}
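
/*
 * An illustrative sketch (disabled): the syscall layer performs a check
 * of this shape before touching guest memory through the direct host
 * mapping; compare lock_user() and access_ok() in the user-mode code.
 */
#if 0
static void *example_lock_buffer(CPUArchState *env, abi_ptr guest_ptr,
                                 target_ulong len, bool will_write)
{
    int flags = PAGE_READ | (will_write ? PAGE_WRITE : 0);

    if (page_check_range(guest_ptr, len, flags) < 0) {
        return NULL; /* caller returns -TARGET_EFAULT */
    }
    return g2h(env_cpu(env), guest_ptr);
}
#endif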
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}
/*
 * Called from signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited.  (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
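
/*
 * An illustrative sketch (disabled): with nonfault=true,
 * probe_access_flags() returns TLB_INVALID_MASK instead of raising the
 * guest signal, so a helper can test an address it may not be allowed
 * to touch.
 */
#if 0
static bool example_can_store(CPUArchState *env, target_ulong addr,
                              uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_STORE, MMU_USER_IDX,
                                   true, &host, ra);
    return flags == 0;
}
#endif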
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}
#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong end)
{
    IntervalTreeNode *n, *next;
    target_ulong last;

    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    last = TARGET_PAGE_ALIGN(end) - 1;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t;

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free(n);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        t = container_of(n, TargetPageDataNode, itree);
        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}
void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
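
/*
 * An illustrative sketch (disabled): a target that defines
 * TARGET_PAGE_DATA_SIZE (the current user is aarch64 MTE) receives a
 * zero-initialized blob per guest page on first access.
 */
#if 0
static uint8_t *example_page_data(target_ulong addr)
{
    /* TARGET_PAGE_DATA_SIZE bytes, valid for all of addr's page. */
    return page_get_target_data(addr);
}
#endif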
#else
void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */
/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
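
/*
 * An illustrative sketch (disabled): callers construct the MemOpIdx with
 * make_memop_idx(); under CONFIG_DEBUG_TCG, validate_memop() then checks
 * that e.g. a MO_BEUW operand really reached the big-endian 16-bit
 * entry point below.
 */
#if 0
static uint16_t example_load_u16_be(CPUArchState *env, abi_ptr addr,
                                    uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
    return cpu_ldw_be_mmu(env, addr, oi, ra);
}
#endif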
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
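
/*
 * An illustrative sketch (disabled): a target helper passes GETPC() as
 * the return address, so that if the access faults, the helper_retaddr
 * saved by cpu_mmu_lookup() lets adjust_signal_pc() unwind to the
 * correct guest instruction.
 */
#if 0
static uint64_t example_helper_ldq(CPUArchState *env, target_ulong addr,
                                   MemOpIdx oi)
{
    return cpu_ldq_le_mmu(env, addr, oi, GETPC());
}
#endif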
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
#include "ldst_common.c.inc"
/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
#include "atomic_common.c.inc"
/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
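
/*
 * An illustrative sketch (disabled): the template expansions above
 * produce entry points such as cpu_atomic_cmpxchgl_le_mmu().  Alignment
 * is enforced by atomic_mmu_lookup(): a guest alignment violation
 * raises SIGBUS, while a host-required alignment violation exits to the
 * slow stop-the-world path via cpu_loop_exit_atomic().
 */
#if 0
static uint32_t example_cas32_le(CPUArchState *env, target_ulong addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t ra)
{
    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
}
#endif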