/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
31 #include "qemu/error-report.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

/* Two gates: DEBUG_TLB_GATE enables stderr tracing, DEBUG_TLB_LOG_GATE
 * additionally routes the tracing through the qemu log (CPU_LOG_MMU).
 * Using always-defined 0/1 gates (instead of #ifdef around each call
 * site) keeps the debug code compiled and type-checked at all times.
 */
#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
/* Assert (when TLB debugging is gated on) that the current thread is the
 * one that owns the given vCPU; TLB state may only be touched by its own
 * CPU thread.  Fixed to actually use the macro parameter: the original
 * body referenced a hard-coded `cpu`, silently capturing whatever local
 * happened to be in scope at the call site.
 */
#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(this_cpu)->created ||                      \
                     qemu_cpu_is_self(this_cpu));                 \
        }                                                         \
    } while (0)
67 /* run_on_cpu_data.target_ptr should always be big enough for a
68 * target_ulong even on 32 bit builds */
69 QEMU_BUILD_BUG_ON(sizeof(target_ulong
) > sizeof(run_on_cpu_data
));
74 /* This is OK because CPU architectures generally permit an
75 * implementation to drop entries from the TLB at any time, so
76 * flushing more entries than required is only an efficiency issue,
77 * not a correctness issue.
79 static void tlb_flush_nocheck(CPUState
*cpu
)
81 CPUArchState
*env
= cpu
->env_ptr
;
83 /* The QOM tests will trigger tlb_flushes without setting up TCG
84 * so we bug out here in that case.
90 assert_cpu_is_self(cpu
);
91 tlb_debug("(count: %d)\n", tlb_flush_count
++);
95 memset(env
->tlb_table
, -1, sizeof(env
->tlb_table
));
96 memset(env
->tlb_v_table
, -1, sizeof(env
->tlb_v_table
));
97 memset(cpu
->tb_jmp_cache
, 0, sizeof(cpu
->tb_jmp_cache
));
100 env
->tlb_flush_addr
= -1;
101 env
->tlb_flush_mask
= 0;
105 atomic_mb_set(&cpu
->pending_tlb_flush
, false);
108 static void tlb_flush_global_async_work(CPUState
*cpu
, run_on_cpu_data data
)
110 tlb_flush_nocheck(cpu
);
113 void tlb_flush(CPUState
*cpu
)
115 if (cpu
->created
&& !qemu_cpu_is_self(cpu
)) {
116 if (atomic_cmpxchg(&cpu
->pending_tlb_flush
, false, true) == true) {
117 async_run_on_cpu(cpu
, tlb_flush_global_async_work
,
121 tlb_flush_nocheck(cpu
);
125 static inline void v_tlb_flush_by_mmuidx(CPUState
*cpu
, va_list argp
)
127 CPUArchState
*env
= cpu
->env_ptr
;
129 assert_cpu_is_self(cpu
);
130 tlb_debug("start\n");
135 int mmu_idx
= va_arg(argp
, int);
141 tlb_debug("%d\n", mmu_idx
);
143 memset(env
->tlb_table
[mmu_idx
], -1, sizeof(env
->tlb_table
[0]));
144 memset(env
->tlb_v_table
[mmu_idx
], -1, sizeof(env
->tlb_v_table
[0]));
147 memset(cpu
->tb_jmp_cache
, 0, sizeof(cpu
->tb_jmp_cache
));
152 void tlb_flush_by_mmuidx(CPUState
*cpu
, ...)
156 v_tlb_flush_by_mmuidx(cpu
, argp
);
160 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
162 if (addr
== (tlb_entry
->addr_read
&
163 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
164 addr
== (tlb_entry
->addr_write
&
165 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
166 addr
== (tlb_entry
->addr_code
&
167 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
168 memset(tlb_entry
, -1, sizeof(*tlb_entry
));
172 static void tlb_flush_page_async_work(CPUState
*cpu
, run_on_cpu_data data
)
174 CPUArchState
*env
= cpu
->env_ptr
;
175 target_ulong addr
= (target_ulong
) data
.target_ptr
;
179 assert_cpu_is_self(cpu
);
181 tlb_debug("page :" TARGET_FMT_lx
"\n", addr
);
183 /* Check if we need to flush due to large pages. */
184 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
185 tlb_debug("forcing full flush ("
186 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
187 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
193 addr
&= TARGET_PAGE_MASK
;
194 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
195 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
196 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
199 /* check whether there are entries that need to be flushed in the vtlb */
200 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
202 for (k
= 0; k
< CPU_VTLB_SIZE
; k
++) {
203 tlb_flush_entry(&env
->tlb_v_table
[mmu_idx
][k
], addr
);
207 tb_flush_jmp_cache(cpu
, addr
);
210 void tlb_flush_page(CPUState
*cpu
, target_ulong addr
)
212 tlb_debug("page :" TARGET_FMT_lx
"\n", addr
);
214 if (!qemu_cpu_is_self(cpu
)) {
215 async_run_on_cpu(cpu
, tlb_flush_page_async_work
,
216 RUN_ON_CPU_TARGET_PTR(addr
));
218 tlb_flush_page_async_work(cpu
, RUN_ON_CPU_TARGET_PTR(addr
));
222 void tlb_flush_page_by_mmuidx(CPUState
*cpu
, target_ulong addr
, ...)
224 CPUArchState
*env
= cpu
->env_ptr
;
228 va_start(argp
, addr
);
230 assert_cpu_is_self(cpu
);
231 tlb_debug("addr "TARGET_FMT_lx
"\n", addr
);
233 /* Check if we need to flush due to large pages. */
234 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
235 tlb_debug("forced full flush ("
236 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
237 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
239 v_tlb_flush_by_mmuidx(cpu
, argp
);
244 addr
&= TARGET_PAGE_MASK
;
245 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
248 int mmu_idx
= va_arg(argp
, int);
254 tlb_debug("idx %d\n", mmu_idx
);
256 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
258 /* check whether there are vltb entries that need to be flushed */
259 for (k
= 0; k
< CPU_VTLB_SIZE
; k
++) {
260 tlb_flush_entry(&env
->tlb_v_table
[mmu_idx
][k
], addr
);
265 tb_flush_jmp_cache(cpu
, addr
);
268 void tlb_flush_page_all(target_ulong addr
)
273 async_run_on_cpu(cpu
, tlb_flush_page_async_work
,
274 RUN_ON_CPU_TARGET_PTR(addr
));
278 /* update the TLBs so that writes to code in the virtual page 'addr'
280 void tlb_protect_code(ram_addr_t ram_addr
)
282 cpu_physical_memory_test_and_clear_dirty(ram_addr
, TARGET_PAGE_SIZE
,
286 /* update the TLB so that writes in physical page 'phys_addr' are no longer
287 tested for self modifying code */
288 void tlb_unprotect_code(ram_addr_t ram_addr
)
290 cpu_physical_memory_set_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
);
293 static bool tlb_is_dirty_ram(CPUTLBEntry
*tlbe
)
295 return (tlbe
->addr_write
& (TLB_INVALID_MASK
|TLB_MMIO
|TLB_NOTDIRTY
)) == 0;
298 void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
, uintptr_t start
,
303 if (tlb_is_dirty_ram(tlb_entry
)) {
304 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
305 if ((addr
- start
) < length
) {
306 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
311 void tlb_reset_dirty(CPUState
*cpu
, ram_addr_t start1
, ram_addr_t length
)
317 assert_cpu_is_self(cpu
);
320 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
323 for (i
= 0; i
< CPU_TLB_SIZE
; i
++) {
324 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
328 for (i
= 0; i
< CPU_VTLB_SIZE
; i
++) {
329 tlb_reset_dirty_range(&env
->tlb_v_table
[mmu_idx
][i
],
335 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
337 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
)) {
338 tlb_entry
->addr_write
= vaddr
;
342 /* update the TLB corresponding to virtual page vaddr
343 so that it is no longer dirty */
344 void tlb_set_dirty(CPUState
*cpu
, target_ulong vaddr
)
346 CPUArchState
*env
= cpu
->env_ptr
;
350 assert_cpu_is_self(cpu
);
352 vaddr
&= TARGET_PAGE_MASK
;
353 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
354 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
355 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
358 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
360 for (k
= 0; k
< CPU_VTLB_SIZE
; k
++) {
361 tlb_set_dirty1(&env
->tlb_v_table
[mmu_idx
][k
], vaddr
);
366 /* Our TLB does not support large pages, so remember the area covered by
367 large pages and trigger a full TLB flush if these are invalidated. */
368 static void tlb_add_large_page(CPUArchState
*env
, target_ulong vaddr
,
371 target_ulong mask
= ~(size
- 1);
373 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
374 env
->tlb_flush_addr
= vaddr
& mask
;
375 env
->tlb_flush_mask
= mask
;
378 /* Extend the existing region to include the new page.
379 This is a compromise between unnecessary flushes and the cost
380 of maintaining a full variable size TLB. */
381 mask
&= env
->tlb_flush_mask
;
382 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
385 env
->tlb_flush_addr
&= mask
;
386 env
->tlb_flush_mask
= mask
;
389 /* Add a new TLB entry. At most one entry for a given virtual address
390 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
391 * supplied size is only used by tlb_flush_page.
393 * Called from TCG-generated code, which is under an RCU read-side
396 void tlb_set_page_with_attrs(CPUState
*cpu
, target_ulong vaddr
,
397 hwaddr paddr
, MemTxAttrs attrs
, int prot
,
398 int mmu_idx
, target_ulong size
)
400 CPUArchState
*env
= cpu
->env_ptr
;
401 MemoryRegionSection
*section
;
403 target_ulong address
;
404 target_ulong code_address
;
407 hwaddr iotlb
, xlat
, sz
;
408 unsigned vidx
= env
->vtlb_index
++ % CPU_VTLB_SIZE
;
409 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
411 assert_cpu_is_self(cpu
);
412 assert(size
>= TARGET_PAGE_SIZE
);
413 if (size
!= TARGET_PAGE_SIZE
) {
414 tlb_add_large_page(env
, vaddr
, size
);
418 section
= address_space_translate_for_iotlb(cpu
, asidx
, paddr
, &xlat
, &sz
);
419 assert(sz
>= TARGET_PAGE_SIZE
);
421 tlb_debug("vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
423 vaddr
, paddr
, prot
, mmu_idx
);
426 if (!memory_region_is_ram(section
->mr
) && !memory_region_is_romd(section
->mr
)) {
431 /* TLB_MMIO for rom/romd handled below */
432 addend
= (uintptr_t)memory_region_get_ram_ptr(section
->mr
) + xlat
;
435 code_address
= address
;
436 iotlb
= memory_region_section_get_iotlb(cpu
, section
, vaddr
, paddr
, xlat
,
439 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
440 te
= &env
->tlb_table
[mmu_idx
][index
];
442 /* do not discard the translation in te, evict it into a victim tlb */
443 env
->tlb_v_table
[mmu_idx
][vidx
] = *te
;
444 env
->iotlb_v
[mmu_idx
][vidx
] = env
->iotlb
[mmu_idx
][index
];
447 env
->iotlb
[mmu_idx
][index
].addr
= iotlb
- vaddr
;
448 env
->iotlb
[mmu_idx
][index
].attrs
= attrs
;
449 te
->addend
= addend
- vaddr
;
450 if (prot
& PAGE_READ
) {
451 te
->addr_read
= address
;
456 if (prot
& PAGE_EXEC
) {
457 te
->addr_code
= code_address
;
461 if (prot
& PAGE_WRITE
) {
462 if ((memory_region_is_ram(section
->mr
) && section
->readonly
)
463 || memory_region_is_romd(section
->mr
)) {
464 /* Write access calls the I/O callback. */
465 te
->addr_write
= address
| TLB_MMIO
;
466 } else if (memory_region_is_ram(section
->mr
)
467 && cpu_physical_memory_is_clean(
468 memory_region_get_ram_addr(section
->mr
) + xlat
)) {
469 te
->addr_write
= address
| TLB_NOTDIRTY
;
471 te
->addr_write
= address
;
478 /* Add a new TLB entry, but without specifying the memory
479 * transaction attributes to be used.
481 void tlb_set_page(CPUState
*cpu
, target_ulong vaddr
,
482 hwaddr paddr
, int prot
,
483 int mmu_idx
, target_ulong size
)
485 tlb_set_page_with_attrs(cpu
, vaddr
, paddr
, MEMTXATTRS_UNSPECIFIED
,
486 prot
, mmu_idx
, size
);
489 static void report_bad_exec(CPUState
*cpu
, target_ulong addr
)
491 /* Accidentally executing outside RAM or ROM is quite common for
492 * several user-error situations, so report it in a way that
493 * makes it clear that this isn't a QEMU bug and provide suggestions
494 * about what a user could do to fix things.
496 error_report("Trying to execute code outside RAM or ROM at 0x"
497 TARGET_FMT_lx
, addr
);
498 error_printf("This usually means one of the following happened:\n\n"
499 "(1) You told QEMU to execute a kernel for the wrong machine "
500 "type, and it crashed on startup (eg trying to run a "
501 "raspberry pi kernel on a versatilepb QEMU machine)\n"
502 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
503 "and QEMU executed a ROM full of no-op instructions until "
504 "it fell off the end\n"
505 "(3) Your guest kernel has a bug and crashed by jumping "
506 "off into nowhere\n\n"
507 "This is almost always one of the first two, so check your "
508 "command line and that you are using the right type of kernel "
509 "for this machine.\n"
510 "If you think option (3) is likely then you can try debugging "
511 "your guest with the -d debug options; in particular "
512 "-d guest_errors will cause the log to include a dump of the "
513 "guest register state at this point.\n\n"
514 "Execution cannot continue; stopping here.\n\n");
516 /* Report also to the logs, with more detail including register dump */
517 qemu_log_mask(LOG_GUEST_ERROR
, "qemu: fatal: Trying to execute code "
518 "outside RAM or ROM at 0x" TARGET_FMT_lx
"\n", addr
);
519 log_cpu_state_mask(LOG_GUEST_ERROR
, cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
522 static inline ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
526 ram_addr
= qemu_ram_addr_from_host(ptr
);
527 if (ram_addr
== RAM_ADDR_INVALID
) {
528 error_report("Bad ram pointer %p", ptr
);
534 /* NOTE: this function can trigger an exception */
535 /* NOTE2: the returned address is not exactly the physical address: it
536 * is actually a ram_addr_t (in system mode; the user mode emulation
537 * version of this function returns a guest virtual address).
539 tb_page_addr_t
get_page_addr_code(CPUArchState
*env1
, target_ulong addr
)
541 int mmu_idx
, page_index
, pd
;
544 CPUState
*cpu
= ENV_GET_CPU(env1
);
545 CPUIOTLBEntry
*iotlbentry
;
547 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
548 mmu_idx
= cpu_mmu_index(env1
, true);
549 if (unlikely(env1
->tlb_table
[mmu_idx
][page_index
].addr_code
!=
550 (addr
& TARGET_PAGE_MASK
))) {
551 cpu_ldub_code(env1
, addr
);
553 iotlbentry
= &env1
->iotlb
[mmu_idx
][page_index
];
554 pd
= iotlbentry
->addr
& ~TARGET_PAGE_MASK
;
555 mr
= iotlb_to_region(cpu
, pd
, iotlbentry
->attrs
);
556 if (memory_region_is_unassigned(mr
)) {
557 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
559 if (cc
->do_unassigned_access
) {
560 cc
->do_unassigned_access(cpu
, addr
, false, true, 0, 4);
562 report_bad_exec(cpu
, addr
);
566 p
= (void *)((uintptr_t)addr
+ env1
->tlb_table
[mmu_idx
][page_index
].addend
);
567 return qemu_ram_addr_from_host_nofail(p
);
570 static uint64_t io_readx(CPUArchState
*env
, CPUIOTLBEntry
*iotlbentry
,
571 target_ulong addr
, uintptr_t retaddr
, int size
)
573 CPUState
*cpu
= ENV_GET_CPU(env
);
574 hwaddr physaddr
= iotlbentry
->addr
;
575 MemoryRegion
*mr
= iotlb_to_region(cpu
, physaddr
, iotlbentry
->attrs
);
579 physaddr
= (physaddr
& TARGET_PAGE_MASK
) + addr
;
580 cpu
->mem_io_pc
= retaddr
;
581 if (mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !cpu
->can_do_io
) {
582 cpu_io_recompile(cpu
, retaddr
);
585 cpu
->mem_io_vaddr
= addr
;
587 if (mr
->global_locking
) {
588 qemu_mutex_lock_iothread();
591 memory_region_dispatch_read(mr
, physaddr
, &val
, size
, iotlbentry
->attrs
);
593 qemu_mutex_unlock_iothread();
599 static void io_writex(CPUArchState
*env
, CPUIOTLBEntry
*iotlbentry
,
600 uint64_t val
, target_ulong addr
,
601 uintptr_t retaddr
, int size
)
603 CPUState
*cpu
= ENV_GET_CPU(env
);
604 hwaddr physaddr
= iotlbentry
->addr
;
605 MemoryRegion
*mr
= iotlb_to_region(cpu
, physaddr
, iotlbentry
->attrs
);
608 physaddr
= (physaddr
& TARGET_PAGE_MASK
) + addr
;
609 if (mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !cpu
->can_do_io
) {
610 cpu_io_recompile(cpu
, retaddr
);
612 cpu
->mem_io_vaddr
= addr
;
613 cpu
->mem_io_pc
= retaddr
;
615 if (mr
->global_locking
) {
616 qemu_mutex_lock_iothread();
619 memory_region_dispatch_write(mr
, physaddr
, val
, size
, iotlbentry
->attrs
);
621 qemu_mutex_unlock_iothread();
625 /* Return true if ADDR is present in the victim tlb, and has been copied
626 back to the main tlb. */
627 static bool victim_tlb_hit(CPUArchState
*env
, size_t mmu_idx
, size_t index
,
628 size_t elt_ofs
, target_ulong page
)
631 for (vidx
= 0; vidx
< CPU_VTLB_SIZE
; ++vidx
) {
632 CPUTLBEntry
*vtlb
= &env
->tlb_v_table
[mmu_idx
][vidx
];
633 target_ulong cmp
= *(target_ulong
*)((uintptr_t)vtlb
+ elt_ofs
);
636 /* Found entry in victim tlb, swap tlb and iotlb. */
637 CPUTLBEntry tmptlb
, *tlb
= &env
->tlb_table
[mmu_idx
][index
];
638 CPUIOTLBEntry tmpio
, *io
= &env
->iotlb
[mmu_idx
][index
];
639 CPUIOTLBEntry
*vio
= &env
->iotlb_v
[mmu_idx
][vidx
];
641 tmptlb
= *tlb
; *tlb
= *vtlb
; *vtlb
= tmptlb
;
642 tmpio
= *io
; *io
= *vio
; *vio
= tmpio
;
/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)
654 /* Probe for whether the specified guest write access is permitted.
655 * If it is not permitted then an exception will be taken in the same
656 * way as if this were a real write access (and we will not return).
657 * Otherwise the function will return, and there will be a valid
658 * entry in the TLB for this access.
660 void probe_write(CPUArchState
*env
, target_ulong addr
, int mmu_idx
,
663 int index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
664 target_ulong tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_write
;
666 if ((addr
& TARGET_PAGE_MASK
)
667 != (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
668 /* TLB entry is for a different page */
669 if (!VICTIM_TLB_HIT(addr_write
, addr
)) {
670 tlb_fill(ENV_GET_CPU(env
), addr
, MMU_DATA_STORE
, mmu_idx
, retaddr
);
675 /* Probe for a read-modify-write atomic operation. Do not allow unaligned
676 * operations, or io operations to proceed. Return the host address. */
677 static void *atomic_mmu_lookup(CPUArchState
*env
, target_ulong addr
,
678 TCGMemOpIdx oi
, uintptr_t retaddr
)
680 size_t mmu_idx
= get_mmuidx(oi
);
681 size_t index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
682 CPUTLBEntry
*tlbe
= &env
->tlb_table
[mmu_idx
][index
];
683 target_ulong tlb_addr
= tlbe
->addr_write
;
684 TCGMemOp mop
= get_memop(oi
);
685 int a_bits
= get_alignment_bits(mop
);
686 int s_bits
= mop
& MO_SIZE
;
688 /* Adjust the given return address. */
689 retaddr
-= GETPC_ADJ
;
691 /* Enforce guest required alignment. */
692 if (unlikely(a_bits
> 0 && (addr
& ((1 << a_bits
) - 1)))) {
693 /* ??? Maybe indicate atomic op to cpu_unaligned_access */
694 cpu_unaligned_access(ENV_GET_CPU(env
), addr
, MMU_DATA_STORE
,
698 /* Enforce qemu required alignment. */
699 if (unlikely(addr
& ((1 << s_bits
) - 1))) {
700 /* We get here if guest alignment was not requested,
701 or was not enforced by cpu_unaligned_access above.
702 We might widen the access and emulate, but for now
703 mark an exception and exit the cpu loop. */
707 /* Check TLB entry and enforce page permissions. */
708 if ((addr
& TARGET_PAGE_MASK
)
709 != (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
710 if (!VICTIM_TLB_HIT(addr_write
, addr
)) {
711 tlb_fill(ENV_GET_CPU(env
), addr
, MMU_DATA_STORE
, mmu_idx
, retaddr
);
713 tlb_addr
= tlbe
->addr_write
;
716 /* Notice an IO access, or a notdirty page. */
717 if (unlikely(tlb_addr
& ~TARGET_PAGE_MASK
)) {
718 /* There's really nothing that can be done to
719 support this apart from stop-the-world. */
723 /* Let the guest notice RMW on a write-only page. */
724 if (unlikely(tlbe
->addr_read
!= tlb_addr
)) {
725 tlb_fill(ENV_GET_CPU(env
), addr
, MMU_DATA_LOAD
, mmu_idx
, retaddr
);
726 /* Since we don't support reads and writes to different addresses,
727 and we do have the proper page loaded for write, this shouldn't
728 ever return. But just in case, handle via stop-the-world. */
732 return (void *)((uintptr_t)addr
+ tlbe
->addend
);
735 cpu_loop_exit_atomic(ENV_GET_CPU(env
), retaddr
);
738 #ifdef TARGET_WORDS_BIGENDIAN
739 # define TGT_BE(X) (X)
740 # define TGT_LE(X) BSWAP(X)
742 # define TGT_BE(X) BSWAP(X)
743 # define TGT_LE(X) (X)
746 #define MMUSUFFIX _mmu
749 #include "softmmu_template.h"
752 #include "softmmu_template.h"
755 #include "softmmu_template.h"
758 #include "softmmu_template.h"
760 /* First set of helpers allows passing in of OI and RETADDR. This makes
761 them callable from other helpers. */
763 #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
764 #define ATOMIC_NAME(X) \
765 HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
766 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
769 #include "atomic_template.h"
772 #include "atomic_template.h"
775 #include "atomic_template.h"
777 #ifdef CONFIG_ATOMIC64
779 #include "atomic_template.h"
782 #ifdef CONFIG_ATOMIC128
784 #include "atomic_template.h"
787 /* Second set of helpers are directly callable from TCG as helpers. */
791 #undef ATOMIC_MMU_LOOKUP
792 #define EXTRA_ARGS , TCGMemOpIdx oi
793 #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
794 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
797 #include "atomic_template.h"
800 #include "atomic_template.h"
803 #include "atomic_template.h"
805 #ifdef CONFIG_ATOMIC64
807 #include "atomic_template.h"
810 /* Code access functions. */
813 #define MMUSUFFIX _cmmu
815 #define GETPC() ((uintptr_t)0)
816 #define SOFTMMU_CODE_ACCESS
819 #include "softmmu_template.h"
822 #include "softmmu_template.h"
825 #include "softmmu_template.h"
828 #include "softmmu_template.h"