/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif
#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!cpu->created || qemu_cpu_is_self(cpu));     \
        }                                                         \
    } while (0)
/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* statistics */
int tlb_flush_count;
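
/* Example of what the check above guarantees: tlb_flush_page() hands the
 * target address to another vCPU as RUN_ON_CPU_TARGET_PTR(addr), which
 * stores a target_ulong in run_on_cpu_data.target_ptr.  On a 32-bit host
 * emulating a 64-bit guest, target_ulong is 64 bits wide, so the union
 * must be at least that wide for the address to survive the round trip
 * through the async work queue.
 */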
/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, false);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_cmpxchg(&cpu->pending_tlb_flush, false, true) == false) {
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}
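
/* Scheduling note for the cmpxchg above: only the caller that wins the
 * false->true transition of pending_tlb_flush queues the async work, so
 * concurrent tlb_flush() requests against a remote vCPU collapse into a
 * single queued flush.  tlb_flush_nocheck() re-arms the gate by setting
 * pending_tlb_flush back to false once the flush has actually run.
 */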
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = idxmap;
    int mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("start\n");

    tb_lock();

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    v_tlb_flush_by_mmuidx(cpu, idxmap);
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}
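
/* Worked example for the match above, assuming 4K target pages
 * (TARGET_PAGE_MASK == 0xfffff000): a read-only entry for page 0x4000
 * has addr_read == 0x4000, so flushing the page-aligned address 0x4000
 * matches on addr_read and memsets the whole entry to -1.  Keeping
 * TLB_INVALID_MASK in the mask means an already-invalidated entry
 * retains that bit after masking and can never compare equal to a
 * page-aligned address, so it is left untouched.
 */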
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmap = idxmap;
    int i, page, mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}
void tlb_flush_page_all(target_ulong addr)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
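
/* The (addr - start) < length test above is a two-sided range check in
 * a single unsigned compare: e.g. with start == 0x1000 and length ==
 * 0x2000, addr == 0x800 wraps to a huge value and fails, while any addr
 * in [0x1000, 0x3000) yields a difference below length and passes.
 */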
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
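
/* Worked example of the widening loop above (32-bit target_ulong for
 * brevity): with a 2MB large page already tracked at 0x40000000
 * (tlb_flush_mask == 0xffe00000), adding another 2MB page at vaddr
 * 0x40400000 gives an xor of 0x00400000, so the mask is shifted left
 * twice to 0xff800000 and the tracked region becomes the 8MB block at
 * 0x40000000 covering both pages.
 */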
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
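
/* Sizing note: the main TLB is direct-mapped on bits of the virtual
 * page number, so e.g. with CPU_TLB_SIZE == 256 and 4K pages, vaddrs
 * 0x00001000 and 0x00101000 both map to index 1 and installing one
 * evicts the other.  The copy into tlb_v_table above turns such a
 * conflict into a cheap victim-TLB hit rather than a full tlb_fill(),
 * with vtlb_index cycling round-robin over the CPU_VTLB_SIZE slots.
 */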
static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
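
/* Note on the physaddr arithmetic above: tlb_set_page_with_attrs()
 * stored iotlbentry->addr as (iotlb - vaddr), with flag/section bits in
 * the sub-page part.  Masking with TARGET_PAGE_MASK drops those bits,
 * and adding back the full access address restores the page offset,
 * yielding the region-relative offset that memory_region_dispatch_read()
 * expects.
 */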
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}
/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
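
/* Example expansion: VICTIM_TLB_HIT(addr_write, addr) probes the
 * addr_write field of each victim entry against the page-aligned
 * address.  The offsetof() indirection lets one helper serve addr_read,
 * addr_write and addr_code without three variants, at the cost of
 * requiring 'env', 'mmu_idx' and 'index' to exist in the caller's
 * scope, as they do in probe_write() below.
 */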
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}
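
/* Alignment example for the checks above: a 4-byte atomic has
 * s_bits == 2 (from MO_SIZE), so any addr with (addr & 3) != 0 takes
 * the stop-the-world path, unless the guest also requested alignment
 * (a_bits > 0, e.g. via MO_ALIGN), in which case cpu_unaligned_access()
 * raises the architectural exception first.
 */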
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"