/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "qemu/plugin-memory.h"
#include "tcg/tcg-ldst.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do { \
        if (DEBUG_TLB_GATE) { \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
        } \
    } while (0)
/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    int i, i0 = tb_jmp_cache_hash_page(page_addr);
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
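/*
 * Worked example of the policy above (illustrative numbers only, not taken
 * from a trace): with old_size == 1024 and a window maximum of 900 used
 * entries, the use rate is 87% (> 70%), so the next flush doubles the TLB
 * to 2048 entries, capped at 1 << CPU_TLB_DYN_MAX_BITS.  With a window
 * maximum of 200 entries the rate is 19% (< 30%); once the 100 ms window
 * expires, pow2ceil(200) == 256 would give an expected use rate of 78%,
 * so the ceiling is doubled and the TLB is resized to 512 entries, never
 * going below 1 << CPU_TLB_DYN_MIN_BITS.
 */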
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}
/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}
/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
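/*
 * Encoding sketch (hypothetical values): with 4 KiB target pages and
 * idxmap == 0x3, a flush of page 0x7f001000 is passed to the async_1
 * helper as the single target_ptr value 0x7f001003.  The helper splits
 * it back apart with TARGET_PAGE_MASK, which is why this fast path is
 * only usable when idxmap < TARGET_PAGE_SIZE.
 */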
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}
typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker. */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
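/*
 * Illustrative flow (assuming a store to a clean RAM page): the TLB entry
 * was installed with TLB_NOTDIRTY set in addr_write, so the generated code
 * takes the slow path.  The slow path calls notdirty_write() further below,
 * which invalidates any TBs on the page and marks the RAM dirty; once the
 * page is no longer clean, tlb_set_dirty() clears TLB_NOTDIRTY again so
 * subsequent stores can use the fast path.
 */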
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}
/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
                       target_ulong vaddr, CPUTLBEntryFull *full)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx, wp_flags, prot;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (full->lg_page_size <= TARGET_PAGE_BITS) {
        sz = TARGET_PAGE_SIZE;
    } else {
        sz = (hwaddr)1 << full->lg_page_size;
        tlb_add_large_page(env, mmu_idx, vaddr, sz);
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = full->phys_addr & TARGET_PAGE_MASK;

    prot = full->prot;
    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, full->attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, full->phys_addr, prot, mmu_idx);

    address = vaddr_page;
    if (full->lg_page_size < TARGET_PAGE_BITS) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (full->attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->vfulltlb[vidx] = desc->fulltlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->fulltlb[index] = *full;
    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
    desc->fulltlb[index].phys_addr = paddr_page;
    desc->fulltlb[index].prot = prot;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUTLBEntryFull full = {
        .phys_addr = paddr,
        .attrs = attrs,
        .prot = prot,
        .lg_page_size = ctz64(size)
    };

    assert(is_power_of_2(size));
    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
}

void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
                                    access_type, mmu_idx, false, retaddr);
    assert(ok);
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
                                          mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}
*env
, CPUTLBEntryFull
*full
,
1351 int mmu_idx
, target_ulong addr
, uintptr_t retaddr
,
1352 MMUAccessType access_type
, MemOp op
)
1354 CPUState
*cpu
= env_cpu(env
);
1356 MemoryRegionSection
*section
;
1359 bool locked
= false;
1362 section
= iotlb_to_section(cpu
, full
->xlat_section
, full
->attrs
);
1364 mr_offset
= (full
->xlat_section
& TARGET_PAGE_MASK
) + addr
;
1365 cpu
->mem_io_pc
= retaddr
;
1366 if (!cpu
->can_do_io
) {
1367 cpu_io_recompile(cpu
, retaddr
);
1370 if (!qemu_mutex_iothread_locked()) {
1371 qemu_mutex_lock_iothread();
1374 r
= memory_region_dispatch_read(mr
, mr_offset
, &val
, op
, full
->attrs
);
1375 if (r
!= MEMTX_OK
) {
1376 hwaddr physaddr
= mr_offset
+
1377 section
->offset_within_address_space
-
1378 section
->offset_within_region
;
1380 cpu_transaction_failed(cpu
, physaddr
, addr
, memop_size(op
), access_type
,
1381 mmu_idx
, full
->attrs
, r
, retaddr
);
1384 qemu_mutex_unlock_iothread();
/*
 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
                            hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}
static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
    mr = section->mr;
    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, section, mr_offset);

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
            CPUTLBEntryFull tmpf;
            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
            return true;
        }
    }
    return false;
}
/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUTLBEntryFull *full, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, CPUTLBEntryFull **pfull,
                                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    flags = TLB_FLAGS_MASK;
    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);

            if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                           mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                *pfull = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            /*
             * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
             * to force the next access through tlb_fill.  We've just
             * called tlb_fill, so we know that this entry *is* valid.
             */
            flags &= ~TLB_INVALID_MASK;
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags &= tlb_addr;

    *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}
int probe_access_full(CPUArchState *env, target_ulong addr,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost, CPUTLBEntryFull **pfull,
                      uintptr_t retaddr)
{
    int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                      nonfault, phost, pfull, retaddr);

    /* Handle clean RAM pages. */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;

    return probe_access_full(env, addr, access_type, mmu_idx,
                             nonfault, phost, &full, retaddr);
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, &full, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        /* Handle watchpoints.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
        }
    }

    return host;
}
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, &full, 0);

    /* No combination of flags are expected by the caller. */
    return flags ? NULL : host;
}

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    CPUTLBEntryFull *full;
    void *p;

    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
                                cpu_mmu_index(env, true), false, &p, &full, 0);
    if (p == NULL) {
        return -1;
    }
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}
#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex
 * which can cause TLB flushes and potential resizing of the TLBs
 * losing the information we need. In those cases we need to recover
 * data from a copy of the CPUTLBEntryFull. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUTLBEntryFull *full;
            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
            data->is_io = true;
            data->v.io.section =
                iotlb_to_section(cpu, full->xlat_section, full->attrs);
            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
        }
        return true;
    } else {
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}
#endif
/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    target_ulong tlb_addr;
    void *hostaddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    index = tlb_index(env, mmu_idx, addr);
    tlbe = tlb_entry(env, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions.  */
    if (prot & PAGE_WRITE) {
        tlb_addr = tlb_addr_write(tlbe);
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_STORE, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
        }

        /* Let the guest notice RMW on a write-only page.  */
        if ((prot & PAGE_READ) &&
            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
            tlb_fill(env_cpu(env), addr, size,
                     MMU_DATA_LOAD, mmu_idx, retaddr);
            /*
             * Since we don't support reads and writes to different addresses,
             * and we do have the proper page loaded for write, this shouldn't
             * ever return.  But just in case, handle via stop-the-world.
             */
            goto stop_the_world;
        }
    } else /* if (prot & PAGE_READ) */ {
        tlb_addr = tlbe->addr_read;
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_LOAD, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
        }
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, size,
                       &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * In the case of the helper_*_mmu functions, we will have done this by
 * using the MemOp to look up the helper during code generation.
 *
 * In the case of the cpu_*_mmu functions, this is up to the caller.
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
/*
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                MemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEUQ:
        return ldq_be_p(haddr);
    case MO_LEUQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    const unsigned a_bits = get_alignment_bits(get_memop(oi));
    const size_t size = memop_size(op);
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index;
    CPUTLBEntry *entry;
    target_ulong tlb_addr;
    void *haddr;
    uint64_t res;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = code_read ? entry->addr_code : entry->addr_read;

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUTLBEntryFull *full;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load. */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

        /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access. */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, full, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this. ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine. */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine. */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
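        /*
         * Worked example: for a 4-byte little-endian load with
         * (addr & 3) == 3, shift is 24, so res = (r1 >> 24) | (r2 << 8)
         * picks up the top byte of the first aligned word and the low
         * three bytes of the second, before being masked to 32 bits below.
         */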
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}

/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
                       helper_be_ldq_mmu);
}

/*
 * Provide signed versions of the load routines as well. We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}

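/*
 * The casts above sign-extend the loaded value to int8_t/int16_t/int32_t;
 * the implicit conversion back to tcg_target_ulong then propagates that
 * sign bit through the full host register, matching the widening rule
 * described before full_ldub_mmu().
 */
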
/*
 * Load helpers for cpu_ldst.h.
 */

static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       MemOpIdx oi, uintptr_t retaddr,
                                       FullLoadHelper *full_load)
{
    uint64_t ret;

    ret = full_load(env, addr, oi, retaddr);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

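/*
 * Illustrative call sequence (assuming a valid mmu_idx for the access):
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, mmu_idx);
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *
 * This goes through full_be_ldul_mmu() and reports the access to any
 * memory plugins via qemu_plugin_vcpu_mem_cb().
 */
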
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
}

/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}

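/*
 * As with load_memop(), op is expected to be a compile-time constant;
 * e.g. store_memop(haddr, val, MO_BEUW) truncates val to 16 bits and
 * stores it big-endian via stw_be_p().
 */
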
static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                         MemOpIdx oi, uintptr_t retaddr);

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page1, page2, tlb_addr, tlb_addr2;
    MemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB. Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first. An exception to this rule is PAGE_WRITE_INV
     * handling: the first page could have evicted itself.
     */
    page1 = addr & TARGET_PAGE_MASK;
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints. Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
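    /*
     * The extracts below walk val from the most significant byte down
     * (big_endian) or from the least significant byte up. For example,
     * with size == 4 and val == 0x11223344, the big-endian loop stores
     * 0x11 0x22 0x33 0x44 at addr..addr+3, and the little-endian loop
     * stores 0x44 0x33 0x22 0x11.
     */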
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract. */
            uint8_t val8 = val >> (i * 8);
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}

static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    const unsigned a_bits = get_alignment_bits(get_memop(oi));
    const size_t size = memop_size(op);
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index;
    CPUTLBEntry *entry;
    target_ulong tlb_addr;
    void *haddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUTLBEntryFull *full;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores. */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

        /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access. */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, full, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM. */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages. */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, full, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this. ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */

typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

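/*
 * Mirrors cpu_load_helper() above: e.g. cpu_stw_le_mmu() funnels through
 * full_le_stw_mmu() and then reports the write to plugins via
 * qemu_plugin_vcpu_mem_cb().
 */
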
void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

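/*
 * The expansion above produces one family of entry points per size; for
 * DATA_SIZE 4, for instance, ATOMIC_NAME(cmpxchg) becomes
 * cpu_atomic_cmpxchgl_le_mmu / cpu_atomic_cmpxchgl_be_mmu (SUFFIX and END
 * are supplied by atomic_template.h).
 */
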
/* Code access functions. */

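/*
 * These pass code_read == true to load_helper(), so the lookup uses
 * addr_code and MMU_INST_FETCH, and cpu_mmu_index(env, true) selects the
 * MMU index used for instruction fetch. A retaddr of 0 indicates the call
 * is not made from within a translated block.
 */
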
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}