/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)
/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
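
/*
 * Illustrative sketch (not from the original source): the fast-path mask
 * and the entry count are two views of one quantity.  Assuming a
 * direct-mapped TLB of 256 entries and CPU_TLB_ENTRY_BITS == 5 (values
 * chosen purely for the arithmetic):
 *
 *     fast->mask    == (256 - 1) << 5 == 0x1fe0
 *     tlb_n_entries:  (0x1fe0 >> 5) + 1 == 256
 *     sizeof_tlb   :   0x1fe0 + (1 << 5) == 0x2000 bytes
 *
 * so an index is derived from an address with a shift and an AND against
 * fast->mask, and no separate size field is needed.
 */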
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
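
/*
 * Worked example of the sizing heuristic above (illustrative numbers
 * only): with old_size == 1024 and window_max_entries == 820, rate is
 * 820 * 100 / 1024 == 80 > 70, so the TLB doubles (capped at
 * 1 << CPU_TLB_DYN_MAX_BITS).  If instead the window expires with
 * window_max_entries == 250, rate is 24 < 30; pow2ceil(250) == 256 would
 * give an expected use rate of 250 * 100 / 256 == 97 > 70, so ceil is
 * doubled to 512 and the expected rate drops to ~48%, back inside the
 * 30-70% target band.
 */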
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}
/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}
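
/*
 * Usage sketch (mirroring the callers below): the "_all_cpus" flush
 * variants queue fn on every other vCPU and then run it locally, while
 * the "_synced" variants instead queue it on the source vCPU as "safe"
 * work, so every queued flush completes before execution resumes:
 *
 *     flush_all_helper(src, fn, data);
 *     fn(src, data);                          // tlb_flush_*_all_cpus
 *
 *     flush_all_helper(src, fn, data);
 *     async_safe_run_on_cpu(src, fn, data);   // tlb_flush_*_all_cpus_synced
 */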
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}
/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
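
/*
 * Worked example of the encoding used above (illustrative): with
 * TARGET_PAGE_BITS == 12, any idxmap < 4096 fits in the page offset of
 * the page-aligned address, so no allocation is needed:
 *
 *     encoded = (addr & TARGET_PAGE_MASK) | idxmap;
 *     addr    = encoded & TARGET_PAGE_MASK;      // decode
 *     idxmap  = encoded & ~TARGET_PAGE_MASK;
 *
 * which is exactly what tlb_flush_page_by_mmuidx_async_1 undoes.
 */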
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}
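
/*
 * Illustrative numbers for the two full-flush conditions above (sizes
 * assumed): with a 256-entry TLB and CPU_TLB_ENTRY_BITS == 5, f->mask is
 * 0x1fe0.  A flush with bits == 12 gives mask == 0xfff < f->mask, so
 * several TLB entries may match one address under the mask and the whole
 * TLB is flushed instead; likewise len > f->mask means walking the range
 * would test more pages than the TLB holds, so a full flush is cheaper.
 */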
typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, d.addr + i);
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}
/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
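
/*
 * Worked example (illustrative): 2MB pages at 0x00200000 and 0x00600000
 * are entered with size == 0x200000, so lp_mask starts as ~0x1fffff.
 * Their xor is 0x00400000, which still has a bit set under the mask, so
 * lp_mask is shifted left until that bit falls outside it, leaving the
 * single 8MB region at 0 that covers both pages; invalidating anything
 * in that region now forces a full flush of this mmu_idx.
 */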
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
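
/*
 * Illustrative arithmetic for the iotlb offset scheme above (addresses
 * assumed): if the translated iotlb value for a page is 0x40000000 and
 * vaddr_page is 0x7000, then desc->iotlb[index].addr stores
 * 0x40000000 - 0x7000 == 0x3fff9000.  A later access at vaddr 0x7123
 * computes (0x3fff9000 & TARGET_PAGE_MASK) + 0x7123 == 0x40000123 in
 * io_readx()/io_writex(): the page base plus the intra-page offset,
 * recovered without a second masking step.
 */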
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
                               access_type, mmu_idx, false, retaddr);
    assert(ok);
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
static void save_iotlb_data(CPUState *cs, hwaddr addr,
                            MemoryRegionSection *section, hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->addr = addr;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                       mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags = tlb_addr & TLB_FLAGS_MASK;

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                  nonfault, phost, retaddr);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        }
    }

    return host;
}

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, 0);

    /* No combination of flags are expected by the caller. */
    return flags ? NULL : host;
}
#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex
 * which can cause TLB flushes and potential resizing of the TLBs
 * losing the information we need. In those cases we need to recover
 * data from a copy of the iotlbentry. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */

bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUIOTLBEntry *iotlbentry;
            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
            data->is_io = true;
            data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
        }
        return true;
    } else {
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}

#endif
/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    target_ulong tlb_addr;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    index = tlb_index(env, mmu_idx, addr);
    tlbe = tlb_entry(env, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions.  */
    if (prot & PAGE_WRITE) {
        tlb_addr = tlb_addr_write(tlbe);
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_STORE, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
        }

        /* Let the guest notice RMW on a write-only page.  */
        if ((prot & PAGE_READ) &&
            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
            tlb_fill(env_cpu(env), addr, size,
                     MMU_DATA_LOAD, mmu_idx, retaddr);
            /*
             * Since we don't support reads and writes to different addresses,
             * and we do have the proper page loaded for write, this shouldn't
             * ever return.  But just in case, handle via stop-the-world.
             */
            goto stop_the_world;
        }
    } else /* if (prot & PAGE_READ) */ {
        tlb_addr = tlbe->addr_read;
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_LOAD, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
        }
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, size,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                MemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}
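
/*
 * Worked example of the combine above (illustrative): a 4-byte
 * little-endian load at an address with (addr & 3) == 2 that crosses a
 * page boundary gives shift == 16.  r1 holds the aligned word containing
 * the low bytes and r2 the following word, so the result is
 * (r1 >> 16) | (r2 << 16), masked to 32 bits: the two halves stitched
 * back together in guest order.
 */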
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}
/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
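/*
 * Illustrative note (added for clarity, not in the original): the cast
 * through the narrow signed type, followed by the implicit widening back
 * to tcg_target_ulong, is what performs the sign extension; e.g. a byte
 * load of 0xff returns from helper_ret_ldsb_mmu with all host register
 * bits set.
 */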
/*
 * Load helpers for cpu_ldst.h.
 */
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       int mmu_idx, uintptr_t retaddr,
                                       MemOp op, FullLoadHelper *full_load)
{
    MemOpIdx oi = make_memop_idx(op, mmu_idx);
    uint64_t ret;

    trace_guest_ld_before_exec(env_cpu(env), addr, oi);

    ret = full_load(env, addr, oi, retaddr);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    return ret;
}
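/*
 * Usage sketch (hypothetical call site, added for clarity): a target
 * helper would typically pass its own return address so that a faulting
 * TLB fill can unwind to the guest instruction, e.g.
 *
 *     uint32_t v = cpu_ldl_be_mmuidx_ra(env, addr,
 *                                       cpu_mmu_index(env, false), GETPC());
 */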
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
}

int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra)
{
    return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}

uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
}

int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}

uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
}

uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
}

int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}

uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
}
uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
                          uintptr_t retaddr)
{
    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldub_data_ra(env, ptr, 0);
}

int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsb_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_be_data_ra(env, ptr, 0);
}

int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_be_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_be_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_be_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_le_data_ra(env, ptr, 0);
}

int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_le_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_le_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_le_data_ra(env, ptr, 0);
}
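/*
 * Editorial note (added for clarity): the _data variants pass retaddr == 0,
 * which marks the access as coming from outside translated code, so no
 * unwinding of the current TB is attempted on a TLB fill.
 */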
/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}
static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    MemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract. */
            uint8_t val8 = val >> (i * 8);
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
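/*
 * Worked example (illustrative, added for clarity): storing 0x11223344 as
 * four bytes, the big-endian extract emits 0x11 first (val >> 24 at i == 0)
 * while the little-endian extract emits 0x44 first (val >> 0), so the bytes
 * land in guest memory in the requested endianness.
 */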
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }
    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores. */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access. */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM. */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages. */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }
    /* Handle slow unaligned access (it spans two pages or IO). */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                   MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
/*
 * Store Helpers for cpu_ldst.h
 */
static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                 int mmu_idx, uintptr_t retaddr, MemOp op)
{
    MemOpIdx oi = make_memop_idx(op, mmu_idx);

    trace_guest_st_before_exec(env_cpu(env), addr, oi);

    store_helper(env, addr, val, oi, retaddr, op);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
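/*
 * Editorial note (added for clarity): the trace point fires before the
 * access and the plugin callback after it, so plugins only observe stores
 * that completed without faulting.
 */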
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}

void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
}

void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
}

void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
}

void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
}

void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
}

void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
}
void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}

void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_be_data_ra(env, ptr, val, 0);
}

void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_be_data_ra(env, ptr, val, 0);
}

void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_be_data_ra(env, ptr, val, 0);
}

void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_le_data_ra(env, ptr, val, 0);
}

void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_le_data_ra(env, ptr, val, 0);
}

void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_le_data_ra(env, ptr, val, 0);
}
/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX   get_mmuidx(oi)

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
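/*
 * Illustrative expansion (added for clarity): with SUFFIX "l" and END
 * "_le", ATOMIC_NAME(cmpxchg) becomes cpu_atomic_cmpxchgl_le_mmu; the
 * repeated template inclusions stamp out one such helper per size and
 * endianness.
 */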
/* Code access functions. */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
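/*
 * Editorial note (added for clarity): these fetch via addr_code rather
 * than addr_read (code_read == true in load_helper), and the MO_TE*
 * operations select the guest's native endianness; translators use them
 * to read instructions, e.g. insn = cpu_ldl_code(env, pc).
 */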