/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "qemu/plugin-memory.h"
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * vaddr even on 32 bit builds
 */
QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
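/*
 * Illustrative example of the heuristic (hypothetical numbers, not from a
 * trace): with a 1024-entry table, observing window_max_entries == 800 gives
 * a use rate of 800 * 100 / 1024 ~= 78%, which is above 70%, so the next
 * flush doubles the table (capped at 1 << CPU_TLB_DYN_MAX_BITS). If instead
 * the rate stays below 30% for a whole 100 ms window, the table is shrunk
 * towards pow2ceil(window_max_entries), never below 1 << CPU_TLB_DYN_MIN_BITS.
 */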
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}
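/*
 * Worked example for the bookkeeping above (hypothetical values): with
 * asked == 0x6 and c.dirty == 0x4, to_clean becomes 0x4, so only mmu_idx 2
 * is flushed and the request for the already-clean mmu_idx 1 is counted as
 * an elided flush. The loop condition "work &= work - 1" clears one set bit
 * per iteration, visiting each requested-and-dirty index exactly once.
 */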
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      vaddr page, vaddr mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        vaddr page, vaddr mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            vaddr page, vaddr mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              vaddr page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}
static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
{
    vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
    vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d (%016"
                  VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             vaddr addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    vaddr addr_and_idxmap = data.target_ptr;
    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    vaddr addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, vaddr addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
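/*
 * Example of the target_ptr encoding used above (hypothetical values): with
 * TARGET_PAGE_BITS == 12, addr == 0x7fff2000 and idxmap == 0x3 are packed as
 * 0x7fff2003 for tlb_flush_page_by_mmuidx_async_1, which splits them again
 * with TARGET_PAGE_MASK. Larger idxmap values do not fit in the page offset
 * and take the heap-allocated TLBFlushPageByMMUIdxData path instead.
 */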
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              vaddr addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   vaddr addr, vaddr len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    vaddr mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}
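/*
 * Example (hypothetical values): flushing len == 3 * TARGET_PAGE_SIZE with
 * bits == 20 walks three direct-mapped entries, one per page, comparing each
 * under mask == MAKE_64BIT_MASK(0, 20). If bits were small enough that
 * mask < f->mask, or len larger than the table, the code above falls back to
 * a full per-mmuidx flush instead of walking individual entries.
 */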
typedef struct {
    vaddr addr;
    vaddr len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        vaddr addr, vaddr len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            vaddr addr, uint16_t idxmap,
                                            unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TARGET_LONG_BITS == 32
            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
            ptr_write += HOST_BIG_ENDIAN;
            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
#elif TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         vaddr addr)
{
    if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = addr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    addr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               vaddr addr, uint64_t size)
{
    vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    vaddr lp_mask = ~(size - 1);

    if (lp_addr == (vaddr)-1) {
        /* No previous large page.  */
        lp_addr = addr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ addr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                   vaddr address, int flags,
                                   MMUAccessType access_type, bool enable)
{
    if (enable) {
        address |= flags & TLB_FLAGS_MASK;
        flags &= TLB_SLOW_FLAGS_MASK;
        if (flags) {
            address |= TLB_FORCE_SLOW;
        }
    } else {
        address = -1;
        flags = 0;
    }
    ent->addr_idx[access_type] = address;
    full->slow_flags[access_type] = flags;
}
/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
                       vaddr addr, CPUTLBEntryFull *full)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index, read_flags, write_flags;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    vaddr addr_page;
    int asidx, wp_flags, prot;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (full->lg_page_size <= TARGET_PAGE_BITS) {
        sz = TARGET_PAGE_SIZE;
    } else {
        sz = (hwaddr)1 << full->lg_page_size;
        tlb_add_large_page(env, mmu_idx, addr, sz);
    }
    addr_page = addr & TARGET_PAGE_MASK;
    paddr_page = full->phys_addr & TARGET_PAGE_MASK;

    prot = full->prot;
    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, full->attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
              " prot=%x idx=%d\n",
              addr, full->phys_addr, prot, mmu_idx);

    read_flags = 0;
    if (full->lg_page_size < TARGET_PAGE_BITS) {
        /* Repeat the MMU check and TLB fill on every access.  */
        read_flags |= TLB_INVALID_MASK;
    }
    if (full->attrs.byte_swap) {
        read_flags |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_flags = read_flags;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        assert(!(iotlb & ~TARGET_PAGE_MASK));
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_flags |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_flags |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_flags |= TLB_MMIO;
        if (!is_romd) {
            read_flags = write_flags;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, addr_page);
    te = tlb_entry(env, mmu_idx, addr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->vfulltlb[vidx] = desc->fulltlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
     * aligned ram_addr_t of the page base of the target RAM.
     * Otherwise, iotlb contains
     *  - a physical section number in the lower TARGET_PAGE_BITS
     *  - the offset within section->mr of the page base (I/O, ROMD) with the
     *    TARGET_PAGE_BITS masked off.
     * We subtract addr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_prepare()/get_page_addr_code().
     */
    desc->fulltlb[index] = *full;
    full = &desc->fulltlb[index];
    full->xlat_section = iotlb - addr_page;
    full->phys_addr = paddr_page;

    /* Now calculate the new entry */
    tn.addend = addend - addr_page;

    tlb_set_compare(full, &tn, addr_page, read_flags,
                    MMU_INST_FETCH, prot & PAGE_EXEC);

    if (wp_flags & BP_MEM_READ) {
        read_flags |= TLB_WATCHPOINT;
    }
    tlb_set_compare(full, &tn, addr_page, read_flags,
                    MMU_DATA_LOAD, prot & PAGE_READ);

    if (prot & PAGE_WRITE_INV) {
        write_flags |= TLB_INVALID_MASK;
    }
    if (wp_flags & BP_MEM_WRITE) {
        write_flags |= TLB_WATCHPOINT;
    }
    tlb_set_compare(full, &tn, addr_page, write_flags,
                    MMU_DATA_STORE, prot & PAGE_WRITE);

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, uint64_t size)
{
    CPUTLBEntryFull full = {
        .phys_addr = paddr,
        .attrs = attrs,
        .prot = prot,
        .lg_page_size = ctz64(size)
    };

    assert(is_power_of_2(size));
    tlb_set_page_full(cpu, mmu_idx, addr, &full);
}

void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, uint64_t size)
{
    tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, vaddr addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
                                    access_type, mmu_idx, false, retaddr);
    assert(ok);
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
                                          mmu_idx, retaddr);
}

static MemoryRegionSection *
io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
           MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
    CPUState *cpu = env_cpu(env);
    MemoryRegionSection *section;
    hwaddr mr_offset;

    section = iotlb_to_section(cpu, xlat, attrs);
    mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    *out_offset = mr_offset;
    return section;
}
static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
                      unsigned size, MMUAccessType access_type, int mmu_idx,
                      MemTxResult response, uintptr_t retaddr)
{
    CPUState *cpu = env_cpu(env);

    if (!cpu->ignore_memory_transaction_failures) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->do_transaction_failed) {
            hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);

            cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                               access_type, mmu_idx,
                                               full->attrs, response, retaddr);
        }
    }
}

static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                      int mmu_idx, uint64_t val, vaddr addr,
                      uintptr_t retaddr, MemOp op)
{
    MemoryRegionSection *section;
    hwaddr mr_offset;
    MemoryRegion *mr;
    MemTxResult r;

    section = io_prepare(&mr_offset, env, full->xlat_section,
                         full->attrs, addr, retaddr);
    mr = section->mr;

    {
        QEMU_IOTHREAD_LOCK_GUARD();
        r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
    }

    if (r != MEMTX_OK) {
        io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
                  r, retaddr);
    }
}
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           MMUAccessType access_type, vaddr page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        uint64_t cmp = tlb_read_idx(vtlb, access_type);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
            CPUTLBEntryFull tmpf;
            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
            return true;
        }
    }
    return false;
}
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUTLBEntryFull *full, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, CPUTLBEntryFull **pfull,
                                 uintptr_t retaddr, bool check_mem_cbs)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    uint64_t tlb_addr = tlb_read_idx(entry, access_type);
    vaddr page_addr = addr & TARGET_PAGE_MASK;
    int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
    bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
    CPUTLBEntryFull *full;

    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
            CPUState *cs = env_cpu(env);

            if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                           mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                *pfull = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            /*
             * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
             * to force the next access through tlb_fill.  We've just
             * called tlb_fill, so we know that this entry *is* valid.
             */
            flags &= ~TLB_INVALID_MASK;
        }
        tlb_addr = tlb_read_idx(entry, access_type);
    }
    flags &= tlb_addr;

    *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
    flags |= full->slow_flags[access_type];

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
        || (access_type != MMU_INST_FETCH && force_mmio)) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost, CPUTLBEntryFull **pfull,
                      uintptr_t retaddr)
{
    int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                      nonfault, phost, pfull, retaddr, true);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull)
{
    void *discard_phost;
    CPUTLBEntryFull *discard_tlb;

    /* privately handle users that don't need full results */
    phost = phost ? phost : &discard_phost;
    pfull = pfull ? pfull : &discard_tlb;

    int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                      true, phost, pfull, 0, false);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  nonfault, phost, &full, retaddr, true);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1, full, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, &full, retaddr, true);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        /* Handle watchpoints.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
        }
    }

    return host;
}

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, &full, 0, false);

    /* No combination of flags are expected by the caller. */
    return flags ? NULL : host;
}
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    CPUTLBEntryFull *full;
    void *p;

    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
                                cpu_mmu_index(env, true), false,
                                &p, &full, 0, false);
    if (p == NULL) {
        return -1;
    }

    if (full->lg_page_size < TARGET_PAGE_BITS) {
        return -1;
    }

    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

/* Load/store with atomicity primitives. */
#include "ldst_atomicity.c.inc"
#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * The one corner case is i/o write, which can cause changes to the
 * address space.  Those changes, and the corresponding tlb flush,
 * should be delayed until the next TB, so even then this ought not fail.
 * But check, Just in Case.
 */
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
    CPUTLBEntryFull *full;

    if (unlikely(!tlb_hit(tlb_addr, addr))) {
        return false;
    }

    full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
    data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);

    /* We must have an iotlb entry for MMIO */
    if (tlb_addr & TLB_MMIO) {
        MemoryRegionSection *section =
            iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
                             full->attrs);
        data->is_io = true;
        data->mr = section->mr;
    } else {
        data->is_io = false;
        data->mr = NULL;
    }
    return true;
}
#endif
/*
 * Probe for a load/store operation.
 * Return the host address and into @flags.
 */

typedef struct MMULookupPageData {
    CPUTLBEntryFull *full;
    void *haddr;
    vaddr addr;
    int flags;
    int size;
} MMULookupPageData;

typedef struct MMULookupLocals {
    MMULookupPageData page[2];
    MemOp memop;
    int mmu_idx;
} MMULookupLocals;
/**
 * mmu_lookup1: translate one page
 * @data: lookup parameters
 * @mmu_idx: virtual address context
 * @access_type: load/store/code
 * @ra: return address into tcg generated code, or 0
 *
 * Resolve the translation for the one page at @data.addr, filling in
 * the rest of @data with the results.  If the translation fails,
 * tlb_fill will longjmp out.  Return true if the softmmu tlb for
 * @mmu_idx may have resized.
 */
static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
                        int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
    vaddr addr = data->addr;
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    uint64_t tlb_addr = tlb_read_idx(entry, access_type);
    bool maybe_resized = false;
    CPUTLBEntryFull *full;
    int flags;

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, access_type,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
            maybe_resized = true;
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
    }

    full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
    flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
    flags |= full->slow_flags[access_type];

    data->full = full;
    data->flags = flags;
    /* Compute haddr speculatively; depending on flags it might be invalid. */
    data->haddr = (void *)((uintptr_t)addr + entry->addend);

    return maybe_resized;
}

/**
 * mmu_watch_or_dirty
 * @data: lookup parameters
 * @access_type: load/store/code
 * @ra: return address into tcg generated code, or 0
 *
 * Trigger watchpoints for @data.addr:@data.size;
 * record writes to protected clean pages.
 */
static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
                               MMUAccessType access_type, uintptr_t ra)
{
    CPUTLBEntryFull *full = data->full;
    vaddr addr = data->addr;
    int flags = data->flags;
    int size = data->size;

    /* On watchpoint hit, this will longjmp out.  */
    if (flags & TLB_WATCHPOINT) {
        int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
        cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
        flags &= ~TLB_WATCHPOINT;
    }

    /* Note that notdirty is only set for writes. */
    if (flags & TLB_NOTDIRTY) {
        notdirty_write(env_cpu(env), addr, size, full, ra);
        flags &= ~TLB_NOTDIRTY;
    }
    data->flags = flags;
}
/**
 * mmu_lookup: translate page(s)
 * @addr: virtual address
 * @oi: combined mmu_idx and MemOp
 * @ra: return address into tcg generated code, or 0
 * @access_type: load/store/code
 *
 * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
 * bytes.  Return true if the lookup crosses a page boundary.
 */
static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
                       uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
    unsigned a_bits;
    bool crosspage;
    int flags;

    l->memop = get_memop(oi);
    l->mmu_idx = get_mmuidx(oi);

    tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    a_bits = get_alignment_bits(l->memop);
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
    }

    l->page[0].addr = addr;
    l->page[0].size = memop_size(l->memop);
    l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
    l->page[1].size = 0;
    crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;

    if (likely(!crosspage)) {
        mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);

        flags = l->page[0].flags;
        if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
            mmu_watch_or_dirty(env, &l->page[0], type, ra);
        }
        if (unlikely(flags & TLB_BSWAP)) {
            l->memop ^= MO_BSWAP;
        }
    } else {
        /* Finish compute of page crossing. */
        int size0 = l->page[1].addr - addr;
        l->page[1].size = l->page[0].size - size0;
        l->page[0].size = size0;

        /*
         * Lookup both pages, recognizing exceptions from either.  If the
         * second lookup potentially resized, refresh first CPUTLBEntryFull.
         */
        mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
        if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
            uintptr_t index = tlb_index(env, l->mmu_idx, addr);
            l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
        }

        flags = l->page[0].flags | l->page[1].flags;
        if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
            mmu_watch_or_dirty(env, &l->page[0], type, ra);
            mmu_watch_or_dirty(env, &l->page[1], type, ra);
        }

        /*
         * Since target/sparc is the only user of TLB_BSWAP, and all
         * Sparc accesses are aligned, any treatment across two pages
         * would be arbitrary.  Refuse it until there's a use.
         */
        tcg_debug_assert((flags & TLB_BSWAP) == 0);
    }

    return crosspage;
}
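/*
 * Crossing example for the split above (hypothetical values): an 8-byte
 * access at addr == 0x1ffc with 4 KiB pages gives page[1].addr == 0x2000 and
 * size0 == 4, so page[0] covers bytes 0-3 on the first page and page[1]
 * bytes 4-7 on the second; both pages are translated before any data is
 * touched so that exceptions from either lookup are recognized up front.
 */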

/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    vaddr tlb_addr;
    void *hostaddr;
    CPUTLBEntryFull *full;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment. */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop. */
        goto stop_the_world;
    }

    index = tlb_index(env, mmu_idx, addr);
    tlbe = tlb_entry(env, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions. */
    tlb_addr = tlb_addr_write(tlbe);
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     MMU_DATA_STORE, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /*
     * Let the guest notice RMW on a write-only page.
     * We have just verified that the page is writable.
     * Subpage lookups may have left TLB_INVALID_MASK set,
     * but addr_read will only be -1 if PAGE_READ was unset.
     */
    if (unlikely(tlbe->addr_read == -1)) {
        tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
        /*
         * Since we don't support reads and writes to different
         * addresses, and we do have the proper page loaded for
         * write, this shouldn't ever return.  But just in case,
         * handle via stop-the-world.
         */
        goto stop_the_world;
    }
    /* Collect tlb flags for read. */
    tlb_addr |= tlbe->addr_read;

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world. */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
    full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, size, full, retaddr);
    }

    if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
        int wp_flags = 0;

        if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
            wp_flags |= BP_MEM_WRITE;
        }
        if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
            wp_flags |= BP_MEM_READ;
        }
        if (wp_flags) {
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, wp_flags, retaddr);
        }
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
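
/*
 * As an illustration of how the address returned above is consumed, a
 * compare-and-swap helper generated from atomic_template.h looks roughly
 * like the sketch below (types and the plugin/trace callbacks elided;
 * see atomic_template.h for the real definition):
 *
 *     uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
 *                                         uint32_t cmpv, uint32_t newv,
 *                                         MemOpIdx oi, uintptr_t retaddr)
 *     {
 *         uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4, retaddr);
 *         uint32_t ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;
 *     }
 *
 * Unaligned or MMIO-backed addresses never reach the host atomic operation:
 * atomic_mmu_lookup() exits to stop-the-world via cpu_loop_exit_atomic()
 * instead of returning in those cases.
 */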

/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 *
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

/**
 * do_ld_mmio_beN:
 * @env: cpu context
 * @full: page parameters
 * @ret_be: accumulated data
 * @addr: virtual address
 * @size: number of bytes
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 * Context: iothread lock held
 *
 * Load @size bytes from @addr, which is memory-mapped i/o.
 * The bytes are concatenated in big-endian order with @ret_be.
 */
static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
                               uint64_t ret_be, vaddr addr, int size,
                               int mmu_idx, MMUAccessType type, uintptr_t ra)
{
    MemoryRegionSection *section;
    hwaddr mr_offset;
    MemoryRegion *mr;
    MemTxAttrs attrs;

    tcg_debug_assert(size > 0 && size <= 8);

    attrs = full->attrs;
    section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
    mr = section->mr;

    do {
        MemOp this_mop;
        unsigned this_size;
        uint64_t val;
        MemTxResult r;

        /* Read aligned pieces up to 8 bytes. */
        this_mop = ctz32(size | (int)addr | 8);
        this_size = 1 << this_mop;
        this_mop |= MO_BE;

        r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
        if (unlikely(r != MEMTX_OK)) {
            io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
        }
        if (this_size == 8) {
            return val;
        }

        ret_be = (ret_be << (this_size * 8)) | val;
        addr += this_size;
        mr_offset += this_size;
        size -= this_size;
    } while (size);

    return ret_be;
}
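
/*
 * Worked example: a 5-byte load whose address ends in ...011 is carved up
 * as follows.  First iteration, ctz32(5 | 3 | 8) == 0, so one byte is read
 * and folded in with "ret_be = (ret_be << 8) | val".  The address is now
 * 4-aligned and 4 bytes remain, so ctz32(4 | 4 | 8) == 2 and a single
 * 4-byte read finishes the access with "ret_be = (ret_be << 32) | val".
 * Each piece is the largest naturally aligned power of two that still fits,
 * and pieces are accumulated most-significant first, so the result holds
 * the guest bytes in big-endian order.
 */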

/**
 * do_ld_bytes_beN
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * Load @p->size bytes from @p->haddr, which is RAM.
 * The bytes are concatenated in big-endian order with @ret_be.
 */
static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
{
    uint8_t *haddr = p->haddr;
    int i, size = p->size;

    for (i = 0; i < size; i++) {
        ret_be = (ret_be << 8) | haddr[i];
    }
    return ret_be;
}

/**
 * do_ld_parts_beN
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but atomically on each aligned part.
 */
static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
{
    void *haddr = p->haddr;
    int size = p->size;

    do {
        uint64_t x;
        int n;

        /*
         * Find minimum of alignment and size.
         * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
         * would have only checked the low bits of addr|size once at the start,
         * but is just as easy.
         */
        switch (((uintptr_t)haddr | size) & 7) {
        case 4:
            x = cpu_to_be32(load_atomic4(haddr));
            ret_be = (ret_be << 32) | x;
            n = 4;
            break;
        case 2:
        case 6:
            x = cpu_to_be16(load_atomic2(haddr));
            ret_be = (ret_be << 16) | x;
            n = 2;
            break;
        default:
            x = *(uint8_t *)haddr;
            ret_be = (ret_be << 8) | x;
            n = 1;
            break;
        case 0:
            g_assert_not_reached();
        }
        haddr += n;
        size -= n;
    } while (size != 0);

    return ret_be;
}
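
/*
 * Worked example: with haddr ending in ...110 and size == 6, the first pass
 * sees ((6 | 6) & 7) == 6 and performs a 2-byte atomic load; haddr is then
 * 8-aligned with 4 bytes left, so ((0 | 4) & 7) == 4 selects a 4-byte atomic
 * load.  Every piece is therefore both naturally aligned and loaded with a
 * single atomic operation, which is what MO_ATOM_SUBALIGN asks for.
 */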

/**
 * do_ld_whole_be4
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * Four aligned bytes are guaranteed to cover the load.
 */
static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 3;
    uint32_t x = load_atomic4(p->haddr - o);

    x = cpu_to_be32(x);
    x <<= o * 8;
    x >>= (4 - p->size) * 8;
    return (ret_be << (p->size * 8)) | x;
}

/**
 * do_ld_whole_be8
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * Eight aligned bytes are guaranteed to cover the load.
 */
static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
                                MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 7;
    uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);

    x = cpu_to_be64(x);
    x <<= o * 8;
    x >>= (8 - p->size) * 8;
    return (ret_be << (p->size * 8)) | x;
}
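
/*
 * Worked example for do_ld_whole_be4: with p->addr & 3 == 1 and p->size == 2,
 * the aligned word read covers bytes B0..B3, of which only B1 and B2 belong
 * to this page.  After cpu_to_be32() the register holds B0 B1 B2 B3;
 * "x <<= 1 * 8" discards B0, "x >>= (4 - 2) * 8" right-justifies B1 B2, and
 * "(ret_be << 2 * 8) | x" appends them below any previously accumulated
 * bytes.  do_ld_whole_be8 is the same arithmetic on an 8-byte atomic load.
 */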

/**
 * do_ld_whole_be16
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * 16 aligned bytes are guaranteed to cover the load.
 */
static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
                               MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 15;
    Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
    int size = p->size;

    if (!HOST_BIG_ENDIAN) {
        y = bswap128(y);
    }
    y = int128_lshift(y, o * 8);
    y = int128_urshift(y, (16 - size) * 8);
    x = int128_make64(ret_be);
    x = int128_lshift(x, size * 8);
    return int128_or(x, y);
}

/*
 * Wrapper for the above.
 */
static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
                          uint64_t ret_be, int mmu_idx, MMUAccessType type,
                          MemOp mop, uintptr_t ra)
{
    MemOp atom;
    unsigned tmp, half_size;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
                              mmu_idx, type, ra);
    }

    /*
     * It is a given that we cross a page and therefore there is no
     * atomicity for the load as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        return do_ld_parts_beN(p, ret_be);

    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        tmp = mop & MO_SIZE;
        tmp = tmp ? tmp - 1 : 0;
        half_size = 1 << tmp;
        if (atom == MO_ATOM_IFALIGN_PAIR
            ? p->size == half_size
            : p->size >= half_size) {
            if (!HAVE_al8_fast && p->size < 4) {
                return do_ld_whole_be4(p, ret_be);
            } else {
                return do_ld_whole_be8(env, ra, p, ret_be);
            }
        }
        /* fall through */

    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        return do_ld_bytes_beN(p, ret_be);

    default:
        g_assert_not_reached();
    }
}
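
/*
 * Worked example: an 8-byte MO_ATOM_IFALIGN_PAIR load that splits 4 + 4
 * across the page boundary has half_size == 4, so p->size == half_size for
 * both halves and each half is fetched with a single atomic operation via
 * do_ld_whole_be8 (or do_ld_whole_be4 for sub-word halves when 8-byte host
 * atomics are not cheap).  An uneven split such as 3 + 5 means neither half
 * is the aligned subobject, so both fall through to the byte-wise loop.
 * MO_ATOM_WITHIN16_PAIR uses ">=" instead, so only the larger half need be
 * atomic.
 */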

/*
 * Wrapper for the above, for 8 < size < 16.
 */
static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
                          uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
{
    int size = p->size;
    uint64_t b;
    MemOp atom;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
                           mmu_idx, MMU_DATA_LOAD, ra);
        b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
                           mmu_idx, MMU_DATA_LOAD, ra);
        return int128_make128(b, a);
    }

    /*
     * It is a given that we cross a page and therefore there is no
     * atomicity for the load as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        p->size = size - 8;
        a = do_ld_parts_beN(p, a);
        p->haddr += size - 8;
        p->size = 8;
        b = do_ld_parts_beN(p, 0);
        break;

    case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
        return do_ld_whole_be16(env, ra, p, a);

    case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        p->size = size - 8;
        a = do_ld_bytes_beN(p, a);
        b = ldq_be_p(p->haddr + size - 8);
        break;

    default:
        g_assert_not_reached();
    }

    return int128_make128(b, a);
}

static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                       MMUAccessType type, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
    } else {
        return *(uint8_t *)p->haddr;
    }
}

static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint16_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
        if ((memop & MO_BSWAP) == MO_LE) {
            ret = bswap16(ret);
        }
    } else {
        /* Perform the load host endian, then swap if necessary. */
        ret = load_atom_2(env, ra, p->haddr, memop);
        if (memop & MO_BSWAP) {
            ret = bswap16(ret);
        }
    }
    return ret;
}

static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint32_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
        if ((memop & MO_BSWAP) == MO_LE) {
            ret = bswap32(ret);
        }
    } else {
        /* Perform the load host endian. */
        ret = load_atom_4(env, ra, p->haddr, memop);
        if (memop & MO_BSWAP) {
            ret = bswap32(ret);
        }
    }
    return ret;
}

static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint64_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
        if ((memop & MO_BSWAP) == MO_LE) {
            ret = bswap64(ret);
        }
    } else {
        /* Perform the load host endian. */
        ret = load_atom_8(env, ra, p->haddr, memop);
        if (memop & MO_BSWAP) {
            ret = bswap64(ret);
        }
    }
    return ret;
}

static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                          uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    tcg_debug_assert(!crosspage);

    return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}

static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint16_t ret;
    uint8_t a, b;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
    b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);

    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = a | (b << 8);
    } else {
        ret = b | (a << 8);
    }
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}

static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint32_t ret;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
    ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}

static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t ret;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
    ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
}

static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t a, b;
    Int128 ret;
    int first;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
    if (likely(!crosspage)) {
        if (unlikely(l.page[0].flags & TLB_MMIO)) {
            QEMU_IOTHREAD_LOCK_GUARD();
            a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
                               l.mmu_idx, MMU_DATA_LOAD, ra);
            b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
                               l.mmu_idx, MMU_DATA_LOAD, ra);
            ret = int128_make128(b, a);
            if ((l.memop & MO_BSWAP) == MO_LE) {
                ret = bswap128(ret);
            }
        } else {
            /* Perform the load host endian. */
            ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
            if (l.memop & MO_BSWAP) {
                ret = bswap128(ret);
            }
        }
        return ret;
    }

    first = l.page[0].size;
    if (first == 8) {
        MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;

        a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
        b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
        if ((mop8 & MO_BSWAP) == MO_LE) {
            ret = int128_make128(a, b);
        } else {
            ret = int128_make128(b, a);
        }
        return ret;
    }

    if (first < 8) {
        a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
                      MMU_DATA_LOAD, l.memop, ra);
        ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
    } else {
        ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
        b = int128_getlo(ret);
        ret = int128_lshift(ret, l.page[1].size * 8);
        a = int128_gethi(ret);
        b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
                      MMU_DATA_LOAD, l.memop, ra);
        ret = int128_make128(b, a);
    }
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       uint32_t oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    return do_ld16_mmu(env, addr, oi, retaddr);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}

/*
 * Load helpers for cpu_ldst.h.
 */

static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
    ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    ret = do_ld16_mmu(env, addr, oi, ra);
    plugin_load_cb(env, addr, oi);
    return ret;
}
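
/*
 * A target helper that wants an explicit 4-byte guest load through these
 * wrappers might do, in sketch form (the MemOp and mmu index shown are
 * only an example):
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, false));
 *     uint32_t v = cpu_ldl_mmu(env, ptr, oi, GETPC());
 *
 * so that a fault unwinds to the helper's caller via the retaddr argument
 * and plugins observe the access through plugin_load_cb().
 */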

/*
 * Store Helpers
 */

/**
 * do_st_mmio_leN:
 * @env: cpu context
 * @full: page parameters
 * @val_le: data to store
 * @addr: virtual address
 * @size: number of bytes
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 * Context: iothread lock held
 *
 * Store @size bytes at @addr, which is memory-mapped i/o.
 * The bytes to store are extracted in little-endian order from @val_le;
 * return the bytes of @val_le beyond @size that have not been stored.
 */
static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
                               uint64_t val_le, vaddr addr, int size,
                               int mmu_idx, uintptr_t ra)
{
    tcg_debug_assert(size > 0 && size <= 8);

    do {
        /* Store aligned pieces up to 8 bytes. */
        switch ((size | (int)addr) & 7) {
        case 1:
        case 3:
        case 5:
        case 7:
            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
            val_le >>= 8;
            size -= 1;
            addr += 1;
            break;
        case 2:
        case 6:
            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
            val_le >>= 16;
            size -= 2;
            addr += 2;
            break;
        case 4:
            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
            val_le >>= 32;
            size -= 4;
            addr += 4;
            break;
        case 0:
            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
            return 0;
        default:
            qemu_build_not_reached();
        }
    } while (size);

    return val_le;
}
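
/*
 * Worked example: storing 3 bytes at an address ending in ...01 issues a
 * 1-byte write followed by a 2-byte write, shifting val_le right by 8 and
 * then by 16 as bytes are consumed.  The value handed back to a cross-page
 * caller is therefore val_le >> 24, i.e. exactly the bytes that still need
 * to be stored into the second page.
 */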

/*
 * Wrapper for the above.
 */
static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
                          uint64_t val_le, int mmu_idx,
                          MemOp mop, uintptr_t ra)
{
    MemOp atom;
    unsigned tmp, half_size;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        return do_st_mmio_leN(env, p->full, val_le, p->addr,
                              p->size, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        return val_le >> (p->size * 8);
    }

    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        return store_parts_leN(p->haddr, p->size, val_le);

    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        tmp = mop & MO_SIZE;
        tmp = tmp ? tmp - 1 : 0;
        half_size = 1 << tmp;
        if (atom == MO_ATOM_IFALIGN_PAIR
            ? p->size == half_size
            : p->size >= half_size) {
            if (!HAVE_al8_fast && p->size <= 4) {
                return store_whole_le4(p->haddr, p->size, val_le);
            } else if (HAVE_al8) {
                return store_whole_le8(p->haddr, p->size, val_le);
            } else {
                cpu_loop_exit_atomic(env_cpu(env), ra);
            }
        }
        /* fall through */

    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        return store_bytes_leN(p->haddr, p->size, val_le);

    default:
        g_assert_not_reached();
    }
}

/*
 * Wrapper for the above, for 8 < size < 16.
 */
static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
                            Int128 val_le, int mmu_idx,
                            MemOp mop, uintptr_t ra)
{
    int size = p->size;
    MemOp atom;

    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        do_st_mmio_leN(env, p->full, int128_getlo(val_le),
                       p->addr, 8, mmu_idx, ra);
        return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
                              p->addr + 8, size - 8, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        return int128_gethi(val_le) >> ((size - 8) * 8);
    }

    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        store_parts_leN(p->haddr, 8, int128_getlo(val_le));
        return store_parts_leN(p->haddr + 8, p->size - 8,
                               int128_gethi(val_le));

    case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
        if (!HAVE_ATOMIC128_RW) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
        return store_whole_le16(p->haddr, p->size, val_le);

    case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        stq_le_p(p->haddr, int128_getlo(val_le));
        return store_bytes_leN(p->haddr + 8, p->size - 8,
                               int128_gethi(val_le));

    default:
        g_assert_not_reached();
    }
}

static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
                    int mmu_idx, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        QEMU_IOTHREAD_LOCK_GUARD();
        do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        *(uint8_t *)p->haddr = val;
    }
}

static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        if ((memop & MO_BSWAP) != MO_LE) {
            val = bswap16(val);
        }
        QEMU_IOTHREAD_LOCK_GUARD();
        do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap16(val);
        }
        store_atom_2(env, ra, p->haddr, memop, val);
    }
}

static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        if ((memop & MO_BSWAP) != MO_LE) {
            val = bswap32(val);
        }
        QEMU_IOTHREAD_LOCK_GUARD();
        do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap32(val);
        }
        store_atom_4(env, ra, p->haddr, memop, val);
    }
}

static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        if ((memop & MO_BSWAP) != MO_LE) {
            val = bswap64(val);
        }
        QEMU_IOTHREAD_LOCK_GUARD();
        do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap64(val);
        }
        store_atom_8(env, ra, p->haddr, memop, val);
    }
}

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    tcg_debug_assert(!crosspage);

    do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
}

static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint8_t a, b;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    if ((l.memop & MO_BSWAP) == MO_LE) {
        a = val, b = val >> 8;
    } else {
        b = val, a = val >> 8;
    }
    do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
    do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    do_st2_mmu(env, addr, val, oi, retaddr);
}

static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    /* Swap to little endian for simplicity, then store by bytes. */
    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap32(val);
    }
    val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
    (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    do_st4_mmu(env, addr, val, oi, retaddr);
}

static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    /* Swap to little endian for simplicity, then store by bytes. */
    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap64(val);
    }
    val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
    (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    do_st8_mmu(env, addr, val, oi, retaddr);
}

static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
                        MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t a, b;
    int first;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        if (unlikely(l.page[0].flags & TLB_MMIO)) {
            if ((l.memop & MO_BSWAP) != MO_LE) {
                val = bswap128(val);
            }
            a = int128_getlo(val);
            b = int128_gethi(val);
            QEMU_IOTHREAD_LOCK_GUARD();
            do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
            do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
        } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
            /* nothing */
        } else {
            /* Swap to host endian if necessary, then store. */
            if (l.memop & MO_BSWAP) {
                val = bswap128(val);
            }
            store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
        }
        return;
    }

    first = l.page[0].size;
    if (first == 8) {
        MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;

        if (l.memop & MO_BSWAP) {
            val = bswap128(val);
        }
        if (HOST_BIG_ENDIAN) {
            b = int128_getlo(val), a = int128_gethi(val);
        } else {
            a = int128_getlo(val), b = int128_gethi(val);
        }
        do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
        do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
        return;
    }

    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap128(val);
    }
    if (first < 8) {
        do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
        val = int128_urshift(val, first * 8);
        do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
    } else {
        b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
    }
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    do_st16_mmu(env, addr, val, oi, retaddr);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}

/*
 * Store Helpers for cpu_ldst.h
 */

static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    helper_stb_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    do_st2_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    do_st4_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    do_st8_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                  MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    do_st16_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
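
/*
 * ATOMIC_NAME pastes together the operation name, the size SUFFIX and the
 * endian END that atomic_template.h sets up for each DATA_SIZE, e.g. for
 * DATA_SIZE 4:
 *
 *     ATOMIC_NAME(cmpxchg)   -> cpu_atomic_cmpxchgl_le_mmu / _be_mmu
 *     ATOMIC_NAME(fetch_add) -> cpu_atomic_fetch_addl_le_mmu / _be_mmu
 *
 * All of these take a MemOpIdx and a retaddr and resolve the guest address
 * with atomic_mmu_lookup() above.
 */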

/* Code access functions. */

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}