2 * Common CPU TLB handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/memory-internal.h"
28 #include "exec/ram_addr.h"
30 #include "qemu/error-report.h"
32 #include "exec/helper-proto.h"
33 #include "qemu/atomic.h"
34 #include "qemu/atomic128.h"
35 #include "exec/translate-all.h"
40 #include "qemu/plugin-memory.h"
42 #include "tcg/tcg-ldst.h"
43 #include "exec/helper-proto.h"
45 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
46 /* #define DEBUG_TLB */
47 /* #define DEBUG_TLB_LOG */
50 # define DEBUG_TLB_GATE 1
52 # define DEBUG_TLB_LOG_GATE 1
54 # define DEBUG_TLB_LOG_GATE 0
57 # define DEBUG_TLB_GATE 0
58 # define DEBUG_TLB_LOG_GATE 0
61 #define tlb_debug(fmt, ...) do { \
62 if (DEBUG_TLB_LOG_GATE) { \
63 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
65 } else if (DEBUG_TLB_GATE) { \
66 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
70 #define assert_cpu_is_self(cpu) do { \
71 if (DEBUG_TLB_GATE) { \
72 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
76 /* run_on_cpu_data.target_ptr should always be big enough for a
77 * target_ulong even on 32 bit builds */
78 QEMU_BUILD_BUG_ON(sizeof(target_ulong
) > sizeof(run_on_cpu_data
));
80 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
82 QEMU_BUILD_BUG_ON(NB_MMU_MODES
> 16);
83 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
85 static inline size_t tlb_n_entries(CPUTLBDescFast
*fast
)
87 return (fast
->mask
>> CPU_TLB_ENTRY_BITS
) + 1;
90 static inline size_t sizeof_tlb(CPUTLBDescFast
*fast
)
92 return fast
->mask
+ (1 << CPU_TLB_ENTRY_BITS
);
95 static void tlb_window_reset(CPUTLBDesc
*desc
, int64_t ns
,
98 desc
->window_begin_ns
= ns
;
99 desc
->window_max_entries
= max_entries
;
102 static void tb_jmp_cache_clear_page(CPUState
*cpu
, target_ulong page_addr
)
104 CPUJumpCache
*jc
= cpu
->tb_jmp_cache
;
111 i0
= tb_jmp_cache_hash_page(page_addr
);
112 for (i
= 0; i
< TB_JMP_PAGE_SIZE
; i
++) {
113 qatomic_set(&jc
->array
[i0
+ i
].tb
, NULL
);
118 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
119 * @desc: The CPUTLBDesc portion of the TLB
120 * @fast: The CPUTLBDescFast portion of the same TLB
122 * Called with tlb_lock_held.
124 * We have two main constraints when resizing a TLB: (1) we only resize it
125 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
126 * the array or unnecessarily flushing it), which means we do not control how
127 * frequently the resizing can occur; (2) we don't have access to the guest's
128 * future scheduling decisions, and therefore have to decide the magnitude of
129 * the resize based on past observations.
131 * In general, a memory-hungry process can benefit greatly from an appropriately
132 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
133 * we just have to make the TLB as large as possible; while an oversized TLB
134 * results in minimal TLB miss rates, it also takes longer to be flushed
135 * (flushes can be _very_ frequent), and the reduced locality can also hurt
138 * To achieve near-optimal performance for all kinds of workloads, we:
140 * 1. Aggressively increase the size of the TLB when the use rate of the
141 * TLB being flushed is high, since it is likely that in the near future this
142 * memory-hungry process will execute again, and its memory hungriness will
143 * probably be similar.
145 * 2. Slowly reduce the size of the TLB as the use rate declines over a
146 * reasonably large time window. The rationale is that if in such a time window
147 * we have not observed a high TLB use rate, it is likely that we won't observe
148 * it in the near future. In that case, once a time window expires we downsize
149 * the TLB to match the maximum use rate observed in the window.
151 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
152 * since in that range performance is likely near-optimal. Recall that the TLB
153 * is direct mapped, so we want the use rate to be low (or at least not too
154 * high), since otherwise we are likely to have a significant amount of
157 static void tlb_mmu_resize_locked(CPUTLBDesc
*desc
, CPUTLBDescFast
*fast
,
160 size_t old_size
= tlb_n_entries(fast
);
162 size_t new_size
= old_size
;
163 int64_t window_len_ms
= 100;
164 int64_t window_len_ns
= window_len_ms
* 1000 * 1000;
165 bool window_expired
= now
> desc
->window_begin_ns
+ window_len_ns
;
167 if (desc
->n_used_entries
> desc
->window_max_entries
) {
168 desc
->window_max_entries
= desc
->n_used_entries
;
170 rate
= desc
->window_max_entries
* 100 / old_size
;
173 new_size
= MIN(old_size
<< 1, 1 << CPU_TLB_DYN_MAX_BITS
);
174 } else if (rate
< 30 && window_expired
) {
175 size_t ceil
= pow2ceil(desc
->window_max_entries
);
176 size_t expected_rate
= desc
->window_max_entries
* 100 / ceil
;
179 * Avoid undersizing when the max number of entries seen is just below
180 * a pow2. For instance, if max_entries == 1025, the expected use rate
181 * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
182 * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
183 * later. Thus, make sure that the expected use rate remains below 70%.
184 * (and since we double the size, that means the lowest rate we'd
185 * expect to get is 35%, which is still in the 30-70% range where
186 * we consider that the size is appropriate.)
188 if (expected_rate
> 70) {
191 new_size
= MAX(ceil
, 1 << CPU_TLB_DYN_MIN_BITS
);
194 if (new_size
== old_size
) {
195 if (window_expired
) {
196 tlb_window_reset(desc
, now
, desc
->n_used_entries
);
202 g_free(desc
->fulltlb
);
204 tlb_window_reset(desc
, now
, 0);
205 /* desc->n_used_entries is cleared by the caller */
206 fast
->mask
= (new_size
- 1) << CPU_TLB_ENTRY_BITS
;
207 fast
->table
= g_try_new(CPUTLBEntry
, new_size
);
208 desc
->fulltlb
= g_try_new(CPUTLBEntryFull
, new_size
);
211 * If the allocations fail, try smaller sizes. We just freed some
212 * memory, so going back to half of new_size has a good chance of working.
213 * Increased memory pressure elsewhere in the system might cause the
214 * allocations to fail though, so we progressively reduce the allocation
215 * size, aborting if we cannot even allocate the smallest TLB we support.
217 while (fast
->table
== NULL
|| desc
->fulltlb
== NULL
) {
218 if (new_size
== (1 << CPU_TLB_DYN_MIN_BITS
)) {
219 error_report("%s: %s", __func__
, strerror(errno
));
222 new_size
= MAX(new_size
>> 1, 1 << CPU_TLB_DYN_MIN_BITS
);
223 fast
->mask
= (new_size
- 1) << CPU_TLB_ENTRY_BITS
;
226 g_free(desc
->fulltlb
);
227 fast
->table
= g_try_new(CPUTLBEntry
, new_size
);
228 desc
->fulltlb
= g_try_new(CPUTLBEntryFull
, new_size
);
232 static void tlb_mmu_flush_locked(CPUTLBDesc
*desc
, CPUTLBDescFast
*fast
)
234 desc
->n_used_entries
= 0;
235 desc
->large_page_addr
= -1;
236 desc
->large_page_mask
= -1;
238 memset(fast
->table
, -1, sizeof_tlb(fast
));
239 memset(desc
->vtable
, -1, sizeof(desc
->vtable
));
242 static void tlb_flush_one_mmuidx_locked(CPUArchState
*env
, int mmu_idx
,
245 CPUTLBDesc
*desc
= &env_tlb(env
)->d
[mmu_idx
];
246 CPUTLBDescFast
*fast
= &env_tlb(env
)->f
[mmu_idx
];
248 tlb_mmu_resize_locked(desc
, fast
, now
);
249 tlb_mmu_flush_locked(desc
, fast
);
252 static void tlb_mmu_init(CPUTLBDesc
*desc
, CPUTLBDescFast
*fast
, int64_t now
)
254 size_t n_entries
= 1 << CPU_TLB_DYN_DEFAULT_BITS
;
256 tlb_window_reset(desc
, now
, 0);
257 desc
->n_used_entries
= 0;
258 fast
->mask
= (n_entries
- 1) << CPU_TLB_ENTRY_BITS
;
259 fast
->table
= g_new(CPUTLBEntry
, n_entries
);
260 desc
->fulltlb
= g_new(CPUTLBEntryFull
, n_entries
);
261 tlb_mmu_flush_locked(desc
, fast
);
264 static inline void tlb_n_used_entries_inc(CPUArchState
*env
, uintptr_t mmu_idx
)
266 env_tlb(env
)->d
[mmu_idx
].n_used_entries
++;
269 static inline void tlb_n_used_entries_dec(CPUArchState
*env
, uintptr_t mmu_idx
)
271 env_tlb(env
)->d
[mmu_idx
].n_used_entries
--;
274 void tlb_init(CPUState
*cpu
)
276 CPUArchState
*env
= cpu
->env_ptr
;
277 int64_t now
= get_clock_realtime();
280 qemu_spin_init(&env_tlb(env
)->c
.lock
);
282 /* All tlbs are initialized flushed. */
283 env_tlb(env
)->c
.dirty
= 0;
285 for (i
= 0; i
< NB_MMU_MODES
; i
++) {
286 tlb_mmu_init(&env_tlb(env
)->d
[i
], &env_tlb(env
)->f
[i
], now
);
290 void tlb_destroy(CPUState
*cpu
)
292 CPUArchState
*env
= cpu
->env_ptr
;
295 qemu_spin_destroy(&env_tlb(env
)->c
.lock
);
296 for (i
= 0; i
< NB_MMU_MODES
; i
++) {
297 CPUTLBDesc
*desc
= &env_tlb(env
)->d
[i
];
298 CPUTLBDescFast
*fast
= &env_tlb(env
)->f
[i
];
301 g_free(desc
->fulltlb
);
305 /* flush_all_helper: run fn across all cpus
307 * If the wait flag is set then the src cpu's helper will be queued as
308 * "safe" work and the loop exited creating a synchronisation point
309 * where all queued work will be finished before execution starts
312 static void flush_all_helper(CPUState
*src
, run_on_cpu_func fn
,
319 async_run_on_cpu(cpu
, fn
, d
);
324 void tlb_flush_counts(size_t *pfull
, size_t *ppart
, size_t *pelide
)
327 size_t full
= 0, part
= 0, elide
= 0;
330 CPUArchState
*env
= cpu
->env_ptr
;
332 full
+= qatomic_read(&env_tlb(env
)->c
.full_flush_count
);
333 part
+= qatomic_read(&env_tlb(env
)->c
.part_flush_count
);
334 elide
+= qatomic_read(&env_tlb(env
)->c
.elide_flush_count
);
341 static void tlb_flush_by_mmuidx_async_work(CPUState
*cpu
, run_on_cpu_data data
)
343 CPUArchState
*env
= cpu
->env_ptr
;
344 uint16_t asked
= data
.host_int
;
345 uint16_t all_dirty
, work
, to_clean
;
346 int64_t now
= get_clock_realtime();
348 assert_cpu_is_self(cpu
);
350 tlb_debug("mmu_idx:0x%04" PRIx16
"\n", asked
);
352 qemu_spin_lock(&env_tlb(env
)->c
.lock
);
354 all_dirty
= env_tlb(env
)->c
.dirty
;
355 to_clean
= asked
& all_dirty
;
356 all_dirty
&= ~to_clean
;
357 env_tlb(env
)->c
.dirty
= all_dirty
;
359 for (work
= to_clean
; work
!= 0; work
&= work
- 1) {
360 int mmu_idx
= ctz32(work
);
361 tlb_flush_one_mmuidx_locked(env
, mmu_idx
, now
);
364 qemu_spin_unlock(&env_tlb(env
)->c
.lock
);
366 tcg_flush_jmp_cache(cpu
);
368 if (to_clean
== ALL_MMUIDX_BITS
) {
369 qatomic_set(&env_tlb(env
)->c
.full_flush_count
,
370 env_tlb(env
)->c
.full_flush_count
+ 1);
372 qatomic_set(&env_tlb(env
)->c
.part_flush_count
,
373 env_tlb(env
)->c
.part_flush_count
+ ctpop16(to_clean
));
374 if (to_clean
!= asked
) {
375 qatomic_set(&env_tlb(env
)->c
.elide_flush_count
,
376 env_tlb(env
)->c
.elide_flush_count
+
377 ctpop16(asked
& ~to_clean
));
382 void tlb_flush_by_mmuidx(CPUState
*cpu
, uint16_t idxmap
)
384 tlb_debug("mmu_idx: 0x%" PRIx16
"\n", idxmap
);
386 if (cpu
->created
&& !qemu_cpu_is_self(cpu
)) {
387 async_run_on_cpu(cpu
, tlb_flush_by_mmuidx_async_work
,
388 RUN_ON_CPU_HOST_INT(idxmap
));
390 tlb_flush_by_mmuidx_async_work(cpu
, RUN_ON_CPU_HOST_INT(idxmap
));
394 void tlb_flush(CPUState
*cpu
)
396 tlb_flush_by_mmuidx(cpu
, ALL_MMUIDX_BITS
);
399 void tlb_flush_by_mmuidx_all_cpus(CPUState
*src_cpu
, uint16_t idxmap
)
401 const run_on_cpu_func fn
= tlb_flush_by_mmuidx_async_work
;
403 tlb_debug("mmu_idx: 0x%"PRIx16
"\n", idxmap
);
405 flush_all_helper(src_cpu
, fn
, RUN_ON_CPU_HOST_INT(idxmap
));
406 fn(src_cpu
, RUN_ON_CPU_HOST_INT(idxmap
));
409 void tlb_flush_all_cpus(CPUState
*src_cpu
)
411 tlb_flush_by_mmuidx_all_cpus(src_cpu
, ALL_MMUIDX_BITS
);
414 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState
*src_cpu
, uint16_t idxmap
)
416 const run_on_cpu_func fn
= tlb_flush_by_mmuidx_async_work
;
418 tlb_debug("mmu_idx: 0x%"PRIx16
"\n", idxmap
);
420 flush_all_helper(src_cpu
, fn
, RUN_ON_CPU_HOST_INT(idxmap
));
421 async_safe_run_on_cpu(src_cpu
, fn
, RUN_ON_CPU_HOST_INT(idxmap
));
424 void tlb_flush_all_cpus_synced(CPUState
*src_cpu
)
426 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu
, ALL_MMUIDX_BITS
);
429 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry
*tlb_entry
,
430 target_ulong page
, target_ulong mask
)
433 mask
&= TARGET_PAGE_MASK
| TLB_INVALID_MASK
;
435 return (page
== (tlb_entry
->addr_read
& mask
) ||
436 page
== (tlb_addr_write(tlb_entry
) & mask
) ||
437 page
== (tlb_entry
->addr_code
& mask
));
440 static inline bool tlb_hit_page_anyprot(CPUTLBEntry
*tlb_entry
,
443 return tlb_hit_page_mask_anyprot(tlb_entry
, page
, -1);
447 * tlb_entry_is_empty - return true if the entry is not in use
448 * @te: pointer to CPUTLBEntry
450 static inline bool tlb_entry_is_empty(const CPUTLBEntry
*te
)
452 return te
->addr_read
== -1 && te
->addr_write
== -1 && te
->addr_code
== -1;
455 /* Called with tlb_c.lock held */
456 static bool tlb_flush_entry_mask_locked(CPUTLBEntry
*tlb_entry
,
460 if (tlb_hit_page_mask_anyprot(tlb_entry
, page
, mask
)) {
461 memset(tlb_entry
, -1, sizeof(*tlb_entry
));
467 static inline bool tlb_flush_entry_locked(CPUTLBEntry
*tlb_entry
,
470 return tlb_flush_entry_mask_locked(tlb_entry
, page
, -1);
473 /* Called with tlb_c.lock held */
474 static void tlb_flush_vtlb_page_mask_locked(CPUArchState
*env
, int mmu_idx
,
478 CPUTLBDesc
*d
= &env_tlb(env
)->d
[mmu_idx
];
481 assert_cpu_is_self(env_cpu(env
));
482 for (k
= 0; k
< CPU_VTLB_SIZE
; k
++) {
483 if (tlb_flush_entry_mask_locked(&d
->vtable
[k
], page
, mask
)) {
484 tlb_n_used_entries_dec(env
, mmu_idx
);
489 static inline void tlb_flush_vtlb_page_locked(CPUArchState
*env
, int mmu_idx
,
492 tlb_flush_vtlb_page_mask_locked(env
, mmu_idx
, page
, -1);
495 static void tlb_flush_page_locked(CPUArchState
*env
, int midx
,
498 target_ulong lp_addr
= env_tlb(env
)->d
[midx
].large_page_addr
;
499 target_ulong lp_mask
= env_tlb(env
)->d
[midx
].large_page_mask
;
501 /* Check if we need to flush due to large pages. */
502 if ((page
& lp_mask
) == lp_addr
) {
503 tlb_debug("forcing full flush midx %d ("
504 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
505 midx
, lp_addr
, lp_mask
);
506 tlb_flush_one_mmuidx_locked(env
, midx
, get_clock_realtime());
508 if (tlb_flush_entry_locked(tlb_entry(env
, midx
, page
), page
)) {
509 tlb_n_used_entries_dec(env
, midx
);
511 tlb_flush_vtlb_page_locked(env
, midx
, page
);
516 * tlb_flush_page_by_mmuidx_async_0:
517 * @cpu: cpu on which to flush
518 * @addr: page of virtual address to flush
519 * @idxmap: set of mmu_idx to flush
521 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
522 * at @addr from the tlbs indicated by @idxmap from @cpu.
524 static void tlb_flush_page_by_mmuidx_async_0(CPUState
*cpu
,
528 CPUArchState
*env
= cpu
->env_ptr
;
531 assert_cpu_is_self(cpu
);
533 tlb_debug("page addr:" TARGET_FMT_lx
" mmu_map:0x%x\n", addr
, idxmap
);
535 qemu_spin_lock(&env_tlb(env
)->c
.lock
);
536 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
537 if ((idxmap
>> mmu_idx
) & 1) {
538 tlb_flush_page_locked(env
, mmu_idx
, addr
);
541 qemu_spin_unlock(&env_tlb(env
)->c
.lock
);
544 * Discard jump cache entries for any tb which might potentially
545 * overlap the flushed page, which includes the previous.
547 tb_jmp_cache_clear_page(cpu
, addr
- TARGET_PAGE_SIZE
);
548 tb_jmp_cache_clear_page(cpu
, addr
);
552 * tlb_flush_page_by_mmuidx_async_1:
553 * @cpu: cpu on which to flush
554 * @data: encoded addr + idxmap
556 * Helper for tlb_flush_page_by_mmuidx and friends, called through
557 * async_run_on_cpu. The idxmap parameter is encoded in the page
558 * offset of the target_ptr field. This limits the set of mmu_idx
559 * that can be passed via this method.
561 static void tlb_flush_page_by_mmuidx_async_1(CPUState
*cpu
,
562 run_on_cpu_data data
)
564 target_ulong addr_and_idxmap
= (target_ulong
) data
.target_ptr
;
565 target_ulong addr
= addr_and_idxmap
& TARGET_PAGE_MASK
;
566 uint16_t idxmap
= addr_and_idxmap
& ~TARGET_PAGE_MASK
;
568 tlb_flush_page_by_mmuidx_async_0(cpu
, addr
, idxmap
);
574 } TLBFlushPageByMMUIdxData
;
577 * tlb_flush_page_by_mmuidx_async_2:
578 * @cpu: cpu on which to flush
579 * @data: allocated addr + idxmap
581 * Helper for tlb_flush_page_by_mmuidx and friends, called through
582 * async_run_on_cpu. The addr+idxmap parameters are stored in a
583 * TLBFlushPageByMMUIdxData structure that has been allocated
584 * specifically for this helper. Free the structure when done.
586 static void tlb_flush_page_by_mmuidx_async_2(CPUState
*cpu
,
587 run_on_cpu_data data
)
589 TLBFlushPageByMMUIdxData
*d
= data
.host_ptr
;
591 tlb_flush_page_by_mmuidx_async_0(cpu
, d
->addr
, d
->idxmap
);
595 void tlb_flush_page_by_mmuidx(CPUState
*cpu
, target_ulong addr
, uint16_t idxmap
)
597 tlb_debug("addr: "TARGET_FMT_lx
" mmu_idx:%" PRIx16
"\n", addr
, idxmap
);
599 /* This should already be page aligned */
600 addr
&= TARGET_PAGE_MASK
;
602 if (qemu_cpu_is_self(cpu
)) {
603 tlb_flush_page_by_mmuidx_async_0(cpu
, addr
, idxmap
);
604 } else if (idxmap
< TARGET_PAGE_SIZE
) {
606 * Most targets have only a few mmu_idx. In the case where
607 * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
608 * allocating memory for this operation.
610 async_run_on_cpu(cpu
, tlb_flush_page_by_mmuidx_async_1
,
611 RUN_ON_CPU_TARGET_PTR(addr
| idxmap
));
613 TLBFlushPageByMMUIdxData
*d
= g_new(TLBFlushPageByMMUIdxData
, 1);
615 /* Otherwise allocate a structure, freed by the worker. */
618 async_run_on_cpu(cpu
, tlb_flush_page_by_mmuidx_async_2
,
619 RUN_ON_CPU_HOST_PTR(d
));
623 void tlb_flush_page(CPUState
*cpu
, target_ulong addr
)
625 tlb_flush_page_by_mmuidx(cpu
, addr
, ALL_MMUIDX_BITS
);
628 void tlb_flush_page_by_mmuidx_all_cpus(CPUState
*src_cpu
, target_ulong addr
,
631 tlb_debug("addr: "TARGET_FMT_lx
" mmu_idx:%"PRIx16
"\n", addr
, idxmap
);
633 /* This should already be page aligned */
634 addr
&= TARGET_PAGE_MASK
;
637 * Allocate memory to hold addr+idxmap only when needed.
638 * See tlb_flush_page_by_mmuidx for details.
640 if (idxmap
< TARGET_PAGE_SIZE
) {
641 flush_all_helper(src_cpu
, tlb_flush_page_by_mmuidx_async_1
,
642 RUN_ON_CPU_TARGET_PTR(addr
| idxmap
));
646 /* Allocate a separate data block for each destination cpu. */
647 CPU_FOREACH(dst_cpu
) {
648 if (dst_cpu
!= src_cpu
) {
649 TLBFlushPageByMMUIdxData
*d
650 = g_new(TLBFlushPageByMMUIdxData
, 1);
654 async_run_on_cpu(dst_cpu
, tlb_flush_page_by_mmuidx_async_2
,
655 RUN_ON_CPU_HOST_PTR(d
));
660 tlb_flush_page_by_mmuidx_async_0(src_cpu
, addr
, idxmap
);
663 void tlb_flush_page_all_cpus(CPUState
*src
, target_ulong addr
)
665 tlb_flush_page_by_mmuidx_all_cpus(src
, addr
, ALL_MMUIDX_BITS
);
668 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState
*src_cpu
,
672 tlb_debug("addr: "TARGET_FMT_lx
" mmu_idx:%"PRIx16
"\n", addr
, idxmap
);
674 /* This should already be page aligned */
675 addr
&= TARGET_PAGE_MASK
;
678 * Allocate memory to hold addr+idxmap only when needed.
679 * See tlb_flush_page_by_mmuidx for details.
681 if (idxmap
< TARGET_PAGE_SIZE
) {
682 flush_all_helper(src_cpu
, tlb_flush_page_by_mmuidx_async_1
,
683 RUN_ON_CPU_TARGET_PTR(addr
| idxmap
));
684 async_safe_run_on_cpu(src_cpu
, tlb_flush_page_by_mmuidx_async_1
,
685 RUN_ON_CPU_TARGET_PTR(addr
| idxmap
));
688 TLBFlushPageByMMUIdxData
*d
;
690 /* Allocate a separate data block for each destination cpu. */
691 CPU_FOREACH(dst_cpu
) {
692 if (dst_cpu
!= src_cpu
) {
693 d
= g_new(TLBFlushPageByMMUIdxData
, 1);
696 async_run_on_cpu(dst_cpu
, tlb_flush_page_by_mmuidx_async_2
,
697 RUN_ON_CPU_HOST_PTR(d
));
701 d
= g_new(TLBFlushPageByMMUIdxData
, 1);
704 async_safe_run_on_cpu(src_cpu
, tlb_flush_page_by_mmuidx_async_2
,
705 RUN_ON_CPU_HOST_PTR(d
));
709 void tlb_flush_page_all_cpus_synced(CPUState
*src
, target_ulong addr
)
711 tlb_flush_page_by_mmuidx_all_cpus_synced(src
, addr
, ALL_MMUIDX_BITS
);
714 static void tlb_flush_range_locked(CPUArchState
*env
, int midx
,
715 target_ulong addr
, target_ulong len
,
718 CPUTLBDesc
*d
= &env_tlb(env
)->d
[midx
];
719 CPUTLBDescFast
*f
= &env_tlb(env
)->f
[midx
];
720 target_ulong mask
= MAKE_64BIT_MASK(0, bits
);
723 * If @bits is smaller than the tlb size, there may be multiple entries
724 * within the TLB; otherwise all addresses that match under @mask hit
725 * the same TLB entry.
726 * TODO: Perhaps allow bits to be a few bits less than the size.
727 * For now, just flush the entire TLB.
729 * If @len is larger than the tlb size, then it will take longer to
730 * test all of the entries in the TLB than it will to flush it all.
732 if (mask
< f
->mask
|| len
> f
->mask
) {
733 tlb_debug("forcing full flush midx %d ("
734 TARGET_FMT_lx
"/" TARGET_FMT_lx
"+" TARGET_FMT_lx
")\n",
735 midx
, addr
, mask
, len
);
736 tlb_flush_one_mmuidx_locked(env
, midx
, get_clock_realtime());
741 * Check if we need to flush due to large pages.
742 * Because large_page_mask contains all 1's from the msb,
743 * we only need to test the end of the range.
745 if (((addr
+ len
- 1) & d
->large_page_mask
) == d
->large_page_addr
) {
746 tlb_debug("forcing full flush midx %d ("
747 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
748 midx
, d
->large_page_addr
, d
->large_page_mask
);
749 tlb_flush_one_mmuidx_locked(env
, midx
, get_clock_realtime());
753 for (target_ulong i
= 0; i
< len
; i
+= TARGET_PAGE_SIZE
) {
754 target_ulong page
= addr
+ i
;
755 CPUTLBEntry
*entry
= tlb_entry(env
, midx
, page
);
757 if (tlb_flush_entry_mask_locked(entry
, page
, mask
)) {
758 tlb_n_used_entries_dec(env
, midx
);
760 tlb_flush_vtlb_page_mask_locked(env
, midx
, page
, mask
);
771 static void tlb_flush_range_by_mmuidx_async_0(CPUState
*cpu
,
774 CPUArchState
*env
= cpu
->env_ptr
;
777 assert_cpu_is_self(cpu
);
779 tlb_debug("range:" TARGET_FMT_lx
"/%u+" TARGET_FMT_lx
" mmu_map:0x%x\n",
780 d
.addr
, d
.bits
, d
.len
, d
.idxmap
);
782 qemu_spin_lock(&env_tlb(env
)->c
.lock
);
783 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
784 if ((d
.idxmap
>> mmu_idx
) & 1) {
785 tlb_flush_range_locked(env
, mmu_idx
, d
.addr
, d
.len
, d
.bits
);
788 qemu_spin_unlock(&env_tlb(env
)->c
.lock
);
791 * If the length is larger than the jump cache size, then it will take
792 * longer to clear each entry individually than it will to clear it all.
794 if (d
.len
>= (TARGET_PAGE_SIZE
* TB_JMP_CACHE_SIZE
)) {
795 tcg_flush_jmp_cache(cpu
);
800 * Discard jump cache entries for any tb which might potentially
801 * overlap the flushed pages, which includes the previous.
803 d
.addr
-= TARGET_PAGE_SIZE
;
804 for (target_ulong i
= 0, n
= d
.len
/ TARGET_PAGE_SIZE
+ 1; i
< n
; i
++) {
805 tb_jmp_cache_clear_page(cpu
, d
.addr
);
806 d
.addr
+= TARGET_PAGE_SIZE
;
810 static void tlb_flush_range_by_mmuidx_async_1(CPUState
*cpu
,
811 run_on_cpu_data data
)
813 TLBFlushRangeData
*d
= data
.host_ptr
;
814 tlb_flush_range_by_mmuidx_async_0(cpu
, *d
);
818 void tlb_flush_range_by_mmuidx(CPUState
*cpu
, target_ulong addr
,
819 target_ulong len
, uint16_t idxmap
,
825 * If all bits are significant, and len is small,
826 * this devolves to tlb_flush_page.
828 if (bits
>= TARGET_LONG_BITS
&& len
<= TARGET_PAGE_SIZE
) {
829 tlb_flush_page_by_mmuidx(cpu
, addr
, idxmap
);
832 /* If no page bits are significant, this devolves to tlb_flush. */
833 if (bits
< TARGET_PAGE_BITS
) {
834 tlb_flush_by_mmuidx(cpu
, idxmap
);
838 /* This should already be page aligned */
839 d
.addr
= addr
& TARGET_PAGE_MASK
;
844 if (qemu_cpu_is_self(cpu
)) {
845 tlb_flush_range_by_mmuidx_async_0(cpu
, d
);
847 /* Otherwise allocate a structure, freed by the worker. */
848 TLBFlushRangeData
*p
= g_memdup(&d
, sizeof(d
));
849 async_run_on_cpu(cpu
, tlb_flush_range_by_mmuidx_async_1
,
850 RUN_ON_CPU_HOST_PTR(p
));
854 void tlb_flush_page_bits_by_mmuidx(CPUState
*cpu
, target_ulong addr
,
855 uint16_t idxmap
, unsigned bits
)
857 tlb_flush_range_by_mmuidx(cpu
, addr
, TARGET_PAGE_SIZE
, idxmap
, bits
);
860 void tlb_flush_range_by_mmuidx_all_cpus(CPUState
*src_cpu
,
861 target_ulong addr
, target_ulong len
,
862 uint16_t idxmap
, unsigned bits
)
868 * If all bits are significant, and len is small,
869 * this devolves to tlb_flush_page.
871 if (bits
>= TARGET_LONG_BITS
&& len
<= TARGET_PAGE_SIZE
) {
872 tlb_flush_page_by_mmuidx_all_cpus(src_cpu
, addr
, idxmap
);
875 /* If no page bits are significant, this devolves to tlb_flush. */
876 if (bits
< TARGET_PAGE_BITS
) {
877 tlb_flush_by_mmuidx_all_cpus(src_cpu
, idxmap
);
881 /* This should already be page aligned */
882 d
.addr
= addr
& TARGET_PAGE_MASK
;
887 /* Allocate a separate data block for each destination cpu. */
888 CPU_FOREACH(dst_cpu
) {
889 if (dst_cpu
!= src_cpu
) {
890 TLBFlushRangeData
*p
= g_memdup(&d
, sizeof(d
));
891 async_run_on_cpu(dst_cpu
,
892 tlb_flush_range_by_mmuidx_async_1
,
893 RUN_ON_CPU_HOST_PTR(p
));
897 tlb_flush_range_by_mmuidx_async_0(src_cpu
, d
);
900 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState
*src_cpu
,
902 uint16_t idxmap
, unsigned bits
)
904 tlb_flush_range_by_mmuidx_all_cpus(src_cpu
, addr
, TARGET_PAGE_SIZE
,
908 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState
*src_cpu
,
914 TLBFlushRangeData d
, *p
;
918 * If all bits are significant, and len is small,
919 * this devolves to tlb_flush_page.
921 if (bits
>= TARGET_LONG_BITS
&& len
<= TARGET_PAGE_SIZE
) {
922 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu
, addr
, idxmap
);
925 /* If no page bits are significant, this devolves to tlb_flush. */
926 if (bits
< TARGET_PAGE_BITS
) {
927 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu
, idxmap
);
931 /* This should already be page aligned */
932 d
.addr
= addr
& TARGET_PAGE_MASK
;
937 /* Allocate a separate data block for each destination cpu. */
938 CPU_FOREACH(dst_cpu
) {
939 if (dst_cpu
!= src_cpu
) {
940 p
= g_memdup(&d
, sizeof(d
));
941 async_run_on_cpu(dst_cpu
, tlb_flush_range_by_mmuidx_async_1
,
942 RUN_ON_CPU_HOST_PTR(p
));
946 p
= g_memdup(&d
, sizeof(d
));
947 async_safe_run_on_cpu(src_cpu
, tlb_flush_range_by_mmuidx_async_1
,
948 RUN_ON_CPU_HOST_PTR(p
));
951 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState
*src_cpu
,
956 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu
, addr
, TARGET_PAGE_SIZE
,
960 /* update the TLBs so that writes to code in the virtual page 'addr'
962 void tlb_protect_code(ram_addr_t ram_addr
)
964 cpu_physical_memory_test_and_clear_dirty(ram_addr
& TARGET_PAGE_MASK
,
969 /* update the TLB so that writes in physical page 'phys_addr' are no longer
970 tested for self modifying code */
971 void tlb_unprotect_code(ram_addr_t ram_addr
)
973 cpu_physical_memory_set_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
);
978 * Dirty write flag handling
980 * When the TCG code writes to a location it looks up the address in
981 * the TLB and uses that data to compute the final address. If any of
982 * the lower bits of the address are set then the slow path is forced.
983 * There are a number of reasons to do this but for normal RAM the
984 * most usual is detecting writes to code regions which may invalidate
987 * Other vCPUs might be reading their TLBs during guest execution, so we update
988 * te->addr_write with qatomic_set. We don't need to worry about this for
989 * oversized guests as MTTCG is disabled for them.
991 * Called with tlb_c.lock held.
993 static void tlb_reset_dirty_range_locked(CPUTLBEntry
*tlb_entry
,
994 uintptr_t start
, uintptr_t length
)
996 uintptr_t addr
= tlb_entry
->addr_write
;
998 if ((addr
& (TLB_INVALID_MASK
| TLB_MMIO
|
999 TLB_DISCARD_WRITE
| TLB_NOTDIRTY
)) == 0) {
1000 addr
&= TARGET_PAGE_MASK
;
1001 addr
+= tlb_entry
->addend
;
1002 if ((addr
- start
) < length
) {
1003 #if TARGET_LONG_BITS == 32
1004 uint32_t *ptr_write
= (uint32_t *)&tlb_entry
->addr_write
;
1005 ptr_write
+= HOST_BIG_ENDIAN
;
1006 qatomic_set(ptr_write
, *ptr_write
| TLB_NOTDIRTY
);
1007 #elif TCG_OVERSIZED_GUEST
1008 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1010 qatomic_set(&tlb_entry
->addr_write
,
1011 tlb_entry
->addr_write
| TLB_NOTDIRTY
);
1018 * Called with tlb_c.lock held.
1019 * Called only from the vCPU context, i.e. the TLB's owner thread.
1021 static inline void copy_tlb_helper_locked(CPUTLBEntry
*d
, const CPUTLBEntry
*s
)
1026 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1028 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1029 * thing actually updated is the target TLB entry ->addr_write flags.
1031 void tlb_reset_dirty(CPUState
*cpu
, ram_addr_t start1
, ram_addr_t length
)
1038 qemu_spin_lock(&env_tlb(env
)->c
.lock
);
1039 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1041 unsigned int n
= tlb_n_entries(&env_tlb(env
)->f
[mmu_idx
]);
1043 for (i
= 0; i
< n
; i
++) {
1044 tlb_reset_dirty_range_locked(&env_tlb(env
)->f
[mmu_idx
].table
[i
],
1048 for (i
= 0; i
< CPU_VTLB_SIZE
; i
++) {
1049 tlb_reset_dirty_range_locked(&env_tlb(env
)->d
[mmu_idx
].vtable
[i
],
1053 qemu_spin_unlock(&env_tlb(env
)->c
.lock
);
1056 /* Called with tlb_c.lock held */
1057 static inline void tlb_set_dirty1_locked(CPUTLBEntry
*tlb_entry
,
1060 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
)) {
1061 tlb_entry
->addr_write
= vaddr
;
1065 /* update the TLB corresponding to virtual page vaddr
1066 so that it is no longer dirty */
1067 void tlb_set_dirty(CPUState
*cpu
, target_ulong vaddr
)
1069 CPUArchState
*env
= cpu
->env_ptr
;
1072 assert_cpu_is_self(cpu
);
1074 vaddr
&= TARGET_PAGE_MASK
;
1075 qemu_spin_lock(&env_tlb(env
)->c
.lock
);
1076 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1077 tlb_set_dirty1_locked(tlb_entry(env
, mmu_idx
, vaddr
), vaddr
);
1080 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1082 for (k
= 0; k
< CPU_VTLB_SIZE
; k
++) {
1083 tlb_set_dirty1_locked(&env_tlb(env
)->d
[mmu_idx
].vtable
[k
], vaddr
);
1086 qemu_spin_unlock(&env_tlb(env
)->c
.lock
);
1089 /* Our TLB does not support large pages, so remember the area covered by
1090 large pages and trigger a full TLB flush if these are invalidated. */
1091 static void tlb_add_large_page(CPUArchState
*env
, int mmu_idx
,
1092 target_ulong vaddr
, target_ulong size
)
1094 target_ulong lp_addr
= env_tlb(env
)->d
[mmu_idx
].large_page_addr
;
1095 target_ulong lp_mask
= ~(size
- 1);
1097 if (lp_addr
== (target_ulong
)-1) {
1098 /* No previous large page. */
1101 /* Extend the existing region to include the new page.
1102 This is a compromise between unnecessary flushes and
1103 the cost of maintaining a full variable size TLB. */
1104 lp_mask
&= env_tlb(env
)->d
[mmu_idx
].large_page_mask
;
1105 while (((lp_addr
^ vaddr
) & lp_mask
) != 0) {
1109 env_tlb(env
)->d
[mmu_idx
].large_page_addr
= lp_addr
& lp_mask
;
1110 env_tlb(env
)->d
[mmu_idx
].large_page_mask
= lp_mask
;
1114 * Add a new TLB entry. At most one entry for a given virtual address
1115 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1116 * supplied size is only used by tlb_flush_page.
1118 * Called from TCG-generated code, which is under an RCU read-side
1121 void tlb_set_page_full(CPUState
*cpu
, int mmu_idx
,
1122 target_ulong vaddr
, CPUTLBEntryFull
*full
)
1124 CPUArchState
*env
= cpu
->env_ptr
;
1125 CPUTLB
*tlb
= env_tlb(env
);
1126 CPUTLBDesc
*desc
= &tlb
->d
[mmu_idx
];
1127 MemoryRegionSection
*section
;
1129 target_ulong address
;
1130 target_ulong write_address
;
1132 CPUTLBEntry
*te
, tn
;
1133 hwaddr iotlb
, xlat
, sz
, paddr_page
;
1134 target_ulong vaddr_page
;
1135 int asidx
, wp_flags
, prot
;
1136 bool is_ram
, is_romd
;
1138 assert_cpu_is_self(cpu
);
1140 if (full
->lg_page_size
<= TARGET_PAGE_BITS
) {
1141 sz
= TARGET_PAGE_SIZE
;
1143 sz
= (hwaddr
)1 << full
->lg_page_size
;
1144 tlb_add_large_page(env
, mmu_idx
, vaddr
, sz
);
1146 vaddr_page
= vaddr
& TARGET_PAGE_MASK
;
1147 paddr_page
= full
->phys_addr
& TARGET_PAGE_MASK
;
1150 asidx
= cpu_asidx_from_attrs(cpu
, full
->attrs
);
1151 section
= address_space_translate_for_iotlb(cpu
, asidx
, paddr_page
,
1152 &xlat
, &sz
, full
->attrs
, &prot
);
1153 assert(sz
>= TARGET_PAGE_SIZE
);
1155 tlb_debug("vaddr=" TARGET_FMT_lx
" paddr=0x" HWADDR_FMT_plx
1156 " prot=%x idx=%d\n",
1157 vaddr
, full
->phys_addr
, prot
, mmu_idx
);
1159 address
= vaddr_page
;
1160 if (full
->lg_page_size
< TARGET_PAGE_BITS
) {
1161 /* Repeat the MMU check and TLB fill on every access. */
1162 address
|= TLB_INVALID_MASK
;
1164 if (full
->attrs
.byte_swap
) {
1165 address
|= TLB_BSWAP
;
1168 is_ram
= memory_region_is_ram(section
->mr
);
1169 is_romd
= memory_region_is_romd(section
->mr
);
1171 if (is_ram
|| is_romd
) {
1172 /* RAM and ROMD both have associated host memory. */
1173 addend
= (uintptr_t)memory_region_get_ram_ptr(section
->mr
) + xlat
;
1175 /* I/O does not; force the host address to NULL. */
1179 write_address
= address
;
1181 iotlb
= memory_region_get_ram_addr(section
->mr
) + xlat
;
1183 * Computing is_clean is expensive; avoid all that unless
1184 * the page is actually writable.
1186 if (prot
& PAGE_WRITE
) {
1187 if (section
->readonly
) {
1188 write_address
|= TLB_DISCARD_WRITE
;
1189 } else if (cpu_physical_memory_is_clean(iotlb
)) {
1190 write_address
|= TLB_NOTDIRTY
;
1195 iotlb
= memory_region_section_get_iotlb(cpu
, section
) + xlat
;
1197 * Writes to romd devices must go through MMIO to enable write.
1198 * Reads to romd devices go through the ram_ptr found above,
1199 * but of course reads to I/O must go through MMIO.
1201 write_address
|= TLB_MMIO
;
1203 address
= write_address
;
1207 wp_flags
= cpu_watchpoint_address_matches(cpu
, vaddr_page
,
1210 index
= tlb_index(env
, mmu_idx
, vaddr_page
);
1211 te
= tlb_entry(env
, mmu_idx
, vaddr_page
);
1214 * Hold the TLB lock for the rest of the function. We could acquire/release
1215 * the lock several times in the function, but it is faster to amortize the
1216 * acquisition cost by acquiring it just once. Note that this leads to
1217 * a longer critical section, but this is not a concern since the TLB lock
1218 * is unlikely to be contended.
1220 qemu_spin_lock(&tlb
->c
.lock
);
1222 /* Note that the tlb is no longer clean. */
1223 tlb
->c
.dirty
|= 1 << mmu_idx
;
1225 /* Make sure there's no cached translation for the new page. */
1226 tlb_flush_vtlb_page_locked(env
, mmu_idx
, vaddr_page
);
1229 * Only evict the old entry to the victim tlb if it's for a
1230 * different page; otherwise just overwrite the stale data.
1232 if (!tlb_hit_page_anyprot(te
, vaddr_page
) && !tlb_entry_is_empty(te
)) {
1233 unsigned vidx
= desc
->vindex
++ % CPU_VTLB_SIZE
;
1234 CPUTLBEntry
*tv
= &desc
->vtable
[vidx
];
1236 /* Evict the old entry into the victim tlb. */
1237 copy_tlb_helper_locked(tv
, te
);
1238 desc
->vfulltlb
[vidx
] = desc
->fulltlb
[index
];
1239 tlb_n_used_entries_dec(env
, mmu_idx
);
1242 /* refill the tlb */
1244 * At this point iotlb contains a physical section number in the lower
1245 * TARGET_PAGE_BITS, and either
1246 * + the ram_addr_t of the page base of the target RAM (RAM)
1247 * + the offset within section->mr of the page base (I/O, ROMD)
1248 * We subtract the vaddr_page (which is page aligned and thus won't
1249 * disturb the low bits) to give an offset which can be added to the
1250 * (non-page-aligned) vaddr of the eventual memory access to get
1251 * the MemoryRegion offset for the access. Note that the vaddr we
1252 * subtract here is that of the page base, and not the same as the
1253 * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1255 desc
->fulltlb
[index
] = *full
;
1256 desc
->fulltlb
[index
].xlat_section
= iotlb
- vaddr_page
;
1257 desc
->fulltlb
[index
].phys_addr
= paddr_page
;
1259 /* Now calculate the new entry */
1260 tn
.addend
= addend
- vaddr_page
;
1261 if (prot
& PAGE_READ
) {
1262 tn
.addr_read
= address
;
1263 if (wp_flags
& BP_MEM_READ
) {
1264 tn
.addr_read
|= TLB_WATCHPOINT
;
1270 if (prot
& PAGE_EXEC
) {
1271 tn
.addr_code
= address
;
1277 if (prot
& PAGE_WRITE
) {
1278 tn
.addr_write
= write_address
;
1279 if (prot
& PAGE_WRITE_INV
) {
1280 tn
.addr_write
|= TLB_INVALID_MASK
;
1282 if (wp_flags
& BP_MEM_WRITE
) {
1283 tn
.addr_write
|= TLB_WATCHPOINT
;
1287 copy_tlb_helper_locked(te
, &tn
);
1288 tlb_n_used_entries_inc(env
, mmu_idx
);
1289 qemu_spin_unlock(&tlb
->c
.lock
);
1292 void tlb_set_page_with_attrs(CPUState
*cpu
, target_ulong vaddr
,
1293 hwaddr paddr
, MemTxAttrs attrs
, int prot
,
1294 int mmu_idx
, target_ulong size
)
1296 CPUTLBEntryFull full
= {
1300 .lg_page_size
= ctz64(size
)
1303 assert(is_power_of_2(size
));
1304 tlb_set_page_full(cpu
, mmu_idx
, vaddr
, &full
);
1307 void tlb_set_page(CPUState
*cpu
, target_ulong vaddr
,
1308 hwaddr paddr
, int prot
,
1309 int mmu_idx
, target_ulong size
)
1311 tlb_set_page_with_attrs(cpu
, vaddr
, paddr
, MEMTXATTRS_UNSPECIFIED
,
1312 prot
, mmu_idx
, size
);
1316 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1317 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1318 * be discarded and looked up again (e.g. via tlb_entry()).
1320 static void tlb_fill(CPUState
*cpu
, target_ulong addr
, int size
,
1321 MMUAccessType access_type
, int mmu_idx
, uintptr_t retaddr
)
1326 * This is not a probe, so only valid return is success; failure
1327 * should result in exception + longjmp to the cpu loop.
1329 ok
= cpu
->cc
->tcg_ops
->tlb_fill(cpu
, addr
, size
,
1330 access_type
, mmu_idx
, false, retaddr
);
1334 static inline void cpu_unaligned_access(CPUState
*cpu
, vaddr addr
,
1335 MMUAccessType access_type
,
1336 int mmu_idx
, uintptr_t retaddr
)
1338 cpu
->cc
->tcg_ops
->do_unaligned_access(cpu
, addr
, access_type
,
1342 static inline void cpu_transaction_failed(CPUState
*cpu
, hwaddr physaddr
,
1343 vaddr addr
, unsigned size
,
1344 MMUAccessType access_type
,
1345 int mmu_idx
, MemTxAttrs attrs
,
1346 MemTxResult response
,
1349 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
1351 if (!cpu
->ignore_memory_transaction_failures
&&
1352 cc
->tcg_ops
->do_transaction_failed
) {
1353 cc
->tcg_ops
->do_transaction_failed(cpu
, physaddr
, addr
, size
,
1354 access_type
, mmu_idx
, attrs
,
1359 static uint64_t io_readx(CPUArchState
*env
, CPUTLBEntryFull
*full
,
1360 int mmu_idx
, target_ulong addr
, uintptr_t retaddr
,
1361 MMUAccessType access_type
, MemOp op
)
1363 CPUState
*cpu
= env_cpu(env
);
1365 MemoryRegionSection
*section
;
1370 section
= iotlb_to_section(cpu
, full
->xlat_section
, full
->attrs
);
1372 mr_offset
= (full
->xlat_section
& TARGET_PAGE_MASK
) + addr
;
1373 cpu
->mem_io_pc
= retaddr
;
1374 if (!cpu
->can_do_io
) {
1375 cpu_io_recompile(cpu
, retaddr
);
1379 QEMU_IOTHREAD_LOCK_GUARD();
1380 r
= memory_region_dispatch_read(mr
, mr_offset
, &val
, op
, full
->attrs
);
1383 if (r
!= MEMTX_OK
) {
1384 hwaddr physaddr
= mr_offset
+
1385 section
->offset_within_address_space
-
1386 section
->offset_within_region
;
1388 cpu_transaction_failed(cpu
, physaddr
, addr
, memop_size(op
), access_type
,
1389 mmu_idx
, full
->attrs
, r
, retaddr
);
1395 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
1396 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
1397 * because of the side effect of io_writex changing memory layout.
1399 static void save_iotlb_data(CPUState
*cs
, MemoryRegionSection
*section
,
1402 #ifdef CONFIG_PLUGIN
1403 SavedIOTLB
*saved
= &cs
->saved_iotlb
;
1404 saved
->section
= section
;
1405 saved
->mr_offset
= mr_offset
;
1409 static void io_writex(CPUArchState
*env
, CPUTLBEntryFull
*full
,
1410 int mmu_idx
, uint64_t val
, target_ulong addr
,
1411 uintptr_t retaddr
, MemOp op
)
1413 CPUState
*cpu
= env_cpu(env
);
1415 MemoryRegionSection
*section
;
1419 section
= iotlb_to_section(cpu
, full
->xlat_section
, full
->attrs
);
1421 mr_offset
= (full
->xlat_section
& TARGET_PAGE_MASK
) + addr
;
1422 if (!cpu
->can_do_io
) {
1423 cpu_io_recompile(cpu
, retaddr
);
1425 cpu
->mem_io_pc
= retaddr
;
1428 * The memory_region_dispatch may trigger a flush/resize
1429 * so for plugins we save the iotlb_data just in case.
1431 save_iotlb_data(cpu
, section
, mr_offset
);
1434 QEMU_IOTHREAD_LOCK_GUARD();
1435 r
= memory_region_dispatch_write(mr
, mr_offset
, val
, op
, full
->attrs
);
1438 if (r
!= MEMTX_OK
) {
1439 hwaddr physaddr
= mr_offset
+
1440 section
->offset_within_address_space
-
1441 section
->offset_within_region
;
1443 cpu_transaction_failed(cpu
, physaddr
, addr
, memop_size(op
),
1444 MMU_DATA_STORE
, mmu_idx
, full
->attrs
, r
,
1449 /* Return true if ADDR is present in the victim tlb, and has been copied
1450 back to the main tlb. */
1451 static bool victim_tlb_hit(CPUArchState
*env
, size_t mmu_idx
, size_t index
,
1452 MMUAccessType access_type
, target_ulong page
)
1456 assert_cpu_is_self(env_cpu(env
));
1457 for (vidx
= 0; vidx
< CPU_VTLB_SIZE
; ++vidx
) {
1458 CPUTLBEntry
*vtlb
= &env_tlb(env
)->d
[mmu_idx
].vtable
[vidx
];
1459 target_ulong cmp
= tlb_read_idx(vtlb
, access_type
);
1462 /* Found entry in victim tlb, swap tlb and iotlb. */
1463 CPUTLBEntry tmptlb
, *tlb
= &env_tlb(env
)->f
[mmu_idx
].table
[index
];
1465 qemu_spin_lock(&env_tlb(env
)->c
.lock
);
1466 copy_tlb_helper_locked(&tmptlb
, tlb
);
1467 copy_tlb_helper_locked(tlb
, vtlb
);
1468 copy_tlb_helper_locked(vtlb
, &tmptlb
);
1469 qemu_spin_unlock(&env_tlb(env
)->c
.lock
);
1471 CPUTLBEntryFull
*f1
= &env_tlb(env
)->d
[mmu_idx
].fulltlb
[index
];
1472 CPUTLBEntryFull
*f2
= &env_tlb(env
)->d
[mmu_idx
].vfulltlb
[vidx
];
1473 CPUTLBEntryFull tmpf
;
1474 tmpf
= *f1
; *f1
= *f2
; *f2
= tmpf
;
1481 static void notdirty_write(CPUState
*cpu
, vaddr mem_vaddr
, unsigned size
,
1482 CPUTLBEntryFull
*full
, uintptr_t retaddr
)
1484 ram_addr_t ram_addr
= mem_vaddr
+ full
->xlat_section
;
1486 trace_memory_notdirty_write_access(mem_vaddr
, ram_addr
, size
);
1488 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1489 tb_invalidate_phys_range_fast(ram_addr
, size
, retaddr
);
1493 * Set both VGA and migration bits for simplicity and to remove
1494 * the notdirty callback faster.
1496 cpu_physical_memory_set_dirty_range(ram_addr
, size
, DIRTY_CLIENTS_NOCODE
);
1498 /* We remove the notdirty callback only if the code has been flushed. */
1499 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1500 trace_memory_notdirty_set_dirty(mem_vaddr
);
1501 tlb_set_dirty(cpu
, mem_vaddr
);
1505 static int probe_access_internal(CPUArchState
*env
, target_ulong addr
,
1506 int fault_size
, MMUAccessType access_type
,
1507 int mmu_idx
, bool nonfault
,
1508 void **phost
, CPUTLBEntryFull
**pfull
,
1511 uintptr_t index
= tlb_index(env
, mmu_idx
, addr
);
1512 CPUTLBEntry
*entry
= tlb_entry(env
, mmu_idx
, addr
);
1513 target_ulong tlb_addr
= tlb_read_idx(entry
, access_type
);
1514 target_ulong page_addr
= addr
& TARGET_PAGE_MASK
;
1515 int flags
= TLB_FLAGS_MASK
;
1517 if (!tlb_hit_page(tlb_addr
, page_addr
)) {
1518 if (!victim_tlb_hit(env
, mmu_idx
, index
, access_type
, page_addr
)) {
1519 CPUState
*cs
= env_cpu(env
);
1521 if (!cs
->cc
->tcg_ops
->tlb_fill(cs
, addr
, fault_size
, access_type
,
1522 mmu_idx
, nonfault
, retaddr
)) {
1523 /* Non-faulting page table read failed. */
1526 return TLB_INVALID_MASK
;
1529 /* TLB resize via tlb_fill may have moved the entry. */
1530 index
= tlb_index(env
, mmu_idx
, addr
);
1531 entry
= tlb_entry(env
, mmu_idx
, addr
);
1534 * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1535 * to force the next access through tlb_fill. We've just
1536 * called tlb_fill, so we know that this entry *is* valid.
1538 flags
&= ~TLB_INVALID_MASK
;
1540 tlb_addr
= tlb_read_idx(entry
, access_type
);
1544 *pfull
= &env_tlb(env
)->d
[mmu_idx
].fulltlb
[index
];
1546 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
1547 if (unlikely(flags
& ~(TLB_WATCHPOINT
| TLB_NOTDIRTY
))) {
1552 /* Everything else is RAM. */
1553 *phost
= (void *)((uintptr_t)addr
+ entry
->addend
);
1557 int probe_access_full(CPUArchState
*env
, target_ulong addr
, int size
,
1558 MMUAccessType access_type
, int mmu_idx
,
1559 bool nonfault
, void **phost
, CPUTLBEntryFull
**pfull
,
1562 int flags
= probe_access_internal(env
, addr
, size
, access_type
, mmu_idx
,
1563 nonfault
, phost
, pfull
, retaddr
);
1565 /* Handle clean RAM pages. */
1566 if (unlikely(flags
& TLB_NOTDIRTY
)) {
1567 notdirty_write(env_cpu(env
), addr
, 1, *pfull
, retaddr
);
1568 flags
&= ~TLB_NOTDIRTY
;
1574 int probe_access_flags(CPUArchState
*env
, target_ulong addr
, int size
,
1575 MMUAccessType access_type
, int mmu_idx
,
1576 bool nonfault
, void **phost
, uintptr_t retaddr
)
1578 CPUTLBEntryFull
*full
;
1581 g_assert(-(addr
| TARGET_PAGE_MASK
) >= size
);
1583 flags
= probe_access_internal(env
, addr
, size
, access_type
, mmu_idx
,
1584 nonfault
, phost
, &full
, retaddr
);
1586 /* Handle clean RAM pages. */
1587 if (unlikely(flags
& TLB_NOTDIRTY
)) {
1588 notdirty_write(env_cpu(env
), addr
, 1, full
, retaddr
);
1589 flags
&= ~TLB_NOTDIRTY
;
1595 void *probe_access(CPUArchState
*env
, target_ulong addr
, int size
,
1596 MMUAccessType access_type
, int mmu_idx
, uintptr_t retaddr
)
1598 CPUTLBEntryFull
*full
;
1602 g_assert(-(addr
| TARGET_PAGE_MASK
) >= size
);
1604 flags
= probe_access_internal(env
, addr
, size
, access_type
, mmu_idx
,
1605 false, &host
, &full
, retaddr
);
1607 /* Per the interface, size == 0 merely faults the access. */
1612 if (unlikely(flags
& (TLB_NOTDIRTY
| TLB_WATCHPOINT
))) {
1613 /* Handle watchpoints. */
1614 if (flags
& TLB_WATCHPOINT
) {
1615 int wp_access
= (access_type
== MMU_DATA_STORE
1616 ? BP_MEM_WRITE
: BP_MEM_READ
);
1617 cpu_check_watchpoint(env_cpu(env
), addr
, size
,
1618 full
->attrs
, wp_access
, retaddr
);
1621 /* Handle clean RAM pages. */
1622 if (flags
& TLB_NOTDIRTY
) {
1623 notdirty_write(env_cpu(env
), addr
, 1, full
, retaddr
);
1630 void *tlb_vaddr_to_host(CPUArchState
*env
, abi_ptr addr
,
1631 MMUAccessType access_type
, int mmu_idx
)
1633 CPUTLBEntryFull
*full
;
1637 flags
= probe_access_internal(env
, addr
, 0, access_type
,
1638 mmu_idx
, true, &host
, &full
, 0);
1640 /* No combination of flags are expected by the caller. */
1641 return flags
? NULL
: host
;
1645 * Return a ram_addr_t for the virtual address for execution.
1647 * Return -1 if we can't translate and execute from an entire page
1648 * of RAM. This will force us to execute by loading and translating
1649 * one insn at a time, without caching.
1651 * NOTE: This function will trigger an exception if the page is
1654 tb_page_addr_t
get_page_addr_code_hostp(CPUArchState
*env
, target_ulong addr
,
1657 CPUTLBEntryFull
*full
;
1660 (void)probe_access_internal(env
, addr
, 1, MMU_INST_FETCH
,
1661 cpu_mmu_index(env
, true), false, &p
, &full
, 0);
1666 if (full
->lg_page_size
< TARGET_PAGE_BITS
) {
1673 return qemu_ram_addr_from_host_nofail(p
);
1676 /* Load/store with atomicity primitives. */
1677 #include "ldst_atomicity.c.inc"
1679 #ifdef CONFIG_PLUGIN
1681 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1682 * This should be a hot path as we will have just looked this path up
1683 * in the softmmu lookup code (or helper). We don't handle re-fills or
1684 * checking the victim table. This is purely informational.
1686 * This almost never fails as the memory access being instrumented
1687 * should have just filled the TLB. The one corner case is io_writex
1688 * which can cause TLB flushes and potential resizing of the TLBs
1689 * losing the information we need. In those cases we need to recover
1690 * data from a copy of the CPUTLBEntryFull. As long as this always occurs
1691 * from the same thread (which a mem callback will be) this is safe.
1694 bool tlb_plugin_lookup(CPUState
*cpu
, target_ulong addr
, int mmu_idx
,
1695 bool is_store
, struct qemu_plugin_hwaddr
*data
)
1697 CPUArchState
*env
= cpu
->env_ptr
;
1698 CPUTLBEntry
*tlbe
= tlb_entry(env
, mmu_idx
, addr
);
1699 uintptr_t index
= tlb_index(env
, mmu_idx
, addr
);
1700 target_ulong tlb_addr
= is_store
? tlb_addr_write(tlbe
) : tlbe
->addr_read
;
1702 if (likely(tlb_hit(tlb_addr
, addr
))) {
1703 /* We must have an iotlb entry for MMIO */
1704 if (tlb_addr
& TLB_MMIO
) {
1705 CPUTLBEntryFull
*full
;
1706 full
= &env_tlb(env
)->d
[mmu_idx
].fulltlb
[index
];
1708 data
->v
.io
.section
=
1709 iotlb_to_section(cpu
, full
->xlat_section
, full
->attrs
);
1710 data
->v
.io
.offset
= (full
->xlat_section
& TARGET_PAGE_MASK
) + addr
;
1712 data
->is_io
= false;
1713 data
->v
.ram
.hostaddr
= (void *)((uintptr_t)addr
+ tlbe
->addend
);
1717 SavedIOTLB
*saved
= &cpu
->saved_iotlb
;
1719 data
->v
.io
.section
= saved
->section
;
1720 data
->v
.io
.offset
= saved
->mr_offset
;
1728 * Probe for a load/store operation.
1729 * Return the host address and into @flags.
1732 typedef struct MMULookupPageData
{
1733 CPUTLBEntryFull
*full
;
1738 } MMULookupPageData
;
1740 typedef struct MMULookupLocals
{
1741 MMULookupPageData page
[2];
1747 * mmu_lookup1: translate one page
1749 * @data: lookup parameters
1750 * @mmu_idx: virtual address context
1751 * @access_type: load/store/code
1752 * @ra: return address into tcg generated code, or 0
1754 * Resolve the translation for the one page at @data.addr, filling in
1755 * the rest of @data with the results. If the translation fails,
1756 * tlb_fill will longjmp out. Return true if the softmmu tlb for
1757 * @mmu_idx may have resized.
1759 static bool mmu_lookup1(CPUArchState
*env
, MMULookupPageData
*data
,
1760 int mmu_idx
, MMUAccessType access_type
, uintptr_t ra
)
1762 target_ulong addr
= data
->addr
;
1763 uintptr_t index
= tlb_index(env
, mmu_idx
, addr
);
1764 CPUTLBEntry
*entry
= tlb_entry(env
, mmu_idx
, addr
);
1765 target_ulong tlb_addr
= tlb_read_idx(entry
, access_type
);
1766 bool maybe_resized
= false;
1768 /* If the TLB entry is for a different page, reload and try again. */
1769 if (!tlb_hit(tlb_addr
, addr
)) {
1770 if (!victim_tlb_hit(env
, mmu_idx
, index
, access_type
,
1771 addr
& TARGET_PAGE_MASK
)) {
1772 tlb_fill(env_cpu(env
), addr
, data
->size
, access_type
, mmu_idx
, ra
);
1773 maybe_resized
= true;
1774 index
= tlb_index(env
, mmu_idx
, addr
);
1775 entry
= tlb_entry(env
, mmu_idx
, addr
);
1777 tlb_addr
= tlb_read_idx(entry
, access_type
) & ~TLB_INVALID_MASK
;
1780 data
->flags
= tlb_addr
& TLB_FLAGS_MASK
;
1781 data
->full
= &env_tlb(env
)->d
[mmu_idx
].fulltlb
[index
];
1782 /* Compute haddr speculatively; depending on flags it might be invalid. */
1783 data
->haddr
= (void *)((uintptr_t)addr
+ entry
->addend
);
1785 return maybe_resized
;
1789 * mmu_watch_or_dirty
1791 * @data: lookup parameters
1792 * @access_type: load/store/code
1793 * @ra: return address into tcg generated code, or 0
1795 * Trigger watchpoints for @data.addr:@data.size;
1796 * record writes to protected clean pages.
1798 static void mmu_watch_or_dirty(CPUArchState
*env
, MMULookupPageData
*data
,
1799 MMUAccessType access_type
, uintptr_t ra
)
1801 CPUTLBEntryFull
*full
= data
->full
;
1802 target_ulong addr
= data
->addr
;
1803 int flags
= data
->flags
;
1804 int size
= data
->size
;
1806 /* On watchpoint hit, this will longjmp out. */
1807 if (flags
& TLB_WATCHPOINT
) {
1808 int wp
= access_type
== MMU_DATA_STORE
? BP_MEM_WRITE
: BP_MEM_READ
;
1809 cpu_check_watchpoint(env_cpu(env
), addr
, size
, full
->attrs
, wp
, ra
);
1810 flags
&= ~TLB_WATCHPOINT
;
1813 /* Note that notdirty is only set for writes. */
1814 if (flags
& TLB_NOTDIRTY
) {
1815 notdirty_write(env_cpu(env
), addr
, size
, full
, ra
);
1816 flags
&= ~TLB_NOTDIRTY
;
1818 data
->flags
= flags
;
1822 * mmu_lookup: translate page(s)
1824 * @addr: virtual address
1825 * @oi: combined mmu_idx and MemOp
1826 * @ra: return address into tcg generated code, or 0
1827 * @access_type: load/store/code
1830 * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1831 * bytes. Return true if the lookup crosses a page boundary.
1833 static bool mmu_lookup(CPUArchState
*env
, target_ulong addr
, MemOpIdx oi
,
1834 uintptr_t ra
, MMUAccessType type
, MMULookupLocals
*l
)
1840 l
->memop
= get_memop(oi
);
1841 l
->mmu_idx
= get_mmuidx(oi
);
1843 tcg_debug_assert(l
->mmu_idx
< NB_MMU_MODES
);
1845 /* Handle CPU specific unaligned behaviour */
1846 a_bits
= get_alignment_bits(l
->memop
);
1847 if (addr
& ((1 << a_bits
) - 1)) {
1848 cpu_unaligned_access(env_cpu(env
), addr
, type
, l
->mmu_idx
, ra
);
1851 l
->page
[0].addr
= addr
;
1852 l
->page
[0].size
= memop_size(l
->memop
);
1853 l
->page
[1].addr
= (addr
+ l
->page
[0].size
- 1) & TARGET_PAGE_MASK
;
1854 l
->page
[1].size
= 0;
1855 crosspage
= (addr
^ l
->page
[1].addr
) & TARGET_PAGE_MASK
;
1857 if (likely(!crosspage
)) {
1858 mmu_lookup1(env
, &l
->page
[0], l
->mmu_idx
, type
, ra
);
1860 flags
= l
->page
[0].flags
;
1861 if (unlikely(flags
& (TLB_WATCHPOINT
| TLB_NOTDIRTY
))) {
1862 mmu_watch_or_dirty(env
, &l
->page
[0], type
, ra
);
1864 if (unlikely(flags
& TLB_BSWAP
)) {
1865 l
->memop
^= MO_BSWAP
;
1868 /* Finish compute of page crossing. */
1869 int size0
= l
->page
[1].addr
- addr
;
1870 l
->page
[1].size
= l
->page
[0].size
- size0
;
1871 l
->page
[0].size
= size0
;
1874 * Lookup both pages, recognizing exceptions from either. If the
1875 * second lookup potentially resized, refresh first CPUTLBEntryFull.
1877 mmu_lookup1(env
, &l
->page
[0], l
->mmu_idx
, type
, ra
);
1878 if (mmu_lookup1(env
, &l
->page
[1], l
->mmu_idx
, type
, ra
)) {
1879 uintptr_t index
= tlb_index(env
, l
->mmu_idx
, addr
);
1880 l
->page
[0].full
= &env_tlb(env
)->d
[l
->mmu_idx
].fulltlb
[index
];
1883 flags
= l
->page
[0].flags
| l
->page
[1].flags
;
1884 if (unlikely(flags
& (TLB_WATCHPOINT
| TLB_NOTDIRTY
))) {
1885 mmu_watch_or_dirty(env
, &l
->page
[0], type
, ra
);
1886 mmu_watch_or_dirty(env
, &l
->page
[1], type
, ra
);
1890 * Since target/sparc is the only user of TLB_BSWAP, and all
1891 * Sparc accesses are aligned, any treatment across two pages
1892 * would be arbitrary. Refuse it until there's a use.
1894 tcg_debug_assert((flags
& TLB_BSWAP
) == 0);
/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    target_ulong tlb_addr;
    void *hostaddr;
    CPUTLBEntryFull *full;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    index = tlb_index(env, mmu_idx, addr);
    tlbe = tlb_entry(env, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions.  */
    tlb_addr = tlb_addr_write(tlbe);
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     MMU_DATA_STORE, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /*
     * Let the guest notice RMW on a write-only page.
     * We have just verified that the page is writable.
     * Subpage lookups may have left TLB_INVALID_MASK set,
     * but addr_read will only be -1 if PAGE_READ was unset.
     */
    if (unlikely(tlbe->addr_read == -1)) {
        tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
        /*
         * Since we don't support reads and writes to different
         * addresses, and we do have the proper page loaded for
         * write, this shouldn't ever return.  But just in case,
         * handle via stop-the-world.
         */
        goto stop_the_world;
    }
    /* Collect TLB_WATCHPOINT for read. */
    tlb_addr |= tlbe->addr_read;

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
    full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, size, full, retaddr);
    }

    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs,
                             BP_MEM_READ | BP_MEM_WRITE, retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
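/*
 * Worked example for the two alignment checks above: a 4-byte atomic
 * access at address 0x1002 with a_bits == 2 fails the guest check,
 * because (addr & ((1 << 2) - 1)) == 2, and raises cpu_unaligned_access.
 * With a_bits == 0 the same access instead fails the host check
 * (addr & (size - 1)) != 0 and takes the stop_the_world exit.
 */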
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 *
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
/**
 * do_ld_mmio_beN:
 * @p: translation parameters
 * @ret_be: accumulated data
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 *
 * Load @p->size bytes from @p->addr, which is memory-mapped i/o.
 * The bytes are concatenated in big-endian order with @ret_be.
 */
static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p,
                               uint64_t ret_be, int mmu_idx,
                               MMUAccessType type, uintptr_t ra)
{
    CPUTLBEntryFull *full = p->full;
    target_ulong addr = p->addr;
    int i, size = p->size;

    QEMU_IOTHREAD_LOCK_GUARD();
    for (i = 0; i < size; i++) {
        uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB);
        ret_be = (ret_be << 8) | x;
    }
    return ret_be;
}
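/*
 * Illustration of the big-endian accumulation above: with ret_be == 0,
 * reading the three MMIO bytes 0x12, 0x34, 0x56 yields 0x123456; any
 * previously accumulated value simply shifts left by eight bits for
 * each additional byte.
 */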
/**
 * do_ld_bytes_beN
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * Load @p->size bytes from @p->haddr, which is RAM.
 * The bytes are concatenated in big-endian order with @ret_be.
 */
static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
{
    uint8_t *haddr = p->haddr;
    int i, size = p->size;

    for (i = 0; i < size; i++) {
        ret_be = (ret_be << 8) | haddr[i];
    }
    return ret_be;
}
/**
 * do_ld_parts_beN
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but atomically on each aligned part.
 */
static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
{
    void *haddr = p->haddr;
    int size = p->size;

    do {
        uint64_t x;
        int n;

        /*
         * Find minimum of alignment and size.
         * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
         * would have only checked the low bits of addr|size once at the start,
         * but is just as easy.
         */
        switch (((uintptr_t)haddr | size) & 7) {
        case 4:
            x = cpu_to_be32(load_atomic4(haddr));
            ret_be = (ret_be << 32) | x;
            n = 4;
            break;
        case 2:
        case 6:
            x = cpu_to_be16(load_atomic2(haddr));
            ret_be = (ret_be << 16) | x;
            n = 2;
            break;
        default:
            x = *(uint8_t *)haddr;
            ret_be = (ret_be << 8) | x;
            n = 1;
            break;
        case 0:
            g_assert_not_reached();
        }
        haddr += n;
        size -= n;
    } while (size != 0);
    return ret_be;
}
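/*
 * Illustration of the dispatch above: with haddr 2-aligned (low bits 0b010)
 * and size == 6, the first pass sees (haddr | size) & 7 == 6 and performs a
 * 2-byte atomic load; the second pass then sees 4 | 4 == 4 and finishes with
 * a single 4-byte atomic load.
 */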
/**
 * do_ld_whole_be4
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * Four aligned bytes are guaranteed to cover the load.
 */
static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 3;
    uint32_t x = load_atomic4(p->haddr - o);

    x = cpu_to_be32(x);
    x <<= o * 8;
    x >>= (4 - p->size) * 8;
    return (ret_be << (p->size * 8)) | x;
}
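/*
 * Illustration: for p->addr at offset o == 1 within its aligned word and
 * p->size == 2, the aligned 32-bit load brings in one leading and one
 * trailing byte; the left shift by o * 8 discards the leading byte and
 * the right shift by (4 - size) * 8 discards the trailing one, leaving
 * the two addressed bytes in the low 16 bits of the result.
 */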
/**
 * do_ld_whole_be8
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * Eight aligned bytes are guaranteed to cover the load.
 */
static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
                                MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 7;
    uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);

    x = cpu_to_be64(x);
    x <<= o * 8;
    x >>= (8 - p->size) * 8;
    return (ret_be << (p->size * 8)) | x;
}
/**
 * do_ld_whole_be16
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * 16 aligned bytes are guaranteed to cover the load.
 */
static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
                               MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 15;
    Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
    int size = p->size;

    if (!HOST_BIG_ENDIAN) {
        y = bswap128(y);
    }
    y = int128_lshift(y, o * 8);
    y = int128_urshift(y, (16 - size) * 8);
    x = int128_make64(ret_be);
    x = int128_lshift(x, size * 8);
    return int128_or(x, y);
}
/*
 * Wrapper for the above.
 */
static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
                          uint64_t ret_be, int mmu_idx, MMUAccessType type,
                          MemOp mop, uintptr_t ra)
{
    MemOp atom;
    unsigned tmp, half_size;

    if (unlikely(p->flags & TLB_MMIO)) {
        return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra);
    }

    /*
     * It is a given that we cross a page and therefore there is no
     * atomicity for the load as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        return do_ld_parts_beN(p, ret_be);

    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        tmp = mop & MO_SIZE;
        tmp = tmp ? tmp - 1 : 0;
        half_size = 1 << tmp;
        if (atom == MO_ATOM_IFALIGN_PAIR
            ? p->size == half_size
            : p->size >= half_size) {
            if (!HAVE_al8_fast && p->size < 4) {
                return do_ld_whole_be4(p, ret_be);
            } else {
                return do_ld_whole_be8(env, ra, p, ret_be);
            }
        }
        /* fall through */

    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        return do_ld_bytes_beN(p, ret_be);

    default:
        g_assert_not_reached();
    }
}
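/*
 * Illustration of the pair sizing above: for a 4-byte access
 * (mop & MO_SIZE == 2), tmp becomes 1 and half_size becomes 2, so the
 * whole-word path is taken when this page holds exactly one 2-byte half
 * (MO_ATOM_IFALIGN_PAIR) or at least 2 bytes (MO_ATOM_WITHIN16_PAIR);
 * otherwise the bytewise fallback runs.
 */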
/*
 * Wrapper for the above, for 8 < size < 16.
 */
static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
                          uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
{
    int size = p->size;
    uint64_t b;
    MemOp atom;

    if (unlikely(p->flags & TLB_MMIO)) {
        p->size = size - 8;
        a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra);
        p->addr += p->size;
        p->size = 8;
        b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra);
        return int128_make128(b, a);
    }

    /*
     * It is a given that we cross a page and therefore there is no
     * atomicity for the load as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        p->size = size - 8;
        a = do_ld_parts_beN(p, a);
        p->haddr += size - 8;
        p->size = 8;
        b = do_ld_parts_beN(p, 0);
        break;

    case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
        return do_ld_whole_be16(env, ra, p, a);

    case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        p->size = size - 8;
        a = do_ld_bytes_beN(p, a);
        b = ldq_be_p(p->haddr + size - 8);
        break;

    default:
        g_assert_not_reached();
    }

    return int128_make128(b, a);
}
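/*
 * Illustration of the MMIO split above: when this page holds 12 of the
 * 16 bytes, the first call reads the leading 4 bytes (size - 8) into the
 * high part 'a', the second call reads the remaining 8 bytes into 'b',
 * and int128_make128(b, a) reassembles the big-endian value.
 */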
static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                       MMUAccessType type, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
    } else {
        return *(uint8_t *)p->haddr;
    }
}
static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint16_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
    }

    /* Perform the load host endian, then swap if necessary. */
    ret = load_atom_2(env, ra, p->haddr, memop);
    if (memop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}
static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint32_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
    }

    /* Perform the load host endian. */
    ret = load_atom_4(env, ra, p->haddr, memop);
    if (memop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}
static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint64_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
    }

    /* Perform the load host endian. */
    ret = load_atom_8(env, ra, p->haddr, memop);
    if (memop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}
static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                          uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;

    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    tcg_debug_assert(!crosspage);

    return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint16_t ret;
    uint8_t a, b;

    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
    b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);

    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = a | (b << 8);
    } else {
        ret = b | (a << 8);
    }
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint32_t ret;

    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
    ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t ret;

    crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
    ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
}
static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr,
                          MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t a, b;
    Int128 ret;
    int first;

    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
    if (likely(!crosspage)) {
        /* Perform the load host endian. */
        if (unlikely(l.page[0].flags & TLB_MMIO)) {
            QEMU_IOTHREAD_LOCK_GUARD();
            a = io_readx(env, l.page[0].full, l.mmu_idx, addr,
                         ra, MMU_DATA_LOAD, MO_64);
            b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8,
                         ra, MMU_DATA_LOAD, MO_64);
            ret = int128_make128(HOST_BIG_ENDIAN ? b : a,
                                 HOST_BIG_ENDIAN ? a : b);
        } else {
            ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
        }
        if (l.memop & MO_BSWAP) {
            ret = bswap128(ret);
        }
        return ret;
    }

    first = l.page[0].size;
    if (first == 8) {
        MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;

        a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
        b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
        if ((mop8 & MO_BSWAP) == MO_LE) {
            ret = int128_make128(a, b);
        } else {
            ret = int128_make128(b, a);
        }
        return ret;
    }

    if (first < 8) {
        a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
                      MMU_DATA_LOAD, l.memop, ra);
        ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
    } else {
        ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
        b = int128_getlo(ret);
        ret = int128_lshift(ret, l.page[1].size * 8);
        a = int128_gethi(ret);
        b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
                      MMU_DATA_LOAD, l.memop, ra);
        ret = int128_make128(b, a);
    }
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap128(ret);
    }
    return ret;
}
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       uint32_t oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    return do_ld16_mmu(env, addr, oi, retaddr);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}
/*
 * Load helpers for cpu_ldst.h.
 */

static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
    ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    ret = do_ld16_mmu(env, addr, oi, ra);
    plugin_load_cb(env, addr, oi);
    return ret;
}
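/*
 * A minimal usage sketch for the cpu_ldst.h entry points above; the mmu
 * index chosen for cpu_mmu_index() is illustrative and depends on the
 * calling target:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());
 */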
/*
 * Store Helpers
 */

/**
 * do_st_mmio_leN:
 * @p: translation parameters
 * @val_le: data to store
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 *
 * Store @p->size bytes at @p->addr, which is memory-mapped i/o.
 * The bytes to store are extracted in little-endian order from @val_le;
 * return the bytes of @val_le beyond @p->size that have not been stored.
 */
static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
                               uint64_t val_le, int mmu_idx, uintptr_t ra)
{
    CPUTLBEntryFull *full = p->full;
    target_ulong addr = p->addr;
    int i, size = p->size;

    QEMU_IOTHREAD_LOCK_GUARD();
    for (i = 0; i < size; i++, val_le >>= 8) {
        io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB);
    }
    return val_le;
}
/*
 * Wrapper for the above.
 */
static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
                          uint64_t val_le, int mmu_idx,
                          MemOp mop, uintptr_t ra)
{
    MemOp atom;
    unsigned tmp, half_size;

    if (unlikely(p->flags & TLB_MMIO)) {
        return do_st_mmio_leN(env, p, val_le, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        return val_le >> (p->size * 8);
    }

    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        return store_parts_leN(p->haddr, p->size, val_le);

    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        tmp = mop & MO_SIZE;
        tmp = tmp ? tmp - 1 : 0;
        half_size = 1 << tmp;
        if (atom == MO_ATOM_IFALIGN_PAIR
            ? p->size == half_size
            : p->size >= half_size) {
            if (!HAVE_al8_fast && p->size <= 4) {
                return store_whole_le4(p->haddr, p->size, val_le);
            } else if (HAVE_al8) {
                return store_whole_le8(p->haddr, p->size, val_le);
            } else {
                cpu_loop_exit_atomic(env_cpu(env), ra);
            }
        }
        /* fall through */

    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        return store_bytes_leN(p->haddr, p->size, val_le);

    default:
        g_assert_not_reached();
    }
}
/*
 * Wrapper for the above, for 8 < size < 16.
 */
static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
                            Int128 val_le, int mmu_idx,
                            MemOp mop, uintptr_t ra)
{
    int size = p->size;
    MemOp atom;

    if (unlikely(p->flags & TLB_MMIO)) {
        p->size = 8;
        do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra);
        p->size = size - 8;
        p->addr += 8;
        return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        return int128_gethi(val_le) >> ((size - 8) * 8);
    }

    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        store_parts_leN(p->haddr, 8, int128_getlo(val_le));
        return store_parts_leN(p->haddr + 8, p->size - 8,
                               int128_gethi(val_le));

    case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
        if (!HAVE_ATOMIC128_RW) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
        return store_whole_le16(p->haddr, p->size, val_le);

    case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        stq_le_p(p->haddr, int128_getlo(val_le));
        return store_bytes_leN(p->haddr + 8, p->size - 8,
                               int128_gethi(val_le));

    default:
        g_assert_not_reached();
    }
}
static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
                    int mmu_idx, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        *(uint8_t *)p->haddr = val;
    }
}
static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap16(val);
        }
        store_atom_2(env, ra, p->haddr, memop, val);
    }
}
static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap32(val);
        }
        store_atom_4(env, ra, p->haddr, memop, val);
    }
}
static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap64(val);
        }
        store_atom_8(env, ra, p->haddr, memop, val);
    }
}
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    tcg_debug_assert(!crosspage);

    do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
}
static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint8_t a, b;

    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    if ((l.memop & MO_BSWAP) == MO_LE) {
        a = val, b = val >> 8;
    } else {
        b = val, a = val >> 8;
    }
    do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
    do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    do_st2_mmu(env, addr, val, oi, retaddr);
}
static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    /* Swap to little endian for simplicity, then store by bytes. */
    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap32(val);
    }
    val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
    (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    do_st4_mmu(env, addr, val, oi, retaddr);
}
static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    /* Swap to little endian for simplicity, then store by bytes. */
    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap64(val);
    }
    val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
    (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    do_st8_mmu(env, addr, val, oi, retaddr);
}
static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                        MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t a, b;
    int first;

    crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        /* Swap to host endian if necessary, then store. */
        if (l.memop & MO_BSWAP) {
            val = bswap128(val);
        }
        if (unlikely(l.page[0].flags & TLB_MMIO)) {
            QEMU_IOTHREAD_LOCK_GUARD();
            if (HOST_BIG_ENDIAN) {
                b = int128_getlo(val), a = int128_gethi(val);
            } else {
                a = int128_getlo(val), b = int128_gethi(val);
            }
            io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64);
            io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64);
        } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
            /* nothing */
        } else {
            store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
        }
        return;
    }

    first = l.page[0].size;
    if (first == 8) {
        MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;

        if (l.memop & MO_BSWAP) {
            val = bswap128(val);
        }
        if (HOST_BIG_ENDIAN) {
            b = int128_getlo(val), a = int128_gethi(val);
        } else {
            a = int128_getlo(val), b = int128_gethi(val);
        }
        do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
        do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
        return;
    }

    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap128(val);
    }
    if (first < 8) {
        do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
        val = int128_urshift(val, first * 8);
        do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
    } else {
        b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
        do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
    }
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    do_st16_mmu(env, addr, val, oi, retaddr);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}
/*
 * Store Helpers for cpu_ldst.h
 */

static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    helper_stb_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    do_st2_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    do_st4_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    do_st8_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                  MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    do_st16_mmu(env, addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}
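/*
 * A minimal usage sketch mirroring the load side above; again, the mmu
 * index passed to cpu_mmu_index() is illustrative:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, false));
 *     cpu_stl_mmu(env, addr, val, oi, GETPC());
 */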
#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
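/*
 * Each inclusion of atomic_template.h above instantiates the
 * cpu_atomic_*_mmu family for one operand size; the 4-byte expansion,
 * for example, provides cpu_atomic_cmpxchgl_le_mmu and
 * cpu_atomic_cmpxchgl_be_mmu.  The generated helpers obtain their host
 * address through atomic_mmu_lookup() above.
 */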
/* Code access functions.  */

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
}