/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

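/* Initialize one vCPU's TLB state; at present only the spinlock that
 * serializes cross-vCPU updates to the TLB needs explicit setup. */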
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);
}

/* flush_all_helper: run fn across all cpus
 *
 * fn is queued as asynchronous work on every vCPU except the source.
 * The source vCPU is not touched here: callers either invoke fn on it
 * directly, or queue it as "safe" work to create a synchronisation
 * point where all queued work finishes before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

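/* Sum the per-vCPU TLB flush statistics for reporting purposes. */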
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

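/* Invalidate every entry in one mmu_idx's TLB and victim TLB, and reset
 * its large-page tracking.  Called with tlb_c.lock held. */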
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    qemu_spin_lock(&env->tlb_c.lock);
    env->tlb_c.pending_flush &= ~mmu_idx_bitmask;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_flush_one_mmuidx_locked(env, mmu_idx);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (mmu_idx_bitmask == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(mmu_idx_bitmask));
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        CPUArchState *env = cpu->env_ptr;
        uint16_t pending, to_clean;

        /* Coalesce with any flushes already queued for this vCPU:
         * only the mmu_idx bits that are not yet pending need new work. */
        qemu_spin_lock(&env->tlb_c.lock);
        pending = env->tlb_c.pending_flush;
        to_clean = idxmap & ~pending;
        env->tlb_c.pending_flush = pending | idxmap;
        qemu_spin_unlock(&env->tlb_c.lock);

        if (to_clean) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", to_clean);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(to_clean));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

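/*
 * Illustrative example (hypothetical mmu_idx assignment, not from the
 * original source): a target that kept kernel and user translations in
 * MMU indexes 0 and 1 could drop both on this vCPU with
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 1));
 *
 * The *_all_cpus variants below broadcast the same bitmask to every
 * other vCPU via flush_all_helper().
 */
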
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

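/* Return true if the entry covers PAGE for any access kind:
 * read, write or instruction fetch. */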
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        tlb_flush_entry_locked(tlb_entry(env, midx, page), page);
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* We hijack the bottom bits of the page address to carry the mmuidx
 * bitmask, so the build must fail if those bits cannot hold it.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

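/*
 * Worked example (illustrative, not from the original source): with
 * 4KB target pages, flushing page 0x1234000 from MMU indexes 0 and 1
 * encodes as the single value 0x1234003; the async worker above
 * recovers the page with TARGET_PAGE_MASK and the index bitmask with
 * ALL_MMUIDX_BITS.
 */
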
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the RAM page containing
   'ram_addr' can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the RAM page containing 'ram_addr'
   are no longer tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and flush the affected mmu_idx entirely when one of those
   pages is invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}

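/*
 * Worked example (illustrative, not from the original source): suppose
 * the tracked region is a 1MB page at 0x40000000 (mask 0xfff00000) and
 * a 4KB page at 0x40250000 is added.  lp_mask starts at 0xfffff000, is
 * narrowed to 0xfff00000 by the existing mask, then widens one bit at
 * a time until (lp_addr ^ vaddr) & lp_mask == 0, ending at 0xffc00000:
 * the tracked region becomes 0x40000000-0x403fffff.
 */
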
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

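/* Translate a host pointer back to a ram_addr_t, aborting on failure;
 * callers only pass pointers that are expected to lie within guest RAM. */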
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

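/* Perform a load through the I/O path: resolve the iotlb entry to a
 * MemoryRegion and dispatch the read, taking the iothread lock first if
 * the region requires it.  Also handles the TLB_RECHECK slow path for
 * accesses whose protection covers less than a full target page. */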
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = tlb_addr_write(entry);
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

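/*
 * Usage sketch (illustrative): in a context that has env, mmu_idx and
 * index in scope, a fast-path miss probes the victim TLB with
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) { tlb_fill(...); }
 * as done in get_page_addr_code(), probe_write() and
 * atomic_mmu_lookup() below.
 */
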
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

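/* TGT_LE/TGT_BE convert a value between the target's endianness and an
 * explicit little/big-endian view; the softmmu templates included below
 * use them to implement both byte orders from one code path. */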
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

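/*
 * Note (added for orientation): each inclusion of softmmu_template.h
 * above instantiates the out-of-line load/store helpers for one access
 * size (e.g. the 1-byte pass yields helper_ret_ldub_mmu and
 * helper_ret_stb_mmu); the template takes care of #undef-ing DATA_SIZE
 * between inclusions so it can be expanded again for the next size.
 */
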
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

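/* For code accesses the templates are re-expanded with the _cmmu suffix
 * and GETPC() redefined to 0: these helpers are invoked from the
 * translator rather than from generated code, so there is no host
 * return address to unwind from. */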
/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"