/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(this_cpu)->created ||                      \
                     qemu_cpu_is_self(this_cpu));                 \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

/* flush_all_helper: run fn asynchronously on all cpus other than src
 *
 * The caller then runs fn on src itself: the *_all_cpus variants call
 * it directly, while the *_all_cpus_synced variants queue it as "safe"
 * work, creating a synchronisation point where all queued work will be
 * finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_entry->addr_write, page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx,
                                       target_ulong page)
{
    int k;
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page);
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
        tlb_flush_vtlb_page(env, mmu_idx, addr);
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
            tlb_flush_vtlb_page(env, mmu_idx, addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

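/*
 * Illustration of the encoding used above (hypothetical values, assuming
 * TARGET_PAGE_BITS == 12 and NB_MMU_MODES >= 3): a call such as
 *
 *     tlb_flush_page_by_mmuidx(cpu, 0x40001234, (1 << 0) | (1 << 2));
 *
 * packs the page address and the mmuidx bitmap into the single
 * target_ulong 0x40001005; the async workers above recover the two
 * halves with "& TARGET_PAGE_MASK" and "& ALL_MMUIDX_BITS" respectively.
 */
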
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Because we want other vCPUs to respond to changes straight away we
 * update the te->addr_write field atomically. If the TLB entry has
 * been changed by the vCPU in the mean time we skip the update.
 *
 * As this function uses atomic accesses we also need to ensure
 * updates to tlb_entries follow the same access rules. We don't need
 * to worry about this for oversized guests as MTTCG is disabled for
 * them.
 */

static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                                  uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

/* For atomic correctness when running MTTCG we need to use the right
 * primitives when copying entries */
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic_set)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic_set) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with flag setting in tlb_reset_dirty_range */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU). As such care needs to be taken that we don't
 * dangerously race with another vCPU update. The only thing actually
 * updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

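/*
 * Worked example for the mask-widening loop above (hypothetical values,
 * assuming 2MB large pages): with an existing region of
 * tlb_flush_addr = 0x40000000 and tlb_flush_mask = 0xffe00000, adding a
 * 2MB page at vaddr = 0x40300000 shifts the mask once (the XOR of the
 * two addresses has bit 21 set), leaving a combined region of
 * 0x40000000 / 0xffc00000 that covers both pages.
 */
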
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size < TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        if (size > TARGET_PAGE_SIZE) {
            tlb_add_large_page(env, vaddr, size);
        }
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page(env, mmu_idx, vaddr_page);

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper(tv, te, true);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    /* Pairs with flag setting in tlb_reset_dirty_range */
    copy_tlb_helper(te, &tn, true);
    /* atomic_mb_set(&te->addr_write, write_address); */
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

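/*
 * Sketch of how a target's tlb_fill() implementation typically feeds the
 * function above (illustrative only; the prot bits and mmu_idx come from
 * the target's own page table walk, not from this file):
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                  mmu_idx, TARGET_PAGE_SIZE);
 */
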
static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        int index;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        int index;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            copy_tlb_helper(&tmptlb, tlb, false);
            copy_tlb_helper(tlb, vtlb, true);
            copy_tlb_helper(vtlb, &tmptlb, true);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

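/*
 * For example, VICTIM_TLB_HIT(addr_write, addr) in the functions below
 * compares the page-aligned addr against the addr_write field of each
 * victim TLB entry for the mmu_idx/index already in scope at the call
 * site, promoting the entry back into the main TLB on a match.
 */
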
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    int mmu_idx, index;
    void *p;
    MemoryRegion *mr;
    MemoryRegionSection *section;
    CPUState *cpu = ENV_GET_CPU(env);
    CPUIOTLBEntry *iotlbentry;
    hwaddr physaddr, mr_offset;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env, true);
    if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_read, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
    }

    if (unlikely((env->tlb_table[mmu_idx][index].addr_code &
                  (TLB_RECHECK | TLB_INVALID_MASK)) == TLB_RECHECK)) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        int index;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);

        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access. We can't handle this, so for now just stop */
            cpu_abort(cpu, "Unable to handle guest executing from RAM within "
                      "a small MPU region at 0x" TARGET_FMT_lx, addr);
        }
        /*
         * Fall through to handle IO accesses (which will almost certainly
         * also result in failure)
         */
    }

    iotlbentry = &env->iotlb[mmu_idx][index];
    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    if (memory_region_is_unassigned(mr)) {
        qemu_mutex_lock_iothread();
        if (memory_region_request_mmio_ptr(mr, addr)) {
            qemu_mutex_unlock_iothread();
            /* A MemoryRegion is potentially added so re-run the
             * get_page_addr_code.
             */
            return get_page_addr_code(env, addr);
        }
        qemu_mutex_unlock_iothread();

        /* Give the new-style cpu_transaction_failed() hook first chance
         * to handle this.
         * This is not the ideal place to detect and generate CPU
         * exceptions for instruction fetch failure (for instance
         * we don't know the length of the access that the CPU would
         * use, and it would be better to go ahead and try the access
         * and use the MemTXResult it produced). However it is the
         * simplest place we have currently available for the check.
         */
        mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;
        cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
                               iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);

        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
        /* The CPU's unassigned access hook might have longjumped out
         * with an exception. If it didn't (or there was no hook) then
         * we can't proceed further.
         */
        report_bad_exec(cpu, addr);
        exit(1);
    }
    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if (!tlb_hit(tlb_addr, addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

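/*
 * Typical use of the above (a sketch, not taken from this file): a target
 * helper that is about to perform a multi-byte store can call
 *
 *     probe_write(env, addr, size, cpu_mmu_index(env, false), GETPC());
 *
 * up front, so that any permission fault is raised before any bytes of
 * the store have been committed.
 */
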
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"