/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

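/*
 * The fast-path TLB is always a power-of-two in size; fast->mask caches
 * (n_entries - 1) << CPU_TLB_ENTRY_BITS so that TCG-generated code can
 * index the table with a single AND.  The two helpers below recover the
 * entry count and the table size in bytes from that mask.
 */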
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
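
/*
 * Worked example of the sizing heuristic above (illustrative numbers,
 * not from the original source): with old_size == 1024 and
 * window_max_entries == 800, rate == 78%, so the table doubles to 2048
 * on this flush.  With window_max_entries == 200 (rate 19%) in an
 * expired window, pow2ceil(200) == 256 gives an expected rate of
 * 200 * 100 / 256 == 78% > 70%, so ceil is doubled and the table
 * shrinks to 512 entries instead of 256.
 */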

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

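/* Bookkeeping for the dynamic-sizing heuristic: track live entries. */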
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

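/* Allocate and initialize all of @cpu's TLBs; they all start out flushed. */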
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

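/* Free the table and iotlb arrays allocated by tlb_mmu_init(). */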
void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

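/*
 * Flush the dirty subset of the mmu_idx bitmap in @data.host_int on
 * @cpu, and account the flush as full, partial or elided.  Always runs
 * on the vCPU's own thread, either directly or as queued work.
 */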
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
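
/*
 * Example use (hypothetical target code, not part of this file): a
 * target with software-defined MMU_USER_IDX and MMU_KERNEL_IDX
 * constants could flush both translation regimes at once with
 *
 *     tlb_flush_by_mmuidx(cs, (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
 */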

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
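
/*
 * Encoding example (assuming TARGET_PAGE_BITS == 12, an assumption for
 * illustration): a flush of page 0x7fff2000 for idxmap 0x3 travels as
 * target_ptr 0x7fff2003; the decode above recovers addr == 0x7fff2000
 * and idxmap == 0x3.  This is why the allocation-free path is only
 * usable when idxmap < TARGET_PAGE_SIZE.
 */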

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address.  If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set.  We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update.  The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
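
/*
 * Worked example for the widening loop above (illustrative addresses):
 * an existing 2MB region at 0x40000000 (mask 0xffe00000) extended by a
 * page at 0x40300000 differs in bit 21, so lp_mask shifts once to
 * 0xffc00000 and the recorded region becomes 4MB at 0x40000000.
 */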

/* Add a new TLB entry.  At most one entry for a given virtual address
 * is permitted.  Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access.  Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
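
/*
 * Example use (hypothetical target tlb_fill code, for illustration
 * only): after a successful guest page-table walk, a target might
 * install a read/write 4KB mapping with
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  PAGE_READ | PAGE_WRITE, mmu_idx, TARGET_PAGE_SIZE);
 */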

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

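/*
 * Perform an @op-sized load through the I/O path: map the iotlb entry
 * back to its MemoryRegionSection, take the BQL if the region requires
 * it, dispatch the read, and report any MemTxResult failure to the
 * target via cpu_transaction_failed().
 */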
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
static void save_iotlb_data(CPUState *cs, hwaddr addr,
                            MemoryRegionSection *section, hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->addr = addr;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}

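/*
 * Store counterpart of io_readx(); additionally stashes the iotlb data
 * for plugins, since the dispatched write may flush or resize the TLB.
 */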
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

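/* Read one comparator field of a TLB entry, given its byte offset. */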
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

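/*
 * Handle a write to a page whose TLB entry has TLB_NOTDIRTY set:
 * invalidate any TBs translated from the page, mark the page dirty for
 * VGA and migration, and drop the notdirty trap once no translated
 * code remains in it.
 */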
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}

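/*
 * Common guts of probe_access() and friends: look up @addr for
 * @access_type, filling the TLB if needed (honouring @nonfault), and
 * return the entry's TLB_* flags with *phost set to the host address,
 * or NULL for anything MMIO-like.
 */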
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tlb_fill(cs, addr, fault_size, access_type,
                              mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags = tlb_addr & TLB_FLAGS_MASK;

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}

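/*
 * Like probe_access(), but sized zero and fault-tolerant when @nonfault
 * is set: returns the page's TLB_* flags and sets *phost to the host
 * address, or NULL when the page is not directly accessible.
 */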
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                  nonfault, phost, retaddr);

    /* Handle clean RAM pages. */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

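/*
 * A minimal usage sketch of probe_access_flags() (illustrative only;
 * "addr", "mmu_idx" and "retaddr" stand in for a caller's values):
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         ... the non-faulting page table walk failed; no exception ...
 *     } else if (flags & TLB_MMIO) {
 *         ... not RAM: host is NULL, use the slow path ...
 *     } else {
 *         ... RAM: host is valid; TLB_WATCHPOINT may still be set ...
 *     }
 */
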
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints. */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages. */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        }
    }

    return host;
}

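/*
 * A usage sketch for probe_access() (illustrative; a hypothetical
 * block-zeroing helper, with "vaddr", "blocklen" and "mmu_idx" assumed):
 *
 *     void *host = probe_access(env, vaddr, blocklen, MMU_DATA_STORE,
 *                               mmu_idx, GETPC());
 *     if (host) {
 *         memset(host, 0, blocklen);   ... entire block is plain RAM ...
 *     } else {
 *         ... MMIO (or blocklen == 0): fall back to byte stores ...
 *     }
 *
 * Any fault is raised inside probe_access() itself, before a single
 * byte is written, so the operation is all-or-nothing with respect
 * to faults.
 */
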
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, 0);

    /* No combination of flags is expected by the caller. */
    return flags ? NULL : host;
}

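/*
 * Example (illustrative): tlb_vaddr_to_host() is the non-faulting probe;
 * a NULL result only means "not directly addressable right now" (a TLB
 * miss that could not be filled without faulting, MMIO, a watchpoint, or
 * a clean page), so callers fall back to the ordinary accessors:
 *
 *     uint8_t *p = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
 *     if (p == NULL) {
 *         ... use cpu_stb_mmuidx_ra() and friends instead ...
 *     }
 */
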
#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This is a hot path: the access being instrumented will normally have
 * just been looked up in the softmmu code (or helper), so we do not
 * handle re-fills or check the victim table.  This is purely
 * informational.
 *
 * The lookup almost never fails, as the access being instrumented
 * should have just filled the TLB.  The one corner case is io_writex,
 * which can cause TLB flushes and potential resizing of the TLBs,
 * losing the information we need.  In those cases we recover the data
 * from a copy of the iotlbentry.  As long as this always occurs from
 * the same thread (which a mem callback will be), this is safe.
 */

bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUIOTLBEntry *iotlbentry;
            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
            data->is_io = true;
            data->v.io.section =
                iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = addr + tlbe->addend;
        }
        return true;
    } else {
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}

#endif

/*
 * Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or I/O operations, to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment. */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /*
         * We get here if guest alignment was not requested, or was not
         * enforced by cpu_unaligned_access above.  We might widen the
         * access and emulate, but for now mark an exception and exit
         * the cpu loop.
         */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access. */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /*
         * There's really nothing that can be done to support this
         * apart from stop-the-world.
         */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page. */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /*
         * Since we don't support reads and writes to different addresses,
         * and we do have the proper page loaded for write, this shouldn't
         * ever return.  But just in case, handle via stop-the-world.
         */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1 << s_bits,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}

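/*
 * Worked example of the two alignment checks above (illustrative): for
 * a 4-byte atomic (s_bits = 2) at addr = 0x1003 on a guest that imposes
 * no alignment requirement (a_bits = 0), the first check passes, but
 * 0x1003 & ((1 << 2) - 1) != 0, so we take the stop_the_world path
 * instead of trying to emulate a misaligned read-modify-write.
 */
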
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}

static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load. */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access. */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine. */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine. */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}

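/*
 * Worked example of the unaligned combine above (illustrative): with
 * 4KiB pages, a 4-byte little-endian load at addr = 0xfff spans two
 * pages.  Then addr1 = 0xffc, addr2 = 0x1000 and shift = 3 * 8 = 24:
 * r1 >> 24 delivers the byte at 0xfff into bits 0..7, and r2 << 8
 * places the bytes at 0x1000..0x1002 into bits 8..31; the byte at
 * 0x1003 lands above bit 31 and is discarded by the final
 * MAKE_64BIT_MASK(0, 32).
 */
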
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}

/*
 * Provide signed versions of the load routines as well. We can of course
 * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}

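/*
 * Example of the widening contract (illustrative): on a 64-bit host,
 * helper_ret_ldsb_mmu() applied to the byte 0xff yields (int8_t)-1,
 * which converts to the tcg_target_ulong 0xffffffffffffffff, so TCG
 * can consume the sign-extended value directly from the return
 * register without a separate extension op.
 */
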
/*
 * Load helpers for cpu_ldst.h.
 */

static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       int mmu_idx, uintptr_t retaddr,
                                       MemOp op, FullLoadHelper *full_load)
{
    uint16_t meminfo;
    TCGMemOpIdx oi;
    uint64_t ret;

    meminfo = trace_mem_get_info(op, mmu_idx, false);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    op &= ~MO_SIGN;
    oi = make_memop_idx(op, mmu_idx);
    ret = full_load(env, addr, oi, retaddr);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);

    return ret;
}

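/*
 * A usage sketch (illustrative; "desc_addr" and "ptw_idx" are
 * hypothetical): a target helper reading a 32-bit little-endian
 * descriptor with an explicit mmu index, with any fault unwinding
 * to the helper's caller via GETPC():
 *
 *     uint32_t desc = cpu_ldl_le_mmuidx_ra(env, desc_addr, ptw_idx,
 *                                          GETPC());
 */
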
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
}

int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra)
{
    return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
                                   full_ldub_mmu);
}

uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
}

int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
                                    full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
}

uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
}

int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
                                    full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
}

uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
                          uintptr_t retaddr)
{
    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldub_data_ra(env, ptr, 0);
}

int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsb_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_be_data_ra(env, ptr, 0);
}

int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_be_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_be_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_be_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_le_data_ra(env, ptr, 0);
}

int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_le_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_le_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_le_data_ra(env, ptr, 0);
}

/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    TCGMemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract. */
            uint8_t val8 = val >> (i * 8);
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}

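/*
 * Worked example of the extract loops above (illustrative): storing
 * val = 0x11223344 with size = 4 in big-endian order writes 0x11
 * (val >> 24) at addr + 0, 0x22 at addr + 1, 0x33 at addr + 2 and
 * 0x44 at addr + 3; the little-endian loop emits the same bytes in
 * the opposite order, starting with 0x44 (val >> 0) at addr + 0.
 */
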
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores. */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access. */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM. */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages. */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

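/*
 * Note on the need_swap trick above (an example, not new behaviour):
 * for a 4-byte big-endian store (op = MO_BEUL) to a page marked
 * TLB_BSWAP, op ^ MO_BSWAP yields MO_LEUL, so the single store_memop()
 * call both byte-swaps and stores; no separate bswap pass is needed.
 * The same XOR is applied on the load side in load_helper().
 */
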
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                   TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */

static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                 int mmu_idx, uintptr_t retaddr, MemOp op)
{
    TCGMemOpIdx oi;
    uint16_t meminfo;

    meminfo = trace_mem_get_info(op, mmu_idx, true);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    oi = make_memop_idx(op, mmu_idx);
    store_helper(env, addr, val, oi, retaddr, op);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}

void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}

void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
}

void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
}

void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
}

void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
}

void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
}

void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
}

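/*
 * A usage sketch (illustrative; "addr", "mmu_idx" and "flag" are
 * hypothetical): a non-atomic read-modify-write of a 32-bit
 * little-endian location, where both accesses unwind to the helper's
 * caller on fault:
 *
 *     uint32_t v = cpu_ldl_le_mmuidx_ra(env, addr, mmu_idx, GETPC());
 *     cpu_stl_le_mmuidx_ra(env, addr, v | flag, mmu_idx, GETPC());
 */
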
void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}

void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_be_data_ra(env, ptr, val, 0);
}

void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_be_data_ra(env, ptr, val, 0);
}

void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_be_data_ra(env, ptr, val, 0);
}

void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_le_data_ra(env, ptr, val, 0);
}

void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_le_data_ra(env, ptr, val, 0);
}

void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_le_data_ra(env, ptr, val, 0);
}

/*
 * The first set of helpers allows passing in OI and RETADDR, which makes
 * them callable from other helpers.
 */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* The second set of helpers is directly callable from TCG. */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX

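/*
 * For illustration: with DATA_SIZE 4, the template instantiates helpers
 * such as helper_atomic_cmpxchgl_le_mmu() (first set, taking oi and
 * retaddr explicitly so other helpers can call it) and
 * helper_atomic_cmpxchgl_le() (second set, recovering the return
 * address itself via GETPC() for direct calls from TCG).
 */
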
/* Code access functions. */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
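
/*
 * A usage sketch (illustrative; "ctx->base.pc_next" mirrors the common
 * DisasContext convention but is an assumption here): a target's
 * translator fetching the next 32-bit instruction word through the
 * code-access path, using the mmu index from cpu_mmu_index(env, true):
 *
 *     uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
 */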