/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
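
/*
 * Illustrative use (the flush helpers below call it like this):
 *     tlb_debug("page addr:" TARGET_FMT_lx "\n", addr);
 * With both gates defined to 0 the call can be optimised away entirely,
 * while the format string and arguments remain compile-time checked.
 */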

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    int i, i0 = tb_jmp_cache_hash_page(page_addr);
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}
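
/*
 * Illustrative numbers for the heuristic above (not taken from a real trace):
 * with a 1024-entry TLB, a window peak of 800 used entries is a ~78% use
 * rate, so the next flush doubles the TLB to 2048 entries.  If instead a
 * 100ms window expires with a peak of only 150 used entries (~14%), the TLB
 * is shrunk to pow2ceil(150) == 256 entries (assuming the configured minimum
 * size is smaller than that).
 */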

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
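
/*
 * Encoding example for the helper above (TARGET_PAGE_BITS == 12 assumed
 * purely for illustration): flushing addr 0x40001000 for mmu_idx 0 and 1
 * (idxmap == 0x3) travels through async_run_on_cpu as target_ptr
 * 0x40001003, and the two masks split it back into page address and idxmap.
 */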

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
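
/*
 * Illustrative consequence of the scheme described above: once TLB_NOTDIRTY
 * is set in addr_write, the fast-path tag comparison in generated code no
 * longer matches even for the right address, so stores to that page fall
 * back to the slow path, which can then call notdirty_write() below.
 */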

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
                       target_ulong vaddr, CPUTLBEntryFull *full)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx, wp_flags, prot;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (full->lg_page_size <= TARGET_PAGE_BITS) {
        sz = TARGET_PAGE_SIZE;
    } else {
        sz = (hwaddr)1 << full->lg_page_size;
        tlb_add_large_page(env, mmu_idx, vaddr, sz);
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = full->phys_addr & TARGET_PAGE_MASK;

    prot = full->prot;
    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, full->attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, full->phys_addr, prot, mmu_idx);

    address = vaddr_page;
    if (full->lg_page_size < TARGET_PAGE_BITS) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (full->attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->vfulltlb[vidx] = desc->fulltlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->fulltlb[index] = *full;
    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
    desc->fulltlb[index].phys_addr = paddr_page;
    desc->fulltlb[index].prot = prot;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUTLBEntryFull full = {
        .phys_addr = paddr,
        .attrs = attrs,
        .prot = prot,
        .lg_page_size = ctz64(size)
    };

    assert(is_power_of_2(size));
    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
}

void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
                                    access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
                                          mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}

static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
    mr = section->mr;
    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, full->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

/*
 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
                            hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}

static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
    mr = section->mr;
    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, section, mr_offset);

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
            CPUTLBEntryFull tmpf;
            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)
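
/*
 * Illustrative use of the macro (the real callers appear later in this
 * file): a slow path that misses the main TLB first tries
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
 *     }
 * i.e. only perform a full page-table walk via tlb_fill() when the victim
 * TLB also misses.
 */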
7e9a7c50 1502
707526ad 1503static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
25d3ec58 1504 CPUTLBEntryFull *full, uintptr_t retaddr)
707526ad 1505{
25d3ec58 1506 ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
707526ad
RH
1507
1508 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1509
1510 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1511 struct page_collection *pages
1512 = page_collection_lock(ram_addr, ram_addr + size);
5a7c27bb 1513 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
707526ad
RH
1514 page_collection_unlock(pages);
1515 }
1516
1517 /*
1518 * Set both VGA and migration bits for simplicity and to remove
1519 * the notdirty callback faster.
1520 */
1521 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1522
1523 /* We remove the notdirty callback only if the code has been flushed. */
1524 if (!cpu_physical_memory_is_clean(ram_addr)) {
1525 trace_memory_notdirty_set_dirty(mem_vaddr);
1526 tlb_set_dirty(cpu, mem_vaddr);
1527 }
1528}
1529
069cfe77
RH
1530static int probe_access_internal(CPUArchState *env, target_ulong addr,
1531 int fault_size, MMUAccessType access_type,
1532 int mmu_idx, bool nonfault,
af803a4f
RH
1533 void **phost, CPUTLBEntryFull **pfull,
1534 uintptr_t retaddr)
3b08f0a9 1535{
383beda9
RH
1536 uintptr_t index = tlb_index(env, mmu_idx, addr);
1537 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
069cfe77 1538 target_ulong tlb_addr, page_addr;
c25c283d 1539 size_t elt_ofs;
069cfe77 1540 int flags;
ca86cf32 1541
c25c283d
DH
1542 switch (access_type) {
1543 case MMU_DATA_LOAD:
1544 elt_ofs = offsetof(CPUTLBEntry, addr_read);
c25c283d
DH
1545 break;
1546 case MMU_DATA_STORE:
1547 elt_ofs = offsetof(CPUTLBEntry, addr_write);
c25c283d
DH
1548 break;
1549 case MMU_INST_FETCH:
1550 elt_ofs = offsetof(CPUTLBEntry, addr_code);
c25c283d
DH
1551 break;
1552 default:
1553 g_assert_not_reached();
1554 }
1555 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1556
c3c8bf57 1557 flags = TLB_FLAGS_MASK;
069cfe77
RH
1558 page_addr = addr & TARGET_PAGE_MASK;
1559 if (!tlb_hit_page(tlb_addr, page_addr)) {
1560 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1561 CPUState *cs = env_cpu(env);
069cfe77 1562
8810ee2a
AB
1563 if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1564 mmu_idx, nonfault, retaddr)) {
069cfe77
RH
1565 /* Non-faulting page table read failed. */
1566 *phost = NULL;
af803a4f 1567 *pfull = NULL;
069cfe77
RH
1568 return TLB_INVALID_MASK;
1569 }
1570
1571 /* TLB resize via tlb_fill may have moved the entry. */
af803a4f 1572 index = tlb_index(env, mmu_idx, addr);
03a98189 1573 entry = tlb_entry(env, mmu_idx, addr);
c3c8bf57
RH
1574
1575 /*
1576 * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1577 * to force the next access through tlb_fill. We've just
1578 * called tlb_fill, so we know that this entry *is* valid.
1579 */
1580 flags &= ~TLB_INVALID_MASK;
3b08f0a9 1581 }
c25c283d 1582 tlb_addr = tlb_read_ofs(entry, elt_ofs);
03a98189 1583 }
c3c8bf57 1584 flags &= tlb_addr;
03a98189 1585
af803a4f
RH
1586 *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1587
069cfe77
RH
1588 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
1589 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1590 *phost = NULL;
1591 return TLB_MMIO;
fef39ccd
DH
1592 }
1593
069cfe77
RH
1594 /* Everything else is RAM. */
1595 *phost = (void *)((uintptr_t)addr + entry->addend);
1596 return flags;
1597}
1598
af803a4f
RH
1599int probe_access_full(CPUArchState *env, target_ulong addr,
1600 MMUAccessType access_type, int mmu_idx,
1601 bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1602 uintptr_t retaddr)
069cfe77 1603{
af803a4f
RH
1604 int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1605 nonfault, phost, pfull, retaddr);
069cfe77
RH
1606
1607 /* Handle clean RAM pages. */
1608 if (unlikely(flags & TLB_NOTDIRTY)) {
af803a4f 1609 notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
069cfe77
RH
1610 flags &= ~TLB_NOTDIRTY;
1611 }
1612
1613 return flags;
1614}
1615
af803a4f
RH
1616int probe_access_flags(CPUArchState *env, target_ulong addr,
1617 MMUAccessType access_type, int mmu_idx,
1618 bool nonfault, void **phost, uintptr_t retaddr)
1619{
1620 CPUTLBEntryFull *full;
1621
1622 return probe_access_full(env, addr, access_type, mmu_idx,
1623 nonfault, phost, &full, retaddr);
1624}
1625
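/*
 * Illustrative sketch, not part of cputlb.c: how a target helper might use
 * probe_access_flags() above to test a page without taking a fault.  The
 * helper name and the way the result is consumed are invented; the flag
 * meanings follow probe_access_internal() above.
 */
static bool toy_page_is_plain_ram(CPUArchState *env, target_ulong addr,
                                  int mmu_idx, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_STORE, mmu_idx,
                                   true /* nonfault */, &host, ra);

    /*
     * TLB_INVALID_MASK: the non-faulting page table read failed.
     * TLB_MMIO: the page is not backed by directly addressable RAM.
     */
    return !(flags & (TLB_INVALID_MASK | TLB_MMIO)) && host != NULL;
}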
069cfe77
RH
1626void *probe_access(CPUArchState *env, target_ulong addr, int size,
1627 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1628{
af803a4f 1629 CPUTLBEntryFull *full;
069cfe77
RH
1630 void *host;
1631 int flags;
1632
1633 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1634
1635 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
af803a4f 1636 false, &host, &full, retaddr);
069cfe77
RH
1637
1638 /* Per the interface, size == 0 merely faults the access. */
1639 if (size == 0) {
1640 return NULL;
1641 }
1642
1643 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
73bc0bd4 1644 /* Handle watchpoints. */
069cfe77
RH
1645 if (flags & TLB_WATCHPOINT) {
1646 int wp_access = (access_type == MMU_DATA_STORE
1647 ? BP_MEM_WRITE : BP_MEM_READ);
73bc0bd4 1648 cpu_check_watchpoint(env_cpu(env), addr, size,
25d3ec58 1649 full->attrs, wp_access, retaddr);
73bc0bd4
RH
1650 }
1651
1652 /* Handle clean RAM pages. */
069cfe77 1653 if (flags & TLB_NOTDIRTY) {
25d3ec58 1654 notdirty_write(env_cpu(env), addr, 1, full, retaddr);
73bc0bd4 1655 }
fef39ccd
DH
1656 }
1657
069cfe77 1658 return host;
3b08f0a9
RH
1659}
1660
4811e909
RH
1661void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1662 MMUAccessType access_type, int mmu_idx)
1663{
af803a4f 1664 CPUTLBEntryFull *full;
069cfe77
RH
1665 void *host;
1666 int flags;
4811e909 1667
069cfe77 1668 flags = probe_access_internal(env, addr, 0, access_type,
af803a4f 1669 mmu_idx, true, &host, &full, 0);
4811e909 1670
069cfe77
RH
1671 /* No combination of flags is expected by the caller. */
1672 return flags ? NULL : host;
4811e909
RH
1673}
1674
7e0d9973
RH
1675/*
1676 * Return a ram_addr_t for the virtual address for execution.
1677 *
1678 * Return -1 if we can't translate and execute from an entire page
1679 * of RAM. This will force us to execute by loading and translating
1680 * one insn at a time, without caching.
1681 *
1682 * NOTE: This function will trigger an exception if the page is
1683 * not executable.
1684 */
1685tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1686 void **hostp)
1687{
af803a4f 1688 CPUTLBEntryFull *full;
7e0d9973
RH
1689 void *p;
1690
1691 (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
af803a4f 1692 cpu_mmu_index(env, true), false, &p, &full, 0);
7e0d9973
RH
1693 if (p == NULL) {
1694 return -1;
1695 }
1696 if (hostp) {
1697 *hostp = p;
1698 }
1699 return qemu_ram_addr_from_host_nofail(p);
1700}
1701
235537fa
AB
1702#ifdef CONFIG_PLUGIN
1703/*
1704 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1705 * This should be a hot path as we will have just looked this address up
1706 * in the softmmu lookup code (or helper). We don't handle re-fills or
1707 * check the victim table. This is purely informational.
1708 *
2f3a57ee
AB
1709 * This almost never fails as the memory access being instrumented
1710 * should have just filled the TLB. The one corner case is io_writex
1711 * which can cause TLB flushes and potential resizing of the TLBs
570ef309 1712 * losing the information we need. In those cases we need to recover
25d3ec58 1713 * data from a copy of the CPUTLBEntryFull. As long as this always occurs
570ef309 1714 * from the same thread (which a mem callback will be) this is safe.
235537fa
AB
1715 */
1716
1717bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1718 bool is_store, struct qemu_plugin_hwaddr *data)
1719{
1720 CPUArchState *env = cpu->env_ptr;
1721 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1722 uintptr_t index = tlb_index(env, mmu_idx, addr);
1723 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1724
1725 if (likely(tlb_hit(tlb_addr, addr))) {
1726 /* We must have an iotlb entry for MMIO */
1727 if (tlb_addr & TLB_MMIO) {
25d3ec58
RH
1728 CPUTLBEntryFull *full;
1729 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
235537fa 1730 data->is_io = true;
25d3ec58
RH
1731 data->v.io.section =
1732 iotlb_to_section(cpu, full->xlat_section, full->attrs);
1733 data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
235537fa
AB
1734 } else {
1735 data->is_io = false;
2d932039 1736 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
235537fa
AB
1737 }
1738 return true;
2f3a57ee
AB
1739 } else {
1740 SavedIOTLB *saved = &cpu->saved_iotlb;
1741 data->is_io = true;
1742 data->v.io.section = saved->section;
1743 data->v.io.offset = saved->mr_offset;
1744 return true;
235537fa 1745 }
235537fa
AB
1746}
1747
1748#endif
1749
08dff435
RH
1750/*
1751 * Probe for an atomic operation. Do not allow unaligned operations
1752 * or I/O operations to proceed. Return the host address.
1753 *
1754 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
1755 */
c482cb11 1756static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
9002ffcb 1757 MemOpIdx oi, int size, int prot,
08dff435 1758 uintptr_t retaddr)
c482cb11 1759{
b826044f 1760 uintptr_t mmu_idx = get_mmuidx(oi);
14776ab5 1761 MemOp mop = get_memop(oi);
c482cb11 1762 int a_bits = get_alignment_bits(mop);
08dff435
RH
1763 uintptr_t index;
1764 CPUTLBEntry *tlbe;
1765 target_ulong tlb_addr;
34d49937 1766 void *hostaddr;
c482cb11 1767
b826044f
RH
1768 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1769
c482cb11
RH
1770 /* Adjust the given return address. */
1771 retaddr -= GETPC_ADJ;
1772
1773 /* Enforce guest required alignment. */
1774 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1775 /* ??? Maybe indicate atomic op to cpu_unaligned_access */
29a0af61 1776 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
c482cb11
RH
1777 mmu_idx, retaddr);
1778 }
1779
1780 /* Enforce qemu required alignment. */
08dff435 1781 if (unlikely(addr & (size - 1))) {
c482cb11
RH
1782 /* We get here if guest alignment was not requested,
1783 or was not enforced by cpu_unaligned_access above.
1784 We might widen the access and emulate, but for now
1785 mark an exception and exit the cpu loop. */
1786 goto stop_the_world;
1787 }
1788
08dff435
RH
1789 index = tlb_index(env, mmu_idx, addr);
1790 tlbe = tlb_entry(env, mmu_idx, addr);
1791
c482cb11 1792 /* Check TLB entry and enforce page permissions. */
08dff435
RH
1793 if (prot & PAGE_WRITE) {
1794 tlb_addr = tlb_addr_write(tlbe);
1795 if (!tlb_hit(tlb_addr, addr)) {
1796 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1797 tlb_fill(env_cpu(env), addr, size,
1798 MMU_DATA_STORE, mmu_idx, retaddr);
1799 index = tlb_index(env, mmu_idx, addr);
1800 tlbe = tlb_entry(env, mmu_idx, addr);
1801 }
1802 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1803 }
1804
1805 /* Let the guest notice RMW on a write-only page. */
1806 if ((prot & PAGE_READ) &&
1807 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1808 tlb_fill(env_cpu(env), addr, size,
1809 MMU_DATA_LOAD, mmu_idx, retaddr);
1810 /*
1811 * Since we don't support reads and writes to different addresses,
1812 * and we do have the proper page loaded for write, this shouldn't
1813 * ever return. But just in case, handle via stop-the-world.
1814 */
1815 goto stop_the_world;
1816 }
1817 } else /* if (prot & PAGE_READ) */ {
1818 tlb_addr = tlbe->addr_read;
1819 if (!tlb_hit(tlb_addr, addr)) {
1820 if (!VICTIM_TLB_HIT(addr_read, addr)) {
1821 tlb_fill(env_cpu(env), addr, size,
1822 MMU_DATA_LOAD, mmu_idx, retaddr);
1823 index = tlb_index(env, mmu_idx, addr);
1824 tlbe = tlb_entry(env, mmu_idx, addr);
1825 }
1826 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
c482cb11 1827 }
c482cb11
RH
1828 }
1829
55df6fcf 1830 /* Notice an IO access or a needs-MMU-lookup access */
30d7e098 1831 if (unlikely(tlb_addr & TLB_MMIO)) {
c482cb11
RH
1832 /* There's really nothing that can be done to
1833 support this apart from stop-the-world. */
1834 goto stop_the_world;
1835 }
1836
34d49937
PM
1837 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1838
34d49937 1839 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
08dff435 1840 notdirty_write(env_cpu(env), addr, size,
25d3ec58 1841 &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
34d49937
PM
1842 }
1843
1844 return hostaddr;
c482cb11
RH
1845
1846 stop_the_world:
29a0af61 1847 cpu_loop_exit_atomic(env_cpu(env), retaddr);
c482cb11
RH
1848}
1849
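/*
 * Illustrative sketch, not part of cputlb.c: roughly what one expansion of
 * atomic_template.h (included near the end of this file) does with
 * atomic_mmu_lookup() above.  The real template also issues plugin callbacks
 * and provides non-atomic fallbacks; this simplified 4-byte compare-and-swap
 * using the GCC __atomic builtin is only a model, and the "toy_" name is
 * invented.
 */
static uint32_t toy_atomic_cmpxchgl(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
                                        PAGE_READ | PAGE_WRITE, retaddr);
    uint32_t ret = cmpv;

    /* Operate on the host address; ret ends up holding the old value. */
    __atomic_compare_exchange_n(haddr, &ret, newv, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return ret;
}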
f83bcecb
RH
1850/*
1851 * Verify that we have passed the correct MemOp to the correct function.
1852 *
1853 * In the case of the helper_*_mmu functions, we will have done this by
1854 * using the MemOp to look up the helper during code generation.
1855 *
1856 * In the case of the cpu_*_mmu functions, this is up to the caller.
1857 * We could present one function to target code, and dispatch based on
1858 * the MemOp, but so far we have worked hard to avoid an indirect function
1859 * call along the memory path.
1860 */
1861static void validate_memop(MemOpIdx oi, MemOp expected)
1862{
1863#ifdef CONFIG_DEBUG_TCG
1864 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1865 assert(have == expected);
1866#endif
1867}
1868
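/*
 * Illustrative sketch, not part of cputlb.c: the MemOpIdx validated above
 * packs a MemOp and an mmu_idx into one value (see include/exec/memopidx.h).
 * A minimal model of that packing, assuming the mmu_idx fits in 4 bits;
 * the "toy_" names are invented.
 */
static inline MemOpIdx toy_make_memop_idx(MemOp op, unsigned mmu_idx)
{
    return (op << 4) | mmu_idx;         /* low 4 bits: mmu_idx */
}

static inline MemOp toy_get_memop(MemOpIdx oi)
{
    return oi >> 4;                     /* remaining bits: MemOp */
}

static inline unsigned toy_get_mmuidx(MemOpIdx oi)
{
    return oi & 15;
}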
eed56642
AB
1869/*
1870 * Load Helpers
1871 *
1872 * We support two different access types. SOFTMMU_CODE_ACCESS is
1873 * specifically for reading instructions from system memory. It is
1874 * called by the translation loop and in some helpers where the code
1875 * is disassembled. It shouldn't be called directly by guest code.
1876 */
0f590e74 1877
2dd92606 1878typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
9002ffcb 1879 MemOpIdx oi, uintptr_t retaddr);
2dd92606 1880
80d9d1c6
RH
1881static inline uint64_t QEMU_ALWAYS_INLINE
1882load_memop(const void *haddr, MemOp op)
1883{
1884 switch (op) {
1885 case MO_UB:
1886 return ldub_p(haddr);
1887 case MO_BEUW:
1888 return lduw_be_p(haddr);
1889 case MO_LEUW:
1890 return lduw_le_p(haddr);
1891 case MO_BEUL:
1892 return (uint32_t)ldl_be_p(haddr);
1893 case MO_LEUL:
1894 return (uint32_t)ldl_le_p(haddr);
fc313c64 1895 case MO_BEUQ:
80d9d1c6 1896 return ldq_be_p(haddr);
fc313c64 1897 case MO_LEUQ:
80d9d1c6
RH
1898 return ldq_le_p(haddr);
1899 default:
1900 qemu_build_not_reached();
1901 }
1902}
1903
c6b716cd 1904static inline uint64_t QEMU_ALWAYS_INLINE
9002ffcb 1905load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
be5c4787 1906 uintptr_t retaddr, MemOp op, bool code_read,
2dd92606 1907 FullLoadHelper *full_load)
eed56642 1908{
eed56642
AB
1909 const size_t tlb_off = code_read ?
1910 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
f1be3696
RH
1911 const MMUAccessType access_type =
1912 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
b826044f
RH
1913 const unsigned a_bits = get_alignment_bits(get_memop(oi));
1914 const size_t size = memop_size(op);
1915 uintptr_t mmu_idx = get_mmuidx(oi);
1916 uintptr_t index;
1917 CPUTLBEntry *entry;
1918 target_ulong tlb_addr;
eed56642
AB
1919 void *haddr;
1920 uint64_t res;
b826044f
RH
1921
1922 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
eed56642
AB
1923
1924 /* Handle CPU specific unaligned behaviour */
1925 if (addr & ((1 << a_bits) - 1)) {
29a0af61 1926 cpu_unaligned_access(env_cpu(env), addr, access_type,
eed56642
AB
1927 mmu_idx, retaddr);
1928 }
0f590e74 1929
b826044f
RH
1930 index = tlb_index(env, mmu_idx, addr);
1931 entry = tlb_entry(env, mmu_idx, addr);
1932 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1933
eed56642
AB
1934 /* If the TLB entry is for a different page, reload and try again. */
1935 if (!tlb_hit(tlb_addr, addr)) {
1936 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1937 addr & TARGET_PAGE_MASK)) {
29a0af61 1938 tlb_fill(env_cpu(env), addr, size,
f1be3696 1939 access_type, mmu_idx, retaddr);
eed56642
AB
1940 index = tlb_index(env, mmu_idx, addr);
1941 entry = tlb_entry(env, mmu_idx, addr);
1942 }
1943 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
30d7e098 1944 tlb_addr &= ~TLB_INVALID_MASK;
eed56642
AB
1945 }
1946
50b107c5 1947 /* Handle anything that isn't just a straight memory access. */
eed56642 1948 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
25d3ec58 1949 CPUTLBEntryFull *full;
5b87b3e6 1950 bool need_swap;
50b107c5
RH
1951
1952 /* For anything that is unaligned, recurse through full_load. */
eed56642
AB
1953 if ((addr & (size - 1)) != 0) {
1954 goto do_unaligned_access;
1955 }
50b107c5 1956
25d3ec58 1957 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
50b107c5
RH
1958
1959 /* Handle watchpoints. */
1960 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1961 /* On watchpoint hit, this will longjmp out. */
1962 cpu_check_watchpoint(env_cpu(env), addr, size,
25d3ec58 1963 full->attrs, BP_MEM_READ, retaddr);
50b107c5
RH
1964 }
1965
5b87b3e6
RH
1966 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1967
50b107c5 1968 /* Handle I/O access. */
5b87b3e6 1969 if (likely(tlb_addr & TLB_MMIO)) {
25d3ec58 1970 return io_readx(env, full, mmu_idx, addr, retaddr,
5b87b3e6
RH
1971 access_type, op ^ (need_swap * MO_BSWAP));
1972 }
1973
1974 haddr = (void *)((uintptr_t)addr + entry->addend);
1975
1976 /*
1977 * Keep these two load_memop separate to ensure that the compiler
1978 * is able to fold the entire function to a single instruction.
1979 * There is a build-time assert inside to remind you of this. ;-)
1980 */
1981 if (unlikely(need_swap)) {
1982 return load_memop(haddr, op ^ MO_BSWAP);
1983 }
1984 return load_memop(haddr, op);
eed56642
AB
1985 }
1986
1987 /* Handle slow unaligned access (it spans two pages or IO). */
1988 if (size > 1
1989 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1990 >= TARGET_PAGE_SIZE)) {
1991 target_ulong addr1, addr2;
8c79b288 1992 uint64_t r1, r2;
eed56642
AB
1993 unsigned shift;
1994 do_unaligned_access:
ab7a2009 1995 addr1 = addr & ~((target_ulong)size - 1);
eed56642 1996 addr2 = addr1 + size;
2dd92606
RH
1997 r1 = full_load(env, addr1, oi, retaddr);
1998 r2 = full_load(env, addr2, oi, retaddr);
eed56642
AB
1999 shift = (addr & (size - 1)) * 8;
2000
be5c4787 2001 if (memop_big_endian(op)) {
eed56642
AB
2002 /* Big-endian combine. */
2003 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2004 } else {
2005 /* Little-endian combine. */
2006 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2007 }
2008 return res & MAKE_64BIT_MASK(0, size * 8);
2009 }
2010
2011 haddr = (void *)((uintptr_t)addr + entry->addend);
80d9d1c6 2012 return load_memop(haddr, op);
eed56642
AB
2013}
2014
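/*
 * Illustrative sketch, not part of cputlb.c: the two-page combine used by
 * the slow path of load_helper() above, worked through for one concrete
 * little-endian case.  Standalone code; the "toy_" names are invented.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t toy_combine_le(uint64_t r1, uint64_t r2,
                               unsigned size, unsigned misalign)
{
    unsigned shift = misalign * 8;

    /* Mirrors load_helper(): only reached with a misaligned address. */
    assert(misalign != 0 && misalign < size);

    uint64_t res = (r1 >> shift) | (r2 << ((size * 8) - shift));
    return res & (size == 8 ? ~(uint64_t)0 : ((uint64_t)1 << (size * 8)) - 1);
}

int main(void)
{
    /*
     * A 4-byte LE load at addr1 + 2, where addr1 is the last aligned word
     * of page A.  Memory at addr1 + 2 holds the bytes 11 22 33 44; the two
     * bytes below addr1 + 2 are taken as 00 here.
     */
    uint64_t r1 = 0x22110000;   /* LE load of the word at addr1 (page A) */
    uint64_t r2 = 0x00004433;   /* LE load of the word at addr1 + 4 (page B) */

    assert(toy_combine_le(r1, r2, 4, 2) == 0x44332211);
    return 0;
}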
2015/*
2016 * For the benefit of TCG generated code, we want to avoid the
2017 * complication of ABI-specific return type promotion and always
2018 * return a value extended to the register size of the host. This is
2019 * tcg_target_long, except in the case of a 32-bit host and 64-bit
2020 * data, and for that we always have uint64_t.
2021 *
2022 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2023 */
2024
2dd92606 2025static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2026 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2027{
f83bcecb 2028 validate_memop(oi, MO_UB);
be5c4787 2029 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
2dd92606
RH
2030}
2031
fc1bc777 2032tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2033 MemOpIdx oi, uintptr_t retaddr)
eed56642 2034{
2dd92606
RH
2035 return full_ldub_mmu(env, addr, oi, retaddr);
2036}
2037
2038static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2039 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2040{
f83bcecb 2041 validate_memop(oi, MO_LEUW);
be5c4787 2042 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2dd92606 2043 full_le_lduw_mmu);
eed56642
AB
2044}
2045
fc1bc777 2046tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2047 MemOpIdx oi, uintptr_t retaddr)
eed56642 2048{
2dd92606
RH
2049 return full_le_lduw_mmu(env, addr, oi, retaddr);
2050}
2051
2052static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2053 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2054{
f83bcecb 2055 validate_memop(oi, MO_BEUW);
be5c4787 2056 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2dd92606 2057 full_be_lduw_mmu);
eed56642
AB
2058}
2059
fc1bc777 2060tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2061 MemOpIdx oi, uintptr_t retaddr)
eed56642 2062{
2dd92606
RH
2063 return full_be_lduw_mmu(env, addr, oi, retaddr);
2064}
2065
2066static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2067 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2068{
f83bcecb 2069 validate_memop(oi, MO_LEUL);
be5c4787 2070 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2dd92606 2071 full_le_ldul_mmu);
eed56642
AB
2072}
2073
fc1bc777 2074tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2075 MemOpIdx oi, uintptr_t retaddr)
eed56642 2076{
2dd92606
RH
2077 return full_le_ldul_mmu(env, addr, oi, retaddr);
2078}
2079
2080static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2081 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2082{
f83bcecb 2083 validate_memop(oi, MO_BEUL);
be5c4787 2084 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2dd92606 2085 full_be_ldul_mmu);
eed56642
AB
2086}
2087
fc1bc777 2088tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2089 MemOpIdx oi, uintptr_t retaddr)
eed56642 2090{
2dd92606 2091 return full_be_ldul_mmu(env, addr, oi, retaddr);
eed56642
AB
2092}
2093
fc1bc777 2094uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2095 MemOpIdx oi, uintptr_t retaddr)
eed56642 2096{
fc313c64
FP
2097 validate_memop(oi, MO_LEUQ);
2098 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
2dd92606 2099 helper_le_ldq_mmu);
eed56642
AB
2100}
2101
fc1bc777 2102uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2103 MemOpIdx oi, uintptr_t retaddr)
eed56642 2104{
fc313c64
FP
2105 validate_memop(oi, MO_BEUQ);
2106 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
2dd92606 2107 helper_be_ldq_mmu);
eed56642
AB
2108}
2109
2110/*
2111 * Provide signed versions of the load routines as well. We can of course
2112 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2113 */
2114
2115
2116tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2117 MemOpIdx oi, uintptr_t retaddr)
eed56642
AB
2118{
2119 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2120}
2121
2122tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2123 MemOpIdx oi, uintptr_t retaddr)
eed56642
AB
2124{
2125 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2126}
2127
2128tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2129 MemOpIdx oi, uintptr_t retaddr)
eed56642
AB
2130{
2131 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2132}
2133
2134tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2135 MemOpIdx oi, uintptr_t retaddr)
eed56642
AB
2136{
2137 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2138}
2139
2140tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
9002ffcb 2141 MemOpIdx oi, uintptr_t retaddr)
eed56642
AB
2142{
2143 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2144}
2145
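/*
 * Illustrative sketch, not part of cputlb.c: the signed load helpers above
 * rely on ordinary C conversions to sign-extend into the unsigned,
 * register-sized return type.  Two's-complement narrowing is assumed here,
 * as QEMU itself assumes; the "toy_" name is invented.
 */
#include <assert.h>
#include <stdint.h>

static void toy_sign_extend_demo(void)
{
    uint64_t loaded  = 0x80;                     /* byte, zero-extended     */
    uint64_t widened = (uint64_t)(int8_t)loaded; /* cast as in ldsb helpers */

    assert(widened == 0xffffffffffffff80ull);
}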
d03f1408
RH
2146/*
2147 * Load helpers for cpu_ldst.h.
2148 */
2149
2150static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
f83bcecb
RH
2151 MemOpIdx oi, uintptr_t retaddr,
2152 FullLoadHelper *full_load)
d03f1408 2153{
d03f1408
RH
2154 uint64_t ret;
2155
d03f1408 2156 ret = full_load(env, addr, oi, retaddr);
37aff087 2157 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
d03f1408
RH
2158 return ret;
2159}
2160
f83bcecb 2161uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
d03f1408 2162{
f83bcecb 2163 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
d03f1408
RH
2164}
2165
f83bcecb
RH
2166uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2167 MemOpIdx oi, uintptr_t ra)
cfe04a4b 2168{
f83bcecb 2169 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
cfe04a4b
RH
2170}
2171
f83bcecb
RH
2172uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2173 MemOpIdx oi, uintptr_t ra)
cfe04a4b 2174{
f83bcecb 2175 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
cfe04a4b
RH
2176}
2177
f83bcecb
RH
2178uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2179 MemOpIdx oi, uintptr_t ra)
b9e60257 2180{
46697cb9 2181 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
b9e60257
RH
2182}
2183
f83bcecb
RH
2184uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2185 MemOpIdx oi, uintptr_t ra)
cfe04a4b 2186{
f83bcecb 2187 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
cfe04a4b
RH
2188}
2189
f83bcecb
RH
2190uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2191 MemOpIdx oi, uintptr_t ra)
cfe04a4b 2192{
f83bcecb 2193 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
cfe04a4b
RH
2194}
2195
f83bcecb
RH
2196uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2197 MemOpIdx oi, uintptr_t ra)
cfe04a4b 2198{
f83bcecb 2199 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
cfe04a4b
RH
2200}
2201
eed56642
AB
2202/*
2203 * Store Helpers
2204 */
2205
80d9d1c6
RH
2206static inline void QEMU_ALWAYS_INLINE
2207store_memop(void *haddr, uint64_t val, MemOp op)
2208{
2209 switch (op) {
2210 case MO_UB:
2211 stb_p(haddr, val);
2212 break;
2213 case MO_BEUW:
2214 stw_be_p(haddr, val);
2215 break;
2216 case MO_LEUW:
2217 stw_le_p(haddr, val);
2218 break;
2219 case MO_BEUL:
2220 stl_be_p(haddr, val);
2221 break;
2222 case MO_LEUL:
2223 stl_le_p(haddr, val);
2224 break;
fc313c64 2225 case MO_BEUQ:
80d9d1c6
RH
2226 stq_be_p(haddr, val);
2227 break;
fc313c64 2228 case MO_LEUQ:
80d9d1c6
RH
2229 stq_le_p(haddr, val);
2230 break;
2231 default:
2232 qemu_build_not_reached();
2233 }
2234}
2235
f83bcecb
RH
2236static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2237 MemOpIdx oi, uintptr_t retaddr);
2238
6b8b622e
RH
2239static void __attribute__((noinline))
2240store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
2241 uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
2242 bool big_endian)
2243{
2244 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2245 uintptr_t index, index2;
2246 CPUTLBEntry *entry, *entry2;
b0f650f0 2247 target_ulong page1, page2, tlb_addr, tlb_addr2;
9002ffcb 2248 MemOpIdx oi;
6b8b622e
RH
2249 size_t size2;
2250 int i;
2251
2252 /*
2253 * Ensure the second page is in the TLB. Note that the first page
2254 * is already guaranteed to be filled, and that the second page
b0f650f0
IL
2255 * cannot evict the first. An exception to this rule is PAGE_WRITE_INV
2256 * handling: the first page could have evicted itself.
6b8b622e 2257 */
b0f650f0 2258 page1 = addr & TARGET_PAGE_MASK;
6b8b622e
RH
2259 page2 = (addr + size) & TARGET_PAGE_MASK;
2260 size2 = (addr + size) & ~TARGET_PAGE_MASK;
2261 index2 = tlb_index(env, mmu_idx, page2);
2262 entry2 = tlb_entry(env, mmu_idx, page2);
2263
2264 tlb_addr2 = tlb_addr_write(entry2);
b0f650f0 2265 if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
6b8b622e
RH
2266 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
2267 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2268 mmu_idx, retaddr);
2269 index2 = tlb_index(env, mmu_idx, page2);
2270 entry2 = tlb_entry(env, mmu_idx, page2);
2271 }
2272 tlb_addr2 = tlb_addr_write(entry2);
2273 }
2274
2275 index = tlb_index(env, mmu_idx, addr);
2276 entry = tlb_entry(env, mmu_idx, addr);
2277 tlb_addr = tlb_addr_write(entry);
2278
2279 /*
2280 * Handle watchpoints. Since this may trap, all checks
2281 * must happen before any store.
2282 */
2283 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2284 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
25d3ec58 2285 env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
6b8b622e
RH
2286 BP_MEM_WRITE, retaddr);
2287 }
2288 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
2289 cpu_check_watchpoint(env_cpu(env), page2, size2,
25d3ec58 2290 env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
6b8b622e
RH
2291 BP_MEM_WRITE, retaddr);
2292 }
2293
2294 /*
2295 * XXX: not efficient, but simple.
2296 * This loop must go in the forward direction to avoid issues
2297 * with self-modifying code in Windows 64-bit.
2298 */
2299 oi = make_memop_idx(MO_UB, mmu_idx);
2300 if (big_endian) {
2301 for (i = 0; i < size; ++i) {
2302 /* Big-endian extract. */
2303 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
f83bcecb 2304 full_stb_mmu(env, addr + i, val8, oi, retaddr);
6b8b622e
RH
2305 }
2306 } else {
2307 for (i = 0; i < size; ++i) {
2308 /* Little-endian extract. */
2309 uint8_t val8 = val >> (i * 8);
f83bcecb 2310 full_stb_mmu(env, addr + i, val8, oi, retaddr);
6b8b622e
RH
2311 }
2312 }
2313}
2314
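/*
 * Illustrative sketch, not part of cputlb.c: the per-byte extraction used by
 * the unaligned store loop above, checked for one concrete 4-byte value.
 * Standalone code; the "toy_" names are invented.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t toy_extract_byte(uint64_t val, unsigned size, unsigned i,
                                int big_endian)
{
    return big_endian ? val >> (((size - 1) * 8) - (i * 8))  /* MSB first */
                      : val >> (i * 8);                      /* LSB first */
}

static void toy_extract_demo(void)
{
    /* Storing 0x11223344 as four single-byte stores, as the loop above does. */
    assert(toy_extract_byte(0x11223344, 4, 0, 1) == 0x11);   /* big-endian    */
    assert(toy_extract_byte(0x11223344, 4, 3, 1) == 0x44);
    assert(toy_extract_byte(0x11223344, 4, 0, 0) == 0x44);   /* little-endian */
    assert(toy_extract_byte(0x11223344, 4, 3, 0) == 0x11);
}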
c6b716cd 2315static inline void QEMU_ALWAYS_INLINE
4601f8d1 2316store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
9002ffcb 2317 MemOpIdx oi, uintptr_t retaddr, MemOp op)
eed56642 2318{
eed56642 2319 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
b826044f
RH
2320 const unsigned a_bits = get_alignment_bits(get_memop(oi));
2321 const size_t size = memop_size(op);
2322 uintptr_t mmu_idx = get_mmuidx(oi);
2323 uintptr_t index;
2324 CPUTLBEntry *entry;
2325 target_ulong tlb_addr;
eed56642 2326 void *haddr;
b826044f
RH
2327
2328 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
eed56642
AB
2329
2330 /* Handle CPU specific unaligned behaviour */
2331 if (addr & ((1 << a_bits) - 1)) {
29a0af61 2332 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
eed56642
AB
2333 mmu_idx, retaddr);
2334 }
2335
b826044f
RH
2336 index = tlb_index(env, mmu_idx, addr);
2337 entry = tlb_entry(env, mmu_idx, addr);
2338 tlb_addr = tlb_addr_write(entry);
2339
eed56642
AB
2340 /* If the TLB entry is for a different page, reload and try again. */
2341 if (!tlb_hit(tlb_addr, addr)) {
2342 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2343 addr & TARGET_PAGE_MASK)) {
29a0af61 2344 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
eed56642
AB
2345 mmu_idx, retaddr);
2346 index = tlb_index(env, mmu_idx, addr);
2347 entry = tlb_entry(env, mmu_idx, addr);
2348 }
2349 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2350 }
2351
50b107c5 2352 /* Handle anything that isn't just a straight memory access. */
eed56642 2353 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
25d3ec58 2354 CPUTLBEntryFull *full;
5b87b3e6 2355 bool need_swap;
50b107c5
RH
2356
2357 /* For anything that is unaligned, recurse through byte stores. */
eed56642
AB
2358 if ((addr & (size - 1)) != 0) {
2359 goto do_unaligned_access;
2360 }
50b107c5 2361
25d3ec58 2362 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
50b107c5
RH
2363
2364 /* Handle watchpoints. */
2365 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2366 /* On watchpoint hit, this will longjmp out. */
2367 cpu_check_watchpoint(env_cpu(env), addr, size,
25d3ec58 2368 full->attrs, BP_MEM_WRITE, retaddr);
50b107c5
RH
2369 }
2370
5b87b3e6
RH
2371 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
2372
50b107c5 2373 /* Handle I/O access. */
08565552 2374 if (tlb_addr & TLB_MMIO) {
25d3ec58 2375 io_writex(env, full, mmu_idx, val, addr, retaddr,
5b87b3e6
RH
2376 op ^ (need_swap * MO_BSWAP));
2377 return;
2378 }
2379
7b0d792c
RH
2380 /* Ignore writes to ROM. */
2381 if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
2382 return;
2383 }
2384
08565552
RH
2385 /* Handle clean RAM pages. */
2386 if (tlb_addr & TLB_NOTDIRTY) {
25d3ec58 2387 notdirty_write(env_cpu(env), addr, size, full, retaddr);
08565552
RH
2388 }
2389
707526ad
RH
2390 haddr = (void *)((uintptr_t)addr + entry->addend);
2391
5b87b3e6
RH
2392 /*
2393 * Keep these two store_memop separate to ensure that the compiler
2394 * is able to fold the entire function to a single instruction.
2395 * There is a build-time assert inside to remind you of this. ;-)
2396 */
2397 if (unlikely(need_swap)) {
2398 store_memop(haddr, val, op ^ MO_BSWAP);
2399 } else {
2400 store_memop(haddr, val, op);
2401 }
eed56642
AB
2402 return;
2403 }
2404
2405 /* Handle slow unaligned access (it spans two pages or IO). */
2406 if (size > 1
2407 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
2408 >= TARGET_PAGE_SIZE)) {
eed56642 2409 do_unaligned_access:
6b8b622e
RH
2410 store_helper_unaligned(env, addr, val, retaddr, size,
2411 mmu_idx, memop_big_endian(op));
eed56642
AB
2412 return;
2413 }
2414
2415 haddr = (void *)((uintptr_t)addr + entry->addend);
80d9d1c6 2416 store_memop(haddr, val, op);
eed56642
AB
2417}
2418
f83bcecb
RH
2419static void __attribute__((noinline))
2420full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2421 MemOpIdx oi, uintptr_t retaddr)
eed56642 2422{
f83bcecb 2423 validate_memop(oi, MO_UB);
be5c4787 2424 store_helper(env, addr, val, oi, retaddr, MO_UB);
eed56642
AB
2425}
2426
f83bcecb
RH
2427void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2428 MemOpIdx oi, uintptr_t retaddr)
2429{
2430 full_stb_mmu(env, addr, val, oi, retaddr);
2431}
2432
2433static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2434 MemOpIdx oi, uintptr_t retaddr)
2435{
2436 validate_memop(oi, MO_LEUW);
2437 store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2438}
2439
fc1bc777 2440void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
9002ffcb 2441 MemOpIdx oi, uintptr_t retaddr)
eed56642 2442{
f83bcecb
RH
2443 full_le_stw_mmu(env, addr, val, oi, retaddr);
2444}
2445
2446static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2447 MemOpIdx oi, uintptr_t retaddr)
2448{
2449 validate_memop(oi, MO_BEUW);
2450 store_helper(env, addr, val, oi, retaddr, MO_BEUW);
eed56642
AB
2451}
2452
fc1bc777 2453void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
9002ffcb 2454 MemOpIdx oi, uintptr_t retaddr)
eed56642 2455{
f83bcecb
RH
2456 full_be_stw_mmu(env, addr, val, oi, retaddr);
2457}
2458
2459static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2460 MemOpIdx oi, uintptr_t retaddr)
2461{
2462 validate_memop(oi, MO_LEUL);
2463 store_helper(env, addr, val, oi, retaddr, MO_LEUL);
eed56642
AB
2464}
2465
fc1bc777 2466void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
9002ffcb 2467 MemOpIdx oi, uintptr_t retaddr)
eed56642 2468{
f83bcecb
RH
2469 full_le_stl_mmu(env, addr, val, oi, retaddr);
2470}
2471
2472static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2473 MemOpIdx oi, uintptr_t retaddr)
2474{
2475 validate_memop(oi, MO_BEUL);
2476 store_helper(env, addr, val, oi, retaddr, MO_BEUL);
eed56642
AB
2477}
2478
fc1bc777 2479void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
9002ffcb 2480 MemOpIdx oi, uintptr_t retaddr)
eed56642 2481{
f83bcecb 2482 full_be_stl_mmu(env, addr, val, oi, retaddr);
eed56642
AB
2483}
2484
fc1bc777 2485void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
9002ffcb 2486 MemOpIdx oi, uintptr_t retaddr)
eed56642 2487{
fc313c64
FP
2488 validate_memop(oi, MO_LEUQ);
2489 store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
eed56642
AB
2490}
2491
fc1bc777 2492void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
9002ffcb 2493 MemOpIdx oi, uintptr_t retaddr)
eed56642 2494{
fc313c64
FP
2495 validate_memop(oi, MO_BEUQ);
2496 store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
eed56642 2497}
0f590e74 2498
d03f1408
RH
2499/*
2500 * Store Helpers for cpu_ldst.h
2501 */
2502
f83bcecb
RH
2503typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
2504 uint64_t val, MemOpIdx oi, uintptr_t retaddr);
d03f1408 2505
f83bcecb
RH
2506static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
2507 uint64_t val, MemOpIdx oi, uintptr_t ra,
2508 FullStoreHelper *full_store)
2509{
f83bcecb 2510 full_store(env, addr, val, oi, ra);
37aff087 2511 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
d03f1408
RH
2512}
2513
f83bcecb
RH
2514void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2515 MemOpIdx oi, uintptr_t retaddr)
d03f1408 2516{
f83bcecb 2517 cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
d03f1408
RH
2518}
2519
f83bcecb
RH
2520void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2521 MemOpIdx oi, uintptr_t retaddr)
d03f1408 2522{
f83bcecb 2523 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
d03f1408
RH
2524}
2525
f83bcecb
RH
2526void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2527 MemOpIdx oi, uintptr_t retaddr)
d03f1408 2528{
f83bcecb 2529 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
d03f1408
RH
2530}
2531
f83bcecb
RH
2532void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2533 MemOpIdx oi, uintptr_t retaddr)
b9e60257 2534{
f83bcecb 2535 cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
b9e60257
RH
2536}
2537
f83bcecb
RH
2538void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2539 MemOpIdx oi, uintptr_t retaddr)
d03f1408 2540{
f83bcecb 2541 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
b9e60257
RH
2542}
2543
f83bcecb
RH
2544void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2545 MemOpIdx oi, uintptr_t retaddr)
b9e60257 2546{
f83bcecb 2547 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
b9e60257
RH
2548}
2549
f83bcecb
RH
2550void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2551 MemOpIdx oi, uintptr_t retaddr)
b9e60257 2552{
f83bcecb 2553 cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
d03f1408
RH
2554}
2555
f83bcecb 2556#include "ldst_common.c.inc"
cfe04a4b 2557
be9568b4
RH
2558/*
2559 * First set of functions passes in OI and RETADDR.
2560 * This makes them callable from other helpers.
2561 */
c482cb11 2562
c482cb11 2563#define ATOMIC_NAME(X) \
be9568b4 2564 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
a754f7f3 2565
707526ad 2566#define ATOMIC_MMU_CLEANUP
c482cb11 2567
139c1837 2568#include "atomic_common.c.inc"
c482cb11
RH
2569
2570#define DATA_SIZE 1
2571#include "atomic_template.h"
2572
2573#define DATA_SIZE 2
2574#include "atomic_template.h"
2575
2576#define DATA_SIZE 4
2577#include "atomic_template.h"
2578
df79b996 2579#ifdef CONFIG_ATOMIC64
c482cb11
RH
2580#define DATA_SIZE 8
2581#include "atomic_template.h"
df79b996 2582#endif
c482cb11 2583
e6cd4bb5 2584#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
7ebee43e
RH
2585#define DATA_SIZE 16
2586#include "atomic_template.h"
2587#endif
2588
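/*
 * Illustrative sketch, not part of cputlb.c: atomic_template.h above is
 * included once per DATA_SIZE and #undefs DATA_SIZE when it is done, so each
 * inclusion stamps out one family of helpers.  The same "one definition per
 * width" effect can be modelled with a generator macro; the "TOY_"/"toy_"
 * names are invented and the mechanism (a macro instead of repeated
 * #include) is deliberately simplified.
 */
#define TOY_DEFINE_HELPER(BITS)                                 \
    static inline unsigned toy_width##BITS(void)                \
    {                                                           \
        return BITS;        /* width in bits for this helper */ \
    }

TOY_DEFINE_HELPER(8)    /* like "#define DATA_SIZE 1" + "#include ..." */
TOY_DEFINE_HELPER(16)   /* like "#define DATA_SIZE 2" + "#include ..." */
TOY_DEFINE_HELPER(32)
TOY_DEFINE_HELPER(64)
#undef TOY_DEFINE_HELPER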
c482cb11
RH
2589/* Code access functions. */
2590
fc4120a3 2591static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
9002ffcb 2592 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2593{
fc4120a3 2594 return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
2dd92606
RH
2595}
2596
fc4120a3 2597uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
eed56642 2598{
9002ffcb 2599 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
fc4120a3 2600 return full_ldub_code(env, addr, oi, 0);
2dd92606
RH
2601}
2602
fc4120a3 2603static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
9002ffcb 2604 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2605{
fc4120a3 2606 return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
eed56642 2607}
0cac1b66 2608
fc4120a3 2609uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
eed56642 2610{
9002ffcb 2611 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
fc4120a3 2612 return full_lduw_code(env, addr, oi, 0);
2dd92606
RH
2613}
2614
fc4120a3 2615static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
9002ffcb 2616 MemOpIdx oi, uintptr_t retaddr)
2dd92606 2617{
fc4120a3 2618 return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
eed56642 2619}
0cac1b66 2620
fc4120a3 2621uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
eed56642 2622{
9002ffcb 2623 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
fc4120a3 2624 return full_ldl_code(env, addr, oi, 0);
eed56642
AB
2625}
2626
fc4120a3 2627static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
9002ffcb 2628 MemOpIdx oi, uintptr_t retaddr)
eed56642 2629{
fc313c64 2630 return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
eed56642
AB
2631}
2632
fc4120a3 2633uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
eed56642 2634{
fc313c64 2635 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
fc4120a3 2636 return full_ldq_code(env, addr, oi, 0);
eed56642 2637}
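/*
 * Illustrative sketch, not part of cputlb.c: how a target translator might
 * use the code-access helpers above to fetch a 32-bit instruction encoded as
 * two 16-bit halves.  The function name and the encoding are invented.
 */
static uint32_t toy_fetch_insn32(CPUArchState *env, target_ulong pc)
{
    uint32_t lo = cpu_lduw_code(env, pc);
    uint32_t hi = cpu_lduw_code(env, pc + 2);

    /* The first halfword holds the low 16 bits in this invented encoding. */
    return lo | (hi << 16);
}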