/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace-root.h"
#include "qemu/plugin.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
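/*
 * Worked example (illustrative numbers, not taken from any workload):
 * with old_size == 1024 entries and window_max_entries == 768, the use
 * rate is 75% (> 70%), so the table is doubled to 2048 entries (capped
 * at 1 << CPU_TLB_DYN_MAX_BITS). With window_max_entries == 200 and an
 * expired window, the rate is 19% (< 30%); pow2ceil(200) == 256 would
 * give an expected rate of 78%, so the target is doubled to 512 entries
 * (floored at 1 << CPU_TLB_DYN_MIN_BITS).
 */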
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush. */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
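
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * target that dedicates separate MMU indexes to, say, kernel- and
 * user-mode translations can drop only the current vCPU's kernel
 * entries with
 *
 *     tlb_flush_by_mmuidx(env_cpu(env), 1 << MMU_KERNEL_IDX);
 *
 * where MMU_KERNEL_IDX stands for a target-defined index. The
 * *_all_cpus_synced() variants above additionally queue the source
 * vCPU's flush as "safe" work, creating a synchronisation point at
 * which all of the queued flushes have completed.
 */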

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
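
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * target helper emulating a guest "invalidate one TLB entry"
 * instruction would typically call
 *
 *     tlb_flush_page(env_cpu(env), vaddr);
 *
 * while the *_all_cpus() variants below broadcast the same page flush
 * to the other vCPUs as asynchronous work.
 */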

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page. */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
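
/*
 * Worked example for tlb_add_large_page() (illustrative, assuming a
 * 32-bit target_ulong): if the recorded region is addr 0x40000000 with
 * mask 0xffe00000 (one 2 MB page) and a second 2 MB page at 0x40600000
 * is added, the mask is shifted left until both addresses agree in the
 * masked bits, leaving addr 0x40000000 / mask 0xff800000, i.e. a single
 * 8 MB region whose invalidation forces a full flush of that mmu_idx.
 */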

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access. */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean. */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page. */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb. */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
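
/*
 * Illustrative sketch (hypothetical target code, not part of this
 * file): a CPUClass::tlb_fill hook that has resolved vaddr to paddr
 * with permissions 'prot' would normally install the translation and
 * report success with something like
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED, prot,
 *                             mmu_idx, TARGET_PAGE_SIZE);
 *     return true;
 */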

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use atomic_read */
    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb. */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context. */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM. */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}

/*
 * Probe for whether the specified guest access is permitted. If it is not
 * permitted then an exception will be taken in the same way as if this
 * were a real access (and we will not return).
 * If the size is 0 or the page requires I/O access, returns NULL; otherwise,
 * returns the address of the host page similar to tlb_vaddr_to_host().
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr;
    size_t elt_ofs;
    int wp_access;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        wp_access = BP_MEM_READ;
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        wp_access = BP_MEM_WRITE;
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        wp_access = BP_MEM_READ;
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    if (unlikely(!tlb_hit(tlb_addr, addr))) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
            /* TLB resize via tlb_fill may have moved the entry. */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }

    if (!size) {
        return NULL;
    }

    if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Reject I/O access, or other required slow-path. */
        if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
            return NULL;
        }

        /* Handle watchpoints. */
        if (tlb_addr & TLB_WATCHPOINT) {
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages. */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }
    }

    return (void *)((uintptr_t)addr + entry->addend);
}
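
/*
 * Illustrative usage of probe_access() (hypothetical helper, not part
 * of this file): a helper about to write 'len' bytes (len no larger
 * than a page) that wants faults, watchpoints and dirty tracking
 * handled up front could do
 *
 *     void *host = probe_access(env, addr, len, MMU_DATA_STORE,
 *                               cpu_mmu_index(env, false), GETPC());
 *     if (host) {
 *         memset(host, 0, len);
 *     }
 *
 * and fall back to a byte-by-byte slow path when NULL is returned
 * (zero size, I/O, or another slow-path flag).
 */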

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page;
    size_t elt_ofs;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }

    page = addr & TARGET_PAGE_MASK;
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    if (!tlb_hit_page(tlb_addr, page)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);

        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
                /* Non-faulting page table read failed. */
                return NULL;
            }

            /* TLB resize via tlb_fill may have moved the entry. */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }

    if (tlb_addr & ~TARGET_PAGE_MASK) {
        /* IO access */
        return NULL;
    }

    return (void *)((uintptr_t)addr + entry->addend);
}
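
/*
 * Note on tlb_vaddr_to_host() above: unlike probe_access(), it never
 * raises a guest exception. The page-table walk is requested in
 * non-faulting (probe) mode, and a miss or an I/O page simply yields
 * NULL.
 */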


#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This should never fail as the memory access being instrumented
 * should have just filled the TLB.
 */

bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUIOTLBEntry *iotlbentry;
            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
            data->is_io = true;
            data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = addr + tlbe->addend;
        }
        return true;
    }
    return false;
}

#endif

/* Probe for a read-modify-write atomic operation. Do not allow unaligned
 * operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment. */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop. */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world. */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page. */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return. But just in case, handle via stop-the-world. */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1 << s_bits,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}

/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}
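
/*
 * Note: load_memop() is expected to be inlined with a compile-time
 * constant MemOp, so the switch above folds to a single host load;
 * qemu_build_not_reached() in the default case turns a failure to
 * constant-fold into a build-time error rather than a runtime abort.
 */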

static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load. */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access. */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this. ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine. */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine. */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}
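
/*
 * Worked example of the unaligned slow path in load_helper() (numbers
 * for illustration only): a 4-byte little-endian load with
 * (addr & 3) == 2 that crosses a page boundary performs two aligned
 * 4-byte loads r1 and r2; shift == 16, so the result is
 * ((r1 >> 16) | (r2 << 16)) & MAKE_64BIT_MASK(0, 32).
 */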
1506
1507/*
1508 * For the benefit of TCG generated code, we want to avoid the
1509 * complication of ABI-specific return type promotion and always
1510 * return a value extended to the register size of the host. This is
1511 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1512 * data, and for that we always have uint64_t.
1513 *
1514 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1515 */
1516
2dd92606
RH
1517static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1518 TCGMemOpIdx oi, uintptr_t retaddr)
1519{
be5c4787 1520 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
2dd92606
RH
1521}
1522
fc1bc777
RH
1523tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1524 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1525{
2dd92606
RH
1526 return full_ldub_mmu(env, addr, oi, retaddr);
1527}
1528
1529static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1530 TCGMemOpIdx oi, uintptr_t retaddr)
1531{
be5c4787 1532 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2dd92606 1533 full_le_lduw_mmu);
eed56642
AB
1534}
1535
fc1bc777
RH
1536tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1537 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1538{
2dd92606
RH
1539 return full_le_lduw_mmu(env, addr, oi, retaddr);
1540}
1541
1542static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1543 TCGMemOpIdx oi, uintptr_t retaddr)
1544{
be5c4787 1545 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2dd92606 1546 full_be_lduw_mmu);
eed56642
AB
1547}
1548
fc1bc777
RH
1549tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1550 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1551{
2dd92606
RH
1552 return full_be_lduw_mmu(env, addr, oi, retaddr);
1553}
1554
1555static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1556 TCGMemOpIdx oi, uintptr_t retaddr)
1557{
be5c4787 1558 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2dd92606 1559 full_le_ldul_mmu);
eed56642
AB
1560}
1561
fc1bc777
RH
1562tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1563 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1564{
2dd92606
RH
1565 return full_le_ldul_mmu(env, addr, oi, retaddr);
1566}
1567
1568static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1569 TCGMemOpIdx oi, uintptr_t retaddr)
1570{
be5c4787 1571 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2dd92606 1572 full_be_ldul_mmu);
eed56642
AB
1573}
1574
fc1bc777
RH
1575tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1576 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1577{
2dd92606 1578 return full_be_ldul_mmu(env, addr, oi, retaddr);
eed56642
AB
1579}
1580
fc1bc777
RH
1581uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1582 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1583{
be5c4787 1584 return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
2dd92606 1585 helper_le_ldq_mmu);
eed56642
AB
1586}
1587
fc1bc777
RH
1588uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1589 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1590{
be5c4787 1591 return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
2dd92606 1592 helper_be_ldq_mmu);
eed56642
AB
1593}
1594
1595/*
1596 * Provide signed versions of the load routines as well. We can of course
1597 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
1598 */
1599
1600
1601tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1602 TCGMemOpIdx oi, uintptr_t retaddr)
1603{
1604 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1605}
1606
1607tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1608 TCGMemOpIdx oi, uintptr_t retaddr)
1609{
1610 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1611}
1612
1613tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1614 TCGMemOpIdx oi, uintptr_t retaddr)
1615{
1616 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1617}
1618
1619tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1620 TCGMemOpIdx oi, uintptr_t retaddr)
1621{
1622 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1623}
1624
1625tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1626 TCGMemOpIdx oi, uintptr_t retaddr)
1627{
1628 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1629}
1630
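/*
 * Illustration (hypothetical names): the signed variants above rely only
 * on a narrowing cast followed by implicit widening, which the compiler
 * turns into a sign-extension of the already-loaded value.
 */
#include <stdint.h>

static uint64_t sext8(uint64_t v)  { return (uint64_t)(int64_t)(int8_t)v;  }
static uint64_t sext16(uint64_t v) { return (uint64_t)(int64_t)(int16_t)v; }
static uint64_t sext32(uint64_t v) { return (uint64_t)(int64_t)(int32_t)v; }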
d03f1408
RH
1631/*
1632 * Load helpers for cpu_ldst.h.
1633 */
1634
1635static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
1636 int mmu_idx, uintptr_t retaddr,
1637 MemOp op, FullLoadHelper *full_load)
1638{
1639 uint16_t meminfo;
1640 TCGMemOpIdx oi;
1641 uint64_t ret;
1642
1643 meminfo = trace_mem_get_info(op, mmu_idx, false);
1644 trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
1645
1646 op &= ~MO_SIGN;
1647 oi = make_memop_idx(op, mmu_idx);
1648 ret = full_load(env, addr, oi, retaddr);
1649
1650 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
1651
1652 return ret;
1653}
1654
1655uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1656 int mmu_idx, uintptr_t ra)
1657{
1658 return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
1659}
1660
1661int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1662 int mmu_idx, uintptr_t ra)
1663{
1664 return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
1665 full_ldub_mmu);
1666}
1667
1668uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1669 int mmu_idx, uintptr_t ra)
1670{
1671 return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW,
1672 MO_TE == MO_LE
1673 ? full_le_lduw_mmu : full_be_lduw_mmu);
1674}
1675
1676int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1677 int mmu_idx, uintptr_t ra)
1678{
1679 return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW,
1680 MO_TE == MO_LE
1681 ? full_le_lduw_mmu : full_be_lduw_mmu);
1682}
1683
1684uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1685 int mmu_idx, uintptr_t ra)
1686{
1687 return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL,
1688 MO_TE == MO_LE
1689 ? full_le_ldul_mmu : full_be_ldul_mmu);
1690}
1691
1692uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1693 int mmu_idx, uintptr_t ra)
1694{
1695 return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ,
1696 MO_TE == MO_LE
1697 ? helper_le_ldq_mmu : helper_be_ldq_mmu);
1698}
1699
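/*
 * Usage sketch: these wrappers are what target code calls in place of the
 * old cpu_ldst templates.  The helper below is hypothetical and assumes a
 * normal QEMU target build; only the cpu_ldl_mmuidx_ra / cpu_mmu_index /
 * GETPC interfaces themselves come from the surrounding code base.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

uint64_t helper_read_pair(CPUArchState *env, target_ulong base)
{
    uintptr_t ra = GETPC();                     /* unwind to the calling TB */
    int mmu_idx = cpu_mmu_index(env, false);    /* explicit data MMU index */
    uint32_t lo = cpu_ldl_mmuidx_ra(env, base, mmu_idx, ra);
    uint32_t hi = cpu_ldl_mmuidx_ra(env, base + 4, mmu_idx, ra);

    return ((uint64_t)hi << 32) | lo;
}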
eed56642
AB
1700/*
1701 * Store Helpers
1702 */
1703
80d9d1c6
RH
1704static inline void QEMU_ALWAYS_INLINE
1705store_memop(void *haddr, uint64_t val, MemOp op)
1706{
1707 switch (op) {
1708 case MO_UB:
1709 stb_p(haddr, val);
1710 break;
1711 case MO_BEUW:
1712 stw_be_p(haddr, val);
1713 break;
1714 case MO_LEUW:
1715 stw_le_p(haddr, val);
1716 break;
1717 case MO_BEUL:
1718 stl_be_p(haddr, val);
1719 break;
1720 case MO_LEUL:
1721 stl_le_p(haddr, val);
1722 break;
1723 case MO_BEQ:
1724 stq_be_p(haddr, val);
1725 break;
1726 case MO_LEQ:
1727 stq_le_p(haddr, val);
1728 break;
1729 default:
1730 qemu_build_not_reached();
1731 }
1732}
1733
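/*
 * Illustration of the pattern used by store_memop/load_memop (hypothetical
 * names): an always-inline switch over a compile-time-constant operation
 * folds down to a single store at each call site, which is why the callers
 * keep the swapped and non-swapped calls as separate statements.
 */
#include <stdint.h>
#include <string.h>

static inline __attribute__((always_inline))
void store_sized(void *haddr, uint64_t val, unsigned size)
{
    switch (size) {
    case 1: { uint8_t  v = val; memcpy(haddr, &v, sizeof(v)); break; }
    case 2: { uint16_t v = val; memcpy(haddr, &v, sizeof(v)); break; }
    case 4: { uint32_t v = val; memcpy(haddr, &v, sizeof(v)); break; }
    case 8: memcpy(haddr, &val, sizeof(val)); break;
    }
}

static void store_u32(void *haddr, uint32_t val)
{
    store_sized(haddr, val, 4);    /* constant size: folds to one store */
}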
c6b716cd 1734static inline void QEMU_ALWAYS_INLINE
4601f8d1 1735store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
be5c4787 1736 TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
eed56642
AB
1737{
1738 uintptr_t mmu_idx = get_mmuidx(oi);
1739 uintptr_t index = tlb_index(env, mmu_idx, addr);
1740 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1741 target_ulong tlb_addr = tlb_addr_write(entry);
1742 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1743 unsigned a_bits = get_alignment_bits(get_memop(oi));
1744 void *haddr;
be5c4787 1745 size_t size = memop_size(op);
eed56642
AB
1746
1747 /* Handle CPU specific unaligned behaviour */
1748 if (addr & ((1 << a_bits) - 1)) {
29a0af61 1749 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
eed56642
AB
1750 mmu_idx, retaddr);
1751 }
1752
1753 /* If the TLB entry is for a different page, reload and try again. */
1754 if (!tlb_hit(tlb_addr, addr)) {
1755 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1756 addr & TARGET_PAGE_MASK)) {
29a0af61 1757 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
eed56642
AB
1758 mmu_idx, retaddr);
1759 index = tlb_index(env, mmu_idx, addr);
1760 entry = tlb_entry(env, mmu_idx, addr);
1761 }
1762 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1763 }
1764
50b107c5 1765 /* Handle anything that isn't just a straight memory access. */
eed56642 1766 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
50b107c5 1767 CPUIOTLBEntry *iotlbentry;
5b87b3e6 1768 bool need_swap;
50b107c5
RH
1769
1770 /* For anything that is unaligned, recurse through byte stores. */
eed56642
AB
1771 if ((addr & (size - 1)) != 0) {
1772 goto do_unaligned_access;
1773 }
50b107c5
RH
1774
1775 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1776
1777 /* Handle watchpoints. */
1778 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1779 /* On watchpoint hit, this will longjmp out. */
1780 cpu_check_watchpoint(env_cpu(env), addr, size,
1781 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
50b107c5
RH
1782 }
1783
5b87b3e6
RH
1784 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1785
50b107c5 1786 /* Handle I/O access. */
08565552 1787 if (tlb_addr & TLB_MMIO) {
5b87b3e6
RH
1788 io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
1789 op ^ (need_swap * MO_BSWAP));
1790 return;
1791 }
1792
7b0d792c
RH
1793 /* Ignore writes to ROM. */
1794 if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
1795 return;
1796 }
1797
08565552
RH
1798 /* Handle clean RAM pages. */
1799 if (tlb_addr & TLB_NOTDIRTY) {
707526ad 1800 notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
08565552
RH
1801 }
1802
707526ad
RH
1803 haddr = (void *)((uintptr_t)addr + entry->addend);
1804
5b87b3e6
RH
1805 /*
1806 * Keep these two store_memop separate to ensure that the compiler
1807 * is able to fold the entire function to a single instruction.
1808 * There is a build-time assert inside to remind you of this. ;-)
1809 */
1810 if (unlikely(need_swap)) {
1811 store_memop(haddr, val, op ^ MO_BSWAP);
1812 } else {
1813 store_memop(haddr, val, op);
1814 }
eed56642
AB
1815 return;
1816 }
1817
1818 /* Handle slow unaligned access (it spans two pages or IO). */
1819 if (size > 1
1820 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1821 >= TARGET_PAGE_SIZE)) {
1822 int i;
1823 uintptr_t index2;
1824 CPUTLBEntry *entry2;
1825 target_ulong page2, tlb_addr2;
8f7cd2ad
RH
1826 size_t size2;
1827
eed56642
AB
1828 do_unaligned_access:
1829 /*
1830 * Ensure the second page is in the TLB. Note that the first page
1831 * is already guaranteed to be filled, and that the second page
1832 * cannot evict the first.
1833 */
1834 page2 = (addr + size) & TARGET_PAGE_MASK;
8f7cd2ad 1835 size2 = (addr + size) & ~TARGET_PAGE_MASK;
eed56642
AB
1836 index2 = tlb_index(env, mmu_idx, page2);
1837 entry2 = tlb_entry(env, mmu_idx, page2);
1838 tlb_addr2 = tlb_addr_write(entry2);
50b107c5
RH
1839 if (!tlb_hit_page(tlb_addr2, page2)) {
1840 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
1841 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
1842 mmu_idx, retaddr);
1843 index2 = tlb_index(env, mmu_idx, page2);
1844 entry2 = tlb_entry(env, mmu_idx, page2);
1845 }
1846 tlb_addr2 = tlb_addr_write(entry2);
1847 }
1848
1849 /*
1850 * Handle watchpoints. Since this may trap, all checks
1851 * must happen before any store.
1852 */
1853 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1854 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
1855 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
1856 BP_MEM_WRITE, retaddr);
1857 }
1858 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
1859 cpu_check_watchpoint(env_cpu(env), page2, size2,
1860 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
1861 BP_MEM_WRITE, retaddr);
eed56642
AB
1862 }
1863
1864 /*
1865 * XXX: not efficient, but simple.
1866 * This loop must go in the forward direction to avoid issues
1867 * with self-modifying code in Windows 64-bit.
1868 */
1869 for (i = 0; i < size; ++i) {
1870 uint8_t val8;
be5c4787 1871 if (memop_big_endian(op)) {
eed56642
AB
1872 /* Big-endian extract. */
1873 val8 = val >> (((size - 1) * 8) - (i * 8));
1874 } else {
1875 /* Little-endian extract. */
1876 val8 = val >> (i * 8);
1877 }
4601f8d1 1878 helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
eed56642
AB
1879 }
1880 return;
1881 }
1882
1883 haddr = (void *)((uintptr_t)addr + entry->addend);
80d9d1c6 1884 store_memop(haddr, val, op);
eed56642
AB
1885}
1886
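/*
 * Illustration (hypothetical name) of the byte extraction used by the
 * unaligned store fallback above: byte i, in guest memory order, of a
 * 'size'-byte value.
 */
#include <stdint.h>
#include <stdbool.h>

static uint8_t extract_store_byte(uint64_t val, unsigned i,
                                  unsigned size, bool big_endian)
{
    if (big_endian) {
        /* Byte 0 is the most-significant byte of the access. */
        return val >> (((size - 1) - i) * 8);
    }
    /* Byte 0 is the least-significant byte of the access. */
    return val >> (i * 8);
}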
fc1bc777
RH
1887void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1888 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1889{
be5c4787 1890 store_helper(env, addr, val, oi, retaddr, MO_UB);
eed56642
AB
1891}
1892
fc1bc777
RH
1893void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1894 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1895{
be5c4787 1896 store_helper(env, addr, val, oi, retaddr, MO_LEUW);
eed56642
AB
1897}
1898
fc1bc777
RH
1899void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1900 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1901{
be5c4787 1902 store_helper(env, addr, val, oi, retaddr, MO_BEUW);
eed56642
AB
1903}
1904
fc1bc777
RH
1905void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1906 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1907{
be5c4787 1908 store_helper(env, addr, val, oi, retaddr, MO_LEUL);
eed56642
AB
1909}
1910
fc1bc777
RH
1911void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1912 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1913{
be5c4787 1914 store_helper(env, addr, val, oi, retaddr, MO_BEUL);
eed56642
AB
1915}
1916
fc1bc777
RH
1917void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1918 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1919{
be5c4787 1920 store_helper(env, addr, val, oi, retaddr, MO_LEQ);
eed56642
AB
1921}
1922
fc1bc777
RH
1923void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1924 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 1925{
be5c4787 1926 store_helper(env, addr, val, oi, retaddr, MO_BEQ);
eed56642 1927}
0f590e74 1928
d03f1408
RH
1929/*
1930 * Store Helpers for cpu_ldst.h
1931 */
1932
1933static inline void QEMU_ALWAYS_INLINE
1934cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1935 int mmu_idx, uintptr_t retaddr, MemOp op)
1936{
1937 TCGMemOpIdx oi;
1938 uint16_t meminfo;
1939
1940 meminfo = trace_mem_get_info(op, mmu_idx, true);
1941 trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
1942
1943 oi = make_memop_idx(op, mmu_idx);
1944 store_helper(env, addr, val, oi, retaddr, op);
1945
1946 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
1947}
1948
1949void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
1950 int mmu_idx, uintptr_t retaddr)
1951{
1952 cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
1953}
1954
1955void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
1956 int mmu_idx, uintptr_t retaddr)
1957{
1958 cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW);
1959}
1960
1961void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
1962 int mmu_idx, uintptr_t retaddr)
1963{
1964 cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL);
1965}
1966
1967void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
1968 int mmu_idx, uintptr_t retaddr)
1969{
1970 cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
1971}
1972
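/*
 * Usage sketch, mirroring the load side: a hypothetical target helper that
 * stores through an explicit MMU index.  Assumes a normal QEMU target
 * build; only the cpu_stl_mmuidx_ra / cpu_mmu_index / GETPC interfaces
 * come from the surrounding code base.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

void helper_write_status(CPUArchState *env, target_ulong addr, uint32_t val)
{
    uintptr_t ra = GETPC();

    /* A fault here unwinds to the calling TB via ra. */
    cpu_stl_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}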
c482cb11
RH
1973/* First set of helpers allows passing in of OI and RETADDR. This makes
1974 them callable from other helpers. */
1975
1976#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
1977#define ATOMIC_NAME(X) \
1978 HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
707526ad
RH
1979#define ATOMIC_MMU_DECLS
1980#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
1981#define ATOMIC_MMU_CLEANUP
504f73f7 1982#define ATOMIC_MMU_IDX get_mmuidx(oi)
c482cb11 1983
cfec3885 1984#include "atomic_common.inc.c"
c482cb11
RH
1985
1986#define DATA_SIZE 1
1987#include "atomic_template.h"
1988
1989#define DATA_SIZE 2
1990#include "atomic_template.h"
1991
1992#define DATA_SIZE 4
1993#include "atomic_template.h"
1994
df79b996 1995#ifdef CONFIG_ATOMIC64
c482cb11
RH
1996#define DATA_SIZE 8
1997#include "atomic_template.h"
df79b996 1998#endif
c482cb11 1999
e6cd4bb5 2000#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
7ebee43e
RH
2001#define DATA_SIZE 16
2002#include "atomic_template.h"
2003#endif
2004
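/*
 * Illustration of the multiple-inclusion template technique behind the
 * DATA_SIZE/#include stanzas above.  This is a generic, hypothetical
 * example; it is not the contents of atomic_template.h.
 */
#include <stdint.h>

#define DATA_SIZE 4                     /* normally set by the includer */

#if DATA_SIZE == 4
# define DATA_TYPE uint32_t
# define SUFFIX    l
#elif DATA_SIZE == 2
# define DATA_TYPE uint16_t
# define SUFFIX    w
#else
# define DATA_TYPE uint8_t
# define SUFFIX    b
#endif

#define XCAT(a, b) a##b
#define CAT(a, b)  XCAT(a, b)           /* two levels so SUFFIX expands */

static inline DATA_TYPE CAT(load_, SUFFIX)(const DATA_TYPE *p)
{
    return *p;                          /* generates load_l() here */
}

#undef CAT
#undef XCAT
#undef SUFFIX
#undef DATA_TYPE
#undef DATA_SIZE                        /* ready for the next stanza */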
c482cb11
RH
2005/* Second set of helpers are directly callable from TCG as helpers. */
2006
2007#undef EXTRA_ARGS
2008#undef ATOMIC_NAME
2009#undef ATOMIC_MMU_LOOKUP
2010#define EXTRA_ARGS , TCGMemOpIdx oi
2011#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
707526ad 2012#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
c482cb11
RH
2013
2014#define DATA_SIZE 1
2015#include "atomic_template.h"
2016
2017#define DATA_SIZE 2
2018#include "atomic_template.h"
2019
2020#define DATA_SIZE 4
2021#include "atomic_template.h"
2022
df79b996 2023#ifdef CONFIG_ATOMIC64
c482cb11
RH
2024#define DATA_SIZE 8
2025#include "atomic_template.h"
df79b996 2026#endif
504f73f7 2027#undef ATOMIC_MMU_IDX
c482cb11
RH
2028
2029/* Code access functions. */
2030
fc4120a3 2031static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
2dd92606
RH
2032 TCGMemOpIdx oi, uintptr_t retaddr)
2033{
fc4120a3 2034 return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
2dd92606
RH
2035}
2036
fc4120a3 2037uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
eed56642 2038{
fc4120a3
RH
2039 TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2040 return full_ldub_code(env, addr, oi, 0);
2dd92606
RH
2041}
2042
fc4120a3
RH
2043static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
2044 TCGMemOpIdx oi, uintptr_t retaddr)
2dd92606 2045{
fc4120a3 2046 return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
eed56642 2047}
0cac1b66 2048
fc4120a3 2049uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
eed56642 2050{
fc4120a3
RH
2051 TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2052 return full_lduw_code(env, addr, oi, 0);
2dd92606
RH
2053}
2054
fc4120a3
RH
2055static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
2056 TCGMemOpIdx oi, uintptr_t retaddr)
2dd92606 2057{
fc4120a3 2058 return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
eed56642 2059}
0cac1b66 2060
fc4120a3 2061uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
eed56642 2062{
fc4120a3
RH
2063 TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2064 return full_ldl_code(env, addr, oi, 0);
eed56642
AB
2065}
2066
fc4120a3
RH
2067static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
2068 TCGMemOpIdx oi, uintptr_t retaddr)
eed56642 2069{
fc4120a3 2070 return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
eed56642
AB
2071}
2072
fc4120a3 2073uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
eed56642 2074{
fc4120a3
RH
2075 TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
2076 return full_ldq_code(env, addr, oi, 0);
eed56642 2077}
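/*
 * Usage sketch: the cpu_ld*_code routines above are what a target's
 * translator uses to fetch guest instruction bytes.  The function below is
 * hypothetical and assumes a normal QEMU target build; these routines pick
 * the code MMU index via cpu_mmu_index(env, true) and pass a zero retaddr,
 * since they run at translation time rather than from within a TB.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu_ldst.h"

static uint32_t fetch_insn_word(CPUArchState *env, target_ulong pc)
{
    return cpu_ldl_code(env, pc);
}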