accel/tcg/cputlb.c
1 /*
2 * Common CPU TLB handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/memory-internal.h"
28 #include "exec/ram_addr.h"
29 #include "tcg/tcg.h"
30 #include "qemu/error-report.h"
31 #include "exec/log.h"
32 #include "exec/helper-proto.h"
33 #include "qemu/atomic.h"
34 #include "qemu/atomic128.h"
35 #include "exec/translate-all.h"
36 #include "trace/trace-root.h"
37 #include "tb-hash.h"
38 #include "internal.h"
39 #ifdef CONFIG_PLUGIN
40 #include "qemu/plugin-memory.h"
41 #endif
42 #include "tcg/tcg-ldst.h"
43
44 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
45 /* #define DEBUG_TLB */
46 /* #define DEBUG_TLB_LOG */
47
48 #ifdef DEBUG_TLB
49 # define DEBUG_TLB_GATE 1
50 # ifdef DEBUG_TLB_LOG
51 # define DEBUG_TLB_LOG_GATE 1
52 # else
53 # define DEBUG_TLB_LOG_GATE 0
54 # endif
55 #else
56 # define DEBUG_TLB_GATE 0
57 # define DEBUG_TLB_LOG_GATE 0
58 #endif
59
60 #define tlb_debug(fmt, ...) do { \
61 if (DEBUG_TLB_LOG_GATE) { \
62 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
63 ## __VA_ARGS__); \
64 } else if (DEBUG_TLB_GATE) { \
65 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
66 } \
67 } while (0)
68
69 #define assert_cpu_is_self(cpu) do { \
70 if (DEBUG_TLB_GATE) { \
71 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
72 } \
73 } while (0)
74
75 /* run_on_cpu_data.target_ptr should always be big enough for a
76 * target_ulong even on 32 bit builds */
77 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
78
79 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
80 */
81 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
82 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
83
84 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
85 {
86 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
87 }
88
89 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
90 {
91 return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
92 }
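/*
 * Worked example of the mask/size relationship (illustrative only; the
 * real CPU_TLB_ENTRY_BITS value depends on the build): if entries were
 * 32 bytes (CPU_TLB_ENTRY_BITS == 5) and the table held 256 entries,
 * then fast->mask == 255 << 5 == 0x1fe0, tlb_n_entries() == 256, and
 * sizeof_tlb() == 0x1fe0 + 0x20 == 0x2000 bytes (8 KiB).
 */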
93
94 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
95 size_t max_entries)
96 {
97 desc->window_begin_ns = ns;
98 desc->window_max_entries = max_entries;
99 }
100
101 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
102 {
103 int i, i0 = tb_jmp_cache_hash_page(page_addr);
104 CPUJumpCache *jc = cpu->tb_jmp_cache;
105
106 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
107 qatomic_set(&jc->array[i0 + i].tb, NULL);
108 }
109 }
110
111 /**
112 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
113 * @desc: The CPUTLBDesc portion of the TLB
114 * @fast: The CPUTLBDescFast portion of the same TLB
115 *
117  * Called with tlb_c.lock held.
117 *
118 * We have two main constraints when resizing a TLB: (1) we only resize it
119 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
120 * the array or unnecessarily flushing it), which means we do not control how
121 * frequently the resizing can occur; (2) we don't have access to the guest's
122 * future scheduling decisions, and therefore have to decide the magnitude of
123 * the resize based on past observations.
124 *
125 * In general, a memory-hungry process can benefit greatly from an appropriately
126 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
127 * we just have to make the TLB as large as possible; while an oversized TLB
128 * results in minimal TLB miss rates, it also takes longer to be flushed
129 * (flushes can be _very_ frequent), and the reduced locality can also hurt
130 * performance.
131 *
132 * To achieve near-optimal performance for all kinds of workloads, we:
133 *
134 * 1. Aggressively increase the size of the TLB when the use rate of the
135 * TLB being flushed is high, since it is likely that in the near future this
136 * memory-hungry process will execute again, and its memory hungriness will
137 * probably be similar.
138 *
139 * 2. Slowly reduce the size of the TLB as the use rate declines over a
140 * reasonably large time window. The rationale is that if in such a time window
141 * we have not observed a high TLB use rate, it is likely that we won't observe
142 * it in the near future. In that case, once a time window expires we downsize
143 * the TLB to match the maximum use rate observed in the window.
144 *
145 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
146 * since in that range performance is likely near-optimal. Recall that the TLB
147 * is direct mapped, so we want the use rate to be low (or at least not too
148 * high), since otherwise we are likely to have a significant amount of
149 * conflict misses.
150 */
151 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
152 int64_t now)
153 {
154 size_t old_size = tlb_n_entries(fast);
155 size_t rate;
156 size_t new_size = old_size;
157 int64_t window_len_ms = 100;
158 int64_t window_len_ns = window_len_ms * 1000 * 1000;
159 bool window_expired = now > desc->window_begin_ns + window_len_ns;
160
161 if (desc->n_used_entries > desc->window_max_entries) {
162 desc->window_max_entries = desc->n_used_entries;
163 }
164 rate = desc->window_max_entries * 100 / old_size;
165
166 if (rate > 70) {
167 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
168 } else if (rate < 30 && window_expired) {
169 size_t ceil = pow2ceil(desc->window_max_entries);
170 size_t expected_rate = desc->window_max_entries * 100 / ceil;
171
172 /*
173 * Avoid undersizing when the max number of entries seen is just below
174 * a pow2. For instance, if max_entries == 1025, the expected use rate
175 * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
176 * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
177 * later. Thus, make sure that the expected use rate remains below 70%.
178 * (and since we double the size, that means the lowest rate we'd
179 * expect to get is 35%, which is still in the 30-70% range where
180 * we consider that the size is appropriate.)
181 */
182 if (expected_rate > 70) {
183 ceil *= 2;
184 }
185 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
186 }
187
188 if (new_size == old_size) {
189 if (window_expired) {
190 tlb_window_reset(desc, now, desc->n_used_entries);
191 }
192 return;
193 }
194
195 g_free(fast->table);
196 g_free(desc->fulltlb);
197
198 tlb_window_reset(desc, now, 0);
199 /* desc->n_used_entries is cleared by the caller */
200 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
201 fast->table = g_try_new(CPUTLBEntry, new_size);
202 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
203
204 /*
205 * If the allocations fail, try smaller sizes. We just freed some
206 * memory, so going back to half of new_size has a good chance of working.
207 * Increased memory pressure elsewhere in the system might cause the
208 * allocations to fail though, so we progressively reduce the allocation
209 * size, aborting if we cannot even allocate the smallest TLB we support.
210 */
211 while (fast->table == NULL || desc->fulltlb == NULL) {
212 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
213 error_report("%s: %s", __func__, strerror(errno));
214 abort();
215 }
216 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
217 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
218
219 g_free(fast->table);
220 g_free(desc->fulltlb);
221 fast->table = g_try_new(CPUTLBEntry, new_size);
222 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
223 }
224 }
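/*
 * Illustrative numbers for the heuristic above (not from a real trace):
 * with old_size == 1024 and window_max_entries == 800, the use rate is
 * 800 * 100 / 1024 == 78%, so the table doubles to 2048 entries.
 * Conversely, with window_max_entries == 200 and an expired window, the
 * rate is 19%; pow2ceil(200) == 256 would give an expected rate of 78%,
 * which is above 70%, so ceil is doubled and the table shrinks to 512
 * entries (bounded below by 1 << CPU_TLB_DYN_MIN_BITS).
 */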
225
226 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
227 {
228 desc->n_used_entries = 0;
229 desc->large_page_addr = -1;
230 desc->large_page_mask = -1;
231 desc->vindex = 0;
232 memset(fast->table, -1, sizeof_tlb(fast));
233 memset(desc->vtable, -1, sizeof(desc->vtable));
234 }
235
236 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
237 int64_t now)
238 {
239 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
240 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
241
242 tlb_mmu_resize_locked(desc, fast, now);
243 tlb_mmu_flush_locked(desc, fast);
244 }
245
246 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
247 {
248 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
249
250 tlb_window_reset(desc, now, 0);
251 desc->n_used_entries = 0;
252 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
253 fast->table = g_new(CPUTLBEntry, n_entries);
254 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
255 tlb_mmu_flush_locked(desc, fast);
256 }
257
258 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
259 {
260 env_tlb(env)->d[mmu_idx].n_used_entries++;
261 }
262
263 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
264 {
265 env_tlb(env)->d[mmu_idx].n_used_entries--;
266 }
267
268 void tlb_init(CPUState *cpu)
269 {
270 CPUArchState *env = cpu->env_ptr;
271 int64_t now = get_clock_realtime();
272 int i;
273
274 qemu_spin_init(&env_tlb(env)->c.lock);
275
276 /* All tlbs are initialized flushed. */
277 env_tlb(env)->c.dirty = 0;
278
279 for (i = 0; i < NB_MMU_MODES; i++) {
280 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
281 }
282 }
283
284 void tlb_destroy(CPUState *cpu)
285 {
286 CPUArchState *env = cpu->env_ptr;
287 int i;
288
289 qemu_spin_destroy(&env_tlb(env)->c.lock);
290 for (i = 0; i < NB_MMU_MODES; i++) {
291 CPUTLBDesc *desc = &env_tlb(env)->d[i];
292 CPUTLBDescFast *fast = &env_tlb(env)->f[i];
293
294 g_free(fast->table);
295 g_free(desc->fulltlb);
296 }
297 }
298
299 /* flush_all_helper: run fn across all cpus
300  *
301  * Run fn asynchronously on every cpu other than src.  Callers that need
302  * a synchronisation point (the *_synced variants) additionally queue fn
303  * on src as "safe" work, so that all queued work is finished before
304  * execution starts again.
305  */
306 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
307 run_on_cpu_data d)
308 {
309 CPUState *cpu;
310
311 CPU_FOREACH(cpu) {
312 if (cpu != src) {
313 async_run_on_cpu(cpu, fn, d);
314 }
315 }
316 }
317
318 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
319 {
320 CPUState *cpu;
321 size_t full = 0, part = 0, elide = 0;
322
323 CPU_FOREACH(cpu) {
324 CPUArchState *env = cpu->env_ptr;
325
326 full += qatomic_read(&env_tlb(env)->c.full_flush_count);
327 part += qatomic_read(&env_tlb(env)->c.part_flush_count);
328 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
329 }
330 *pfull = full;
331 *ppart = part;
332 *pelide = elide;
333 }
334
335 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
336 {
337 CPUArchState *env = cpu->env_ptr;
338 uint16_t asked = data.host_int;
339 uint16_t all_dirty, work, to_clean;
340 int64_t now = get_clock_realtime();
341
342 assert_cpu_is_self(cpu);
343
344 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
345
346 qemu_spin_lock(&env_tlb(env)->c.lock);
347
348 all_dirty = env_tlb(env)->c.dirty;
349 to_clean = asked & all_dirty;
350 all_dirty &= ~to_clean;
351 env_tlb(env)->c.dirty = all_dirty;
352
353 for (work = to_clean; work != 0; work &= work - 1) {
354 int mmu_idx = ctz32(work);
355 tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
356 }
357
358 qemu_spin_unlock(&env_tlb(env)->c.lock);
359
360 tcg_flush_jmp_cache(cpu);
361
362 if (to_clean == ALL_MMUIDX_BITS) {
363 qatomic_set(&env_tlb(env)->c.full_flush_count,
364 env_tlb(env)->c.full_flush_count + 1);
365 } else {
366 qatomic_set(&env_tlb(env)->c.part_flush_count,
367 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
368 if (to_clean != asked) {
369 qatomic_set(&env_tlb(env)->c.elide_flush_count,
370 env_tlb(env)->c.elide_flush_count +
371 ctpop16(asked & ~to_clean));
372 }
373 }
374 }
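/*
 * The "work &= work - 1" loop above clears one set bit per iteration
 * (Kernighan's trick).  For example, with to_clean == 0b1010 the first
 * pass sees ctz32(0b1010) == 1 and flushes mmu_idx 1, the second pass
 * sees ctz32(0b1000) == 3 and flushes mmu_idx 3, then work becomes 0
 * and the loop ends.
 */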
375
376 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
377 {
378 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
379
380 if (cpu->created && !qemu_cpu_is_self(cpu)) {
381 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
382 RUN_ON_CPU_HOST_INT(idxmap));
383 } else {
384 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
385 }
386 }
387
388 void tlb_flush(CPUState *cpu)
389 {
390 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
391 }
392
393 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
394 {
395 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
396
397 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
398
399 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
400 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
401 }
402
403 void tlb_flush_all_cpus(CPUState *src_cpu)
404 {
405 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
406 }
407
408 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
409 {
410 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
411
412 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
413
414 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
415 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
416 }
417
418 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
419 {
420 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
421 }
422
423 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
424 target_ulong page, target_ulong mask)
425 {
426 page &= mask;
427 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
428
429 return (page == (tlb_entry->addr_read & mask) ||
430 page == (tlb_addr_write(tlb_entry) & mask) ||
431 page == (tlb_entry->addr_code & mask));
432 }
433
434 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
435 target_ulong page)
436 {
437 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
438 }
439
440 /**
441 * tlb_entry_is_empty - return true if the entry is not in use
442 * @te: pointer to CPUTLBEntry
443 */
444 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
445 {
446 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
447 }
448
449 /* Called with tlb_c.lock held */
450 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
451 target_ulong page,
452 target_ulong mask)
453 {
454 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
455 memset(tlb_entry, -1, sizeof(*tlb_entry));
456 return true;
457 }
458 return false;
459 }
460
461 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
462 target_ulong page)
463 {
464 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
465 }
466
467 /* Called with tlb_c.lock held */
468 static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
469 target_ulong page,
470 target_ulong mask)
471 {
472 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
473 int k;
474
475 assert_cpu_is_self(env_cpu(env));
476 for (k = 0; k < CPU_VTLB_SIZE; k++) {
477 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
478 tlb_n_used_entries_dec(env, mmu_idx);
479 }
480 }
481 }
482
483 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
484 target_ulong page)
485 {
486 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
487 }
488
489 static void tlb_flush_page_locked(CPUArchState *env, int midx,
490 target_ulong page)
491 {
492 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
493 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
494
495 /* Check if we need to flush due to large pages. */
496 if ((page & lp_mask) == lp_addr) {
497 tlb_debug("forcing full flush midx %d ("
498 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
499 midx, lp_addr, lp_mask);
500 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
501 } else {
502 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
503 tlb_n_used_entries_dec(env, midx);
504 }
505 tlb_flush_vtlb_page_locked(env, midx, page);
506 }
507 }
508
509 /**
510 * tlb_flush_page_by_mmuidx_async_0:
511 * @cpu: cpu on which to flush
512 * @addr: page of virtual address to flush
513 * @idxmap: set of mmu_idx to flush
514 *
515 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
516 * at @addr from the tlbs indicated by @idxmap from @cpu.
517 */
518 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
519 target_ulong addr,
520 uint16_t idxmap)
521 {
522 CPUArchState *env = cpu->env_ptr;
523 int mmu_idx;
524
525 assert_cpu_is_self(cpu);
526
527 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
528
529 qemu_spin_lock(&env_tlb(env)->c.lock);
530 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
531 if ((idxmap >> mmu_idx) & 1) {
532 tlb_flush_page_locked(env, mmu_idx, addr);
533 }
534 }
535 qemu_spin_unlock(&env_tlb(env)->c.lock);
536
537 /*
538 * Discard jump cache entries for any tb which might potentially
539 * overlap the flushed page, which includes the previous.
540 */
541 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
542 tb_jmp_cache_clear_page(cpu, addr);
543 }
544
545 /**
546 * tlb_flush_page_by_mmuidx_async_1:
547 * @cpu: cpu on which to flush
548 * @data: encoded addr + idxmap
549 *
550 * Helper for tlb_flush_page_by_mmuidx and friends, called through
551 * async_run_on_cpu. The idxmap parameter is encoded in the page
552 * offset of the target_ptr field. This limits the set of mmu_idx
553 * that can be passed via this method.
554 */
555 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
556 run_on_cpu_data data)
557 {
558 target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
559 target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
560 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
561
562 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
563 }
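/*
 * Example of the addr+idxmap encoding decoded above, assuming 4 KiB
 * target pages (TARGET_PAGE_BITS == 12): tlb_flush_page_by_mmuidx packs
 * addr == 0x7fff2000 and idxmap == 0x3 into 0x7fff2003; here
 * addr_and_idxmap & TARGET_PAGE_MASK recovers 0x7fff2000 and
 * addr_and_idxmap & ~TARGET_PAGE_MASK recovers 0x3.  This only works
 * while idxmap < TARGET_PAGE_SIZE, hence the heap-allocated
 * TLBFlushPageByMMUIdxData fallback below.
 */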
564
565 typedef struct {
566 target_ulong addr;
567 uint16_t idxmap;
568 } TLBFlushPageByMMUIdxData;
569
570 /**
571 * tlb_flush_page_by_mmuidx_async_2:
572 * @cpu: cpu on which to flush
573 * @data: allocated addr + idxmap
574 *
575 * Helper for tlb_flush_page_by_mmuidx and friends, called through
576 * async_run_on_cpu. The addr+idxmap parameters are stored in a
577 * TLBFlushPageByMMUIdxData structure that has been allocated
578 * specifically for this helper. Free the structure when done.
579 */
580 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
581 run_on_cpu_data data)
582 {
583 TLBFlushPageByMMUIdxData *d = data.host_ptr;
584
585 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
586 g_free(d);
587 }
588
589 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
590 {
591 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
592
593 /* This should already be page aligned */
594 addr &= TARGET_PAGE_MASK;
595
596 if (qemu_cpu_is_self(cpu)) {
597 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
598 } else if (idxmap < TARGET_PAGE_SIZE) {
599 /*
600 * Most targets have only a few mmu_idx. In the case where
601 * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
602 * allocating memory for this operation.
603 */
604 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
605 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
606 } else {
607 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
608
609 /* Otherwise allocate a structure, freed by the worker. */
610 d->addr = addr;
611 d->idxmap = idxmap;
612 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
613 RUN_ON_CPU_HOST_PTR(d));
614 }
615 }
616
617 void tlb_flush_page(CPUState *cpu, target_ulong addr)
618 {
619 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
620 }
621
622 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
623 uint16_t idxmap)
624 {
625 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
626
627 /* This should already be page aligned */
628 addr &= TARGET_PAGE_MASK;
629
630 /*
631 * Allocate memory to hold addr+idxmap only when needed.
632 * See tlb_flush_page_by_mmuidx for details.
633 */
634 if (idxmap < TARGET_PAGE_SIZE) {
635 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
636 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
637 } else {
638 CPUState *dst_cpu;
639
640 /* Allocate a separate data block for each destination cpu. */
641 CPU_FOREACH(dst_cpu) {
642 if (dst_cpu != src_cpu) {
643 TLBFlushPageByMMUIdxData *d
644 = g_new(TLBFlushPageByMMUIdxData, 1);
645
646 d->addr = addr;
647 d->idxmap = idxmap;
648 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
649 RUN_ON_CPU_HOST_PTR(d));
650 }
651 }
652 }
653
654 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
655 }
656
657 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
658 {
659 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
660 }
661
662 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
663 target_ulong addr,
664 uint16_t idxmap)
665 {
666 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
667
668 /* This should already be page aligned */
669 addr &= TARGET_PAGE_MASK;
670
671 /*
672 * Allocate memory to hold addr+idxmap only when needed.
673 * See tlb_flush_page_by_mmuidx for details.
674 */
675 if (idxmap < TARGET_PAGE_SIZE) {
676 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
677 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
678 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
679 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
680 } else {
681 CPUState *dst_cpu;
682 TLBFlushPageByMMUIdxData *d;
683
684 /* Allocate a separate data block for each destination cpu. */
685 CPU_FOREACH(dst_cpu) {
686 if (dst_cpu != src_cpu) {
687 d = g_new(TLBFlushPageByMMUIdxData, 1);
688 d->addr = addr;
689 d->idxmap = idxmap;
690 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
691 RUN_ON_CPU_HOST_PTR(d));
692 }
693 }
694
695 d = g_new(TLBFlushPageByMMUIdxData, 1);
696 d->addr = addr;
697 d->idxmap = idxmap;
698 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
699 RUN_ON_CPU_HOST_PTR(d));
700 }
701 }
702
703 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
704 {
705 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
706 }
707
708 static void tlb_flush_range_locked(CPUArchState *env, int midx,
709 target_ulong addr, target_ulong len,
710 unsigned bits)
711 {
712 CPUTLBDesc *d = &env_tlb(env)->d[midx];
713 CPUTLBDescFast *f = &env_tlb(env)->f[midx];
714 target_ulong mask = MAKE_64BIT_MASK(0, bits);
715
716 /*
717 * If @bits is smaller than the tlb size, there may be multiple entries
718 * within the TLB; otherwise all addresses that match under @mask hit
719 * the same TLB entry.
720 * TODO: Perhaps allow bits to be a few bits less than the size.
721 * For now, just flush the entire TLB.
722 *
723 * If @len is larger than the tlb size, then it will take longer to
724 * test all of the entries in the TLB than it will to flush it all.
725 */
726 if (mask < f->mask || len > f->mask) {
727 tlb_debug("forcing full flush midx %d ("
728 TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
729 midx, addr, mask, len);
730 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
731 return;
732 }
733
734 /*
735 * Check if we need to flush due to large pages.
736 * Because large_page_mask contains all 1's from the msb,
737 * we only need to test the end of the range.
738 */
739 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
740 tlb_debug("forcing full flush midx %d ("
741 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
742 midx, d->large_page_addr, d->large_page_mask);
743 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
744 return;
745 }
746
747 for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
748 target_ulong page = addr + i;
749 CPUTLBEntry *entry = tlb_entry(env, midx, page);
750
751 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
752 tlb_n_used_entries_dec(env, midx);
753 }
754 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
755 }
756 }
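/*
 * Rough magnitude example for the "forcing full flush" checks above
 * (values are illustrative, since f->mask depends on the current table
 * size and CPU_TLB_ENTRY_BITS): for a 256-entry table f->mask is only a
 * few KiB, so flushing a 1 GiB range (len == 0x40000000) trips the
 * "len > f->mask" test and the whole mmu_idx is flushed rather than
 * probing every page in the range individually.
 */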
757
758 typedef struct {
759 target_ulong addr;
760 target_ulong len;
761 uint16_t idxmap;
762 uint16_t bits;
763 } TLBFlushRangeData;
764
765 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
766 TLBFlushRangeData d)
767 {
768 CPUArchState *env = cpu->env_ptr;
769 int mmu_idx;
770
771 assert_cpu_is_self(cpu);
772
773 tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
774 d.addr, d.bits, d.len, d.idxmap);
775
776 qemu_spin_lock(&env_tlb(env)->c.lock);
777 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
778 if ((d.idxmap >> mmu_idx) & 1) {
779 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
780 }
781 }
782 qemu_spin_unlock(&env_tlb(env)->c.lock);
783
784 /*
785 * If the length is larger than the jump cache size, then it will take
786 * longer to clear each entry individually than it will to clear it all.
787 */
788 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
789 tcg_flush_jmp_cache(cpu);
790 return;
791 }
792
793 /*
794 * Discard jump cache entries for any tb which might potentially
795 * overlap the flushed pages, which includes the previous.
796 */
797 d.addr -= TARGET_PAGE_SIZE;
798 for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
799 tb_jmp_cache_clear_page(cpu, d.addr);
800 d.addr += TARGET_PAGE_SIZE;
801 }
802 }
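/*
 * Example of the jump-cache clearing loop above: for d.addr == 0x4000 and
 * d.len == 0x3000 with 4 KiB pages, the loop runs n == 4 times and clears
 * the cache for pages 0x3000, 0x4000, 0x5000 and 0x6000, i.e. the three
 * flushed pages plus the page immediately before them (a TB ending in the
 * flushed range may have started on that preceding page).
 */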
803
804 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
805 run_on_cpu_data data)
806 {
807 TLBFlushRangeData *d = data.host_ptr;
808 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
809 g_free(d);
810 }
811
812 void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
813 target_ulong len, uint16_t idxmap,
814 unsigned bits)
815 {
816 TLBFlushRangeData d;
817
818 /*
819 * If all bits are significant, and len is small,
820 * this devolves to tlb_flush_page.
821 */
822 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
823 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
824 return;
825 }
826 /* If no page bits are significant, this devolves to tlb_flush. */
827 if (bits < TARGET_PAGE_BITS) {
828 tlb_flush_by_mmuidx(cpu, idxmap);
829 return;
830 }
831
832 /* This should already be page aligned */
833 d.addr = addr & TARGET_PAGE_MASK;
834 d.len = len;
835 d.idxmap = idxmap;
836 d.bits = bits;
837
838 if (qemu_cpu_is_self(cpu)) {
839 tlb_flush_range_by_mmuidx_async_0(cpu, d);
840 } else {
841 /* Otherwise allocate a structure, freed by the worker. */
842 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
843 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
844 RUN_ON_CPU_HOST_PTR(p));
845 }
846 }
847
848 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
849 uint16_t idxmap, unsigned bits)
850 {
851 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
852 }
853
854 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
855 target_ulong addr, target_ulong len,
856 uint16_t idxmap, unsigned bits)
857 {
858 TLBFlushRangeData d;
859 CPUState *dst_cpu;
860
861 /*
862 * If all bits are significant, and len is small,
863 * this devolves to tlb_flush_page.
864 */
865 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
866 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
867 return;
868 }
869 /* If no page bits are significant, this devolves to tlb_flush. */
870 if (bits < TARGET_PAGE_BITS) {
871 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
872 return;
873 }
874
875 /* This should already be page aligned */
876 d.addr = addr & TARGET_PAGE_MASK;
877 d.len = len;
878 d.idxmap = idxmap;
879 d.bits = bits;
880
881 /* Allocate a separate data block for each destination cpu. */
882 CPU_FOREACH(dst_cpu) {
883 if (dst_cpu != src_cpu) {
884 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
885 async_run_on_cpu(dst_cpu,
886 tlb_flush_range_by_mmuidx_async_1,
887 RUN_ON_CPU_HOST_PTR(p));
888 }
889 }
890
891 tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
892 }
893
894 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
895 target_ulong addr,
896 uint16_t idxmap, unsigned bits)
897 {
898 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
899 idxmap, bits);
900 }
901
902 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
903 target_ulong addr,
904 target_ulong len,
905 uint16_t idxmap,
906 unsigned bits)
907 {
908 TLBFlushRangeData d, *p;
909 CPUState *dst_cpu;
910
911 /*
912 * If all bits are significant, and len is small,
913 * this devolves to tlb_flush_page.
914 */
915 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
916 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
917 return;
918 }
919 /* If no page bits are significant, this devolves to tlb_flush. */
920 if (bits < TARGET_PAGE_BITS) {
921 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
922 return;
923 }
924
925 /* This should already be page aligned */
926 d.addr = addr & TARGET_PAGE_MASK;
927 d.len = len;
928 d.idxmap = idxmap;
929 d.bits = bits;
930
931 /* Allocate a separate data block for each destination cpu. */
932 CPU_FOREACH(dst_cpu) {
933 if (dst_cpu != src_cpu) {
934 p = g_memdup(&d, sizeof(d));
935 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
936 RUN_ON_CPU_HOST_PTR(p));
937 }
938 }
939
940 p = g_memdup(&d, sizeof(d));
941 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
942 RUN_ON_CPU_HOST_PTR(p));
943 }
944
945 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
946 target_ulong addr,
947 uint16_t idxmap,
948 unsigned bits)
949 {
950 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
951 idxmap, bits);
952 }
953
954 /* update the TLBs so that writes to code in the physical page 'ram_addr'
955    can be detected */
956 void tlb_protect_code(ram_addr_t ram_addr)
957 {
958 cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
959 TARGET_PAGE_SIZE,
960 DIRTY_MEMORY_CODE);
961 }
962
963 /* update the TLB so that writes in physical page 'ram_addr' are no longer
964    tested for self modifying code */
965 void tlb_unprotect_code(ram_addr_t ram_addr)
966 {
967 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
968 }
969
970
971 /*
972 * Dirty write flag handling
973 *
974 * When the TCG code writes to a location it looks up the address in
975 * the TLB and uses that data to compute the final address. If any of
976 * the lower bits of the address are set then the slow path is forced.
977 * There are a number of reasons to do this but for normal RAM the
978 * most usual is detecting writes to code regions which may invalidate
979 * generated code.
980 *
981 * Other vCPUs might be reading their TLBs during guest execution, so we update
982 * te->addr_write with qatomic_set. We don't need to worry about this for
983 * oversized guests as MTTCG is disabled for them.
984 *
985 * Called with tlb_c.lock held.
986 */
987 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
988 uintptr_t start, uintptr_t length)
989 {
990 uintptr_t addr = tlb_entry->addr_write;
991
992 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
993 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
994 addr &= TARGET_PAGE_MASK;
995 addr += tlb_entry->addend;
996 if ((addr - start) < length) {
997 #if TCG_OVERSIZED_GUEST
998 tlb_entry->addr_write |= TLB_NOTDIRTY;
999 #else
1000 qatomic_set(&tlb_entry->addr_write,
1001 tlb_entry->addr_write | TLB_NOTDIRTY);
1002 #endif
1003 }
1004 }
1005 }
1006
1007 /*
1008 * Called with tlb_c.lock held.
1009 * Called only from the vCPU context, i.e. the TLB's owner thread.
1010 */
1011 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1012 {
1013 *d = *s;
1014 }
1015
1016 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1017 * the target vCPU).
1018 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1019 * thing actually updated is the target TLB entry ->addr_write flags.
1020 */
1021 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1022 {
1023 CPUArchState *env;
1024
1025 int mmu_idx;
1026
1027 env = cpu->env_ptr;
1028 qemu_spin_lock(&env_tlb(env)->c.lock);
1029 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1030 unsigned int i;
1031 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1032
1033 for (i = 0; i < n; i++) {
1034 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1035 start1, length);
1036 }
1037
1038 for (i = 0; i < CPU_VTLB_SIZE; i++) {
1039 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1040 start1, length);
1041 }
1042 }
1043 qemu_spin_unlock(&env_tlb(env)->c.lock);
1044 }
1045
1046 /* Called with tlb_c.lock held */
1047 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1048 target_ulong vaddr)
1049 {
1050 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1051 tlb_entry->addr_write = vaddr;
1052 }
1053 }
1054
1055 /* update the TLB corresponding to virtual page vaddr
1056 so that it is no longer dirty */
1057 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1058 {
1059 CPUArchState *env = cpu->env_ptr;
1060 int mmu_idx;
1061
1062 assert_cpu_is_self(cpu);
1063
1064 vaddr &= TARGET_PAGE_MASK;
1065 qemu_spin_lock(&env_tlb(env)->c.lock);
1066 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1067 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1068 }
1069
1070 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1071 int k;
1072 for (k = 0; k < CPU_VTLB_SIZE; k++) {
1073 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1074 }
1075 }
1076 qemu_spin_unlock(&env_tlb(env)->c.lock);
1077 }
1078
1079 /* Our TLB does not support large pages, so remember the area covered by
1080 large pages and trigger a full TLB flush if these are invalidated. */
1081 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1082 target_ulong vaddr, target_ulong size)
1083 {
1084 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1085 target_ulong lp_mask = ~(size - 1);
1086
1087 if (lp_addr == (target_ulong)-1) {
1088 /* No previous large page. */
1089 lp_addr = vaddr;
1090 } else {
1091 /* Extend the existing region to include the new page.
1092 This is a compromise between unnecessary flushes and
1093 the cost of maintaining a full variable size TLB. */
1094 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1095 while (((lp_addr ^ vaddr) & lp_mask) != 0) {
1096 lp_mask <<= 1;
1097 }
1098 }
1099 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1100 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1101 }
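/*
 * Example of the mask-widening loop above: suppose the recorded large-page
 * region is the 2 MiB page at 0x00200000 (lp_mask == 0xffe00000 for a
 * 32-bit target_ulong) and a new 2 MiB page at 0x00600000 is added.
 * lp_addr ^ vaddr == 0x00400000, so lp_mask is shifted left twice to
 * 0xff800000 and the recorded region becomes the 8 MiB block at 0x0,
 * which covers both pages at the cost of a wider full-flush trigger.
 */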
1102
1103 /*
1104 * Add a new TLB entry. At most one entry for a given virtual address
1105 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1106 * supplied size is only used by tlb_flush_page.
1107 *
1108 * Called from TCG-generated code, which is under an RCU read-side
1109 * critical section.
1110 */
1111 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1112 target_ulong vaddr, CPUTLBEntryFull *full)
1113 {
1114 CPUArchState *env = cpu->env_ptr;
1115 CPUTLB *tlb = env_tlb(env);
1116 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1117 MemoryRegionSection *section;
1118 unsigned int index;
1119 target_ulong address;
1120 target_ulong write_address;
1121 uintptr_t addend;
1122 CPUTLBEntry *te, tn;
1123 hwaddr iotlb, xlat, sz, paddr_page;
1124 target_ulong vaddr_page;
1125 int asidx, wp_flags, prot;
1126 bool is_ram, is_romd;
1127
1128 assert_cpu_is_self(cpu);
1129
1130 if (full->lg_page_size <= TARGET_PAGE_BITS) {
1131 sz = TARGET_PAGE_SIZE;
1132 } else {
1133 sz = (hwaddr)1 << full->lg_page_size;
1134 tlb_add_large_page(env, mmu_idx, vaddr, sz);
1135 }
1136 vaddr_page = vaddr & TARGET_PAGE_MASK;
1137 paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1138
1139 prot = full->prot;
1140 asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1141 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1142 &xlat, &sz, full->attrs, &prot);
1143 assert(sz >= TARGET_PAGE_SIZE);
1144
1145 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1146 " prot=%x idx=%d\n",
1147 vaddr, full->phys_addr, prot, mmu_idx);
1148
1149 address = vaddr_page;
1150 if (full->lg_page_size < TARGET_PAGE_BITS) {
1151 /* Repeat the MMU check and TLB fill on every access. */
1152 address |= TLB_INVALID_MASK;
1153 }
1154 if (full->attrs.byte_swap) {
1155 address |= TLB_BSWAP;
1156 }
1157
1158 is_ram = memory_region_is_ram(section->mr);
1159 is_romd = memory_region_is_romd(section->mr);
1160
1161 if (is_ram || is_romd) {
1162 /* RAM and ROMD both have associated host memory. */
1163 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1164 } else {
1165 /* I/O does not; force the host address to NULL. */
1166 addend = 0;
1167 }
1168
1169 write_address = address;
1170 if (is_ram) {
1171 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1172 /*
1173 * Computing is_clean is expensive; avoid all that unless
1174 * the page is actually writable.
1175 */
1176 if (prot & PAGE_WRITE) {
1177 if (section->readonly) {
1178 write_address |= TLB_DISCARD_WRITE;
1179 } else if (cpu_physical_memory_is_clean(iotlb)) {
1180 write_address |= TLB_NOTDIRTY;
1181 }
1182 }
1183 } else {
1184 /* I/O or ROMD */
1185 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1186 /*
1187 * Writes to romd devices must go through MMIO to enable write.
1188 * Reads to romd devices go through the ram_ptr found above,
1189 * but of course reads to I/O must go through MMIO.
1190 */
1191 write_address |= TLB_MMIO;
1192 if (!is_romd) {
1193 address = write_address;
1194 }
1195 }
1196
1197 wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
1198 TARGET_PAGE_SIZE);
1199
1200 index = tlb_index(env, mmu_idx, vaddr_page);
1201 te = tlb_entry(env, mmu_idx, vaddr_page);
1202
1203 /*
1204 * Hold the TLB lock for the rest of the function. We could acquire/release
1205 * the lock several times in the function, but it is faster to amortize the
1206 * acquisition cost by acquiring it just once. Note that this leads to
1207 * a longer critical section, but this is not a concern since the TLB lock
1208 * is unlikely to be contended.
1209 */
1210 qemu_spin_lock(&tlb->c.lock);
1211
1212 /* Note that the tlb is no longer clean. */
1213 tlb->c.dirty |= 1 << mmu_idx;
1214
1215 /* Make sure there's no cached translation for the new page. */
1216 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
1217
1218 /*
1219 * Only evict the old entry to the victim tlb if it's for a
1220 * different page; otherwise just overwrite the stale data.
1221 */
1222 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1223 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1224 CPUTLBEntry *tv = &desc->vtable[vidx];
1225
1226 /* Evict the old entry into the victim tlb. */
1227 copy_tlb_helper_locked(tv, te);
1228 desc->vfulltlb[vidx] = desc->fulltlb[index];
1229 tlb_n_used_entries_dec(env, mmu_idx);
1230 }
1231
1232 /* refill the tlb */
1233 /*
1234 * At this point iotlb contains a physical section number in the lower
1235 * TARGET_PAGE_BITS, and either
1236 * + the ram_addr_t of the page base of the target RAM (RAM)
1237 * + the offset within section->mr of the page base (I/O, ROMD)
1238 * We subtract the vaddr_page (which is page aligned and thus won't
1239 * disturb the low bits) to give an offset which can be added to the
1240 * (non-page-aligned) vaddr of the eventual memory access to get
1241 * the MemoryRegion offset for the access. Note that the vaddr we
1242 * subtract here is that of the page base, and not the same as the
1243 * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1244 */
1245 desc->fulltlb[index] = *full;
1246 desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
1247 desc->fulltlb[index].phys_addr = paddr_page;
1248 desc->fulltlb[index].prot = prot;
1249
1250 /* Now calculate the new entry */
1251 tn.addend = addend - vaddr_page;
1252 if (prot & PAGE_READ) {
1253 tn.addr_read = address;
1254 if (wp_flags & BP_MEM_READ) {
1255 tn.addr_read |= TLB_WATCHPOINT;
1256 }
1257 } else {
1258 tn.addr_read = -1;
1259 }
1260
1261 if (prot & PAGE_EXEC) {
1262 tn.addr_code = address;
1263 } else {
1264 tn.addr_code = -1;
1265 }
1266
1267 tn.addr_write = -1;
1268 if (prot & PAGE_WRITE) {
1269 tn.addr_write = write_address;
1270 if (prot & PAGE_WRITE_INV) {
1271 tn.addr_write |= TLB_INVALID_MASK;
1272 }
1273 if (wp_flags & BP_MEM_WRITE) {
1274 tn.addr_write |= TLB_WATCHPOINT;
1275 }
1276 }
1277
1278 copy_tlb_helper_locked(te, &tn);
1279 tlb_n_used_entries_inc(env, mmu_idx);
1280 qemu_spin_unlock(&tlb->c.lock);
1281 }
1282
1283 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
1284 hwaddr paddr, MemTxAttrs attrs, int prot,
1285 int mmu_idx, target_ulong size)
1286 {
1287 CPUTLBEntryFull full = {
1288 .phys_addr = paddr,
1289 .attrs = attrs,
1290 .prot = prot,
1291 .lg_page_size = ctz64(size)
1292 };
1293
1294 assert(is_power_of_2(size));
1295 tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
1296 }
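/*
 * Note the lg_page_size encoding used above: the caller passes a
 * power-of-two size and ctz64() converts it to a log2, e.g. a 4 KiB page
 * gives lg_page_size == 12 and a 2 MiB page gives lg_page_size == 21.
 */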
1297
1298 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1299 hwaddr paddr, int prot,
1300 int mmu_idx, target_ulong size)
1301 {
1302 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1303 prot, mmu_idx, size);
1304 }
1305
1306 /*
1307 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1308 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1309 * be discarded and looked up again (e.g. via tlb_entry()).
1310 */
1311 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1312 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1313 {
1314 bool ok;
1315
1316 /*
1317 * This is not a probe, so only valid return is success; failure
1318 * should result in exception + longjmp to the cpu loop.
1319 */
1320 ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1321 access_type, mmu_idx, false, retaddr);
1322 assert(ok);
1323 }
1324
1325 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1326 MMUAccessType access_type,
1327 int mmu_idx, uintptr_t retaddr)
1328 {
1329 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1330 mmu_idx, retaddr);
1331 }
1332
1333 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1334 vaddr addr, unsigned size,
1335 MMUAccessType access_type,
1336 int mmu_idx, MemTxAttrs attrs,
1337 MemTxResult response,
1338 uintptr_t retaddr)
1339 {
1340 CPUClass *cc = CPU_GET_CLASS(cpu);
1341
1342 if (!cpu->ignore_memory_transaction_failures &&
1343 cc->tcg_ops->do_transaction_failed) {
1344 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1345 access_type, mmu_idx, attrs,
1346 response, retaddr);
1347 }
1348 }
1349
1350 static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1351 int mmu_idx, target_ulong addr, uintptr_t retaddr,
1352 MMUAccessType access_type, MemOp op)
1353 {
1354 CPUState *cpu = env_cpu(env);
1355 hwaddr mr_offset;
1356 MemoryRegionSection *section;
1357 MemoryRegion *mr;
1358 uint64_t val;
1359 bool locked = false;
1360 MemTxResult r;
1361
1362 section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1363 mr = section->mr;
1364 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1365 cpu->mem_io_pc = retaddr;
1366 if (!cpu->can_do_io) {
1367 cpu_io_recompile(cpu, retaddr);
1368 }
1369
1370 if (!qemu_mutex_iothread_locked()) {
1371 qemu_mutex_lock_iothread();
1372 locked = true;
1373 }
1374 r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
1375 if (r != MEMTX_OK) {
1376 hwaddr physaddr = mr_offset +
1377 section->offset_within_address_space -
1378 section->offset_within_region;
1379
1380 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1381 mmu_idx, full->attrs, r, retaddr);
1382 }
1383 if (locked) {
1384 qemu_mutex_unlock_iothread();
1385 }
1386
1387 return val;
1388 }
1389
1390 /*
1391 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
1392 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
1393 * because of the side effect of io_writex changing memory layout.
1394 */
1395 static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
1396 hwaddr mr_offset)
1397 {
1398 #ifdef CONFIG_PLUGIN
1399 SavedIOTLB *saved = &cs->saved_iotlb;
1400 saved->section = section;
1401 saved->mr_offset = mr_offset;
1402 #endif
1403 }
1404
1405 static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1406 int mmu_idx, uint64_t val, target_ulong addr,
1407 uintptr_t retaddr, MemOp op)
1408 {
1409 CPUState *cpu = env_cpu(env);
1410 hwaddr mr_offset;
1411 MemoryRegionSection *section;
1412 MemoryRegion *mr;
1413 bool locked = false;
1414 MemTxResult r;
1415
1416 section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1417 mr = section->mr;
1418 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1419 if (!cpu->can_do_io) {
1420 cpu_io_recompile(cpu, retaddr);
1421 }
1422 cpu->mem_io_pc = retaddr;
1423
1424 /*
1425 * The memory_region_dispatch may trigger a flush/resize
1426 * so for plugins we save the iotlb_data just in case.
1427 */
1428 save_iotlb_data(cpu, section, mr_offset);
1429
1430 if (!qemu_mutex_iothread_locked()) {
1431 qemu_mutex_lock_iothread();
1432 locked = true;
1433 }
1434 r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
1435 if (r != MEMTX_OK) {
1436 hwaddr physaddr = mr_offset +
1437 section->offset_within_address_space -
1438 section->offset_within_region;
1439
1440 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1441 MMU_DATA_STORE, mmu_idx, full->attrs, r,
1442 retaddr);
1443 }
1444 if (locked) {
1445 qemu_mutex_unlock_iothread();
1446 }
1447 }
1448
1449 static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
1450 {
1451 #if TCG_OVERSIZED_GUEST
1452 return *(target_ulong *)((uintptr_t)entry + ofs);
1453 #else
1454 /* ofs might correspond to .addr_write, so use qatomic_read */
1455 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
1456 #endif
1457 }
1458
1459 /* Return true if ADDR is present in the victim tlb, and has been copied
1460 back to the main tlb. */
1461 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1462 size_t elt_ofs, target_ulong page)
1463 {
1464 size_t vidx;
1465
1466 assert_cpu_is_self(env_cpu(env));
1467 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1468 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1469 target_ulong cmp;
1470
1471 /* elt_ofs might correspond to .addr_write, so use qatomic_read */
1472 #if TCG_OVERSIZED_GUEST
1473 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1474 #else
1475 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1476 #endif
1477
1478 if (cmp == page) {
1479 /* Found entry in victim tlb, swap tlb and iotlb. */
1480 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1481
1482 qemu_spin_lock(&env_tlb(env)->c.lock);
1483 copy_tlb_helper_locked(&tmptlb, tlb);
1484 copy_tlb_helper_locked(tlb, vtlb);
1485 copy_tlb_helper_locked(vtlb, &tmptlb);
1486 qemu_spin_unlock(&env_tlb(env)->c.lock);
1487
1488 CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1489 CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
1490 CPUTLBEntryFull tmpf;
1491 tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1492 return true;
1493 }
1494 }
1495 return false;
1496 }
1497
1498 /* Macro to call the above, with local variables from the use context. */
1499 #define VICTIM_TLB_HIT(TY, ADDR) \
1500 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1501 (ADDR) & TARGET_PAGE_MASK)
1502
1503 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1504 CPUTLBEntryFull *full, uintptr_t retaddr)
1505 {
1506 ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1507
1508 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1509
1510 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1511 struct page_collection *pages
1512 = page_collection_lock(ram_addr, ram_addr + size);
1513 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1514 page_collection_unlock(pages);
1515 }
1516
1517 /*
1518 * Set both VGA and migration bits for simplicity and to remove
1519 * the notdirty callback faster.
1520 */
1521 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1522
1523 /* We remove the notdirty callback only if the code has been flushed. */
1524 if (!cpu_physical_memory_is_clean(ram_addr)) {
1525 trace_memory_notdirty_set_dirty(mem_vaddr);
1526 tlb_set_dirty(cpu, mem_vaddr);
1527 }
1528 }
1529
1530 static int probe_access_internal(CPUArchState *env, target_ulong addr,
1531 int fault_size, MMUAccessType access_type,
1532 int mmu_idx, bool nonfault,
1533 void **phost, CPUTLBEntryFull **pfull,
1534 uintptr_t retaddr)
1535 {
1536 uintptr_t index = tlb_index(env, mmu_idx, addr);
1537 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1538 target_ulong tlb_addr, page_addr;
1539 size_t elt_ofs;
1540 int flags;
1541
1542 switch (access_type) {
1543 case MMU_DATA_LOAD:
1544 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1545 break;
1546 case MMU_DATA_STORE:
1547 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1548 break;
1549 case MMU_INST_FETCH:
1550 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1551 break;
1552 default:
1553 g_assert_not_reached();
1554 }
1555 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1556
1557 flags = TLB_FLAGS_MASK;
1558 page_addr = addr & TARGET_PAGE_MASK;
1559 if (!tlb_hit_page(tlb_addr, page_addr)) {
1560 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1561 CPUState *cs = env_cpu(env);
1562
1563 if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1564 mmu_idx, nonfault, retaddr)) {
1565 /* Non-faulting page table read failed. */
1566 *phost = NULL;
1567 *pfull = NULL;
1568 return TLB_INVALID_MASK;
1569 }
1570
1571 /* TLB resize via tlb_fill may have moved the entry. */
1572 index = tlb_index(env, mmu_idx, addr);
1573 entry = tlb_entry(env, mmu_idx, addr);
1574
1575 /*
1576 * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1577 * to force the next access through tlb_fill. We've just
1578 * called tlb_fill, so we know that this entry *is* valid.
1579 */
1580 flags &= ~TLB_INVALID_MASK;
1581 }
1582 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1583 }
1584 flags &= tlb_addr;
1585
1586 *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1587
1588 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
1589 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1590 *phost = NULL;
1591 return TLB_MMIO;
1592 }
1593
1594 /* Everything else is RAM. */
1595 *phost = (void *)((uintptr_t)addr + entry->addend);
1596 return flags;
1597 }
1598
1599 int probe_access_full(CPUArchState *env, target_ulong addr,
1600 MMUAccessType access_type, int mmu_idx,
1601 bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1602 uintptr_t retaddr)
1603 {
1604 int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1605 nonfault, phost, pfull, retaddr);
1606
1607 /* Handle clean RAM pages. */
1608 if (unlikely(flags & TLB_NOTDIRTY)) {
1609 notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1610 flags &= ~TLB_NOTDIRTY;
1611 }
1612
1613 return flags;
1614 }
1615
1616 int probe_access_flags(CPUArchState *env, target_ulong addr,
1617 MMUAccessType access_type, int mmu_idx,
1618 bool nonfault, void **phost, uintptr_t retaddr)
1619 {
1620 CPUTLBEntryFull *full;
1621
1622 return probe_access_full(env, addr, access_type, mmu_idx,
1623 nonfault, phost, &full, retaddr);
1624 }
1625
1626 void *probe_access(CPUArchState *env, target_ulong addr, int size,
1627 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1628 {
1629 CPUTLBEntryFull *full;
1630 void *host;
1631 int flags;
1632
1633 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1634
1635 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1636 false, &host, &full, retaddr);
1637
1638 /* Per the interface, size == 0 merely faults the access. */
1639 if (size == 0) {
1640 return NULL;
1641 }
1642
1643 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1644 /* Handle watchpoints. */
1645 if (flags & TLB_WATCHPOINT) {
1646 int wp_access = (access_type == MMU_DATA_STORE
1647 ? BP_MEM_WRITE : BP_MEM_READ);
1648 cpu_check_watchpoint(env_cpu(env), addr, size,
1649 full->attrs, wp_access, retaddr);
1650 }
1651
1652 /* Handle clean RAM pages. */
1653 if (flags & TLB_NOTDIRTY) {
1654 notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1655 }
1656 }
1657
1658 return host;
1659 }
1660
1661 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1662 MMUAccessType access_type, int mmu_idx)
1663 {
1664 CPUTLBEntryFull *full;
1665 void *host;
1666 int flags;
1667
1668 flags = probe_access_internal(env, addr, 0, access_type,
1669 mmu_idx, true, &host, &full, 0);
1670
1671     /* No combination of flags is expected by the caller. */
1672 return flags ? NULL : host;
1673 }
1674
1675 /*
1676 * Return a ram_addr_t for the virtual address for execution.
1677 *
1678 * Return -1 if we can't translate and execute from an entire page
1679 * of RAM. This will force us to execute by loading and translating
1680 * one insn at a time, without caching.
1681 *
1682 * NOTE: This function will trigger an exception if the page is
1683 * not executable.
1684 */
1685 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1686 void **hostp)
1687 {
1688 CPUTLBEntryFull *full;
1689 void *p;
1690
1691 (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1692 cpu_mmu_index(env, true), false, &p, &full, 0);
1693 if (p == NULL) {
1694 return -1;
1695 }
1696 if (hostp) {
1697 *hostp = p;
1698 }
1699 return qemu_ram_addr_from_host_nofail(p);
1700 }
1701
1702 #ifdef CONFIG_PLUGIN
1703 /*
1704 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1705 * This should be a hot path as we will have just looked this path up
1706 * in the softmmu lookup code (or helper). We don't handle re-fills or
1707 * checking the victim table. This is purely informational.
1708 *
1709 * This almost never fails as the memory access being instrumented
1710 * should have just filled the TLB. The one corner case is io_writex
1711 * which can cause TLB flushes and potential resizing of the TLBs
1712 * losing the information we need. In those cases we need to recover
1713 * data from a copy of the CPUTLBEntryFull. As long as this always occurs
1714 * from the same thread (which a mem callback will be) this is safe.
1715 */
1716
1717 bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1718 bool is_store, struct qemu_plugin_hwaddr *data)
1719 {
1720 CPUArchState *env = cpu->env_ptr;
1721 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1722 uintptr_t index = tlb_index(env, mmu_idx, addr);
1723 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1724
1725 if (likely(tlb_hit(tlb_addr, addr))) {
1726 /* We must have an iotlb entry for MMIO */
1727 if (tlb_addr & TLB_MMIO) {
1728 CPUTLBEntryFull *full;
1729 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1730 data->is_io = true;
1731 data->v.io.section =
1732 iotlb_to_section(cpu, full->xlat_section, full->attrs);
1733 data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1734 } else {
1735 data->is_io = false;
1736 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1737 }
1738 return true;
1739 } else {
1740 SavedIOTLB *saved = &cpu->saved_iotlb;
1741 data->is_io = true;
1742 data->v.io.section = saved->section;
1743 data->v.io.offset = saved->mr_offset;
1744 return true;
1745 }
1746 }
1747
1748 #endif
1749
1750 /*
1751 * Probe for an atomic operation. Do not allow unaligned operations,
1752 * or io operations to proceed. Return the host address.
1753 *
1754 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
1755 */
1756 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1757 MemOpIdx oi, int size, int prot,
1758 uintptr_t retaddr)
1759 {
1760 uintptr_t mmu_idx = get_mmuidx(oi);
1761 MemOp mop = get_memop(oi);
1762 int a_bits = get_alignment_bits(mop);
1763 uintptr_t index;
1764 CPUTLBEntry *tlbe;
1765 target_ulong tlb_addr;
1766 void *hostaddr;
1767
1768 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1769
1770 /* Adjust the given return address. */
1771 retaddr -= GETPC_ADJ;
1772
1773 /* Enforce guest required alignment. */
1774 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1775 /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1776 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1777 mmu_idx, retaddr);
1778 }
1779
1780 /* Enforce qemu required alignment. */
1781 if (unlikely(addr & (size - 1))) {
1782 /* We get here if guest alignment was not requested,
1783 or was not enforced by cpu_unaligned_access above.
1784 We might widen the access and emulate, but for now
1785 mark an exception and exit the cpu loop. */
1786 goto stop_the_world;
1787 }
1788
1789 index = tlb_index(env, mmu_idx, addr);
1790 tlbe = tlb_entry(env, mmu_idx, addr);
1791
1792 /* Check TLB entry and enforce page permissions. */
1793 if (prot & PAGE_WRITE) {
1794 tlb_addr = tlb_addr_write(tlbe);
1795 if (!tlb_hit(tlb_addr, addr)) {
1796 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1797 tlb_fill(env_cpu(env), addr, size,
1798 MMU_DATA_STORE, mmu_idx, retaddr);
1799 index = tlb_index(env, mmu_idx, addr);
1800 tlbe = tlb_entry(env, mmu_idx, addr);
1801 }
1802 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1803 }
1804
1805 /* Let the guest notice RMW on a write-only page. */
1806 if ((prot & PAGE_READ) &&
1807 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1808 tlb_fill(env_cpu(env), addr, size,
1809 MMU_DATA_LOAD, mmu_idx, retaddr);
1810 /*
1811 * Since we don't support reads and writes to different addresses,
1812 * and we do have the proper page loaded for write, this shouldn't
1813 * ever return. But just in case, handle via stop-the-world.
1814 */
1815 goto stop_the_world;
1816 }
1817 } else /* if (prot & PAGE_READ) */ {
1818 tlb_addr = tlbe->addr_read;
1819 if (!tlb_hit(tlb_addr, addr)) {
1820 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1821 tlb_fill(env_cpu(env), addr, size,
1822 MMU_DATA_LOAD, mmu_idx, retaddr);
1823 index = tlb_index(env, mmu_idx, addr);
1824 tlbe = tlb_entry(env, mmu_idx, addr);
1825 }
1826 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
1827 }
1828 }
1829
1830 /* Notice an IO access or a needs-MMU-lookup access */
1831 if (unlikely(tlb_addr & TLB_MMIO)) {
1832 /* There's really nothing that can be done to
1833 support this apart from stop-the-world. */
1834 goto stop_the_world;
1835 }
1836
1837 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1838
1839 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1840 notdirty_write(env_cpu(env), addr, size,
1841 &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
1842 }
1843
1844 return hostaddr;
1845
1846 stop_the_world:
1847 cpu_loop_exit_atomic(env_cpu(env), retaddr);
1848 }
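
/*
 * Illustrative sketch (editorial, not part of the original file): the rough
 * shape of a 32-bit cmpxchg helper as generated from atomic_template.h,
 * with tracing and cleanup omitted.  It resolves a host address via
 * atomic_mmu_lookup() and performs the operation on host memory.
 */
static uint32_t example_atomic_cmpxchgl(CPUArchState *env, target_ulong addr,
                                        uint32_t cmpv, uint32_t newv,
                                        MemOpIdx oi, uintptr_t retaddr)
{
    uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
                                        PAGE_READ | PAGE_WRITE, retaddr);

    return qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
}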
1849
1850 /*
1851 * Verify that we have passed the correct MemOp to the correct function.
1852 *
1853 * In the case of the helper_*_mmu functions, we will have done this by
1854 * using the MemOp to look up the helper during code generation.
1855 *
1856 * In the case of the cpu_*_mmu functions, this is up to the caller.
1857 * We could present one function to target code, and dispatch based on
1858 * the MemOp, but so far we have worked hard to avoid an indirect function
1859 * call along the memory path.
1860 */
1861 static void validate_memop(MemOpIdx oi, MemOp expected)
1862 {
1863 #ifdef CONFIG_DEBUG_TCG
1864 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1865 assert(have == expected);
1866 #endif
1867 }
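
/*
 * Illustrative sketch (editorial, not part of the original file): the
 * MemOpIdx checked by validate_memop() is built by the caller from a MemOp
 * plus an mmu index, e.g.:
 */
static inline MemOpIdx example_make_le_u32_oi(CPUArchState *env)
{
    /* MO_LEUL: 32-bit little-endian unsigned; false = data (not ifetch). */
    return make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
}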
1868
1869 /*
1870 * Load Helpers
1871 *
1872 * We support two different access types. SOFTMMU_CODE_ACCESS is
1873 * specifically for reading instructions from system memory. It is
1874 * called by the translation loop and in some helpers where the code
1875 * is disassembled. It shouldn't be called directly by guest code.
1876 */
1877
1878 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1879 MemOpIdx oi, uintptr_t retaddr);
1880
1881 static inline uint64_t QEMU_ALWAYS_INLINE
1882 load_memop(const void *haddr, MemOp op)
1883 {
1884 switch (op) {
1885 case MO_UB:
1886 return ldub_p(haddr);
1887 case MO_BEUW:
1888 return lduw_be_p(haddr);
1889 case MO_LEUW:
1890 return lduw_le_p(haddr);
1891 case MO_BEUL:
1892 return (uint32_t)ldl_be_p(haddr);
1893 case MO_LEUL:
1894 return (uint32_t)ldl_le_p(haddr);
1895 case MO_BEUQ:
1896 return ldq_be_p(haddr);
1897 case MO_LEUQ:
1898 return ldq_le_p(haddr);
1899 default:
1900 qemu_build_not_reached();
1901 }
1902 }
1903
1904 static inline uint64_t QEMU_ALWAYS_INLINE
1905 load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1906 uintptr_t retaddr, MemOp op, bool code_read,
1907 FullLoadHelper *full_load)
1908 {
1909 const size_t tlb_off = code_read ?
1910 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1911 const MMUAccessType access_type =
1912 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1913 const unsigned a_bits = get_alignment_bits(get_memop(oi));
1914 const size_t size = memop_size(op);
1915 uintptr_t mmu_idx = get_mmuidx(oi);
1916 uintptr_t index;
1917 CPUTLBEntry *entry;
1918 target_ulong tlb_addr;
1919 void *haddr;
1920 uint64_t res;
1921
1922 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1923
1924 /* Handle CPU specific unaligned behaviour */
1925 if (addr & ((1 << a_bits) - 1)) {
1926 cpu_unaligned_access(env_cpu(env), addr, access_type,
1927 mmu_idx, retaddr);
1928 }
1929
1930 index = tlb_index(env, mmu_idx, addr);
1931 entry = tlb_entry(env, mmu_idx, addr);
1932 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1933
1934 /* If the TLB entry is for a different page, reload and try again. */
1935 if (!tlb_hit(tlb_addr, addr)) {
1936 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1937 addr & TARGET_PAGE_MASK)) {
1938 tlb_fill(env_cpu(env), addr, size,
1939 access_type, mmu_idx, retaddr);
1940 index = tlb_index(env, mmu_idx, addr);
1941 entry = tlb_entry(env, mmu_idx, addr);
1942 }
1943 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1944 tlb_addr &= ~TLB_INVALID_MASK;
1945 }
1946
1947 /* Handle anything that isn't just a straight memory access. */
1948 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1949 CPUTLBEntryFull *full;
1950 bool need_swap;
1951
1952 /* For anything that is unaligned, recurse through full_load. */
1953 if ((addr & (size - 1)) != 0) {
1954 goto do_unaligned_access;
1955 }
1956
1957 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1958
1959 /* Handle watchpoints. */
1960 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1961 /* On watchpoint hit, this will longjmp out. */
1962 cpu_check_watchpoint(env_cpu(env), addr, size,
1963 full->attrs, BP_MEM_READ, retaddr);
1964 }
1965
1966 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1967
1968 /* Handle I/O access. */
1969 if (likely(tlb_addr & TLB_MMIO)) {
1970 return io_readx(env, full, mmu_idx, addr, retaddr,
1971 access_type, op ^ (need_swap * MO_BSWAP));
1972 }
1973
1974 haddr = (void *)((uintptr_t)addr + entry->addend);
1975
1976 /*
1977 * Keep these two load_memop separate to ensure that the compiler
1978 * is able to fold the entire function to a single instruction.
1979 * There is a build-time assert inside to remind you of this. ;-)
1980 */
1981 if (unlikely(need_swap)) {
1982 return load_memop(haddr, op ^ MO_BSWAP);
1983 }
1984 return load_memop(haddr, op);
1985 }
1986
1987 /* Handle slow unaligned access (it spans two pages or IO). */
1988 if (size > 1
1989 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1990 >= TARGET_PAGE_SIZE)) {
1991 target_ulong addr1, addr2;
1992 uint64_t r1, r2;
1993 unsigned shift;
1994 do_unaligned_access:
1995 addr1 = addr & ~((target_ulong)size - 1);
1996 addr2 = addr1 + size;
1997 r1 = full_load(env, addr1, oi, retaddr);
1998 r2 = full_load(env, addr2, oi, retaddr);
1999 shift = (addr & (size - 1)) * 8;
2000
2001 if (memop_big_endian(op)) {
2002 /* Big-endian combine. */
2003 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2004 } else {
2005 /* Little-endian combine. */
2006 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2007 }
2008 return res & MAKE_64BIT_MASK(0, size * 8);
2009 }
2010
2011 haddr = (void *)((uintptr_t)addr + entry->addend);
2012 return load_memop(haddr, op);
2013 }
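
/*
 * Worked example (editorial sketch, not part of the original file): for a
 * 4-byte little-endian load with (addr & 3) == 2 that crosses a page,
 * addr1 = addr - 2, addr2 = addr + 2 and shift = 16, so
 *   res = ((r1 >> 16) | (r2 << 16)) & MAKE_64BIT_MASK(0, 32)
 * reassembles exactly the bytes [addr .. addr+3] in guest order.  The
 * helper below restates the little-endian combine; shift is non-zero on
 * this path because an aligned access can never span two pages.
 */
static uint64_t example_le_combine(uint64_t r1, uint64_t r2,
                                   unsigned addr_lo, unsigned size)
{
    unsigned shift = addr_lo * 8;    /* addr_lo = addr & (size - 1) */

    return ((r1 >> shift) | (r2 << (size * 8 - shift)))
           & MAKE_64BIT_MASK(0, size * 8);
}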
2014
2015 /*
2016 * For the benefit of TCG generated code, we want to avoid the
2017 * complication of ABI-specific return type promotion and always
2018 * return a value extended to the register size of the host. This is
2019 * tcg_target_long, except in the case of a 32-bit host and 64-bit
2020 * data, and for that we always have uint64_t.
2021 *
2022 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2023 */
2024
2025 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
2026 MemOpIdx oi, uintptr_t retaddr)
2027 {
2028 validate_memop(oi, MO_UB);
2029 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
2030 }
2031
2032 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
2033 MemOpIdx oi, uintptr_t retaddr)
2034 {
2035 return full_ldub_mmu(env, addr, oi, retaddr);
2036 }
2037
2038 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2039 MemOpIdx oi, uintptr_t retaddr)
2040 {
2041 validate_memop(oi, MO_LEUW);
2042 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2043 full_le_lduw_mmu);
2044 }
2045
2046 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2047 MemOpIdx oi, uintptr_t retaddr)
2048 {
2049 return full_le_lduw_mmu(env, addr, oi, retaddr);
2050 }
2051
2052 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2053 MemOpIdx oi, uintptr_t retaddr)
2054 {
2055 validate_memop(oi, MO_BEUW);
2056 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2057 full_be_lduw_mmu);
2058 }
2059
2060 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2061 MemOpIdx oi, uintptr_t retaddr)
2062 {
2063 return full_be_lduw_mmu(env, addr, oi, retaddr);
2064 }
2065
2066 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2067 MemOpIdx oi, uintptr_t retaddr)
2068 {
2069 validate_memop(oi, MO_LEUL);
2070 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2071 full_le_ldul_mmu);
2072 }
2073
2074 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2075 MemOpIdx oi, uintptr_t retaddr)
2076 {
2077 return full_le_ldul_mmu(env, addr, oi, retaddr);
2078 }
2079
2080 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2081 MemOpIdx oi, uintptr_t retaddr)
2082 {
2083 validate_memop(oi, MO_BEUL);
2084 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2085 full_be_ldul_mmu);
2086 }
2087
2088 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2089 MemOpIdx oi, uintptr_t retaddr)
2090 {
2091 return full_be_ldul_mmu(env, addr, oi, retaddr);
2092 }
2093
2094 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
2095 MemOpIdx oi, uintptr_t retaddr)
2096 {
2097 validate_memop(oi, MO_LEUQ);
2098 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
2099 helper_le_ldq_mmu);
2100 }
2101
2102 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
2103 MemOpIdx oi, uintptr_t retaddr)
2104 {
2105 validate_memop(oi, MO_BEUQ);
2106 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
2107 helper_be_ldq_mmu);
2108 }
2109
2110 /*
2111 * Provide signed versions of the load routines as well. We can of course
2112 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2113 */
2114
2115
2116 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
2117 MemOpIdx oi, uintptr_t retaddr)
2118 {
2119 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2120 }
2121
2122 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
2123 MemOpIdx oi, uintptr_t retaddr)
2124 {
2125 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2126 }
2127
2128 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
2129 MemOpIdx oi, uintptr_t retaddr)
2130 {
2131 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2132 }
2133
2134 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
2135 MemOpIdx oi, uintptr_t retaddr)
2136 {
2137 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2138 }
2139
2140 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
2141 MemOpIdx oi, uintptr_t retaddr)
2142 {
2143 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2144 }
2145
2146 /*
2147 * Load helpers for cpu_ldst.h.
2148 */
2149
2150 static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2151 MemOpIdx oi, uintptr_t retaddr,
2152 FullLoadHelper *full_load)
2153 {
2154 uint64_t ret;
2155
2156 ret = full_load(env, addr, oi, retaddr);
2157 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2158 return ret;
2159 }
2160
2161 uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2162 {
2163 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2164 }
2165
2166 uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2167 MemOpIdx oi, uintptr_t ra)
2168 {
2169 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2170 }
2171
2172 uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2173 MemOpIdx oi, uintptr_t ra)
2174 {
2175 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2176 }
2177
2178 uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2179 MemOpIdx oi, uintptr_t ra)
2180 {
2181 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2182 }
2183
2184 uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2185 MemOpIdx oi, uintptr_t ra)
2186 {
2187 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2188 }
2189
2190 uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2191 MemOpIdx oi, uintptr_t ra)
2192 {
2193 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2194 }
2195
2196 uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2197 MemOpIdx oi, uintptr_t ra)
2198 {
2199 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2200 }
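
/*
 * Illustrative sketch (editorial, not part of the original file): a target
 * helper would typically drive these wrappers like this, packing the MemOp
 * and mmu index once and passing GETPC() so a fault can unwind correctly.
 * The helper name and its mmu_idx argument are assumptions.
 */
static uint32_t example_helper_load32(CPUArchState *env, target_ulong addr,
                                      int mmu_idx)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);

    return cpu_ldl_le_mmu(env, addr, oi, GETPC());
}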
2201
2202 /*
2203 * Store Helpers
2204 */
2205
2206 static inline void QEMU_ALWAYS_INLINE
2207 store_memop(void *haddr, uint64_t val, MemOp op)
2208 {
2209 switch (op) {
2210 case MO_UB:
2211 stb_p(haddr, val);
2212 break;
2213 case MO_BEUW:
2214 stw_be_p(haddr, val);
2215 break;
2216 case MO_LEUW:
2217 stw_le_p(haddr, val);
2218 break;
2219 case MO_BEUL:
2220 stl_be_p(haddr, val);
2221 break;
2222 case MO_LEUL:
2223 stl_le_p(haddr, val);
2224 break;
2225 case MO_BEUQ:
2226 stq_be_p(haddr, val);
2227 break;
2228 case MO_LEUQ:
2229 stq_le_p(haddr, val);
2230 break;
2231 default:
2232 qemu_build_not_reached();
2233 }
2234 }
2235
2236 static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2237 MemOpIdx oi, uintptr_t retaddr);
2238
2239 static void __attribute__((noinline))
2240 store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
2241 uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
2242 bool big_endian)
2243 {
2244 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2245 uintptr_t index, index2;
2246 CPUTLBEntry *entry, *entry2;
2247 target_ulong page1, page2, tlb_addr, tlb_addr2;
2248 MemOpIdx oi;
2249 size_t size2;
2250 int i;
2251
2252 /*
2253 * Ensure the second page is in the TLB. Note that the first page
2254 * is already guaranteed to be filled, and that the second page
2255 * cannot evict the first. An exception to this rule is PAGE_WRITE_INV
2256 * handling: the first page could have evicted itself.
2257 */
2258 page1 = addr & TARGET_PAGE_MASK;
2259 page2 = (addr + size) & TARGET_PAGE_MASK;
2260 size2 = (addr + size) & ~TARGET_PAGE_MASK;
2261 index2 = tlb_index(env, mmu_idx, page2);
2262 entry2 = tlb_entry(env, mmu_idx, page2);
2263
2264 tlb_addr2 = tlb_addr_write(entry2);
2265 if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
2266 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
2267 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2268 mmu_idx, retaddr);
2269 index2 = tlb_index(env, mmu_idx, page2);
2270 entry2 = tlb_entry(env, mmu_idx, page2);
2271 }
2272 tlb_addr2 = tlb_addr_write(entry2);
2273 }
2274
2275 index = tlb_index(env, mmu_idx, addr);
2276 entry = tlb_entry(env, mmu_idx, addr);
2277 tlb_addr = tlb_addr_write(entry);
2278
2279 /*
2280 * Handle watchpoints. Since this may trap, all checks
2281 * must happen before any store.
2282 */
2283 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2284 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
2285 env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
2286 BP_MEM_WRITE, retaddr);
2287 }
2288 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
2289 cpu_check_watchpoint(env_cpu(env), page2, size2,
2290 env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
2291 BP_MEM_WRITE, retaddr);
2292 }
2293
2294 /*
2295 * XXX: not efficient, but simple.
2296 * This loop must go in the forward direction to avoid issues
2297 * with self-modifying code in Windows 64-bit.
2298 */
2299 oi = make_memop_idx(MO_UB, mmu_idx);
2300 if (big_endian) {
2301 for (i = 0; i < size; ++i) {
2302 /* Big-endian extract. */
2303 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2304 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2305 }
2306 } else {
2307 for (i = 0; i < size; ++i) {
2308 /* Little-endian extract. */
2309 uint8_t val8 = val >> (i * 8);
2310 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2311 }
2312 }
2313 }
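
/*
 * Worked example (editorial sketch, not part of the original file): storing
 * the 32-bit value 0x11223344 through the byte loop above.  Big-endian
 * extraction writes 0x11, 0x22, 0x33, 0x44 at addr+0..addr+3; little-endian
 * extraction writes 0x44, 0x33, 0x22, 0x11, i.e. the same bytes a single
 * aligned store of that endianness would have produced.
 */
static uint8_t example_extract_byte(uint64_t val, size_t size, size_t i,
                                    bool big_endian)
{
    /* Mirrors the two extractions in store_helper_unaligned(). */
    return big_endian ? val >> (((size - 1) * 8) - (i * 8))
                      : val >> (i * 8);
}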
2314
2315 static inline void QEMU_ALWAYS_INLINE
2316 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
2317 MemOpIdx oi, uintptr_t retaddr, MemOp op)
2318 {
2319 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2320 const unsigned a_bits = get_alignment_bits(get_memop(oi));
2321 const size_t size = memop_size(op);
2322 uintptr_t mmu_idx = get_mmuidx(oi);
2323 uintptr_t index;
2324 CPUTLBEntry *entry;
2325 target_ulong tlb_addr;
2326 void *haddr;
2327
2328 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
2329
2330 /* Handle CPU specific unaligned behaviour */
2331 if (addr & ((1 << a_bits) - 1)) {
2332 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
2333 mmu_idx, retaddr);
2334 }
2335
2336 index = tlb_index(env, mmu_idx, addr);
2337 entry = tlb_entry(env, mmu_idx, addr);
2338 tlb_addr = tlb_addr_write(entry);
2339
2340 /* If the TLB entry is for a different page, reload and try again. */
2341 if (!tlb_hit(tlb_addr, addr)) {
2342 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2343 addr & TARGET_PAGE_MASK)) {
2344 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
2345 mmu_idx, retaddr);
2346 index = tlb_index(env, mmu_idx, addr);
2347 entry = tlb_entry(env, mmu_idx, addr);
2348 }
2349 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2350 }
2351
2352 /* Handle anything that isn't just a straight memory access. */
2353 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
2354 CPUTLBEntryFull *full;
2355 bool need_swap;
2356
2357 /* For anything that is unaligned, recurse through byte stores. */
2358 if ((addr & (size - 1)) != 0) {
2359 goto do_unaligned_access;
2360 }
2361
2362 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
2363
2364 /* Handle watchpoints. */
2365 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2366 /* On watchpoint hit, this will longjmp out. */
2367 cpu_check_watchpoint(env_cpu(env), addr, size,
2368 full->attrs, BP_MEM_WRITE, retaddr);
2369 }
2370
2371 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
2372
2373 /* Handle I/O access. */
2374 if (tlb_addr & TLB_MMIO) {
2375 io_writex(env, full, mmu_idx, val, addr, retaddr,
2376 op ^ (need_swap * MO_BSWAP));
2377 return;
2378 }
2379
2380 /* Ignore writes to ROM. */
2381 if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
2382 return;
2383 }
2384
2385 /* Handle clean RAM pages. */
2386 if (tlb_addr & TLB_NOTDIRTY) {
2387 notdirty_write(env_cpu(env), addr, size, full, retaddr);
2388 }
2389
2390 haddr = (void *)((uintptr_t)addr + entry->addend);
2391
2392 /*
2393 * Keep these two store_memop separate to ensure that the compiler
2394 * is able to fold the entire function to a single instruction.
2395 * There is a build-time assert inside to remind you of this. ;-)
2396 */
2397 if (unlikely(need_swap)) {
2398 store_memop(haddr, val, op ^ MO_BSWAP);
2399 } else {
2400 store_memop(haddr, val, op);
2401 }
2402 return;
2403 }
2404
2405 /* Handle slow unaligned access (it spans two pages or IO). */
2406 if (size > 1
2407 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
2408 >= TARGET_PAGE_SIZE)) {
2409 do_unaligned_access:
2410 store_helper_unaligned(env, addr, val, retaddr, size,
2411 mmu_idx, memop_big_endian(op));
2412 return;
2413 }
2414
2415 haddr = (void *)((uintptr_t)addr + entry->addend);
2416 store_memop(haddr, val, op);
2417 }
2418
2419 static void __attribute__((noinline))
2420 full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2421 MemOpIdx oi, uintptr_t retaddr)
2422 {
2423 validate_memop(oi, MO_UB);
2424 store_helper(env, addr, val, oi, retaddr, MO_UB);
2425 }
2426
2427 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2428 MemOpIdx oi, uintptr_t retaddr)
2429 {
2430 full_stb_mmu(env, addr, val, oi, retaddr);
2431 }
2432
2433 static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2434 MemOpIdx oi, uintptr_t retaddr)
2435 {
2436 validate_memop(oi, MO_LEUW);
2437 store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2438 }
2439
2440 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2441 MemOpIdx oi, uintptr_t retaddr)
2442 {
2443 full_le_stw_mmu(env, addr, val, oi, retaddr);
2444 }
2445
2446 static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2447 MemOpIdx oi, uintptr_t retaddr)
2448 {
2449 validate_memop(oi, MO_BEUW);
2450 store_helper(env, addr, val, oi, retaddr, MO_BEUW);
2451 }
2452
2453 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2454 MemOpIdx oi, uintptr_t retaddr)
2455 {
2456 full_be_stw_mmu(env, addr, val, oi, retaddr);
2457 }
2458
2459 static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2460 MemOpIdx oi, uintptr_t retaddr)
2461 {
2462 validate_memop(oi, MO_LEUL);
2463 store_helper(env, addr, val, oi, retaddr, MO_LEUL);
2464 }
2465
2466 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2467 MemOpIdx oi, uintptr_t retaddr)
2468 {
2469 full_le_stl_mmu(env, addr, val, oi, retaddr);
2470 }
2471
2472 static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2473 MemOpIdx oi, uintptr_t retaddr)
2474 {
2475 validate_memop(oi, MO_BEUL);
2476 store_helper(env, addr, val, oi, retaddr, MO_BEUL);
2477 }
2478
2479 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2480 MemOpIdx oi, uintptr_t retaddr)
2481 {
2482 full_be_stl_mmu(env, addr, val, oi, retaddr);
2483 }
2484
2485 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2486 MemOpIdx oi, uintptr_t retaddr)
2487 {
2488 validate_memop(oi, MO_LEUQ);
2489 store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
2490 }
2491
2492 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2493 MemOpIdx oi, uintptr_t retaddr)
2494 {
2495 validate_memop(oi, MO_BEUQ);
2496 store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
2497 }
2498
2499 /*
2500 * Store Helpers for cpu_ldst.h
2501 */
2502
2503 typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
2504 uint64_t val, MemOpIdx oi, uintptr_t retaddr);
2505
2506 static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
2507 uint64_t val, MemOpIdx oi, uintptr_t ra,
2508 FullStoreHelper *full_store)
2509 {
2510 full_store(env, addr, val, oi, ra);
2511 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
2512 }
2513
2514 void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2515 MemOpIdx oi, uintptr_t retaddr)
2516 {
2517 cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
2518 }
2519
2520 void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2521 MemOpIdx oi, uintptr_t retaddr)
2522 {
2523 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
2524 }
2525
2526 void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2527 MemOpIdx oi, uintptr_t retaddr)
2528 {
2529 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
2530 }
2531
2532 void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2533 MemOpIdx oi, uintptr_t retaddr)
2534 {
2535 cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
2536 }
2537
2538 void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2539 MemOpIdx oi, uintptr_t retaddr)
2540 {
2541 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
2542 }
2543
2544 void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2545 MemOpIdx oi, uintptr_t retaddr)
2546 {
2547 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
2548 }
2549
2550 void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2551 MemOpIdx oi, uintptr_t retaddr)
2552 {
2553 cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
2554 }
2555
2556 #include "ldst_common.c.inc"
2557
2558 /*
2559 * First set of functions passes in OI and RETADDR.
2560 * This makes them callable from other helpers.
2561 */
2562
2563 #define ATOMIC_NAME(X) \
2564 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2565
2566 #define ATOMIC_MMU_CLEANUP
2567
2568 #include "atomic_common.c.inc"
2569
2570 #define DATA_SIZE 1
2571 #include "atomic_template.h"
2572
2573 #define DATA_SIZE 2
2574 #include "atomic_template.h"
2575
2576 #define DATA_SIZE 4
2577 #include "atomic_template.h"
2578
2579 #ifdef CONFIG_ATOMIC64
2580 #define DATA_SIZE 8
2581 #include "atomic_template.h"
2582 #endif
2583
2584 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
2585 #define DATA_SIZE 16
2586 #include "atomic_template.h"
2587 #endif
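
/*
 * Illustrative sketch (editorial, not part of the original file): assuming
 * the usual suffixes from atomic_template.h (SUFFIX "l" and END "_le"/"_be"
 * for DATA_SIZE 4), ATOMIC_NAME(cmpxchg) expands to names such as
 * cpu_atomic_cmpxchgl_le_mmu, which other helpers can call directly:
 */
static uint32_t example_cmpxchg32_le(CPUArchState *env, target_ulong addr,
                                     uint32_t cmpv, uint32_t newv,
                                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);

    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
}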
2588
2589 /* Code access functions. */
2590
2591 static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
2592 MemOpIdx oi, uintptr_t retaddr)
2593 {
2594 return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
2595 }
2596
2597 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2598 {
2599 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2600 return full_ldub_code(env, addr, oi, 0);
2601 }
2602
2603 static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
2604 MemOpIdx oi, uintptr_t retaddr)
2605 {
2606 return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
2607 }
2608
2609 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
2610 {
2611 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2612 return full_lduw_code(env, addr, oi, 0);
2613 }
2614
2615 static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
2616 MemOpIdx oi, uintptr_t retaddr)
2617 {
2618 return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
2619 }
2620
2621 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
2622 {
2623 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2624 return full_ldl_code(env, addr, oi, 0);
2625 }
2626
2627 static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
2628 MemOpIdx oi, uintptr_t retaddr)
2629 {
2630 return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
2631 }
2632
2633 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2634 {
2635 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
2636 return full_ldq_code(env, addr, oi, 0);
2637 }
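
/*
 * Illustrative sketch (editorial, not part of the original file): target
 * translators use the functions above to fetch guest instructions, e.g.
 * reading a 32-bit opcode at the current translation pc.
 */
static uint32_t example_fetch_insn(CPUArchState *env, target_ulong pc)
{
    return cpu_ldl_code(env, pc);
}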