/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do { \
    if (DEBUG_TLB_GATE) { \
        g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
    } \
} while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

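/*
 * env->tlb_mask[mmu_idx] holds (n_entries - 1) << CPU_TLB_ENTRY_BITS,
 * i.e. the byte offset of the last TLB entry; adding the size of one
 * entry therefore yields the size of the whole table in bytes.
 */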
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
                             size_t max_entries)
{
    window->begin_ns = ns;
    window->max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env->tlb_d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(&desc->window, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
        env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
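 *
 * For example, with a 1024-entry TLB and a peak of 800 used entries in the
 * window, the use rate is 800/1024 ~= 78%, so on the next flush we double
 * the size to 2048 entries, which brings the expected rate down to ~39%.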
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window.begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window.max_entries) {
        desc->window.max_entries = desc->n_used_entries;
    }
    rate = desc->window.max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window.max_entries);
        size_t expected_rate = desc->window.max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(&desc->window, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env->tlb_table[mmu_idx]);
    g_free(env->iotlb[mmu_idx]);

    tlb_window_reset(&desc->window, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
    env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env->tlb_table[mmu_idx]);
        g_free(env->iotlb[mmu_idx]);
        env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
        env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    }
}

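/*
 * memset()ing the table to 0xff sets addr_read, addr_write and addr_code
 * of every entry to -1, which never matches a page-aligned address and is
 * exactly what tlb_entry_is_empty() tests for.
 */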
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
    env->tlb_d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush. */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * fn is queued as asynchronous work on every cpu other than src; the
 * caller then runs fn on src itself, either directly or as "safe" work,
 * the latter creating a synchronisation point where all queued work is
 * finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env->tlb_c.lock);

    all_dirty = env->tlb_c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env->tlb_c.dirty = all_dirty;

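    /*
     * Walk the set bits of to_clean: ctz32() yields the lowest set bit,
     * i.e. the next mmu_idx to flush, and "work &= work - 1" clears it.
     */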
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for an
 * mmuidx bit mask, we need to fail the build if we can't do that.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page. */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean. */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page. */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb. */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback. */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = tlb_addr_write(entry);
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb. */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context. */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation. Do not allow unaligned
 * or I/O operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment. */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop. */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world. */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page. */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return. But just in case, handle via stop-the-world. */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

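/*
 * Byte-swap helpers for softmmu_template.h: TGT_BE()/TGT_LE() convert a
 * big-/little-endian value to or from target endianness, swapping bytes
 * only when the target's byte order differs.
 */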
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif

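/*
 * Generate the out-of-line load/store helpers: softmmu_template.h is
 * included once per access size, with DATA_SIZE selecting the width of
 * the generated accessors.
 */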
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR. This makes
   them callable from other helpers. */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP \
    do { \
        if (unlikely(ndi.active)) { \
            memory_notdirty_write_complete(&ndi); \
        } \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers. */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions. */

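/*
 * Code accesses (cpu_ld*_code) are made from the translator rather than
 * from generated code, so there is no host return address to unwind from;
 * GETPC() is therefore stubbed out to 0 for the _cmmu helpers below.
 */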
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"