cputlb: atomically update tlb fields used by tlb_reset_dirty
mirror_qemu.git / cputlb.c
1 /*
2 * Common CPU TLB handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35
36 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
37 /* #define DEBUG_TLB */
38 /* #define DEBUG_TLB_LOG */
39
40 #ifdef DEBUG_TLB
41 # define DEBUG_TLB_GATE 1
42 # ifdef DEBUG_TLB_LOG
43 # define DEBUG_TLB_LOG_GATE 1
44 # else
45 # define DEBUG_TLB_LOG_GATE 0
46 # endif
47 #else
48 # define DEBUG_TLB_GATE 0
49 # define DEBUG_TLB_LOG_GATE 0
50 #endif
51
52 #define tlb_debug(fmt, ...) do { \
53 if (DEBUG_TLB_LOG_GATE) { \
54 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
55 ## __VA_ARGS__); \
56 } else if (DEBUG_TLB_GATE) { \
57 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
58 } \
59 } while (0)
60
61 #define assert_cpu_is_self(this_cpu) do { \
62 if (DEBUG_TLB_GATE) { \
63                 g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
64 } \
65 } while (0)
66
67 /* run_on_cpu_data.target_ptr should always be big enough for a
68 * target_ulong even on 32 bit builds */
69 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
70
71 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
72 */
73 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
74 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
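
/* For illustration only: a caller that wants to flush, say, MMU modes 0 and 2
 * of a vCPU builds the (at most 16-bit) mask directly from the mode numbers,
 * here chosen arbitrarily:
 *
 *     uint16_t idxmap = (1 << 0) | (1 << 2);
 *     tlb_flush_by_mmuidx(cpu, idxmap);
 *
 * Keeping the mask within 16 bits lets it be merged into
 * cpu->pending_tlb_flush and, for the per-page variant, packed into the low
 * bits of a page-aligned address (see tlb_flush_page_by_mmuidx below).
 */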
75
76 /* statistics */
77 int tlb_flush_count;
78
79 /* This is OK because CPU architectures generally permit an
80 * implementation to drop entries from the TLB at any time, so
81 * flushing more entries than required is only an efficiency issue,
82 * not a correctness issue.
83 */
84 static void tlb_flush_nocheck(CPUState *cpu)
85 {
86 CPUArchState *env = cpu->env_ptr;
87
88 /* The QOM tests will trigger tlb_flushes without setting up TCG
89 * so we bug out here in that case.
90 */
91 if (!tcg_enabled()) {
92 return;
93 }
94
95 assert_cpu_is_self(cpu);
96 tlb_debug("(count: %d)\n", tlb_flush_count++);
97
98 tb_lock();
99
100 memset(env->tlb_table, -1, sizeof(env->tlb_table));
101 memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
102 memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
103
104 env->vtlb_index = 0;
105 env->tlb_flush_addr = -1;
106 env->tlb_flush_mask = 0;
107
108 tb_unlock();
109
110 atomic_mb_set(&cpu->pending_tlb_flush, 0);
111 }
112
113 static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
114 {
115 tlb_flush_nocheck(cpu);
116 }
117
118 void tlb_flush(CPUState *cpu)
119 {
120 if (cpu->created && !qemu_cpu_is_self(cpu)) {
121 if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
122 atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
123 async_run_on_cpu(cpu, tlb_flush_global_async_work,
124 RUN_ON_CPU_NULL);
125 }
126 } else {
127 tlb_flush_nocheck(cpu);
128 }
129 }
130
131 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
132 {
133 CPUArchState *env = cpu->env_ptr;
134 unsigned long mmu_idx_bitmask = data.host_int;
135 int mmu_idx;
136
137 assert_cpu_is_self(cpu);
138
139 tb_lock();
140
141 tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
142
143 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
144
145 if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
146 tlb_debug("%d\n", mmu_idx);
147
148 memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
149 memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
150 }
151 }
152
153 memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
154
155 tlb_debug("done\n");
156
157 tb_unlock();
158 }
159
160 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
161 {
162 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
163
164 if (!qemu_cpu_is_self(cpu)) {
165 uint16_t pending_flushes = idxmap;
166 pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);
167
168 if (pending_flushes) {
169 tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);
170
171 atomic_or(&cpu->pending_tlb_flush, pending_flushes);
172 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
173 RUN_ON_CPU_HOST_INT(pending_flushes));
174 }
175 } else {
176 tlb_flush_by_mmuidx_async_work(cpu,
177 RUN_ON_CPU_HOST_INT(idxmap));
178 }
179 }
180
181 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
182 {
183 if (addr == (tlb_entry->addr_read &
184 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
185 addr == (tlb_entry->addr_write &
186 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
187 addr == (tlb_entry->addr_code &
188 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
189 memset(tlb_entry, -1, sizeof(*tlb_entry));
190 }
191 }
192
193 static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
194 {
195 CPUArchState *env = cpu->env_ptr;
196 target_ulong addr = (target_ulong) data.target_ptr;
197 int i;
198 int mmu_idx;
199
200 assert_cpu_is_self(cpu);
201
202 tlb_debug("page :" TARGET_FMT_lx "\n", addr);
203
204 /* Check if we need to flush due to large pages. */
205 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
206 tlb_debug("forcing full flush ("
207 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
208 env->tlb_flush_addr, env->tlb_flush_mask);
209
210 tlb_flush(cpu);
211 return;
212 }
213
214 addr &= TARGET_PAGE_MASK;
215 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
216 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
217 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
218 }
219
220 /* check whether there are entries that need to be flushed in the vtlb */
221 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
222 int k;
223 for (k = 0; k < CPU_VTLB_SIZE; k++) {
224 tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
225 }
226 }
227
228 tb_flush_jmp_cache(cpu, addr);
229 }
230
231 void tlb_flush_page(CPUState *cpu, target_ulong addr)
232 {
233 tlb_debug("page :" TARGET_FMT_lx "\n", addr);
234
235 if (!qemu_cpu_is_self(cpu)) {
236 async_run_on_cpu(cpu, tlb_flush_page_async_work,
237 RUN_ON_CPU_TARGET_PTR(addr));
238 } else {
239 tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
240 }
241 }
242
243 /* As we are going to hijack the bottom bits of the page address for a
244 * mmuidx bit mask we need to fail to build if we can't do that
245 */
246 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
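
/* Sketch of the encoding used by the workers below: the page-aligned address
 * and the mmuidx bitmap travel in one run_on_cpu_data.target_ptr value and
 * are unpacked again on the target vCPU:
 *
 *     target_ulong packed = (addr & TARGET_PAGE_MASK) | idxmap;
 *     target_ulong page   = packed & TARGET_PAGE_MASK;
 *     unsigned long idxs  = packed & ALL_MMUIDX_BITS;
 *
 * The build assertion above guarantees the bitmap cannot spill into the
 * page-number bits.
 */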
247
248 static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
249 run_on_cpu_data data)
250 {
251 CPUArchState *env = cpu->env_ptr;
252 target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
253 target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
254 unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
255 int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
256 int mmu_idx;
257 int i;
258
259 assert_cpu_is_self(cpu);
260
261 tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
262 page, addr, mmu_idx_bitmap);
263
264 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
265 if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
266 tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
267
268             /* check whether there are vtlb entries that need to be flushed */
269 for (i = 0; i < CPU_VTLB_SIZE; i++) {
270 tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
271 }
272 }
273 }
274
275 tb_flush_jmp_cache(cpu, addr);
276 }
277
278 static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
279 run_on_cpu_data data)
280 {
281 CPUArchState *env = cpu->env_ptr;
282 target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
283 target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
284 unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
285
286 tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);
287
288 /* Check if we need to flush due to large pages. */
289 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
290 tlb_debug("forced full flush ("
291 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
292 env->tlb_flush_addr, env->tlb_flush_mask);
293
294 tlb_flush_by_mmuidx_async_work(cpu,
295 RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
296 } else {
297 tlb_flush_page_by_mmuidx_async_work(cpu, data);
298 }
299 }
300
301 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
302 {
303 target_ulong addr_and_mmu_idx;
304
305 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
306
307 /* This should already be page aligned */
308 addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
309 addr_and_mmu_idx |= idxmap;
310
311 if (!qemu_cpu_is_self(cpu)) {
312 async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
313 RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
314 } else {
315 tlb_check_page_and_flush_by_mmuidx_async_work(
316 cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
317 }
318 }
319
320 void tlb_flush_page_all(target_ulong addr)
321 {
322 CPUState *cpu;
323
324 CPU_FOREACH(cpu) {
325 async_run_on_cpu(cpu, tlb_flush_page_async_work,
326 RUN_ON_CPU_TARGET_PTR(addr));
327 }
328 }
329
330 /* update the TLBs so that writes to code in the virtual page 'addr'
331 can be detected */
332 void tlb_protect_code(ram_addr_t ram_addr)
333 {
334 cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
335 DIRTY_MEMORY_CODE);
336 }
337
338 /* update the TLB so that writes in physical page 'phys_addr' are no longer
339 tested for self modifying code */
340 void tlb_unprotect_code(ram_addr_t ram_addr)
341 {
342 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
343 }
344
345
346 /*
347 * Dirty write flag handling
348 *
349 * When the TCG code writes to a location it looks up the address in
350 * the TLB and uses that data to compute the final address. If any of
351 * the lower bits of the address are set then the slow path is forced.
352 * There are a number of reasons to do this but for normal RAM the
353 * most usual is detecting writes to code regions which may invalidate
354 * generated code.
355 *
356 * Because we want other vCPUs to respond to changes straight away we
357 * update the te->addr_write field atomically. If the TLB entry has
358  * been changed by the vCPU in the meantime, we skip the update.
359 *
360 * As this function uses atomic accesses we also need to ensure
361 * updates to tlb_entries follow the same access rules. We don't need
362 * to worry about this for oversized guests as MTTCG is disabled for
363 * them.
364 */
365
366 static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
367 uintptr_t length)
368 {
369 #if TCG_OVERSIZED_GUEST
370 uintptr_t addr = tlb_entry->addr_write;
371
372 if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
373 addr &= TARGET_PAGE_MASK;
374 addr += tlb_entry->addend;
375 if ((addr - start) < length) {
376 tlb_entry->addr_write |= TLB_NOTDIRTY;
377 }
378 }
379 #else
380 /* paired with atomic_mb_set in tlb_set_page_with_attrs */
381 uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
382 uintptr_t addr = orig_addr;
383
384 if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
385 addr &= TARGET_PAGE_MASK;
386 addr += atomic_read(&tlb_entry->addend);
387 if ((addr - start) < length) {
388 uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
389 atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
390 }
391 }
392 #endif
393 }
394
395 /* For atomic correctness when running MTTCG we need to use the right
396 * primitives when copying entries */
397 static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
398 bool atomic_set)
399 {
400 #if TCG_OVERSIZED_GUEST
401 *d = *s;
402 #else
403 if (atomic_set) {
404 d->addr_read = s->addr_read;
405 d->addr_code = s->addr_code;
406 atomic_set(&d->addend, atomic_read(&s->addend));
407 /* Pairs with flag setting in tlb_reset_dirty_range */
408 atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
409 } else {
410 d->addr_read = s->addr_read;
411 d->addr_write = atomic_read(&s->addr_write);
412 d->addr_code = s->addr_code;
413 d->addend = atomic_read(&s->addend);
414 }
415 #endif
416 }
417
418 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
419  * the target vCPU). As such, care needs to be taken that we don't
420 * dangerously race with another vCPU update. The only thing actually
421 * updated is the target TLB entry ->addr_write flags.
422 */
423 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
424 {
425 CPUArchState *env;
426
427 int mmu_idx;
428
429 env = cpu->env_ptr;
430 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
431 unsigned int i;
432
433 for (i = 0; i < CPU_TLB_SIZE; i++) {
434 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
435 start1, length);
436 }
437
438 for (i = 0; i < CPU_VTLB_SIZE; i++) {
439 tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
440 start1, length);
441 }
442 }
443 }
444
445 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
446 {
447 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
448 tlb_entry->addr_write = vaddr;
449 }
450 }
451
452 /* update the TLB corresponding to virtual page vaddr
453 so that it is no longer dirty */
454 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
455 {
456 CPUArchState *env = cpu->env_ptr;
457 int i;
458 int mmu_idx;
459
460 assert_cpu_is_self(cpu);
461
462 vaddr &= TARGET_PAGE_MASK;
463 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
464 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
465 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
466 }
467
468 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
469 int k;
470 for (k = 0; k < CPU_VTLB_SIZE; k++) {
471 tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
472 }
473 }
474 }
475
476 /* Our TLB does not support large pages, so remember the area covered by
477 large pages and trigger a full TLB flush if these are invalidated. */
478 static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
479 target_ulong size)
480 {
481 target_ulong mask = ~(size - 1);
482
483 if (env->tlb_flush_addr == (target_ulong)-1) {
484 env->tlb_flush_addr = vaddr & mask;
485 env->tlb_flush_mask = mask;
486 return;
487 }
488 /* Extend the existing region to include the new page.
489 This is a compromise between unnecessary flushes and the cost
490 of maintaining a full variable size TLB. */
491 mask &= env->tlb_flush_mask;
492 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
493 mask <<= 1;
494 }
495 env->tlb_flush_addr &= mask;
496 env->tlb_flush_mask = mask;
497 }
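
/* Worked example, assuming 32-bit target addresses: if a 2MB page at
 * 0x40000000 is already tracked (tlb_flush_addr = 0x40000000,
 * tlb_flush_mask = 0xffe00000) and another 2MB page at 0x40400000 is added,
 * the loop above widens the mask to 0xff800000, so the tracked region
 * becomes the 8MB block starting at 0x40000000 that covers both pages.
 */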
498
499 /* Add a new TLB entry. At most one entry for a given virtual address
500  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
501 * supplied size is only used by tlb_flush_page.
502 *
503 * Called from TCG-generated code, which is under an RCU read-side
504 * critical section.
505 */
506 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
507 hwaddr paddr, MemTxAttrs attrs, int prot,
508 int mmu_idx, target_ulong size)
509 {
510 CPUArchState *env = cpu->env_ptr;
511 MemoryRegionSection *section;
512 unsigned int index;
513 target_ulong address;
514 target_ulong code_address;
515 uintptr_t addend;
516 CPUTLBEntry *te, *tv, tn;
517 hwaddr iotlb, xlat, sz;
518 unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
519 int asidx = cpu_asidx_from_attrs(cpu, attrs);
520
521 assert_cpu_is_self(cpu);
522 assert(size >= TARGET_PAGE_SIZE);
523 if (size != TARGET_PAGE_SIZE) {
524 tlb_add_large_page(env, vaddr, size);
525 }
526
527 sz = size;
528 section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
529 assert(sz >= TARGET_PAGE_SIZE);
530
531 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
532 " prot=%x idx=%d\n",
533 vaddr, paddr, prot, mmu_idx);
534
535 address = vaddr;
536 if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
537 /* IO memory case */
538 address |= TLB_MMIO;
539 addend = 0;
540 } else {
541 /* TLB_MMIO for rom/romd handled below */
542 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
543 }
544
545 code_address = address;
546 iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
547 prot, &address);
548
549 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
550 te = &env->tlb_table[mmu_idx][index];
551 /* do not discard the translation in te, evict it into a victim tlb */
552 tv = &env->tlb_v_table[mmu_idx][vidx];
553
554 /* addr_write can race with tlb_reset_dirty_range */
555 copy_tlb_helper(tv, te, true);
556
557 env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
558
559 /* refill the tlb */
560 env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
561 env->iotlb[mmu_idx][index].attrs = attrs;
562
563 /* Now calculate the new entry */
564 tn.addend = addend - vaddr;
565 if (prot & PAGE_READ) {
566 tn.addr_read = address;
567 } else {
568 tn.addr_read = -1;
569 }
570
571 if (prot & PAGE_EXEC) {
572 tn.addr_code = code_address;
573 } else {
574 tn.addr_code = -1;
575 }
576
577 tn.addr_write = -1;
578 if (prot & PAGE_WRITE) {
579 if ((memory_region_is_ram(section->mr) && section->readonly)
580 || memory_region_is_romd(section->mr)) {
581 /* Write access calls the I/O callback. */
582 tn.addr_write = address | TLB_MMIO;
583 } else if (memory_region_is_ram(section->mr)
584 && cpu_physical_memory_is_clean(
585 memory_region_get_ram_addr(section->mr) + xlat)) {
586 tn.addr_write = address | TLB_NOTDIRTY;
587 } else {
588 tn.addr_write = address;
589 }
590 }
591
592 /* Pairs with flag setting in tlb_reset_dirty_range */
593 copy_tlb_helper(te, &tn, true);
594 /* atomic_mb_set(&te->addr_write, write_address); */
595 }
596
597 /* Add a new TLB entry, but without specifying the memory
598 * transaction attributes to be used.
599 */
600 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
601 hwaddr paddr, int prot,
602 int mmu_idx, target_ulong size)
603 {
604 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
605 prot, mmu_idx, size);
606 }
607
608 static void report_bad_exec(CPUState *cpu, target_ulong addr)
609 {
610 /* Accidentally executing outside RAM or ROM is quite common for
611 * several user-error situations, so report it in a way that
612 * makes it clear that this isn't a QEMU bug and provide suggestions
613 * about what a user could do to fix things.
614 */
615 error_report("Trying to execute code outside RAM or ROM at 0x"
616 TARGET_FMT_lx, addr);
617 error_printf("This usually means one of the following happened:\n\n"
618 "(1) You told QEMU to execute a kernel for the wrong machine "
619 "type, and it crashed on startup (eg trying to run a "
620 "raspberry pi kernel on a versatilepb QEMU machine)\n"
621 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
622 "and QEMU executed a ROM full of no-op instructions until "
623 "it fell off the end\n"
624 "(3) Your guest kernel has a bug and crashed by jumping "
625 "off into nowhere\n\n"
626 "This is almost always one of the first two, so check your "
627 "command line and that you are using the right type of kernel "
628 "for this machine.\n"
629 "If you think option (3) is likely then you can try debugging "
630 "your guest with the -d debug options; in particular "
631 "-d guest_errors will cause the log to include a dump of the "
632 "guest register state at this point.\n\n"
633 "Execution cannot continue; stopping here.\n\n");
634
635 /* Report also to the logs, with more detail including register dump */
636 qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
637 "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
638 log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
639 }
640
641 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
642 {
643 ram_addr_t ram_addr;
644
645 ram_addr = qemu_ram_addr_from_host(ptr);
646 if (ram_addr == RAM_ADDR_INVALID) {
647 error_report("Bad ram pointer %p", ptr);
648 abort();
649 }
650 return ram_addr;
651 }
652
653 /* NOTE: this function can trigger an exception */
654 /* NOTE2: the returned address is not exactly the physical address: it
655 * is actually a ram_addr_t (in system mode; the user mode emulation
656 * version of this function returns a guest virtual address).
657 */
658 tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
659 {
660 int mmu_idx, page_index, pd;
661 void *p;
662 MemoryRegion *mr;
663 CPUState *cpu = ENV_GET_CPU(env1);
664 CPUIOTLBEntry *iotlbentry;
665
666 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
667 mmu_idx = cpu_mmu_index(env1, true);
668 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
669 (addr & TARGET_PAGE_MASK))) {
670 cpu_ldub_code(env1, addr);
671 }
672 iotlbentry = &env1->iotlb[mmu_idx][page_index];
673 pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
674 mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
675 if (memory_region_is_unassigned(mr)) {
676 CPUClass *cc = CPU_GET_CLASS(cpu);
677
678 if (cc->do_unassigned_access) {
679 cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
680 } else {
681 report_bad_exec(cpu, addr);
682 exit(1);
683 }
684 }
685 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
686 return qemu_ram_addr_from_host_nofail(p);
687 }
688
689 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
690 target_ulong addr, uintptr_t retaddr, int size)
691 {
692 CPUState *cpu = ENV_GET_CPU(env);
693 hwaddr physaddr = iotlbentry->addr;
694 MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
695 uint64_t val;
696 bool locked = false;
697
698 physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
699 cpu->mem_io_pc = retaddr;
700 if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
701 cpu_io_recompile(cpu, retaddr);
702 }
703
704 cpu->mem_io_vaddr = addr;
705
706 if (mr->global_locking) {
707 qemu_mutex_lock_iothread();
708 locked = true;
709 }
710 memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
711 if (locked) {
712 qemu_mutex_unlock_iothread();
713 }
714
715 return val;
716 }
717
718 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
719 uint64_t val, target_ulong addr,
720 uintptr_t retaddr, int size)
721 {
722 CPUState *cpu = ENV_GET_CPU(env);
723 hwaddr physaddr = iotlbentry->addr;
724 MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
725 bool locked = false;
726
727 physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
728 if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
729 cpu_io_recompile(cpu, retaddr);
730 }
731 cpu->mem_io_vaddr = addr;
732 cpu->mem_io_pc = retaddr;
733
734 if (mr->global_locking) {
735 qemu_mutex_lock_iothread();
736 locked = true;
737 }
738 memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
739 if (locked) {
740 qemu_mutex_unlock_iothread();
741 }
742 }
743
744 /* Return true if ADDR is present in the victim tlb, and has been copied
745 back to the main tlb. */
746 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
747 size_t elt_ofs, target_ulong page)
748 {
749 size_t vidx;
750 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
751 CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
752 target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
753
754 if (cmp == page) {
755 /* Found entry in victim tlb, swap tlb and iotlb. */
756 CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
757
758 copy_tlb_helper(&tmptlb, tlb, false);
759 copy_tlb_helper(tlb, vtlb, true);
760 copy_tlb_helper(vtlb, &tmptlb, true);
761
762 CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
763 CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
764 tmpio = *io; *io = *vio; *vio = tmpio;
765 return true;
766 }
767 }
768 return false;
769 }
770
771 /* Macro to call the above, with local variables from the use context. */
772 #define VICTIM_TLB_HIT(TY, ADDR) \
773 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
774 (ADDR) & TARGET_PAGE_MASK)
775
776 /* Probe for whether the specified guest write access is permitted.
777 * If it is not permitted then an exception will be taken in the same
778 * way as if this were a real write access (and we will not return).
779 * Otherwise the function will return, and there will be a valid
780 * entry in the TLB for this access.
781 */
782 void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
783 uintptr_t retaddr)
784 {
785 int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
786 target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
787
788 if ((addr & TARGET_PAGE_MASK)
789 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
790 /* TLB entry is for a different page */
791 if (!VICTIM_TLB_HIT(addr_write, addr)) {
792 tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
793 }
794 }
795 }
796
797 /* Probe for a read-modify-write atomic operation. Do not allow unaligned
798 * operations, or io operations to proceed. Return the host address. */
799 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
800 TCGMemOpIdx oi, uintptr_t retaddr)
801 {
802 size_t mmu_idx = get_mmuidx(oi);
803 size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
804 CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
805 target_ulong tlb_addr = tlbe->addr_write;
806 TCGMemOp mop = get_memop(oi);
807 int a_bits = get_alignment_bits(mop);
808 int s_bits = mop & MO_SIZE;
809
810 /* Adjust the given return address. */
811 retaddr -= GETPC_ADJ;
812
813 /* Enforce guest required alignment. */
814 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
815 /* ??? Maybe indicate atomic op to cpu_unaligned_access */
816 cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
817 mmu_idx, retaddr);
818 }
819
820 /* Enforce qemu required alignment. */
821 if (unlikely(addr & ((1 << s_bits) - 1))) {
822 /* We get here if guest alignment was not requested,
823 or was not enforced by cpu_unaligned_access above.
824 We might widen the access and emulate, but for now
825 mark an exception and exit the cpu loop. */
826 goto stop_the_world;
827 }
828
829 /* Check TLB entry and enforce page permissions. */
830 if ((addr & TARGET_PAGE_MASK)
831 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
832 if (!VICTIM_TLB_HIT(addr_write, addr)) {
833 tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
834 }
835 tlb_addr = tlbe->addr_write;
836 }
837
838 /* Notice an IO access, or a notdirty page. */
839 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
840 /* There's really nothing that can be done to
841 support this apart from stop-the-world. */
842 goto stop_the_world;
843 }
844
845 /* Let the guest notice RMW on a write-only page. */
846 if (unlikely(tlbe->addr_read != tlb_addr)) {
847 tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
848 /* Since we don't support reads and writes to different addresses,
849 and we do have the proper page loaded for write, this shouldn't
850 ever return. But just in case, handle via stop-the-world. */
851 goto stop_the_world;
852 }
853
854 return (void *)((uintptr_t)addr + tlbe->addend);
855
856 stop_the_world:
857 cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
858 }
859
860 #ifdef TARGET_WORDS_BIGENDIAN
861 # define TGT_BE(X) (X)
862 # define TGT_LE(X) BSWAP(X)
863 #else
864 # define TGT_BE(X) BSWAP(X)
865 # define TGT_LE(X) (X)
866 #endif
867
868 #define MMUSUFFIX _mmu
869
870 #define DATA_SIZE 1
871 #include "softmmu_template.h"
872
873 #define DATA_SIZE 2
874 #include "softmmu_template.h"
875
876 #define DATA_SIZE 4
877 #include "softmmu_template.h"
878
879 #define DATA_SIZE 8
880 #include "softmmu_template.h"
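
/* Each inclusion above stamps out the slow-path load/store helpers for one
 * access width; e.g. the 4-byte pass is expected to produce
 * helper_le_ldul_mmu/helper_be_ldul_mmu and helper_le_stl_mmu/
 * helper_be_stl_mmu (the exact names come from softmmu_template.h), which
 * TCG-generated code calls when the inline TLB fast path cannot handle an
 * access.
 */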
881
882 /* The first set of helpers allows passing in OI and RETADDR.  This makes
883 them callable from other helpers. */
884
885 #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
886 #define ATOMIC_NAME(X) \
887 HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
888 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
889
890 #define DATA_SIZE 1
891 #include "atomic_template.h"
892
893 #define DATA_SIZE 2
894 #include "atomic_template.h"
895
896 #define DATA_SIZE 4
897 #include "atomic_template.h"
898
899 #ifdef CONFIG_ATOMIC64
900 #define DATA_SIZE 8
901 #include "atomic_template.h"
902 #endif
903
904 #ifdef CONFIG_ATOMIC128
905 #define DATA_SIZE 16
906 #include "atomic_template.h"
907 #endif
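
/* Naming sketch (the exact glue lives in atomic_template.h): the block above
 * is expected to generate helpers such as helper_atomic_cmpxchgl_le_mmu, a
 * 4-byte little-endian compare-and-swap taking an explicit TCGMemOpIdx and
 * return address, so it can be invoked from other helper code rather than
 * only from TCG-generated code.
 */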
908
909 /* Second set of helpers are directly callable from TCG as helpers. */
910
911 #undef EXTRA_ARGS
912 #undef ATOMIC_NAME
913 #undef ATOMIC_MMU_LOOKUP
914 #define EXTRA_ARGS , TCGMemOpIdx oi
915 #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
916 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
917
918 #define DATA_SIZE 1
919 #include "atomic_template.h"
920
921 #define DATA_SIZE 2
922 #include "atomic_template.h"
923
924 #define DATA_SIZE 4
925 #include "atomic_template.h"
926
927 #ifdef CONFIG_ATOMIC64
928 #define DATA_SIZE 8
929 #include "atomic_template.h"
930 #endif
931
932 /* Code access functions. */
933
934 #undef MMUSUFFIX
935 #define MMUSUFFIX _cmmu
936 #undef GETPC
937 #define GETPC() ((uintptr_t)0)
938 #define SOFTMMU_CODE_ACCESS
939
940 #define DATA_SIZE 1
941 #include "softmmu_template.h"
942
943 #define DATA_SIZE 2
944 #include "softmmu_template.h"
945
946 #define DATA_SIZE 4
947 #include "softmmu_template.h"
948
949 #define DATA_SIZE 8
950 #include "softmmu_template.h"