/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                                 \
        if (DEBUG_TLB_GATE) {                                             \
            g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
        }                                                                 \
    } while (0)

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
}

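/* Flush the indicated MMU modes only.  ARGP holds a list of MMU index
 * values terminated by a negative number; for each index the main TLB
 * and the victim TLB are wiped, and the TB jump cache is cleared.
 */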
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    assert_cpu_is_self(cpu);
    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

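/* Public wrapper around the helper above.  The trailing variadic
 * arguments are MMU indexes terminated by a negative sentinel, e.g.
 * (hypothetical call, assuming indexes 0 and 1 exist for the target):
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 */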
void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

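/* Invalidate a single TLB entry if it maps ADDR for read, write or
 * code access.  Filling the entry with -1 guarantees it can never
 * compare equal to a real page address again.
 */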
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

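/* Flush ADDR from the TLBs of selected MMU modes only.  As with
 * tlb_flush_by_mmuidx, the variadic list is a run of MMU indexes ended
 * by a negative sentinel, e.g. (hypothetical call):
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, -1);
 */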
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    assert_cpu_is_self(cpu);
    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are victim tlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

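/* Re-arm dirty tracking for every RAM-backed write entry, in both the
 * main and the victim TLB, whose host address falls inside
 * [start1, start1 + length): setting TLB_NOTDIRTY forces the next write
 * back onto the slow path so the page can be marked dirty again.
 */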
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

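/* Clear the TLB_NOTDIRTY flag from a single entry if it maps VADDR,
 * so that subsequent writes to the page can take the fast path again.
 */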
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
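
/* Worked example of the widening above (hypothetical 32-bit addresses):
 * merging an existing 2MB region at 0x40000000 with a new 2MB page at
 * 0x40400000 shifts the mask from 0xffe00000 up to 0xff800000, so the
 * tracked region grows to the 8MB range [0x40000000, 0x40800000).
 */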

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

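/* Slow path for an MMIO read: resolve the iotlb entry to its
 * MemoryRegion and dispatch the read, taking the iothread lock first
 * unless the region is marked safe for lock-free access.
 */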
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

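/* Slow path for an MMIO write; mirrors io_readx above. */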
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

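/* TGT_BE()/TGT_LE() convert a value between the target's native byte
 * order and big or little endian respectively; one of the two is always
 * a no-op depending on TARGET_WORDS_BIGENDIAN.
 */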
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

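/* Each inclusion of softmmu_template.h below instantiates the
 * out-of-line load/store helpers for one access size (DATA_SIZE bytes).
 */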
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif


/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"