/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!cpu->created || qemu_cpu_is_self(cpu));     \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, false);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

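/* Flush the entire TLB of @cpu.  If called from a thread other than the
 * vCPU's own thread, the flush is scheduled as asynchronous work so that
 * it runs on the vCPU itself.
 */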
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_cmpxchg(&cpu->pending_tlb_flush, false, true) == true) {
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

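/* Flush the TLB and victim TLB entries of every MMU index whose bit is set
 * in @idxmap; shared helper for the by-mmuidx flush entry points.
 */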
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = idxmap;
    int mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("start\n");

    tb_lock();

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    v_tlb_flush_by_mmuidx(cpu, idxmap);
}

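/* Invalidate a single TLB entry if any of its read, write or code addresses
 * match the given (page-aligned) virtual address.
 */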
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

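/* Flush the TLB entries covering a single page, but only for the MMU
 * indexes whose bits are set in @idxmap.
 */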
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmap = idxmap;
    int i, page, mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_all(target_ulong addr)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

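/* Translate a host pointer back into a ram_addr_t, aborting if the pointer
 * does not point into guest RAM.
 */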
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

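/* Perform an MMIO load for a TLB entry that maps I/O, dispatching the read
 * through the target memory region and taking the iothread lock when the
 * region requires global locking.
 */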
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

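/* Perform an MMIO store for a TLB entry that maps I/O, dispatching the write
 * through the target memory region and taking the iothread lock when the
 * region requires global locking.
 */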
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"