/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif
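
/* These gates are always-defined constants rather than bare #ifdef
 * guards, so the tlb_debug() calls below are compiled and type-checked
 * even when debugging is disabled, and then discarded as dead code.
 */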

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

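    /* memset()ing the tables to -1 sets every comparator field to all
     * ones; a page-aligned lookup address can never equal an all-ones
     * field once TLB_INVALID_MASK is folded into the comparison, so
     * every entry becomes a guaranteed miss.
     */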
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

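/* True if this entry maps plain writable RAM: addr_write carries none
 * of the special-case flags, so stores may go straight to host memory.
 */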
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
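    /* Example: merging an existing region 0x12340000/0xffff0000 with a
     * new 64KB page at 0x12350000 widens the mask one step to
     * 0xfffe0000, at which point both addresses agree in the masked
     * bits and the region covers them both.
     */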
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
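    /* The iotlb entry holds (iotlb - vaddr) so that adding back the
     * full access address at lookup time recovers the in-page offset
     * within the target section.
     */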
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
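    /* On a miss, a dummy byte load brings the code page into the TLB
     * as a side effect of its tlb_fill().
     */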
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
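    /* If this CPU is not currently allowed to perform I/O (e.g. the
     * access does not come from the last instruction of its TB),
     * cpu_io_recompile() retranslates the TB so that it does.
     */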
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
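/* The victim TLB is a small, fully associative cache of entries that
 * were recently evicted from the direct-mapped main TLB.  ELT_OFS
 * selects which comparator (addr_read, addr_write or addr_code) is
 * matched; callers pass it as an offsetof() via VICTIM_TLB_HIT below.
 */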
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
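
/* TGT_BE/TGT_LE convert between the target's native byte order and an
 * explicitly big- or little-endian value: a byte swap when the two
 * orders differ, a no-op otherwise.
 */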
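/* Each inclusion of softmmu_template.h expands the load/store helpers
 * for one access size, parameterized by DATA_SIZE and MMUSUFFIX.
 */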
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
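/* For code accesses there is no host return address within a translated
 * block to unwind from, so GETPC is stubbed out to 0 for these helpers.
 */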
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"