/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

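/* Flush the TLB (and the TB jump cache) for the MMU modes named in the
 * negative-terminated list of mmu_idx values pulled from 'argp'; shared
 * helper for the varargs front end tlb_flush_by_mmuidx() below.
 */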
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

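/* Flush the TLB entries of the given MMU modes; the variable argument list
 * is a sequence of mmu_idx values terminated by a negative value.
 */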
void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

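/* Invalidate a single TLB entry if its read, write or code address matches
 * the given page-aligned address. */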
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

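/* Flush, in every MMU mode, the TLB and victim TLB entries that map the
 * virtual page containing 'addr'; large pages force a full flush. */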
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

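/* As tlb_flush_page(), but restricted to a negative-terminated list of
 * mmu_idx values passed as varargs. */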
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

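/* Return true if the entry maps ordinary RAM that can be written directly,
 * i.e. none of TLB_INVALID_MASK, TLB_MMIO or TLB_NOTDIRTY is set on the
 * write address. */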
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

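/* Set TLB_NOTDIRTY on the entry's write address if the host address it
 * maps falls inside [start, start + length). */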
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

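/* Translate a host pointer back into a ram_addr_t, aborting if it does not
 * point into guest RAM. */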
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

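/* Re-arm dirty tracking for 'cpu': mark as not-dirty every TLB and victim
 * TLB entry whose target lies in [start1, start1 + length). */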
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

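/* Clear TLB_NOTDIRTY from a single entry if it maps exactly this page. */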
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

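/* Perform an I/O (TLB_MMIO) load by dispatching it to the MemoryRegion
 * found through the IOTLB entry, recording the access PC and virtual
 * address in the CPU state first. */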
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

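/* Store counterpart of io_readx(): dispatch an I/O (TLB_MMIO) write to the
 * MemoryRegion found through the IOTLB entry. */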
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb. */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context. */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation. Do not allow unaligned
 * operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment. */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop. */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world. */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page. */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return. But just in case, handle via stop-the-world. */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

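/* Byte-order conversion macros, presumably used by the memory helpers
 * included below: TGT_BE converts between host order and target big-endian,
 * TGT_LE between host order and target little-endian. */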
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

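/* Instantiate the softmmu load/store helpers (with the _mmu suffix) once
 * per access size: 1, 2, 4 and 8 bytes. */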
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR. This makes
   them callable from other helpers. */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers. */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions. */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"