/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

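/* Illustrative worked example (not part of the original source): assuming a
   target with TARGET_PAGE_BITS = 12 and TARGET_PHYS_ADDR_SPACE_BITS = 52,
   the page index occupies 40 bits.  P_L1_BITS_REM = 40 % 10 = 0, which is
   < 4, so the L1 table absorbs a whole level: P_L1_BITS = 10, P_L1_SIZE =
   1024 entries, P_L1_SHIFT = 52 - 12 - 10 = 30, and P_L1_SHIFT / L2_BITS = 3
   further levels of L2_SIZE-entry tables sit below the L1 table. */
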
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

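/* Illustrative note (not part of the original source): callers index this
   map by target page number, e.g. page_find(addr >> TARGET_PAGE_BITS) as
   done in tb_invalidate_phys_page_range() below; a NULL result simply means
   no TB has been registered for that page yet. */
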
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

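/* Illustrative sketch (not part of the original source): a caller looking up
   a hypothetical physical address "paddr" would do roughly

       PhysPageDesc p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       ram_addr_t pd = p.phys_offset;

   where the page-aligned part (pd & TARGET_PAGE_MASK) is the ram offset and
   the low bits carry the io_index, as the PhysPageDesc comment above
   describes and as breakpoint_invalidate() below relies on. */
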
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

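/* Illustrative worked example (not part of the original source):
   set_bits(tab, 3, 6) marks bits 3..8.  Here start=3 and end=9 fall in
   different bytes, so the first byte gets tab[0] |= 0xf8 (bits 3..7),
   start is then rounded up to 8, the middle loop does nothing, and the
   tail sets bit 0 of tab[1]. */
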
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

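/* Illustrative usage sketch (not part of the original source): installing a
   4-byte GDB-style write watchpoint at a hypothetical guest address "addr":

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
           ... the length was not a power of two or addr was unaligned ...
       }
*/
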
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

c7cd6a37 1711const CPULogItem cpu_log_items[] = {
5fafdf24 1712 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1713 "show generated host assembly code for each compiled TB" },
1714 { CPU_LOG_TB_IN_ASM, "in_asm",
1715 "show target assembly code for each compiled TB" },
5fafdf24 1716 { CPU_LOG_TB_OP, "op",
57fec1fe 1717 "show micro ops for each compiled TB" },
f193c797 1718 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1719 "show micro ops "
1720#ifdef TARGET_I386
1721 "before eflags optimization and "
f193c797 1722#endif
e01a1157 1723 "after liveness analysis" },
f193c797
FB
1724 { CPU_LOG_INT, "int",
1725 "show interrupts/exceptions in short format" },
1726 { CPU_LOG_EXEC, "exec",
1727 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1728 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1729 "show CPU state before block translation" },
f193c797
FB
1730#ifdef TARGET_I386
1731 { CPU_LOG_PCALL, "pcall",
1732 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1733 { CPU_LOG_RESET, "cpu_reset",
1734 "show CPU state before CPU resets" },
f193c797 1735#endif
8e3a9fd2 1736#ifdef DEBUG_IOPORT
fd872598
FB
1737 { CPU_LOG_IOPORT, "ioport",
1738 "show all i/o ports accesses" },
8e3a9fd2 1739#endif
f193c797
FB
1740 { 0, NULL, NULL },
1741};
1742
1743static int cmp1(const char *s1, int n, const char *s2)
1744{
1745 if (strlen(s2) != n)
1746 return 0;
1747 return memcmp(s1, s2, n) == 0;
1748}
3b46e624 1749
f193c797
FB
1750/* takes a comma separated list of log masks. Return 0 if error. */
1751int cpu_str_to_log_mask(const char *str)
1752{
c7cd6a37 1753 const CPULogItem *item;
f193c797
FB
1754 int mask;
1755 const char *p, *p1;
1756
1757 p = str;
1758 mask = 0;
1759 for(;;) {
1760 p1 = strchr(p, ',');
1761 if (!p1)
1762 p1 = p + strlen(p);
9742bf26
YT
1763 if(cmp1(p,p1-p,"all")) {
1764 for(item = cpu_log_items; item->mask != 0; item++) {
1765 mask |= item->mask;
1766 }
1767 } else {
1768 for(item = cpu_log_items; item->mask != 0; item++) {
1769 if (cmp1(p, p1 - p, item->name))
1770 goto found;
1771 }
1772 return 0;
f193c797 1773 }
f193c797
FB
1774 found:
1775 mask |= item->mask;
1776 if (*p1 != ',')
1777 break;
1778 p = p1 + 1;
1779 }
1780 return mask;
1781}
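
/* Hedged usage sketch (not from this file): roughly how a "-d" style
 * command-line option string would be turned into a log mask with the
 * helper above.  cpu_set_log() is assumed to be this era's setter for
 * the log level; treat the whole function as illustrative. */
static void example_enable_logging(const char *items)
{
    int mask = cpu_str_to_log_mask(items);   /* e.g. "in_asm,exec" */

    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", items);
        return;
    }
    cpu_set_log(mask);
}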
ea041c0e 1782
7501267e
FB
1783void cpu_abort(CPUState *env, const char *fmt, ...)
1784{
1785 va_list ap;
493ae1f0 1786 va_list ap2;
7501267e
FB
1787
1788 va_start(ap, fmt);
493ae1f0 1789 va_copy(ap2, ap);
7501267e
FB
1790 fprintf(stderr, "qemu: fatal: ");
1791 vfprintf(stderr, fmt, ap);
1792 fprintf(stderr, "\n");
1793#ifdef TARGET_I386
7fe48483
FB
1794 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1795#else
1796 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1797#endif
93fcfe39
AL
1798 if (qemu_log_enabled()) {
1799 qemu_log("qemu: fatal: ");
1800 qemu_log_vprintf(fmt, ap2);
1801 qemu_log("\n");
f9373291 1802#ifdef TARGET_I386
93fcfe39 1803 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1804#else
93fcfe39 1805 log_cpu_state(env, 0);
f9373291 1806#endif
31b1a7b4 1807 qemu_log_flush();
93fcfe39 1808 qemu_log_close();
924edcae 1809 }
493ae1f0 1810 va_end(ap2);
f9373291 1811 va_end(ap);
fd052bf6
RV
1812#if defined(CONFIG_USER_ONLY)
1813 {
1814 struct sigaction act;
1815 sigfillset(&act.sa_mask);
1816 act.sa_handler = SIG_DFL;
1817 sigaction(SIGABRT, &act, NULL);
1818 }
1819#endif
7501267e
FB
1820 abort();
1821}
1822
c5be9f08
TS
1823CPUState *cpu_copy(CPUState *env)
1824{
01ba9816 1825 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1826 CPUState *next_cpu = new_env->next_cpu;
1827 int cpu_index = new_env->cpu_index;
5a38f081
AL
1828#if defined(TARGET_HAS_ICE)
1829 CPUBreakpoint *bp;
1830 CPUWatchpoint *wp;
1831#endif
1832
c5be9f08 1833 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1834
1835 /* Preserve chaining and index. */
c5be9f08
TS
1836 new_env->next_cpu = next_cpu;
1837 new_env->cpu_index = cpu_index;
5a38f081
AL
1838
1839 /* Clone all break/watchpoints.
1840 Note: Once we support ptrace with hw-debug register access, make sure
1841 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1842 QTAILQ_INIT(&env->breakpoints);
1843 QTAILQ_INIT(&env->watchpoints);
5a38f081 1844#if defined(TARGET_HAS_ICE)
72cf2d4f 1845 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1846 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1847 }
72cf2d4f 1848 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1849 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1850 wp->flags, NULL);
1851 }
1852#endif
1853
c5be9f08
TS
1854 return new_env;
1855}
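
/* Hedged usage sketch: the user-mode fork()/clone() emulation is the
 * typical caller of cpu_copy() -- the child gets its own CPUState
 * cloned from the parent.  example_do_fork() is illustrative only, not
 * the real linux-user code. */
static CPUState *example_do_fork(CPUState *parent_env)
{
    CPUState *child_env = cpu_copy(parent_env);

    /* the caller is expected to give the child fresh signal state and
       a fresh stack pointer before letting it run */
    return child_env;
}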
1856
0124311e
FB
1857#if !defined(CONFIG_USER_ONLY)
1858
5c751e99
EI
1859static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1860{
1861 unsigned int i;
1862
1863 /* Discard jump cache entries for any tb which might potentially
1864 overlap the flushed page. */
1865 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1866 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1867 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1868
1869 i = tb_jmp_cache_hash_page(addr);
1870 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1871 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1872}
1873
08738984
IK
1874static CPUTLBEntry s_cputlb_empty_entry = {
1875 .addr_read = -1,
1876 .addr_write = -1,
1877 .addr_code = -1,
1878 .addend = -1,
1879};
1880
ee8b7021
FB
1881/* NOTE: if flush_global is true, also flush global entries (not
1882 implemented yet) */
1883void tlb_flush(CPUState *env, int flush_global)
33417e70 1884{
33417e70 1885 int i;
0124311e 1886
9fa3e853
FB
1887#if defined(DEBUG_TLB)
1888 printf("tlb_flush:\n");
1889#endif
0124311e
FB
1890 /* must reset current TB so that interrupts cannot modify the
1891 links while we are modifying them */
1892 env->current_tb = NULL;
1893
33417e70 1894 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1895 int mmu_idx;
1896 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1897 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1898 }
33417e70 1899 }
9fa3e853 1900
8a40a180 1901 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1902
d4c430a8
PB
1903 env->tlb_flush_addr = -1;
1904 env->tlb_flush_mask = 0;
e3db7226 1905 tlb_flush_count++;
33417e70
FB
1906}
1907
274da6b2 1908static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1909{
5fafdf24 1910 if (addr == (tlb_entry->addr_read &
84b7b8e7 1911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1912 addr == (tlb_entry->addr_write &
84b7b8e7 1913 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1914 addr == (tlb_entry->addr_code &
84b7b8e7 1915 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1916 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1917 }
61382a50
FB
1918}
1919
2e12669a 1920void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1921{
8a40a180 1922 int i;
cfde4bd9 1923 int mmu_idx;
0124311e 1924
9fa3e853 1925#if defined(DEBUG_TLB)
108c49b8 1926 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1927#endif
d4c430a8
PB
1928 /* Check if we need to flush due to large pages. */
1929 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1930#if defined(DEBUG_TLB)
1931 printf("tlb_flush_page: forced full flush ("
1932 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1933 env->tlb_flush_addr, env->tlb_flush_mask);
1934#endif
1935 tlb_flush(env, 1);
1936 return;
1937 }
0124311e
FB
1938 /* must reset current TB so that interrupts cannot modify the
1939 links while we are modifying them */
1940 env->current_tb = NULL;
61382a50
FB
1941
1942 addr &= TARGET_PAGE_MASK;
1943 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1944 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1945 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1946
5c751e99 1947 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1948}
1949
9fa3e853
FB
1950/* update the TLBs so that writes to code in the virtual page 'addr'
1951 can be detected */
c227f099 1952static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1953{
5fafdf24 1954 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1955 ram_addr + TARGET_PAGE_SIZE,
1956 CODE_DIRTY_FLAG);
9fa3e853
FB
1957}
1958
9fa3e853 1959/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1960 tested for self modifying code */
c227f099 1961static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1962 target_ulong vaddr)
9fa3e853 1963{
f7c11b53 1964 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1965}
1966
5fafdf24 1967static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1968 unsigned long start, unsigned long length)
1969{
1970 unsigned long addr;
0e0df1e2 1971 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 1972 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1973 if ((addr - start) < length) {
0f459d16 1974 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1975 }
1976 }
1977}
1978
5579c7f3 1979/* Note: start and end must be within the same ram block. */
c227f099 1980void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1981 int dirty_flags)
1ccde1cb
FB
1982{
1983 CPUState *env;
4f2ac237 1984 unsigned long length, start1;
f7c11b53 1985 int i;
1ccde1cb
FB
1986
1987 start &= TARGET_PAGE_MASK;
1988 end = TARGET_PAGE_ALIGN(end);
1989
1990 length = end - start;
1991 if (length == 0)
1992 return;
f7c11b53 1993 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1994
1ccde1cb
FB
1995 /* we modify the TLB cache so that the dirty bit will be set again
1996 when accessing the range */
b2e0a138 1997 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1998 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1999 address comparisons below. */
b2e0a138 2000 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2001 != (end - 1) - start) {
2002 abort();
2003 }
2004
6a00d601 2005 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2006 int mmu_idx;
2007 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2008 for(i = 0; i < CPU_TLB_SIZE; i++)
2009 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2010 start1, length);
2011 }
6a00d601 2012 }
1ccde1cb
FB
2013}
2014
74576198
AL
2015int cpu_physical_memory_set_dirty_tracking(int enable)
2016{
f6f3fbca 2017 int ret = 0;
74576198 2018 in_migration = enable;
f6f3fbca 2019 return ret;
74576198
AL
2020}
2021
3a7d929e
FB
2022static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2023{
c227f099 2024 ram_addr_t ram_addr;
5579c7f3 2025 void *p;
3a7d929e 2026
0e0df1e2 2027 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2028 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2029 + tlb_entry->addend);
e890261f 2030 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2031 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2032 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2033 }
2034 }
2035}
2036
2037/* update the TLB according to the current state of the dirty bits */
2038void cpu_tlb_update_dirty(CPUState *env)
2039{
2040 int i;
cfde4bd9
IY
2041 int mmu_idx;
2042 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2043 for(i = 0; i < CPU_TLB_SIZE; i++)
2044 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2045 }
3a7d929e
FB
2046}
2047
0f459d16 2048static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2049{
0f459d16
PB
2050 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2051 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2052}
2053
0f459d16
PB
2054/* update the TLB corresponding to virtual page vaddr
2055 so that it is no longer dirty */
2056static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2057{
1ccde1cb 2058 int i;
cfde4bd9 2059 int mmu_idx;
1ccde1cb 2060
0f459d16 2061 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2062 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2063 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2064 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2065}
2066
d4c430a8
PB
2067/* Our TLB does not support large pages, so remember the area covered by
2068 large pages and trigger a full TLB flush if these are invalidated. */
2069static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2070 target_ulong size)
2071{
2072 target_ulong mask = ~(size - 1);
2073
2074 if (env->tlb_flush_addr == (target_ulong)-1) {
2075 env->tlb_flush_addr = vaddr & mask;
2076 env->tlb_flush_mask = mask;
2077 return;
2078 }
2079 /* Extend the existing region to include the new page.
2080 This is a compromise between unnecessary flushes and the cost
2081 of maintaining a full variable size TLB. */
2082 mask &= env->tlb_flush_mask;
2083 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2084 mask <<= 1;
2085 }
2086 env->tlb_flush_addr &= mask;
2087 env->tlb_flush_mask = mask;
2088}
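
/* Standalone sketch of the mask-widening idea above (plain C, compile
 * separately; no QEMU types).  Given the currently tracked region and a
 * new large-page address, the mask is widened until both addresses fall
 * inside one aligned window. */
#include <stdint.h>
#include <stdio.h>

static void example_extend_region(uint64_t *flush_addr, uint64_t *flush_mask,
                                  uint64_t vaddr, uint64_t page_size)
{
    uint64_t mask = ~(page_size - 1);

    if (*flush_addr == (uint64_t)-1) {      /* nothing tracked yet */
        *flush_addr = vaddr & mask;
        *flush_mask = mask;
        return;
    }
    mask &= *flush_mask;
    while (((*flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;                         /* widen until both match */
    }
    *flush_addr &= mask;
    *flush_mask = mask;
}

int main(void)
{
    uint64_t addr = (uint64_t)-1, mask = 0;

    example_extend_region(&addr, &mask, 0x40200000, 0x200000); /* 2 MiB page */
    example_extend_region(&addr, &mask, 0x40a00000, 0x200000); /* another one */
    /* prints base=0x40000000 mask=0xffffffffff000000: one 16 MiB window */
    printf("base=%#llx mask=%#llx\n",
           (unsigned long long)addr, (unsigned long long)mask);
    return 0;
}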
2089
1d393fa2
AK
2090static bool is_ram_rom(ram_addr_t pd)
2091{
2092 pd &= ~TARGET_PAGE_MASK;
0e0df1e2 2093 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
1d393fa2
AK
2094}
2095
2096static bool is_ram_rom_romd(ram_addr_t pd)
2097{
2098 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2099}
2100
d4c430a8
PB
2101/* Add a new TLB entry. At most one entry for a given virtual address
2102 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2103 supplied size is only used by tlb_flush_page. */
2104void tlb_set_page(CPUState *env, target_ulong vaddr,
2105 target_phys_addr_t paddr, int prot,
2106 int mmu_idx, target_ulong size)
9fa3e853 2107{
f1f6e3b8 2108 PhysPageDesc p;
4f2ac237 2109 unsigned long pd;
9fa3e853 2110 unsigned int index;
4f2ac237 2111 target_ulong address;
0f459d16 2112 target_ulong code_address;
355b1943 2113 unsigned long addend;
84b7b8e7 2114 CPUTLBEntry *te;
a1d1bb31 2115 CPUWatchpoint *wp;
c227f099 2116 target_phys_addr_t iotlb;
9fa3e853 2117
d4c430a8
PB
2118 assert(size >= TARGET_PAGE_SIZE);
2119 if (size != TARGET_PAGE_SIZE) {
2120 tlb_add_large_page(env, vaddr, size);
2121 }
92e873b9 2122 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2123 pd = p.phys_offset;
9fa3e853 2124#if defined(DEBUG_TLB)
7fd3f494
SW
2125 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2126 " prot=%x idx=%d pd=0x%08lx\n",
2127 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2128#endif
2129
0f459d16 2130 address = vaddr;
1d393fa2 2131 if (!is_ram_rom_romd(pd)) {
0f459d16
PB
2132 /* IO memory case (romd handled later) */
2133 address |= TLB_MMIO;
2134 }
5579c7f3 2135 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1d393fa2 2136 if (is_ram_rom(pd)) {
0f459d16
PB
2137 /* Normal RAM. */
2138 iotlb = pd & TARGET_PAGE_MASK;
0e0df1e2
AK
2139 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2140 iotlb |= io_mem_notdirty.ram_addr;
0f459d16 2141 else
0e0df1e2 2142 iotlb |= io_mem_rom.ram_addr;
0f459d16 2143 } else {
ccbb4d44 2144 /* IO handlers are currently passed a physical address.
0f459d16
PB
2145 It would be nice to pass an offset from the base address
2146 of that region. This would avoid having to special case RAM,
2147 and avoid full address decoding in every device.
2148 We can't use the high bits of pd for this because
2149 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2150 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2151 iotlb += p.region_offset;
0f459d16
PB
2152 }
2153
2154 code_address = address;
2155 /* Make accesses to pages with watchpoints go via the
2156 watchpoint trap routines. */
72cf2d4f 2157 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2158 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2159 /* Avoid trapping reads of pages with a write breakpoint. */
2160 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2161 iotlb = io_mem_watch + paddr;
2162 address |= TLB_MMIO;
2163 break;
2164 }
6658ffb8 2165 }
0f459d16 2166 }
d79acba4 2167
0f459d16
PB
2168 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2169 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2170 te = &env->tlb_table[mmu_idx][index];
2171 te->addend = addend - vaddr;
2172 if (prot & PAGE_READ) {
2173 te->addr_read = address;
2174 } else {
2175 te->addr_read = -1;
2176 }
5c751e99 2177
0f459d16
PB
2178 if (prot & PAGE_EXEC) {
2179 te->addr_code = code_address;
2180 } else {
2181 te->addr_code = -1;
2182 }
2183 if (prot & PAGE_WRITE) {
0e0df1e2 2184 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
0f459d16
PB
2185 (pd & IO_MEM_ROMD)) {
2186 /* Write access calls the I/O callback. */
2187 te->addr_write = address | TLB_MMIO;
0e0df1e2 2188 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
0f459d16
PB
2189 !cpu_physical_memory_is_dirty(pd)) {
2190 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2191 } else {
0f459d16 2192 te->addr_write = address;
9fa3e853 2193 }
0f459d16
PB
2194 } else {
2195 te->addr_write = -1;
9fa3e853 2196 }
9fa3e853
FB
2197}
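
/* Hedged caller sketch: a target's tlb_fill() typically walks the guest
 * page tables and then installs the result with tlb_set_page().  The
 * walk below ("example_walk_page_tables") is a stand-in, not any real
 * target's MMU code. */
static void example_walk_page_tables(CPUState *env, target_ulong vaddr,
                                     target_phys_addr_t *paddr, int *prot)
{
    /* stand-in: identity-map the page with full permissions */
    *paddr = vaddr;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static void example_tlb_fill(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    example_walk_page_tables(env, vaddr, &paddr, &prot);
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}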
2198
0124311e
FB
2199#else
2200
ee8b7021 2201void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2202{
2203}
2204
2e12669a 2205void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2206{
2207}
2208
edf8e2af
MW
2209/*
2210 * Walks guest process memory "regions" one by one
2211 * and calls callback function 'fn' for each region.
2212 */
5cd2c5b6
RH
2213
2214struct walk_memory_regions_data
2215{
2216 walk_memory_regions_fn fn;
2217 void *priv;
2218 unsigned long start;
2219 int prot;
2220};
2221
2222static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2223 abi_ulong end, int new_prot)
5cd2c5b6
RH
2224{
2225 if (data->start != -1ul) {
2226 int rc = data->fn(data->priv, data->start, end, data->prot);
2227 if (rc != 0) {
2228 return rc;
2229 }
2230 }
2231
2232 data->start = (new_prot ? end : -1ul);
2233 data->prot = new_prot;
2234
2235 return 0;
2236}
2237
2238static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2239 abi_ulong base, int level, void **lp)
5cd2c5b6 2240{
b480d9b7 2241 abi_ulong pa;
5cd2c5b6
RH
2242 int i, rc;
2243
2244 if (*lp == NULL) {
2245 return walk_memory_regions_end(data, base, 0);
2246 }
2247
2248 if (level == 0) {
2249 PageDesc *pd = *lp;
7296abac 2250 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2251 int prot = pd[i].flags;
2252
2253 pa = base | (i << TARGET_PAGE_BITS);
2254 if (prot != data->prot) {
2255 rc = walk_memory_regions_end(data, pa, prot);
2256 if (rc != 0) {
2257 return rc;
9fa3e853 2258 }
9fa3e853 2259 }
5cd2c5b6
RH
2260 }
2261 } else {
2262 void **pp = *lp;
7296abac 2263 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2264 pa = base | ((abi_ulong)i <<
2265 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2266 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2267 if (rc != 0) {
2268 return rc;
2269 }
2270 }
2271 }
2272
2273 return 0;
2274}
2275
2276int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2277{
2278 struct walk_memory_regions_data data;
2279 unsigned long i;
2280
2281 data.fn = fn;
2282 data.priv = priv;
2283 data.start = -1ul;
2284 data.prot = 0;
2285
2286 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2287 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2288 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2289 if (rc != 0) {
2290 return rc;
9fa3e853 2291 }
33417e70 2292 }
5cd2c5b6
RH
2293
2294 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2295}
2296
b480d9b7
PB
2297static int dump_region(void *priv, abi_ulong start,
2298 abi_ulong end, unsigned long prot)
edf8e2af
MW
2299{
2300 FILE *f = (FILE *)priv;
2301
b480d9b7
PB
2302 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2303 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2304 start, end, end - start,
2305 ((prot & PAGE_READ) ? 'r' : '-'),
2306 ((prot & PAGE_WRITE) ? 'w' : '-'),
2307 ((prot & PAGE_EXEC) ? 'x' : '-'));
2308
2309 return (0);
2310}
2311
2312/* dump memory mappings */
2313void page_dump(FILE *f)
2314{
2315 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2316 "start", "end", "size", "prot");
2317 walk_memory_regions(f, dump_region);
33417e70
FB
2318}
2319
53a5960a 2320int page_get_flags(target_ulong address)
33417e70 2321{
9fa3e853
FB
2322 PageDesc *p;
2323
2324 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2325 if (!p)
9fa3e853
FB
2326 return 0;
2327 return p->flags;
2328}
2329
376a7909
RH
2330/* Modify the flags of a page and invalidate the code if necessary.
2331 The flag PAGE_WRITE_ORG is positioned automatically depending
2332 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2333void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2334{
376a7909
RH
2335 target_ulong addr, len;
2336
2337 /* This function should never be called with addresses outside the
2338 guest address space. If this assert fires, it probably indicates
2339 a missing call to h2g_valid. */
b480d9b7
PB
2340#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2341 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2342#endif
2343 assert(start < end);
9fa3e853
FB
2344
2345 start = start & TARGET_PAGE_MASK;
2346 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2347
2348 if (flags & PAGE_WRITE) {
9fa3e853 2349 flags |= PAGE_WRITE_ORG;
376a7909
RH
2350 }
2351
2352 for (addr = start, len = end - start;
2353 len != 0;
2354 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2355 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356
2357 /* If the write protection bit is set, then we invalidate
2358 the code inside. */
5fafdf24 2359 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2360 (flags & PAGE_WRITE) &&
2361 p->first_tb) {
d720b93d 2362 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2363 }
2364 p->flags = flags;
2365 }
33417e70
FB
2366}
2367
3d97b40b
TS
2368int page_check_range(target_ulong start, target_ulong len, int flags)
2369{
2370 PageDesc *p;
2371 target_ulong end;
2372 target_ulong addr;
2373
376a7909
RH
2374 /* This function should never be called with addresses outside the
2375 guest address space. If this assert fires, it probably indicates
2376 a missing call to h2g_valid. */
338e9e6c
BS
2377#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2378 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2379#endif
2380
3e0650a9
RH
2381 if (len == 0) {
2382 return 0;
2383 }
376a7909
RH
2384 if (start + len - 1 < start) {
2385 /* We've wrapped around. */
55f280c9 2386 return -1;
376a7909 2387 }
55f280c9 2388
3d97b40b
TS
 2389 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2390 start = start & TARGET_PAGE_MASK;
2391
376a7909
RH
2392 for (addr = start, len = end - start;
2393 len != 0;
2394 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2395 p = page_find(addr >> TARGET_PAGE_BITS);
2396 if( !p )
2397 return -1;
2398 if( !(p->flags & PAGE_VALID) )
2399 return -1;
2400
dae3270c 2401 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2402 return -1;
dae3270c
FB
2403 if (flags & PAGE_WRITE) {
2404 if (!(p->flags & PAGE_WRITE_ORG))
2405 return -1;
2406 /* unprotect the page if it was put read-only because it
2407 contains translated code */
2408 if (!(p->flags & PAGE_WRITE)) {
2409 if (!page_unprotect(addr, 0, NULL))
2410 return -1;
2411 }
2412 return 0;
2413 }
3d97b40b
TS
2414 }
2415 return 0;
2416}
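
/* Hedged usage sketch: validating a guest buffer before touching it,
 * roughly what the user-mode access_ok()/lock_user() helpers do on top
 * of page_check_range().  Purely illustrative. */
static int example_guest_buffer_readable(target_ulong guest_addr,
                                         target_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_READ) == 0;
}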
2417
9fa3e853 2418/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2419 page. Return TRUE if the fault was successfully handled. */
53a5960a 2420int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2421{
45d679d6
AJ
2422 unsigned int prot;
2423 PageDesc *p;
53a5960a 2424 target_ulong host_start, host_end, addr;
9fa3e853 2425
c8a706fe
PB
2426 /* Technically this isn't safe inside a signal handler. However we
2427 know this only ever happens in a synchronous SEGV handler, so in
2428 practice it seems to be ok. */
2429 mmap_lock();
2430
45d679d6
AJ
2431 p = page_find(address >> TARGET_PAGE_BITS);
2432 if (!p) {
c8a706fe 2433 mmap_unlock();
9fa3e853 2434 return 0;
c8a706fe 2435 }
45d679d6 2436
9fa3e853
FB
2437 /* if the page was really writable, then we change its
2438 protection back to writable */
45d679d6
AJ
2439 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2440 host_start = address & qemu_host_page_mask;
2441 host_end = host_start + qemu_host_page_size;
2442
2443 prot = 0;
2444 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2445 p = page_find(addr >> TARGET_PAGE_BITS);
2446 p->flags |= PAGE_WRITE;
2447 prot |= p->flags;
2448
9fa3e853
FB
2449 /* and since the content will be modified, we must invalidate
2450 the corresponding translated code. */
45d679d6 2451 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2452#ifdef DEBUG_TB_CHECK
45d679d6 2453 tb_invalidate_check(addr);
9fa3e853 2454#endif
9fa3e853 2455 }
45d679d6
AJ
2456 mprotect((void *)g2h(host_start), qemu_host_page_size,
2457 prot & PAGE_BITS);
2458
2459 mmap_unlock();
2460 return 1;
9fa3e853 2461 }
c8a706fe 2462 mmap_unlock();
9fa3e853
FB
2463 return 0;
2464}
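
/* Hedged caller sketch: the host SEGV handler of the user-mode emulator
 * asks page_unprotect() whether the fault was a write to a page that is
 * only read-only because it holds translated code.  If so, the faulting
 * write can simply be restarted.  The function name and return
 * convention here are illustrative; the real handler lives in the
 * per-host signal code. */
static int example_handle_write_fault(target_ulong guest_addr,
                                      unsigned long host_pc, void *uc)
{
    if (page_unprotect(guest_addr, host_pc, uc)) {
        return 1;   /* TBs invalidated, page writable again: retry */
    }
    return 0;       /* genuine guest fault: deliver SIGSEGV to the guest */
}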
2465
6a00d601
FB
2466static inline void tlb_set_dirty(CPUState *env,
2467 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2468{
2469}
9fa3e853
FB
2470#endif /* defined(CONFIG_USER_ONLY) */
2471
e2eef170 2472#if !defined(CONFIG_USER_ONLY)
8da3ff18 2473
c04b2b78
PB
2474#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2475typedef struct subpage_t {
70c68e44 2476 MemoryRegion iomem;
c04b2b78 2477 target_phys_addr_t base;
f6405247
RH
2478 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2479 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2480} subpage_t;
2481
c227f099
AL
2482static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2483 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2484static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2485 ram_addr_t orig_memory,
2486 ram_addr_t region_offset);
db7b5426
BS
2487#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2488 need_subpage) \
2489 do { \
2490 if (addr > start_addr) \
2491 start_addr2 = 0; \
2492 else { \
2493 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2494 if (start_addr2 > 0) \
2495 need_subpage = 1; \
2496 } \
2497 \
49e9fba2 2498 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2499 end_addr2 = TARGET_PAGE_SIZE - 1; \
2500 else { \
2501 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2502 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2503 need_subpage = 1; \
2504 } \
2505 } while (0)
2506
8f2498f9
MT
2507/* register physical memory.
2508 For RAM, 'size' must be a multiple of the target page size.
2509 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2510 io memory page. The address used when calling the IO function is
2511 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2512 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2513 before calculating this offset. This should not be a problem unless
2514 the low bits of start_addr and region_offset differ. */
dd81124b
AK
2515void cpu_register_physical_memory_log(MemoryRegionSection *section,
2516 bool readable, bool readonly)
33417e70 2517{
dd81124b
AK
2518 target_phys_addr_t start_addr = section->offset_within_address_space;
2519 ram_addr_t size = section->size;
2520 ram_addr_t phys_offset = section->mr->ram_addr;
2521 ram_addr_t region_offset = section->offset_within_region;
c227f099 2522 target_phys_addr_t addr, end_addr;
92e873b9 2523 PhysPageDesc *p;
9d42037b 2524 CPUState *env;
c227f099 2525 ram_addr_t orig_size = size;
f6405247 2526 subpage_t *subpage;
33417e70 2527
dd81124b
AK
2528 if (memory_region_is_ram(section->mr)) {
2529 phys_offset += region_offset;
2530 region_offset = 0;
2531 }
2532
2533 if (!readable) {
2534 phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
2535 }
2536
2537 if (readonly) {
2538 phys_offset |= io_mem_rom.ram_addr;
2539 }
2540
3b8e6a2d 2541 assert(size);
f6f3fbca 2542
0e0df1e2 2543 if (phys_offset == io_mem_unassigned.ram_addr) {
67c4d23c
PB
2544 region_offset = start_addr;
2545 }
8da3ff18 2546 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2547 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2548 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2549
2550 addr = start_addr;
2551 do {
f1f6e3b8 2552 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
0e0df1e2 2553 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
c227f099
AL
2554 ram_addr_t orig_memory = p->phys_offset;
2555 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2556 int need_subpage = 0;
2557
2558 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2559 need_subpage);
f6405247 2560 if (need_subpage) {
db7b5426
BS
2561 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2562 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2563 &p->phys_offset, orig_memory,
2564 p->region_offset);
db7b5426
BS
2565 } else {
2566 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2567 >> IO_MEM_SHIFT];
2568 }
8da3ff18
PB
2569 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2570 region_offset);
2571 p->region_offset = 0;
db7b5426
BS
2572 } else {
2573 p->phys_offset = phys_offset;
2774c6d0 2574 p->region_offset = region_offset;
1d393fa2 2575 if (is_ram_rom_romd(phys_offset))
db7b5426
BS
2576 phys_offset += TARGET_PAGE_SIZE;
2577 }
2578 } else {
2579 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2580 p->phys_offset = phys_offset;
8da3ff18 2581 p->region_offset = region_offset;
1d393fa2 2582 if (is_ram_rom_romd(phys_offset)) {
db7b5426 2583 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2584 } else {
c227f099 2585 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2586 int need_subpage = 0;
2587
2588 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2589 end_addr2, need_subpage);
2590
f6405247 2591 if (need_subpage) {
db7b5426 2592 subpage = subpage_init((addr & TARGET_PAGE_MASK),
0e0df1e2
AK
2593 &p->phys_offset,
2594 io_mem_unassigned.ram_addr,
67c4d23c 2595 addr & TARGET_PAGE_MASK);
db7b5426 2596 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2597 phys_offset, region_offset);
2598 p->region_offset = 0;
db7b5426
BS
2599 }
2600 }
2601 }
8da3ff18 2602 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2603 addr += TARGET_PAGE_SIZE;
2604 } while (addr != end_addr);
3b46e624 2605
9d42037b
FB
2606 /* since each CPU stores ram addresses in its TLB cache, we must
2607 reset the modified entries */
2608 /* XXX: slow ! */
2609 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2610 tlb_flush(env, 1);
2611 }
33417e70
FB
2612}
2613
c227f099 2614void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2615{
2616 if (kvm_enabled())
2617 kvm_coalesce_mmio_region(addr, size);
2618}
2619
c227f099 2620void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2621{
2622 if (kvm_enabled())
2623 kvm_uncoalesce_mmio_region(addr, size);
2624}
2625
62a2744c
SY
2626void qemu_flush_coalesced_mmio_buffer(void)
2627{
2628 if (kvm_enabled())
2629 kvm_flush_coalesced_mmio_buffer();
2630}
2631
c902760f
MT
2632#if defined(__linux__) && !defined(TARGET_S390X)
2633
2634#include <sys/vfs.h>
2635
2636#define HUGETLBFS_MAGIC 0x958458f6
2637
2638static long gethugepagesize(const char *path)
2639{
2640 struct statfs fs;
2641 int ret;
2642
2643 do {
9742bf26 2644 ret = statfs(path, &fs);
c902760f
MT
2645 } while (ret != 0 && errno == EINTR);
2646
2647 if (ret != 0) {
9742bf26
YT
2648 perror(path);
2649 return 0;
c902760f
MT
2650 }
2651
2652 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2653 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2654
2655 return fs.f_bsize;
2656}
2657
04b16653
AW
2658static void *file_ram_alloc(RAMBlock *block,
2659 ram_addr_t memory,
2660 const char *path)
c902760f
MT
2661{
2662 char *filename;
2663 void *area;
2664 int fd;
2665#ifdef MAP_POPULATE
2666 int flags;
2667#endif
2668 unsigned long hpagesize;
2669
2670 hpagesize = gethugepagesize(path);
2671 if (!hpagesize) {
9742bf26 2672 return NULL;
c902760f
MT
2673 }
2674
2675 if (memory < hpagesize) {
2676 return NULL;
2677 }
2678
2679 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2680 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2681 return NULL;
2682 }
2683
2684 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2685 return NULL;
c902760f
MT
2686 }
2687
2688 fd = mkstemp(filename);
2689 if (fd < 0) {
9742bf26
YT
2690 perror("unable to create backing store for hugepages");
2691 free(filename);
2692 return NULL;
c902760f
MT
2693 }
2694 unlink(filename);
2695 free(filename);
2696
2697 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2698
2699 /*
2700 * ftruncate is not supported by hugetlbfs in older
2701 * hosts, so don't bother bailing out on errors.
2702 * If anything goes wrong with it under other filesystems,
2703 * mmap will fail.
2704 */
2705 if (ftruncate(fd, memory))
9742bf26 2706 perror("ftruncate");
c902760f
MT
2707
2708#ifdef MAP_POPULATE
2709 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2710 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2711 * to sidestep this quirk.
2712 */
2713 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2714 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2715#else
2716 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2717#endif
2718 if (area == MAP_FAILED) {
9742bf26
YT
2719 perror("file_ram_alloc: can't mmap RAM pages");
2720 close(fd);
2721 return (NULL);
c902760f 2722 }
04b16653 2723 block->fd = fd;
c902760f
MT
2724 return area;
2725}
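
/* Standalone sketch (Linux-only, compile separately): report whether a
 * directory lives on hugetlbfs and what page size it offers, mirroring
 * the statfs() check in gethugepagesize() above.  Not part of QEMU. */
#include <stdio.h>
#include <sys/vfs.h>

#define EXAMPLE_HUGETLBFS_MAGIC 0x958458f6

int main(int argc, char **argv)
{
    struct statfs fs;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <path>\n", argv[0]);
        return 1;
    }
    if (statfs(argv[1], &fs) != 0) {
        perror("statfs");
        return 1;
    }
    if (fs.f_type != EXAMPLE_HUGETLBFS_MAGIC) {
        fprintf(stderr, "%s is not on hugetlbfs\n", argv[1]);
        return 1;
    }
    printf("huge page size: %ld bytes\n", (long)fs.f_bsize);
    return 0;
}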
2726#endif
2727
d17b5288 2728static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2729{
2730 RAMBlock *block, *next_block;
3e837b2c 2731 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2732
2733 if (QLIST_EMPTY(&ram_list.blocks))
2734 return 0;
2735
2736 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2737 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2738
2739 end = block->offset + block->length;
2740
2741 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2742 if (next_block->offset >= end) {
2743 next = MIN(next, next_block->offset);
2744 }
2745 }
2746 if (next - end >= size && next - end < mingap) {
3e837b2c 2747 offset = end;
04b16653
AW
2748 mingap = next - end;
2749 }
2750 }
3e837b2c
AW
2751
2752 if (offset == RAM_ADDR_MAX) {
2753 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2754 (uint64_t)size);
2755 abort();
2756 }
2757
04b16653
AW
2758 return offset;
2759}
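
/* Standalone sketch of the gap search above (plain C, compile
 * separately; no QEMU types): among all gaps between existing blocks,
 * pick the smallest one that still fits the request, so the RAM address
 * space stays compact.  The block list is illustrative. */
#include <stdint.h>
#include <stdio.h>

struct example_blk { uint64_t offset, length; };

static uint64_t example_find_gap(const struct example_blk *blocks, int n,
                                 uint64_t size)
{
    uint64_t best = (uint64_t)-1, mingap = (uint64_t)-1;
    int i, j;

    if (n == 0) {
        return 0;
    }
    for (i = 0; i < n; i++) {
        uint64_t end = blocks[i].offset + blocks[i].length;
        uint64_t next = (uint64_t)-1;

        for (j = 0; j < n; j++) {
            if (blocks[j].offset >= end && blocks[j].offset < next) {
                next = blocks[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;   /* (uint64_t)-1 means no gap was found */
}

int main(void)
{
    struct example_blk blocks[] = { { 0, 0x10000 }, { 0x40000, 0x10000 } };

    /* a 0x20000-byte request fits in the 0x30000-byte hole at 0x10000 */
    printf("offset=%#llx\n",
           (unsigned long long)example_find_gap(blocks, 2, 0x20000));
    return 0;
}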
2760
2761static ram_addr_t last_ram_offset(void)
d17b5288
AW
2762{
2763 RAMBlock *block;
2764 ram_addr_t last = 0;
2765
2766 QLIST_FOREACH(block, &ram_list.blocks, next)
2767 last = MAX(last, block->offset + block->length);
2768
2769 return last;
2770}
2771
c5705a77 2772void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2773{
2774 RAMBlock *new_block, *block;
2775
c5705a77
AK
2776 new_block = NULL;
2777 QLIST_FOREACH(block, &ram_list.blocks, next) {
2778 if (block->offset == addr) {
2779 new_block = block;
2780 break;
2781 }
2782 }
2783 assert(new_block);
2784 assert(!new_block->idstr[0]);
84b89d78
CM
2785
2786 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2787 char *id = dev->parent_bus->info->get_dev_path(dev);
2788 if (id) {
2789 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2790 g_free(id);
84b89d78
CM
2791 }
2792 }
2793 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2794
2795 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2796 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2797 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2798 new_block->idstr);
2799 abort();
2800 }
2801 }
c5705a77
AK
2802}
2803
2804ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2805 MemoryRegion *mr)
2806{
2807 RAMBlock *new_block;
2808
2809 size = TARGET_PAGE_ALIGN(size);
2810 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2811
7c637366 2812 new_block->mr = mr;
432d268c 2813 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2814 if (host) {
2815 new_block->host = host;
cd19cfa2 2816 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2817 } else {
2818 if (mem_path) {
c902760f 2819#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2820 new_block->host = file_ram_alloc(new_block, size, mem_path);
2821 if (!new_block->host) {
2822 new_block->host = qemu_vmalloc(size);
e78815a5 2823 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2824 }
c902760f 2825#else
6977dfe6
YT
2826 fprintf(stderr, "-mem-path option unsupported\n");
2827 exit(1);
c902760f 2828#endif
6977dfe6 2829 } else {
6b02494d 2830#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2831 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2832 a system-defined value, which is at least 256GB. Larger systems
2833 have larger values. We put the guest between the end of data
2834 segment (system break) and this value. We use 32GB as a base to
2835 have enough room for the system break to grow. */
2836 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2837 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2838 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2839 if (new_block->host == MAP_FAILED) {
2840 fprintf(stderr, "Allocating RAM failed\n");
2841 abort();
2842 }
6b02494d 2843#else
868bb33f 2844 if (xen_enabled()) {
fce537d4 2845 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2846 } else {
2847 new_block->host = qemu_vmalloc(size);
2848 }
6b02494d 2849#endif
e78815a5 2850 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2851 }
c902760f 2852 }
94a6b54f
PB
2853 new_block->length = size;
2854
f471a17e 2855 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2856
7267c094 2857 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2858 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2859 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2860 0xff, size >> TARGET_PAGE_BITS);
2861
6f0437e8
JK
2862 if (kvm_enabled())
2863 kvm_setup_guest_memory(new_block->host, size);
2864
94a6b54f
PB
2865 return new_block->offset;
2866}
e9a1ab19 2867
c5705a77 2868ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2869{
c5705a77 2870 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2871}
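
/* Hedged usage sketch: how backing storage for a RAM MemoryRegion is
 * obtained.  Real devices go through the memory API's RAM helpers,
 * which end up here; calling qemu_ram_alloc() directly as below is just
 * for illustration. */
static ram_addr_t example_alloc_backing(MemoryRegion *example_mr,
                                        ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, example_mr);

    /* the returned offset names the block; a host pointer to it can
       later be obtained with qemu_get_ram_ptr(offset) */
    return offset;
}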
2872
1f2e98b6
AW
2873void qemu_ram_free_from_ptr(ram_addr_t addr)
2874{
2875 RAMBlock *block;
2876
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
2878 if (addr == block->offset) {
2879 QLIST_REMOVE(block, next);
7267c094 2880 g_free(block);
1f2e98b6
AW
2881 return;
2882 }
2883 }
2884}
2885
c227f099 2886void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2887{
04b16653
AW
2888 RAMBlock *block;
2889
2890 QLIST_FOREACH(block, &ram_list.blocks, next) {
2891 if (addr == block->offset) {
2892 QLIST_REMOVE(block, next);
cd19cfa2
HY
2893 if (block->flags & RAM_PREALLOC_MASK) {
2894 ;
2895 } else if (mem_path) {
04b16653
AW
2896#if defined (__linux__) && !defined(TARGET_S390X)
2897 if (block->fd) {
2898 munmap(block->host, block->length);
2899 close(block->fd);
2900 } else {
2901 qemu_vfree(block->host);
2902 }
fd28aa13
JK
2903#else
2904 abort();
04b16653
AW
2905#endif
2906 } else {
2907#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2908 munmap(block->host, block->length);
2909#else
868bb33f 2910 if (xen_enabled()) {
e41d7c69 2911 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2912 } else {
2913 qemu_vfree(block->host);
2914 }
04b16653
AW
2915#endif
2916 }
7267c094 2917 g_free(block);
04b16653
AW
2918 return;
2919 }
2920 }
2921
e9a1ab19
FB
2922}
2923
cd19cfa2
HY
2924#ifndef _WIN32
2925void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2926{
2927 RAMBlock *block;
2928 ram_addr_t offset;
2929 int flags;
2930 void *area, *vaddr;
2931
2932 QLIST_FOREACH(block, &ram_list.blocks, next) {
2933 offset = addr - block->offset;
2934 if (offset < block->length) {
2935 vaddr = block->host + offset;
2936 if (block->flags & RAM_PREALLOC_MASK) {
2937 ;
2938 } else {
2939 flags = MAP_FIXED;
2940 munmap(vaddr, length);
2941 if (mem_path) {
2942#if defined(__linux__) && !defined(TARGET_S390X)
2943 if (block->fd) {
2944#ifdef MAP_POPULATE
2945 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2946 MAP_PRIVATE;
2947#else
2948 flags |= MAP_PRIVATE;
2949#endif
2950 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2951 flags, block->fd, offset);
2952 } else {
2953 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2954 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2955 flags, -1, 0);
2956 }
fd28aa13
JK
2957#else
2958 abort();
cd19cfa2
HY
2959#endif
2960 } else {
2961#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2962 flags |= MAP_SHARED | MAP_ANONYMOUS;
2963 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2964 flags, -1, 0);
2965#else
2966 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2967 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2968 flags, -1, 0);
2969#endif
2970 }
2971 if (area != vaddr) {
f15fbc4b
AP
2972 fprintf(stderr, "Could not remap addr: "
2973 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2974 length, addr);
2975 exit(1);
2976 }
2977 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2978 }
2979 return;
2980 }
2981 }
2982}
2983#endif /* !_WIN32 */
2984
dc828ca1 2985/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2986 With the exception of the softmmu code in this file, this should
2987 only be used for local memory (e.g. video ram) that the device owns,
2988 and knows it isn't going to access beyond the end of the block.
2989
2990 It should not be used for general purpose DMA.
2991 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2992 */
c227f099 2993void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2994{
94a6b54f
PB
2995 RAMBlock *block;
2996
f471a17e
AW
2997 QLIST_FOREACH(block, &ram_list.blocks, next) {
2998 if (addr - block->offset < block->length) {
7d82af38
VP
 2999 /* Move this entry to the start of the list. */
3000 if (block != QLIST_FIRST(&ram_list.blocks)) {
3001 QLIST_REMOVE(block, next);
3002 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3003 }
868bb33f 3004 if (xen_enabled()) {
432d268c
JN
3005 /* We need to check if the requested address is in the RAM
3006 * because we don't want to map the entire memory in QEMU.
712c2b41 3007 * In that case just map until the end of the page.
432d268c
JN
3008 */
3009 if (block->offset == 0) {
e41d7c69 3010 return xen_map_cache(addr, 0, 0);
432d268c 3011 } else if (block->host == NULL) {
e41d7c69
JK
3012 block->host =
3013 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3014 }
3015 }
f471a17e
AW
3016 return block->host + (addr - block->offset);
3017 }
94a6b54f 3018 }
f471a17e
AW
3019
3020 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3021 abort();
3022
3023 return NULL;
dc828ca1
PB
3024}
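
/* Hedged usage sketch of the contract described above: a device may
 * dereference a pointer into a RAM block it owns, while guest-driven
 * DMA should go through the cpu_physical_memory_* accessors so the
 * memory map is honoured.  cpu_physical_memory_write() is assumed from
 * the generic headers; the example functions themselves are
 * illustrative. */
static void example_clear_vram(ram_addr_t vram_offset, size_t len)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);  /* device-local RAM only */

    memset(p, 0, len);
}

static void example_dma_write(target_phys_addr_t guest_pa,
                              const uint8_t *buf, int len)
{
    /* general-purpose DMA path: let the core walk the memory map */
    cpu_physical_memory_write(guest_pa, buf, len);
}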
3025
b2e0a138
MT
3026/* Return a host pointer to ram allocated with qemu_ram_alloc.
3027 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3028 */
3029void *qemu_safe_ram_ptr(ram_addr_t addr)
3030{
3031 RAMBlock *block;
3032
3033 QLIST_FOREACH(block, &ram_list.blocks, next) {
3034 if (addr - block->offset < block->length) {
868bb33f 3035 if (xen_enabled()) {
432d268c
JN
3036 /* We need to check if the requested address is in the RAM
3037 * because we don't want to map the entire memory in QEMU.
712c2b41 3038 * In that case just map until the end of the page.
432d268c
JN
3039 */
3040 if (block->offset == 0) {
e41d7c69 3041 return xen_map_cache(addr, 0, 0);
432d268c 3042 } else if (block->host == NULL) {
e41d7c69
JK
3043 block->host =
3044 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3045 }
3046 }
b2e0a138
MT
3047 return block->host + (addr - block->offset);
3048 }
3049 }
3050
3051 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3052 abort();
3053
3054 return NULL;
3055}
3056
38bee5dc
SS
3057/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3058 * but takes a size argument */
8ab934f9 3059void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3060{
8ab934f9
SS
3061 if (*size == 0) {
3062 return NULL;
3063 }
868bb33f 3064 if (xen_enabled()) {
e41d7c69 3065 return xen_map_cache(addr, *size, 1);
868bb33f 3066 } else {
38bee5dc
SS
3067 RAMBlock *block;
3068
3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
3070 if (addr - block->offset < block->length) {
3071 if (addr - block->offset + *size > block->length)
3072 *size = block->length - addr + block->offset;
3073 return block->host + (addr - block->offset);
3074 }
3075 }
3076
3077 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3078 abort();
38bee5dc
SS
3079 }
3080}
3081
050a0ddf
AP
3082void qemu_put_ram_ptr(void *addr)
3083{
3084 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3085}
3086
e890261f 3087int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3088{
94a6b54f
PB
3089 RAMBlock *block;
3090 uint8_t *host = ptr;
3091
868bb33f 3092 if (xen_enabled()) {
e41d7c69 3093 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3094 return 0;
3095 }
3096
f471a17e 3097 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3098 /* This case happens when the block is not mapped. */
3099 if (block->host == NULL) {
3100 continue;
3101 }
f471a17e 3102 if (host - block->host < block->length) {
e890261f
MT
3103 *ram_addr = block->offset + (host - block->host);
3104 return 0;
f471a17e 3105 }
94a6b54f 3106 }
432d268c 3107
e890261f
MT
3108 return -1;
3109}
f471a17e 3110
e890261f
MT
3111/* Some of the softmmu routines need to translate from a host pointer
3112 (typically a TLB entry) back to a ram offset. */
3113ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3114{
3115 ram_addr_t ram_addr;
f471a17e 3116
e890261f
MT
3117 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3118 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3119 abort();
3120 }
3121 return ram_addr;
5579c7f3
PB
3122}
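
/* Hedged round-trip sketch: a RAM offset turned into a host pointer by
 * qemu_get_ram_ptr() can be mapped back with qemu_ram_addr_from_host(),
 * and the offsets should agree.  Purely illustrative (assert() from
 * <assert.h>). */
static void example_roundtrip(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == offset);
    }
}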
3123
0e0df1e2
AK
3124static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3125 unsigned size)
e18231a3
BS
3126{
3127#ifdef DEBUG_UNASSIGNED
3128 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3129#endif
5b450407 3130#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3131 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3132#endif
3133 return 0;
3134}
3135
0e0df1e2
AK
3136static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3137 uint64_t val, unsigned size)
e18231a3
BS
3138{
3139#ifdef DEBUG_UNASSIGNED
0e0df1e2 3140 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3141#endif
5b450407 3142#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3143 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3144#endif
33417e70
FB
3145}
3146
0e0df1e2
AK
3147static const MemoryRegionOps unassigned_mem_ops = {
3148 .read = unassigned_mem_read,
3149 .write = unassigned_mem_write,
3150 .endianness = DEVICE_NATIVE_ENDIAN,
3151};
e18231a3 3152
0e0df1e2
AK
3153static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3154 unsigned size)
e18231a3 3155{
0e0df1e2 3156 abort();
e18231a3
BS
3157}
3158
0e0df1e2
AK
3159static void error_mem_write(void *opaque, target_phys_addr_t addr,
3160 uint64_t value, unsigned size)
e18231a3 3161{
0e0df1e2 3162 abort();
33417e70
FB
3163}
3164
0e0df1e2
AK
3165static const MemoryRegionOps error_mem_ops = {
3166 .read = error_mem_read,
3167 .write = error_mem_write,
3168 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3169};
3170
0e0df1e2
AK
3171static const MemoryRegionOps rom_mem_ops = {
3172 .read = error_mem_read,
3173 .write = unassigned_mem_write,
3174 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3175};
3176
0e0df1e2
AK
3177static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3178 uint64_t val, unsigned size)
9fa3e853 3179{
3a7d929e 3180 int dirty_flags;
f7c11b53 3181 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3182 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3183#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3184 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3185 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3186#endif
3a7d929e 3187 }
0e0df1e2
AK
3188 switch (size) {
3189 case 1:
3190 stb_p(qemu_get_ram_ptr(ram_addr), val);
3191 break;
3192 case 2:
3193 stw_p(qemu_get_ram_ptr(ram_addr), val);
3194 break;
3195 case 4:
3196 stl_p(qemu_get_ram_ptr(ram_addr), val);
3197 break;
3198 default:
3199 abort();
3a7d929e 3200 }
f23db169 3201 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3202 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3203 /* we remove the notdirty callback only if the code has been
3204 flushed */
3205 if (dirty_flags == 0xff)
2e70f6ef 3206 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3207}
3208
0e0df1e2
AK
3209static const MemoryRegionOps notdirty_mem_ops = {
3210 .read = error_mem_read,
3211 .write = notdirty_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3213};
3214
0f459d16 3215/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3216static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3217{
3218 CPUState *env = cpu_single_env;
06d55cc1
AL
3219 target_ulong pc, cs_base;
3220 TranslationBlock *tb;
0f459d16 3221 target_ulong vaddr;
a1d1bb31 3222 CPUWatchpoint *wp;
06d55cc1 3223 int cpu_flags;
0f459d16 3224
06d55cc1
AL
3225 if (env->watchpoint_hit) {
3226 /* We re-entered the check after replacing the TB. Now raise
 3227 * the debug interrupt so that it will trigger after the
3228 * current instruction. */
3229 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3230 return;
3231 }
2e70f6ef 3232 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3233 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3234 if ((vaddr == (wp->vaddr & len_mask) ||
3235 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3236 wp->flags |= BP_WATCHPOINT_HIT;
3237 if (!env->watchpoint_hit) {
3238 env->watchpoint_hit = wp;
3239 tb = tb_find_pc(env->mem_io_pc);
3240 if (!tb) {
3241 cpu_abort(env, "check_watchpoint: could not find TB for "
3242 "pc=%p", (void *)env->mem_io_pc);
3243 }
618ba8e6 3244 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3245 tb_phys_invalidate(tb, -1);
3246 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3247 env->exception_index = EXCP_DEBUG;
3248 } else {
3249 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3250 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3251 }
3252 cpu_resume_from_signal(env, NULL);
06d55cc1 3253 }
6e140f28
AL
3254 } else {
3255 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3256 }
3257 }
3258}
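
/* Standalone sketch of one half of the hit test above (plain C, compile
 * separately).  A watchpoint covering a power-of-two number of bytes
 * stores len_mask = ~(len - 1); an access hits when its address falls
 * inside that aligned window.  The real check also tests the symmetric
 * case, where the access window (byte/word/long) covers the watchpoint
 * address. */
#include <stdint.h>
#include <stdio.h>

static int example_wp_hit(uint64_t wp_vaddr, uint64_t wp_len_mask,
                          uint64_t vaddr)
{
    return (vaddr & wp_len_mask) == wp_vaddr;
}

int main(void)
{
    uint64_t wp_vaddr = 0x1000;             /* 8-byte watchpoint at 0x1000 */
    uint64_t wp_len_mask = ~(uint64_t)7;

    printf("%d %d\n",
           example_wp_hit(wp_vaddr, wp_len_mask, 0x1004),   /* 1: inside  */
           example_wp_hit(wp_vaddr, wp_len_mask, 0x1008));  /* 0: outside */
    return 0;
}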
3259
6658ffb8
PB
3260/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3261 so these check for a hit then pass through to the normal out-of-line
3262 phys routines. */
c227f099 3263static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3264{
b4051334 3265 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3266 return ldub_phys(addr);
3267}
3268
c227f099 3269static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3270{
b4051334 3271 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3272 return lduw_phys(addr);
3273}
3274
c227f099 3275static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3276{
b4051334 3277 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3278 return ldl_phys(addr);
3279}
3280
c227f099 3281static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3282 uint32_t val)
3283{
b4051334 3284 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3285 stb_phys(addr, val);
3286}
3287
c227f099 3288static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3289 uint32_t val)
3290{
b4051334 3291 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3292 stw_phys(addr, val);
3293}
3294
c227f099 3295static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3296 uint32_t val)
3297{
b4051334 3298 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3299 stl_phys(addr, val);
3300}
3301
d60efc6b 3302static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3303 watch_mem_readb,
3304 watch_mem_readw,
3305 watch_mem_readl,
3306};
3307
d60efc6b 3308static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3309 watch_mem_writeb,
3310 watch_mem_writew,
3311 watch_mem_writel,
3312};
6658ffb8 3313
70c68e44
AK
3314static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3315 unsigned len)
db7b5426 3316{
70c68e44 3317 subpage_t *mmio = opaque;
f6405247 3318 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3319#if defined(DEBUG_SUBPAGE)
3320 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3321 mmio, len, addr, idx);
3322#endif
db7b5426 3323
f6405247
RH
3324 addr += mmio->region_offset[idx];
3325 idx = mmio->sub_io_index[idx];
70c68e44 3326 return io_mem_read(idx, addr, len);
db7b5426
BS
3327}
3328
70c68e44
AK
3329static void subpage_write(void *opaque, target_phys_addr_t addr,
3330 uint64_t value, unsigned len)
db7b5426 3331{
70c68e44 3332 subpage_t *mmio = opaque;
f6405247 3333 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3334#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3335 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3336 " idx %d value %"PRIx64"\n",
f6405247 3337 __func__, mmio, len, addr, idx, value);
db7b5426 3338#endif
f6405247
RH
3339
3340 addr += mmio->region_offset[idx];
3341 idx = mmio->sub_io_index[idx];
70c68e44 3342 io_mem_write(idx, addr, value, len);
db7b5426
BS
3343}
3344
70c68e44
AK
3345static const MemoryRegionOps subpage_ops = {
3346 .read = subpage_read,
3347 .write = subpage_write,
3348 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3349};
3350
de712f94
AK
3351static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3352 unsigned size)
56384e8b
AF
3353{
3354 ram_addr_t raddr = addr;
3355 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3356 switch (size) {
3357 case 1: return ldub_p(ptr);
3358 case 2: return lduw_p(ptr);
3359 case 4: return ldl_p(ptr);
3360 default: abort();
3361 }
56384e8b
AF
3362}
3363
de712f94
AK
3364static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3365 uint64_t value, unsigned size)
56384e8b
AF
3366{
3367 ram_addr_t raddr = addr;
3368 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3369 switch (size) {
3370 case 1: return stb_p(ptr, value);
3371 case 2: return stw_p(ptr, value);
3372 case 4: return stl_p(ptr, value);
3373 default: abort();
3374 }
56384e8b
AF
3375}
3376
de712f94
AK
3377static const MemoryRegionOps subpage_ram_ops = {
3378 .read = subpage_ram_read,
3379 .write = subpage_ram_write,
3380 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3381};
3382
c227f099
AL
3383static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3384 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3385{
3386 int idx, eidx;
3387
3388 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3389 return -1;
3390 idx = SUBPAGE_IDX(start);
3391 eidx = SUBPAGE_IDX(end);
3392#if defined(DEBUG_SUBPAGE)
0bf9e31a 3393 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3394 mmio, start, end, idx, eidx, memory);
3395#endif
0e0df1e2 3396 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
de712f94 3397 memory = io_mem_subpage_ram.ram_addr;
56384e8b 3398 }
f6405247 3399 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3400 for (; idx <= eidx; idx++) {
f6405247
RH
3401 mmio->sub_io_index[idx] = memory;
3402 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3403 }
3404
3405 return 0;
3406}
3407
f6405247
RH
3408static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3409 ram_addr_t orig_memory,
3410 ram_addr_t region_offset)
db7b5426 3411{
c227f099 3412 subpage_t *mmio;
db7b5426
BS
3413 int subpage_memory;
3414
7267c094 3415 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3416
3417 mmio->base = base;
70c68e44
AK
3418 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3419 "subpage", TARGET_PAGE_SIZE);
3420 subpage_memory = mmio->iomem.ram_addr;
db7b5426 3421#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3422 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3423 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3424#endif
1eec614b 3425 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3426 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3427
3428 return mmio;
3429}
3430
88715657
AL
3431static int get_free_io_mem_idx(void)
3432{
3433 int i;
3434
3435 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3436 if (!io_mem_used[i]) {
3437 io_mem_used[i] = 1;
3438 return i;
3439 }
c6703b47 3440 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3441 return -1;
3442}
3443
33417e70
FB
3444/* mem_read and mem_write are arrays of functions containing the
3445 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3446 2). All three access sizes must be provided (the code below asserts
3ee89922 3447 non-NULL pointers). If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3448 modified. If it is zero, a new io zone is allocated. The return
3449 value can be used with cpu_register_physical_memory(). (-1) is
 3450 returned on error. */
1eed09cb 3451static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3452 CPUReadMemoryFunc * const *mem_read,
3453 CPUWriteMemoryFunc * const *mem_write,
be675c97 3454 void *opaque)
33417e70 3455{
3cab721d
RH
3456 int i;
3457
33417e70 3458 if (io_index <= 0) {
88715657
AL
3459 io_index = get_free_io_mem_idx();
3460 if (io_index == -1)
3461 return io_index;
33417e70 3462 } else {
1eed09cb 3463 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3464 if (io_index >= IO_MEM_NB_ENTRIES)
3465 return -1;
3466 }
b5ff1b31 3467
3cab721d 3468 for (i = 0; i < 3; ++i) {
0e0df1e2
AK
3469 assert(mem_read[i]);
3470 _io_mem_read[io_index][i] = mem_read[i];
3cab721d
RH
3471 }
3472 for (i = 0; i < 3; ++i) {
0e0df1e2
AK
3473 assert(mem_write[i]);
3474 _io_mem_write[io_index][i] = mem_write[i];
3cab721d 3475 }
a4193c8a 3476 io_mem_opaque[io_index] = opaque;
f6405247
RH
3477
3478 return (io_index << IO_MEM_SHIFT);
33417e70 3479}
61382a50 3480
d60efc6b
BS
3481int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3482 CPUWriteMemoryFunc * const *mem_write,
be675c97 3483 void *opaque)
1eed09cb 3484{
be675c97 3485 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1eed09cb
AK
3486}
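
/*
 * Illustrative sketch only (not code from this file): how a device model
 * would use the legacy cpu_register_io_memory() API above.  The "mydev"
 * names and the exact CPUReadMemoryFunc/CPUWriteMemoryFunc prototypes are
 * assumptions here; cpu_register_physical_memory() is the routine referred
 * to in the comment above, with its parameters assumed.
 */
static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
{
    return 0xffffffff;                        /* read stub */
}

static void mydev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* write stub: discard the value */
}

static CPUReadMemoryFunc * const mydev_reads[3] = {
    mydev_read, mydev_read, mydev_read,       /* byte, word, dword */
};
static CPUWriteMemoryFunc * const mydev_writes[3] = {
    mydev_write, mydev_write, mydev_write,
};

static void mydev_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(mydev_reads, mydev_writes, NULL);
    if (io != -1) {
        cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
    }
}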
3487
88715657
AL
3488void cpu_unregister_io_memory(int io_table_address)
3489{
3490 int i;
3491 int io_index = io_table_address >> IO_MEM_SHIFT;
3492
3493 for (i = 0; i < 3; i++) {
0e0df1e2
AK
3494 _io_mem_read[io_index][i] = NULL;
3495 _io_mem_write[io_index][i] = NULL;
88715657
AL
3496 }
3497 io_mem_opaque[io_index] = NULL;
3498 io_mem_used[io_index] = 0;
3499}
3500
e9179ce1
AK
3501static void io_mem_init(void)
3502{
3503 int i;
3504
0e0df1e2
AK
3505 /* Must be first: */
3506 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3507 assert(io_mem_ram.ram_addr == 0);
3508 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3509 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3510 "unassigned", UINT64_MAX);
3511 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3512 "notdirty", UINT64_MAX);
de712f94
AK
3513 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3514 "subpage-ram", UINT64_MAX);
e9179ce1
AK
3515 for (i = 0; i < 5; i++)
3516 io_mem_used[i] = 1;
3517
3518 io_mem_watch = cpu_register_io_memory(watch_mem_read,
be675c97 3519 watch_mem_write, NULL);
e9179ce1
AK
3520}
3521
62152b8a
AK
3522static void memory_map_init(void)
3523{
7267c094 3524 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3525 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3526 set_system_memory_map(system_memory);
309cb471 3527
7267c094 3528 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3529 memory_region_init(system_io, "io", 65536);
3530 set_system_io_map(system_io);
62152b8a
AK
3531}
3532
3533MemoryRegion *get_system_memory(void)
3534{
3535 return system_memory;
3536}
3537
309cb471
AK
3538MemoryRegion *get_system_io(void)
3539{
3540 return system_io;
3541}
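
/*
 * Illustrative sketch only: the MemoryRegion-based equivalent, attaching an
 * MMIO window under the root region returned by get_system_memory().  The
 * "mydev" ops, names and the 0x1000 size are assumptions, not code from
 * this file; memory_region_init_io() is used with the same signature as in
 * io_mem_init() above, and memory_region_add_subregion() is assumed to take
 * (parent, offset, child).
 */
static uint64_t mydev_mr_read(void *opaque, target_phys_addr_t addr,
                              unsigned size)
{
    return 0;                                 /* reads as zero */
}

static void mydev_mr_write(void *opaque, target_phys_addr_t addr,
                           uint64_t value, unsigned size)
{
    /* ignore writes */
}

static const MemoryRegionOps mydev_mr_ops = {
    .read = mydev_mr_read,
    .write = mydev_mr_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemoryRegion mydev_mr;

static void mydev_mr_map(target_phys_addr_t base)
{
    memory_region_init_io(&mydev_mr, &mydev_mr_ops, NULL, "mydev", 0x1000);
    memory_region_add_subregion(get_system_memory(), base, &mydev_mr);
}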
3542
e2eef170
PB
3543#endif /* !defined(CONFIG_USER_ONLY) */
3544
13eb76e0
FB
3545/* physical memory access (slow version, mainly for debug) */
3546#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3547int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3548 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3549{
3550 int l, flags;
3551 target_ulong page;
53a5960a 3552 void * p;
13eb76e0
FB
3553
3554 while (len > 0) {
3555 page = addr & TARGET_PAGE_MASK;
3556 l = (page + TARGET_PAGE_SIZE) - addr;
3557 if (l > len)
3558 l = len;
3559 flags = page_get_flags(page);
3560 if (!(flags & PAGE_VALID))
a68fe89c 3561 return -1;
13eb76e0
FB
3562 if (is_write) {
3563 if (!(flags & PAGE_WRITE))
a68fe89c 3564 return -1;
579a97f7 3565 /* XXX: this code should not depend on lock_user */
72fb7daa 3566 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3567 return -1;
72fb7daa
AJ
3568 memcpy(p, buf, l);
3569 unlock_user(p, addr, l);
13eb76e0
FB
3570 } else {
3571 if (!(flags & PAGE_READ))
a68fe89c 3572 return -1;
579a97f7 3573 /* XXX: this code should not depend on lock_user */
72fb7daa 3574 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3575 return -1;
72fb7daa 3576 memcpy(buf, p, l);
5b257578 3577 unlock_user(p, addr, 0);
13eb76e0
FB
3578 }
3579 len -= l;
3580 buf += l;
3581 addr += l;
3582 }
a68fe89c 3583 return 0;
13eb76e0 3584}
8df1cd07 3585
13eb76e0 3586#else
c227f099 3587void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3588 int len, int is_write)
3589{
3590 int l, io_index;
3591 uint8_t *ptr;
3592 uint32_t val;
c227f099 3593 target_phys_addr_t page;
8ca5692d 3594 ram_addr_t pd;
f1f6e3b8 3595 PhysPageDesc p;
3b46e624 3596
13eb76e0
FB
3597 while (len > 0) {
3598 page = addr & TARGET_PAGE_MASK;
3599 l = (page + TARGET_PAGE_SIZE) - addr;
3600 if (l > len)
3601 l = len;
92e873b9 3602 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3603 pd = p.phys_offset;
3b46e624 3604
13eb76e0 3605 if (is_write) {
0e0df1e2 3606 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
f1f6e3b8 3607 target_phys_addr_t addr1;
13eb76e0 3608 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3609 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3610 /* XXX: could force cpu_single_env to NULL to avoid
3611 potential bugs */
6c2934db 3612 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3613 /* 32 bit write access */
c27004ec 3614 val = ldl_p(buf);
acbbec5d 3615 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3616 l = 4;
6c2934db 3617 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3618 /* 16 bit write access */
c27004ec 3619 val = lduw_p(buf);
acbbec5d 3620 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3621 l = 2;
3622 } else {
1c213d19 3623 /* 8 bit write access */
c27004ec 3624 val = ldub_p(buf);
acbbec5d 3625 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3626 l = 1;
3627 }
3628 } else {
8ca5692d 3629 ram_addr_t addr1;
b448f2f3 3630 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3631 /* RAM case */
5579c7f3 3632 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3633 memcpy(ptr, buf, l);
3a7d929e
FB
3634 if (!cpu_physical_memory_is_dirty(addr1)) {
3635 /* invalidate code */
3636 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3637 /* set dirty bit */
f7c11b53
YT
3638 cpu_physical_memory_set_dirty_flags(
3639 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3640 }
050a0ddf 3641 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3642 }
3643 } else {
1d393fa2 3644 if (!is_ram_rom_romd(pd)) {
f1f6e3b8 3645 target_phys_addr_t addr1;
13eb76e0
FB
3646 /* I/O case */
3647 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3648 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3649 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3650 /* 32 bit read access */
acbbec5d 3651 val = io_mem_read(io_index, addr1, 4);
c27004ec 3652 stl_p(buf, val);
13eb76e0 3653 l = 4;
6c2934db 3654 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3655 /* 16 bit read access */
acbbec5d 3656 val = io_mem_read(io_index, addr1, 2);
c27004ec 3657 stw_p(buf, val);
13eb76e0
FB
3658 l = 2;
3659 } else {
1c213d19 3660 /* 8 bit read access */
acbbec5d 3661 val = io_mem_read(io_index, addr1, 1);
c27004ec 3662 stb_p(buf, val);
13eb76e0
FB
3663 l = 1;
3664 }
3665 } else {
3666 /* RAM case */
050a0ddf
AP
3667 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3668 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3669 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3670 }
3671 }
3672 len -= l;
3673 buf += l;
3674 addr += l;
3675 }
3676}
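
/*
 * Usage sketch (not part of this file): copying a few bytes to and from
 * guest-physical memory through the slow path above.  The
 * cpu_physical_memory_read()/write() helpers are the thin wrappers around
 * cpu_physical_memory_rw() already used elsewhere in this file; the address
 * and data are arbitrary.
 */
static void phys_rw_example(target_phys_addr_t gpa)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_write(gpa, out, sizeof(out)); /* is_write != 0 path */
    cpu_physical_memory_read(gpa, in, sizeof(in));    /* is_write == 0 path */
}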
8df1cd07 3677
d0ecd2aa 3678/* used for ROM loading: can write in RAM and ROM */
c227f099 3679void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3680 const uint8_t *buf, int len)
3681{
3682 int l;
3683 uint8_t *ptr;
c227f099 3684 target_phys_addr_t page;
d0ecd2aa 3685 unsigned long pd;
f1f6e3b8 3686 PhysPageDesc p;
3b46e624 3687
d0ecd2aa
FB
3688 while (len > 0) {
3689 page = addr & TARGET_PAGE_MASK;
3690 l = (page + TARGET_PAGE_SIZE) - addr;
3691 if (l > len)
3692 l = len;
3693 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3694 pd = p.phys_offset;
3b46e624 3695
1d393fa2 3696 if (!is_ram_rom_romd(pd)) {
d0ecd2aa
FB
3697 /* do nothing */
3698 } else {
3699 unsigned long addr1;
3700 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3701 /* ROM/RAM case */
5579c7f3 3702 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3703 memcpy(ptr, buf, l);
050a0ddf 3704 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3705 }
3706 len -= l;
3707 buf += l;
3708 addr += l;
3709 }
3710}
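
/*
 * Usage sketch: firmware/ROM loaders use this variant because a plain
 * cpu_physical_memory_rw() write to a ROM-backed page is routed to the I/O
 * callbacks and never reaches the backing storage, while the function above
 * writes the backing RAM directly.  Names below are illustrative.
 */
static void load_blob_into_rom(target_phys_addr_t base,
                               const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(base, blob, blob_len);
}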
3711
6d16c2f8
AL
3712typedef struct {
3713 void *buffer;
c227f099
AL
3714 target_phys_addr_t addr;
3715 target_phys_addr_t len;
6d16c2f8
AL
3716} BounceBuffer;
3717
3718static BounceBuffer bounce;
3719
ba223c29
AL
3720typedef struct MapClient {
3721 void *opaque;
3722 void (*callback)(void *opaque);
72cf2d4f 3723 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3724} MapClient;
3725
72cf2d4f
BS
3726static QLIST_HEAD(map_client_list, MapClient) map_client_list
3727 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3728
3729void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3730{
7267c094 3731 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3732
3733 client->opaque = opaque;
3734 client->callback = callback;
72cf2d4f 3735 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3736 return client;
3737}
3738
3739void cpu_unregister_map_client(void *_client)
3740{
3741 MapClient *client = (MapClient *)_client;
3742
72cf2d4f 3743 QLIST_REMOVE(client, link);
7267c094 3744 g_free(client);
ba223c29
AL
3745}
3746
3747static void cpu_notify_map_clients(void)
3748{
3749 MapClient *client;
3750
72cf2d4f
BS
3751 while (!QLIST_EMPTY(&map_client_list)) {
3752 client = QLIST_FIRST(&map_client_list);
ba223c29 3753 client->callback(client->opaque);
34d5e948 3754 cpu_unregister_map_client(client);
ba223c29
AL
3755 }
3756}
3757
6d16c2f8
AL
3758/* Map a physical memory region into a host virtual address.
3759 * May map a subset of the requested range, given by and returned in *plen.
3760 * May return NULL if resources needed to perform the mapping are exhausted.
3761 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3762 * Use cpu_register_map_client() to know when retrying the map operation is
3763 * likely to succeed.
6d16c2f8 3764 */
c227f099
AL
3765void *cpu_physical_memory_map(target_phys_addr_t addr,
3766 target_phys_addr_t *plen,
6d16c2f8
AL
3767 int is_write)
3768{
c227f099 3769 target_phys_addr_t len = *plen;
38bee5dc 3770 target_phys_addr_t todo = 0;
6d16c2f8 3771 int l;
c227f099 3772 target_phys_addr_t page;
6d16c2f8 3773 unsigned long pd;
f1f6e3b8 3774 PhysPageDesc p;
f15fbc4b 3775 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3776 ram_addr_t rlen;
3777 void *ret;
6d16c2f8
AL
3778
3779 while (len > 0) {
3780 page = addr & TARGET_PAGE_MASK;
3781 l = (page + TARGET_PAGE_SIZE) - addr;
3782 if (l > len)
3783 l = len;
3784 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3785 pd = p.phys_offset;
6d16c2f8 3786
0e0df1e2 3787 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
38bee5dc 3788 if (todo || bounce.buffer) {
6d16c2f8
AL
3789 break;
3790 }
3791 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3792 bounce.addr = addr;
3793 bounce.len = l;
3794 if (!is_write) {
54f7b4a3 3795 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3796 }
38bee5dc
SS
3797
3798 *plen = l;
3799 return bounce.buffer;
6d16c2f8 3800 }
8ab934f9
SS
3801 if (!todo) {
3802 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3803 }
6d16c2f8
AL
3804
3805 len -= l;
3806 addr += l;
38bee5dc 3807 todo += l;
6d16c2f8 3808 }
8ab934f9
SS
3809 rlen = todo;
3810 ret = qemu_ram_ptr_length(raddr, &rlen);
3811 *plen = rlen;
3812 return ret;
6d16c2f8
AL
3813}
3814
3815/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3816 * Will also mark the memory as dirty if is_write == 1. access_len gives
3817 * the amount of memory that was actually read or written by the caller.
3818 */
c227f099
AL
3819void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3820 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3821{
3822 if (buffer != bounce.buffer) {
3823 if (is_write) {
e890261f 3824 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3825 while (access_len) {
3826 unsigned l;
3827 l = TARGET_PAGE_SIZE;
3828 if (l > access_len)
3829 l = access_len;
3830 if (!cpu_physical_memory_is_dirty(addr1)) {
3831 /* invalidate code */
3832 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3833 /* set dirty bit */
f7c11b53
YT
3834 cpu_physical_memory_set_dirty_flags(
3835 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3836 }
3837 addr1 += l;
3838 access_len -= l;
3839 }
3840 }
868bb33f 3841 if (xen_enabled()) {
e41d7c69 3842 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3843 }
6d16c2f8
AL
3844 return;
3845 }
3846 if (is_write) {
3847 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3848 }
f8a83245 3849 qemu_vfree(bounce.buffer);
6d16c2f8 3850 bounce.buffer = NULL;
ba223c29 3851 cpu_notify_map_clients();
6d16c2f8 3852}
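
/*
 * Sketch of the map/unmap pattern used for DMA-style zero-copy access.  As
 * documented above, the call may shrink *plen or return NULL when mapping
 * resources (the single bounce buffer) are exhausted, so both the pointer
 * and the returned length must be checked, and the same length handed back
 * to the unmap.  Names are illustrative.
 */
static void dma_fill_example(target_phys_addr_t gpa, target_phys_addr_t size)
{
    target_phys_addr_t len = size;
    void *host = cpu_physical_memory_map(gpa, &len, 1 /* is_write */);

    if (host) {
        memset(host, 0, (size_t)len);             /* "device" writes data */
        cpu_physical_memory_unmap(host, len, 1 /* is_write */, len);
    }
    /* else: register a callback with cpu_register_map_client() and retry */
}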
d0ecd2aa 3853
8df1cd07 3854/* warning: addr must be aligned */
1e78bcc1
AG
3855static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3856 enum device_endian endian)
8df1cd07
FB
3857{
3858 int io_index;
3859 uint8_t *ptr;
3860 uint32_t val;
3861 unsigned long pd;
f1f6e3b8 3862 PhysPageDesc p;
8df1cd07
FB
3863
3864 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3865 pd = p.phys_offset;
3b46e624 3866
1d393fa2 3867 if (!is_ram_rom_romd(pd)) {
8df1cd07
FB
3868 /* I/O case */
3869 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3870 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3871 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3872#if defined(TARGET_WORDS_BIGENDIAN)
3873 if (endian == DEVICE_LITTLE_ENDIAN) {
3874 val = bswap32(val);
3875 }
3876#else
3877 if (endian == DEVICE_BIG_ENDIAN) {
3878 val = bswap32(val);
3879 }
3880#endif
8df1cd07
FB
3881 } else {
3882 /* RAM case */
5579c7f3 3883 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3884 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3885 switch (endian) {
3886 case DEVICE_LITTLE_ENDIAN:
3887 val = ldl_le_p(ptr);
3888 break;
3889 case DEVICE_BIG_ENDIAN:
3890 val = ldl_be_p(ptr);
3891 break;
3892 default:
3893 val = ldl_p(ptr);
3894 break;
3895 }
8df1cd07
FB
3896 }
3897 return val;
3898}
3899
1e78bcc1
AG
3900uint32_t ldl_phys(target_phys_addr_t addr)
3901{
3902 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3903}
3904
3905uint32_t ldl_le_phys(target_phys_addr_t addr)
3906{
3907 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3908}
3909
3910uint32_t ldl_be_phys(target_phys_addr_t addr)
3911{
3912 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3913}
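
/*
 * Example (illustrative): a 32-bit device register defined as little-endian
 * is read with ldl_le_phys(), which interprets the four bytes as
 * little-endian data independent of TARGET_WORDS_BIGENDIAN, whereas
 * ldl_phys() follows the target's native byte order.
 */
static uint32_t read_le_reg32(target_phys_addr_t reg_gpa)
{
    return ldl_le_phys(reg_gpa);
}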
3914
84b7b8e7 3915/* warning: addr must be aligned */
1e78bcc1
AG
3916static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3917 enum device_endian endian)
84b7b8e7
FB
3918{
3919 int io_index;
3920 uint8_t *ptr;
3921 uint64_t val;
3922 unsigned long pd;
f1f6e3b8 3923 PhysPageDesc p;
84b7b8e7
FB
3924
3925 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3926 pd = p.phys_offset;
3b46e624 3927
1d393fa2 3928 if (!is_ram_rom_romd(pd)) {
84b7b8e7
FB
3929 /* I/O case */
3930 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3931 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
3932
3933 /* XXX This is broken when device endian != cpu endian.
3934 Fix and add "endian" variable check */
84b7b8e7 3935#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
3936 val = io_mem_read(io_index, addr, 4) << 32;
3937 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 3938#else
acbbec5d
AK
3939 val = io_mem_read(io_index, addr, 4);
3940 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
3941#endif
3942 } else {
3943 /* RAM case */
5579c7f3 3944 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 3945 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3946 switch (endian) {
3947 case DEVICE_LITTLE_ENDIAN:
3948 val = ldq_le_p(ptr);
3949 break;
3950 case DEVICE_BIG_ENDIAN:
3951 val = ldq_be_p(ptr);
3952 break;
3953 default:
3954 val = ldq_p(ptr);
3955 break;
3956 }
84b7b8e7
FB
3957 }
3958 return val;
3959}
3960
1e78bcc1
AG
3961uint64_t ldq_phys(target_phys_addr_t addr)
3962{
3963 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3964}
3965
3966uint64_t ldq_le_phys(target_phys_addr_t addr)
3967{
3968 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3969}
3970
3971uint64_t ldq_be_phys(target_phys_addr_t addr)
3972{
3973 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3974}
3975
aab33094 3976/* XXX: optimize */
c227f099 3977uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3978{
3979 uint8_t val;
3980 cpu_physical_memory_read(addr, &val, 1);
3981 return val;
3982}
3983
733f0b02 3984/* warning: addr must be aligned */
1e78bcc1
AG
3985static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3986 enum device_endian endian)
aab33094 3987{
733f0b02
MT
3988 int io_index;
3989 uint8_t *ptr;
3990 uint64_t val;
3991 unsigned long pd;
f1f6e3b8 3992 PhysPageDesc p;
733f0b02
MT
3993
3994 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3995 pd = p.phys_offset;
733f0b02 3996
1d393fa2 3997 if (!is_ram_rom_romd(pd)) {
733f0b02
MT
3998 /* I/O case */
3999 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4000 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4001 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
4002#if defined(TARGET_WORDS_BIGENDIAN)
4003 if (endian == DEVICE_LITTLE_ENDIAN) {
4004 val = bswap16(val);
4005 }
4006#else
4007 if (endian == DEVICE_BIG_ENDIAN) {
4008 val = bswap16(val);
4009 }
4010#endif
733f0b02
MT
4011 } else {
4012 /* RAM case */
4013 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4014 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4015 switch (endian) {
4016 case DEVICE_LITTLE_ENDIAN:
4017 val = lduw_le_p(ptr);
4018 break;
4019 case DEVICE_BIG_ENDIAN:
4020 val = lduw_be_p(ptr);
4021 break;
4022 default:
4023 val = lduw_p(ptr);
4024 break;
4025 }
733f0b02
MT
4026 }
4027 return val;
aab33094
FB
4028}
4029
1e78bcc1
AG
4030uint32_t lduw_phys(target_phys_addr_t addr)
4031{
4032 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4033}
4034
4035uint32_t lduw_le_phys(target_phys_addr_t addr)
4036{
4037 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4038}
4039
4040uint32_t lduw_be_phys(target_phys_addr_t addr)
4041{
4042 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4043}
4044
8df1cd07
FB
4045/* warning: addr must be aligned. The ram page is not marked dirty
4046 and the code inside is not invalidated. This is useful if the dirty
4047 bits are used to track modified PTEs */
c227f099 4048void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4049{
4050 int io_index;
4051 uint8_t *ptr;
4052 unsigned long pd;
f1f6e3b8 4053 PhysPageDesc p;
8df1cd07
FB
4054
4055 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4056 pd = p.phys_offset;
3b46e624 4057
0e0df1e2 4058 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
8df1cd07 4059 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4060 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4061 io_mem_write(io_index, addr, val, 4);
8df1cd07 4062 } else {
74576198 4063 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4064 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4065 stl_p(ptr, val);
74576198
AL
4066
4067 if (unlikely(in_migration)) {
4068 if (!cpu_physical_memory_is_dirty(addr1)) {
4069 /* invalidate code */
4070 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4071 /* set dirty bit */
f7c11b53
YT
4072 cpu_physical_memory_set_dirty_flags(
4073 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4074 }
4075 }
8df1cd07
FB
4076 }
4077}
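
/*
 * Sketch of the intended use of the _notdirty store above: target MMU code
 * sets accessed/dirty bits inside a guest page-table entry without marking
 * the RAM page dirty and without invalidating TBs.  The flat PTE layout and
 * flag value here are illustrative assumptions, not any particular target's.
 */
static void pte_set_flag_example(target_phys_addr_t pte_addr, uint32_t flag)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & flag)) {
        stl_phys_notdirty(pte_addr, pte | flag);
    }
}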
4078
c227f099 4079void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4080{
4081 int io_index;
4082 uint8_t *ptr;
4083 unsigned long pd;
f1f6e3b8 4084 PhysPageDesc p;
bc98a7ef
JM
4085
4086 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4087 pd = p.phys_offset;
3b46e624 4088
0e0df1e2 4089 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
bc98a7ef 4090 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4091 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4092#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4093 io_mem_write(io_index, addr, val >> 32, 4);
4094 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4095#else
acbbec5d
AK
4096 io_mem_write(io_index, addr, (uint32_t)val, 4);
4097 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4098#endif
4099 } else {
5579c7f3 4100 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4101 (addr & ~TARGET_PAGE_MASK);
4102 stq_p(ptr, val);
4103 }
4104}
4105
8df1cd07 4106/* warning: addr must be aligned */
1e78bcc1
AG
4107static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4108 enum device_endian endian)
8df1cd07
FB
4109{
4110 int io_index;
4111 uint8_t *ptr;
4112 unsigned long pd;
f1f6e3b8 4113 PhysPageDesc p;
8df1cd07
FB
4114
4115 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4116 pd = p.phys_offset;
3b46e624 4117
0e0df1e2 4118 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
8df1cd07 4119 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4120 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4121#if defined(TARGET_WORDS_BIGENDIAN)
4122 if (endian == DEVICE_LITTLE_ENDIAN) {
4123 val = bswap32(val);
4124 }
4125#else
4126 if (endian == DEVICE_BIG_ENDIAN) {
4127 val = bswap32(val);
4128 }
4129#endif
acbbec5d 4130 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4131 } else {
4132 unsigned long addr1;
4133 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4134 /* RAM case */
5579c7f3 4135 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4136 switch (endian) {
4137 case DEVICE_LITTLE_ENDIAN:
4138 stl_le_p(ptr, val);
4139 break;
4140 case DEVICE_BIG_ENDIAN:
4141 stl_be_p(ptr, val);
4142 break;
4143 default:
4144 stl_p(ptr, val);
4145 break;
4146 }
3a7d929e
FB
4147 if (!cpu_physical_memory_is_dirty(addr1)) {
4148 /* invalidate code */
4149 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4150 /* set dirty bit */
f7c11b53
YT
4151 cpu_physical_memory_set_dirty_flags(addr1,
4152 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4153 }
8df1cd07
FB
4154 }
4155}
4156
1e78bcc1
AG
4157void stl_phys(target_phys_addr_t addr, uint32_t val)
4158{
4159 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4160}
4161
4162void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4163{
4164 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4165}
4166
4167void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4168{
4169 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4170}
4171
aab33094 4172/* XXX: optimize */
c227f099 4173void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4174{
4175 uint8_t v = val;
4176 cpu_physical_memory_write(addr, &v, 1);
4177}
4178
733f0b02 4179/* warning: addr must be aligned */
1e78bcc1
AG
4180static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4181 enum device_endian endian)
aab33094 4182{
733f0b02
MT
4183 int io_index;
4184 uint8_t *ptr;
4185 unsigned long pd;
f1f6e3b8 4186 PhysPageDesc p;
733f0b02
MT
4187
4188 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4189 pd = p.phys_offset;
733f0b02 4190
0e0df1e2 4191 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
733f0b02 4192 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4193 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4194#if defined(TARGET_WORDS_BIGENDIAN)
4195 if (endian == DEVICE_LITTLE_ENDIAN) {
4196 val = bswap16(val);
4197 }
4198#else
4199 if (endian == DEVICE_BIG_ENDIAN) {
4200 val = bswap16(val);
4201 }
4202#endif
acbbec5d 4203 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4204 } else {
4205 unsigned long addr1;
4206 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4207 /* RAM case */
4208 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4209 switch (endian) {
4210 case DEVICE_LITTLE_ENDIAN:
4211 stw_le_p(ptr, val);
4212 break;
4213 case DEVICE_BIG_ENDIAN:
4214 stw_be_p(ptr, val);
4215 break;
4216 default:
4217 stw_p(ptr, val);
4218 break;
4219 }
733f0b02
MT
4220 if (!cpu_physical_memory_is_dirty(addr1)) {
4221 /* invalidate code */
4222 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4223 /* set dirty bit */
4224 cpu_physical_memory_set_dirty_flags(addr1,
4225 (0xff & ~CODE_DIRTY_FLAG));
4226 }
4227 }
aab33094
FB
4228}
4229
1e78bcc1
AG
4230void stw_phys(target_phys_addr_t addr, uint32_t val)
4231{
4232 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4233}
4234
4235void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4236{
4237 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4238}
4239
4240void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4241{
4242 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4243}
4244
aab33094 4245/* XXX: optimize */
c227f099 4246void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4247{
4248 val = tswap64(val);
71d2b725 4249 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4250}
4251
1e78bcc1
AG
4252void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4253{
4254 val = cpu_to_le64(val);
4255 cpu_physical_memory_write(addr, &val, 8);
4256}
4257
4258void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4259{
4260 val = cpu_to_be64(val);
4261 cpu_physical_memory_write(addr, &val, 8);
4262}
4263
5e2972fd 4264/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4265int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4266 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4267{
4268 int l;
c227f099 4269 target_phys_addr_t phys_addr;
9b3c35e0 4270 target_ulong page;
13eb76e0
FB
4271
4272 while (len > 0) {
4273 page = addr & TARGET_PAGE_MASK;
4274 phys_addr = cpu_get_phys_page_debug(env, page);
4275 /* if no physical page mapped, return an error */
4276 if (phys_addr == -1)
4277 return -1;
4278 l = (page + TARGET_PAGE_SIZE) - addr;
4279 if (l > len)
4280 l = len;
5e2972fd 4281 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4282 if (is_write)
4283 cpu_physical_memory_write_rom(phys_addr, buf, l);
4284 else
5e2972fd 4285 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4286 len -= l;
4287 buf += l;
4288 addr += l;
4289 }
4290 return 0;
4291}
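
/*
 * Usage sketch (gdbstub/monitor style): read guest-virtual memory through
 * the CPU's current translation; -1 means some page in the range is not
 * mapped.  The buffer size and the missing print step are illustrative.
 */
static int dump_guest_vaddr(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;                               /* unmapped page */
    }
    /* ... format and print buf ... */
    return 0;
}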
a68fe89c 4292#endif
13eb76e0 4293
2e70f6ef
PB
4294/* in deterministic execution mode, instructions doing device I/Os
4295 must be at the end of the TB */
4296void cpu_io_recompile(CPUState *env, void *retaddr)
4297{
4298 TranslationBlock *tb;
4299 uint32_t n, cflags;
4300 target_ulong pc, cs_base;
4301 uint64_t flags;
4302
4303 tb = tb_find_pc((unsigned long)retaddr);
4304 if (!tb) {
4305 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4306 retaddr);
4307 }
4308 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4309 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4310 /* Calculate how many instructions had been executed before the fault
bf20dc07 4311 occurred. */
2e70f6ef
PB
4312 n = n - env->icount_decr.u16.low;
4313 /* Generate a new TB ending on the I/O insn. */
4314 n++;
4315 /* On MIPS and SH, delay slot instructions can only be restarted if
4316 they were already the first instruction in the TB. If this is not
bf20dc07 4317 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4318 branch. */
4319#if defined(TARGET_MIPS)
4320 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4321 env->active_tc.PC -= 4;
4322 env->icount_decr.u16.low++;
4323 env->hflags &= ~MIPS_HFLAG_BMASK;
4324 }
4325#elif defined(TARGET_SH4)
4326 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4327 && n > 1) {
4328 env->pc -= 2;
4329 env->icount_decr.u16.low++;
4330 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4331 }
4332#endif
4333 /* This should never happen. */
4334 if (n > CF_COUNT_MASK)
4335 cpu_abort(env, "TB too big during recompile");
4336
4337 cflags = n | CF_LAST_IO;
4338 pc = tb->pc;
4339 cs_base = tb->cs_base;
4340 flags = tb->flags;
4341 tb_phys_invalidate(tb, -1);
4342 /* FIXME: In theory this could raise an exception. In practice
4343 we have already translated the block once so it's probably ok. */
4344 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4345 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4346 the first in the TB) then we end up generating a whole new TB and
4347 repeating the fault, which is horribly inefficient.
4348 Better would be to execute just this insn uncached, or generate a
4349 second new TB. */
4350 cpu_resume_from_signal(env, NULL);
4351}
4352
b3755a91
PB
4353#if !defined(CONFIG_USER_ONLY)
4354
055403b2 4355void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4356{
4357 int i, target_code_size, max_target_code_size;
4358 int direct_jmp_count, direct_jmp2_count, cross_page;
4359 TranslationBlock *tb;
3b46e624 4360
e3db7226
FB
4361 target_code_size = 0;
4362 max_target_code_size = 0;
4363 cross_page = 0;
4364 direct_jmp_count = 0;
4365 direct_jmp2_count = 0;
4366 for(i = 0; i < nb_tbs; i++) {
4367 tb = &tbs[i];
4368 target_code_size += tb->size;
4369 if (tb->size > max_target_code_size)
4370 max_target_code_size = tb->size;
4371 if (tb->page_addr[1] != -1)
4372 cross_page++;
4373 if (tb->tb_next_offset[0] != 0xffff) {
4374 direct_jmp_count++;
4375 if (tb->tb_next_offset[1] != 0xffff) {
4376 direct_jmp2_count++;
4377 }
4378 }
4379 }
4380 /* XXX: avoid using doubles ? */
57fec1fe 4381 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4382 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4383 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4384 cpu_fprintf(f, "TB count %d/%d\n",
4385 nb_tbs, code_gen_max_blocks);
5fafdf24 4386 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4387 nb_tbs ? target_code_size / nb_tbs : 0,
4388 max_target_code_size);
055403b2 4389 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4390 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4391 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4392 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4393 cross_page,
e3db7226
FB
4394 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4395 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4396 direct_jmp_count,
e3db7226
FB
4397 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4398 direct_jmp2_count,
4399 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4400 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4401 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4402 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4403 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4404 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4405}
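
/*
 * Example: the monitor's "info jit" output ends up in a call of this shape;
 * plain fprintf is assumed here to be compatible with the fprintf_function
 * callback type used above.
 */
static void print_jit_stats_example(void)
{
    dump_exec_info(stderr, fprintf);
}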
4406
d39e8222
AK
4407/* NOTE: this function can trigger an exception */
4408/* NOTE2: the returned address is not exactly the physical address: it
4409 is the offset relative to phys_ram_base */
4410tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4411{
4412 int mmu_idx, page_index, pd;
4413 void *p;
4414
4415 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4416 mmu_idx = cpu_mmu_index(env1);
4417 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4418 (addr & TARGET_PAGE_MASK))) {
4419 ldub_code(addr);
4420 }
4421 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
0e0df1e2
AK
4422 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4423 && !(pd & IO_MEM_ROMD)) {
d39e8222
AK
4424#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4425 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4426#else
4427 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4428#endif
4429 }
4430 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4431 return qemu_ram_addr_from_host_nofail(p);
4432}
4433
61382a50 4434#define MMUSUFFIX _cmmu
3917149d 4435#undef GETPC
61382a50
FB
4436#define GETPC() NULL
4437#define env cpu_single_env
b769d8fe 4438#define SOFTMMU_CODE_ACCESS
61382a50
FB
4439
4440#define SHIFT 0
4441#include "softmmu_template.h"
4442
4443#define SHIFT 1
4444#include "softmmu_template.h"
4445
4446#define SHIFT 2
4447#include "softmmu_template.h"
4448
4449#define SHIFT 3
4450#include "softmmu_template.h"
4451
4452#undef env
4453
4454#endif