]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
Convert io_mem_watch to be a MemoryRegion
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
67d95c15
AK
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
67
68/* make various TB consistency checks */
5fafdf24
TS
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4
PB
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853
FB
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468
BS
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
92 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
f8e2af11
SW
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
d03d860b
BS
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
0e0df1e2 121MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 122static MemoryRegion io_mem_subpage_ram;
0e0df1e2 123
e2eef170 124#endif
9fa3e853 125
6a00d601
FB
126CPUState *first_cpu;
127/* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
b3c4bbe5 129DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 130/* 0 = Do not count executed instructions.
bf20dc07 131 1 = Precise instruction counting.
2e70f6ef
PB
132 2 = Adaptive rate instruction counting. */
133int use_icount = 0;
6a00d601 134
54936004 135typedef struct PageDesc {
92e873b9 136 /* list of TBs intersecting this ram page */
fd6ce8f6 137 TranslationBlock *first_tb;
9fa3e853
FB
138 /* in order to optimize self modifying code, we count the number
139 of lookups we do to a given page to use a bitmap */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142#if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144#endif
54936004
FB
145} PageDesc;
146
41c1b1c9 147/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
148 while in user mode we want it to be based on virtual addresses. */
149#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
150#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 154#endif
bedb69ea 155#else
5cd2c5b6 156# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 157#endif
54936004 158
5cd2c5b6
RH
159/* Size of the L2 (and L3, etc) page tables. */
160#define L2_BITS 10
54936004
FB
161#define L2_SIZE (1 << L2_BITS)
162
5cd2c5b6
RH
163/* The bits remaining after N lower levels of page tables. */
164#define P_L1_BITS_REM \
165 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
166#define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168
169/* Size of the L1 page table. Avoid silly small sizes. */
170#if P_L1_BITS_REM < 4
171#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
172#else
173#define P_L1_BITS P_L1_BITS_REM
174#endif
175
176#if V_L1_BITS_REM < 4
177#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
178#else
179#define V_L1_BITS V_L1_BITS_REM
180#endif
181
182#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
183#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
184
185#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
186#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
187
83fb7adf 188unsigned long qemu_real_host_page_size;
83fb7adf
FB
189unsigned long qemu_host_page_size;
190unsigned long qemu_host_page_mask;
54936004 191
5cd2c5b6
RH
192/* This is a multi-level map on the virtual address space.
193 The bottom level has pointers to PageDesc. */
194static void *l1_map[V_L1_SIZE];
54936004 195
e2eef170 196#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
197typedef struct PhysPageDesc {
198 /* offset in host memory of the page + io_index in the low bits */
199 ram_addr_t phys_offset;
200 ram_addr_t region_offset;
201} PhysPageDesc;
202
5cd2c5b6
RH
203/* This is a multi-level map on the physical address space.
204 The bottom level has pointers to PhysPageDesc. */
205static void *l1_phys_map[P_L1_SIZE];
6d9a1304 206
e2eef170 207static void io_mem_init(void);
62152b8a 208static void memory_map_init(void);
e2eef170 209
33417e70 210/* io memory support */
acbbec5d
AK
211CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
212CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 213void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 214static char io_mem_used[IO_MEM_NB_ENTRIES];
1ec9b909 215static MemoryRegion io_mem_watch;
6658ffb8 216#endif
33417e70 217
34865134 218/* log support */
1e8b27ca
JR
219#ifdef WIN32
220static const char *logfilename = "qemu.log";
221#else
d9b630fd 222static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 223#endif
34865134
FB
224FILE *logfile;
225int loglevel;
e735b91c 226static int log_append = 0;
34865134 227
e3db7226 228/* statistics */
b3755a91 229#if !defined(CONFIG_USER_ONLY)
e3db7226 230static int tlb_flush_count;
b3755a91 231#endif
e3db7226
FB
232static int tb_flush_count;
233static int tb_phys_invalidate_count;
234
7cb69cae
FB
235#ifdef _WIN32
236static void map_exec(void *addr, long size)
237{
238 DWORD old_protect;
239 VirtualProtect(addr, size,
240 PAGE_EXECUTE_READWRITE, &old_protect);
241
242}
243#else
244static void map_exec(void *addr, long size)
245{
4369415f 246 unsigned long start, end, page_size;
7cb69cae 247
4369415f 248 page_size = getpagesize();
7cb69cae 249 start = (unsigned long)addr;
4369415f 250 start &= ~(page_size - 1);
7cb69cae
FB
251
252 end = (unsigned long)addr + size;
4369415f
FB
253 end += page_size - 1;
254 end &= ~(page_size - 1);
7cb69cae
FB
255
256 mprotect((void *)start, end - start,
257 PROT_READ | PROT_WRITE | PROT_EXEC);
258}
259#endif
260
b346ff46 261static void page_init(void)
54936004 262{
83fb7adf 263 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 264 TARGET_PAGE_SIZE */
c2b48b69
AL
265#ifdef _WIN32
266 {
267 SYSTEM_INFO system_info;
268
269 GetSystemInfo(&system_info);
270 qemu_real_host_page_size = system_info.dwPageSize;
271 }
272#else
273 qemu_real_host_page_size = getpagesize();
274#endif
83fb7adf
FB
275 if (qemu_host_page_size == 0)
276 qemu_host_page_size = qemu_real_host_page_size;
277 if (qemu_host_page_size < TARGET_PAGE_SIZE)
278 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 279 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 280
2e9a5713 281#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 282 {
f01576f1
JL
283#ifdef HAVE_KINFO_GETVMMAP
284 struct kinfo_vmentry *freep;
285 int i, cnt;
286
287 freep = kinfo_getvmmap(getpid(), &cnt);
288 if (freep) {
289 mmap_lock();
290 for (i = 0; i < cnt; i++) {
291 unsigned long startaddr, endaddr;
292
293 startaddr = freep[i].kve_start;
294 endaddr = freep[i].kve_end;
295 if (h2g_valid(startaddr)) {
296 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
297
298 if (h2g_valid(endaddr)) {
299 endaddr = h2g(endaddr);
fd436907 300 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
301 } else {
302#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
303 endaddr = ~0ul;
fd436907 304 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
305#endif
306 }
307 }
308 }
309 free(freep);
310 mmap_unlock();
311 }
312#else
50a9569b 313 FILE *f;
50a9569b 314
0776590d 315 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 316
fd436907 317 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 318 if (f) {
5cd2c5b6
RH
319 mmap_lock();
320
50a9569b 321 do {
5cd2c5b6
RH
322 unsigned long startaddr, endaddr;
323 int n;
324
325 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
326
327 if (n == 2 && h2g_valid(startaddr)) {
328 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
329
330 if (h2g_valid(endaddr)) {
331 endaddr = h2g(endaddr);
332 } else {
333 endaddr = ~0ul;
334 }
335 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
336 }
337 } while (!feof(f));
5cd2c5b6 338
50a9569b 339 fclose(f);
5cd2c5b6 340 mmap_unlock();
50a9569b 341 }
f01576f1 342#endif
50a9569b
AZ
343 }
344#endif
54936004
FB
345}
346
41c1b1c9 347static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 348{
41c1b1c9
PB
349 PageDesc *pd;
350 void **lp;
351 int i;
352
5cd2c5b6 353#if defined(CONFIG_USER_ONLY)
7267c094 354 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
355# define ALLOC(P, SIZE) \
356 do { \
357 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
358 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
359 } while (0)
360#else
361# define ALLOC(P, SIZE) \
7267c094 362 do { P = g_malloc0(SIZE); } while (0)
17e2377a 363#endif
434929bf 364
5cd2c5b6
RH
365 /* Level 1. Always allocated. */
366 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
367
368 /* Level 2..N-1. */
369 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
370 void **p = *lp;
371
372 if (p == NULL) {
373 if (!alloc) {
374 return NULL;
375 }
376 ALLOC(p, sizeof(void *) * L2_SIZE);
377 *lp = p;
17e2377a 378 }
5cd2c5b6
RH
379
380 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
381 }
382
383 pd = *lp;
384 if (pd == NULL) {
385 if (!alloc) {
386 return NULL;
387 }
388 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
389 *lp = pd;
54936004 390 }
5cd2c5b6
RH
391
392#undef ALLOC
5cd2c5b6
RH
393
394 return pd + (index & (L2_SIZE - 1));
54936004
FB
395}
396
41c1b1c9 397static inline PageDesc *page_find(tb_page_addr_t index)
54936004 398{
5cd2c5b6 399 return page_find_alloc(index, 0);
fd6ce8f6
FB
400}
401
6d9a1304 402#if !defined(CONFIG_USER_ONLY)
c227f099 403static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 404{
e3f4e2a4 405 PhysPageDesc *pd;
5cd2c5b6
RH
406 void **lp;
407 int i;
92e873b9 408
5cd2c5b6
RH
409 /* Level 1. Always allocated. */
410 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 411
5cd2c5b6
RH
412 /* Level 2..N-1. */
413 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
414 void **p = *lp;
415 if (p == NULL) {
416 if (!alloc) {
417 return NULL;
418 }
7267c094 419 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
420 }
421 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 422 }
5cd2c5b6 423
e3f4e2a4 424 pd = *lp;
5cd2c5b6 425 if (pd == NULL) {
e3f4e2a4 426 int i;
5ab97b7f 427 int first_index = index & ~(L2_SIZE - 1);
5cd2c5b6
RH
428
429 if (!alloc) {
108c49b8 430 return NULL;
5cd2c5b6
RH
431 }
432
7267c094 433 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 434
67c4d23c 435 for (i = 0; i < L2_SIZE; i++) {
0e0df1e2 436 pd[i].phys_offset = io_mem_unassigned.ram_addr;
5ab97b7f 437 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
67c4d23c 438 }
92e873b9 439 }
5cd2c5b6
RH
440
441 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
442}
443
f1f6e3b8 444static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
92e873b9 445{
f1f6e3b8
AK
446 PhysPageDesc *p = phys_page_find_alloc(index, 0);
447
448 if (p) {
449 return *p;
450 } else {
451 return (PhysPageDesc) {
0e0df1e2 452 .phys_offset = io_mem_unassigned.ram_addr,
f1f6e3b8
AK
453 .region_offset = index << TARGET_PAGE_BITS,
454 };
455 }
92e873b9
FB
456}
457
c227f099
AL
458static void tlb_protect_code(ram_addr_t ram_addr);
459static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 460 target_ulong vaddr);
c8a706fe
PB
461#define mmap_lock() do { } while(0)
462#define mmap_unlock() do { } while(0)
9fa3e853 463#endif
fd6ce8f6 464
4369415f
FB
465#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
466
467#if defined(CONFIG_USER_ONLY)
ccbb4d44 468/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
469 user mode. It will change when a dedicated libc will be used */
470#define USE_STATIC_CODE_GEN_BUFFER
471#endif
472
473#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
474static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
475 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
476#endif
477
8fcd3692 478static void code_gen_alloc(unsigned long tb_size)
26a5f13b 479{
4369415f
FB
480#ifdef USE_STATIC_CODE_GEN_BUFFER
481 code_gen_buffer = static_code_gen_buffer;
482 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
483 map_exec(code_gen_buffer, code_gen_buffer_size);
484#else
26a5f13b
FB
485 code_gen_buffer_size = tb_size;
486 if (code_gen_buffer_size == 0) {
4369415f 487#if defined(CONFIG_USER_ONLY)
4369415f
FB
488 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
489#else
ccbb4d44 490 /* XXX: needs adjustments */
94a6b54f 491 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 492#endif
26a5f13b
FB
493 }
494 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
495 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
496 /* The code gen buffer location may have constraints depending on
497 the host cpu and OS */
498#if defined(__linux__)
499 {
500 int flags;
141ac468
BS
501 void *start = NULL;
502
26a5f13b
FB
503 flags = MAP_PRIVATE | MAP_ANONYMOUS;
504#if defined(__x86_64__)
505 flags |= MAP_32BIT;
506 /* Cannot map more than that */
507 if (code_gen_buffer_size > (800 * 1024 * 1024))
508 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
509#elif defined(__sparc_v9__)
510 // Map the buffer below 2G, so we can use direct calls and branches
511 flags |= MAP_FIXED;
512 start = (void *) 0x60000000UL;
513 if (code_gen_buffer_size > (512 * 1024 * 1024))
514 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 515#elif defined(__arm__)
222f23f5 516 /* Keep the buffer no bigger than 16GB to branch between blocks */
1cb0661e
AZ
517 if (code_gen_buffer_size > 16 * 1024 * 1024)
518 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
519#elif defined(__s390x__)
520 /* Map the buffer so that we can use direct calls and branches. */
521 /* We have a +- 4GB range on the branches; leave some slop. */
522 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
523 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
524 }
525 start = (void *)0x90000000UL;
26a5f13b 526#endif
141ac468
BS
527 code_gen_buffer = mmap(start, code_gen_buffer_size,
528 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
529 flags, -1, 0);
530 if (code_gen_buffer == MAP_FAILED) {
531 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
532 exit(1);
533 }
534 }
cbb608a5 535#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
536 || defined(__DragonFly__) || defined(__OpenBSD__) \
537 || defined(__NetBSD__)
06e67a82
AL
538 {
539 int flags;
540 void *addr = NULL;
541 flags = MAP_PRIVATE | MAP_ANONYMOUS;
542#if defined(__x86_64__)
543 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
544 * 0x40000000 is free */
545 flags |= MAP_FIXED;
546 addr = (void *)0x40000000;
547 /* Cannot map more than that */
548 if (code_gen_buffer_size > (800 * 1024 * 1024))
549 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
550#elif defined(__sparc_v9__)
551 // Map the buffer below 2G, so we can use direct calls and branches
552 flags |= MAP_FIXED;
553 addr = (void *) 0x60000000UL;
554 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
555 code_gen_buffer_size = (512 * 1024 * 1024);
556 }
06e67a82
AL
557#endif
558 code_gen_buffer = mmap(addr, code_gen_buffer_size,
559 PROT_WRITE | PROT_READ | PROT_EXEC,
560 flags, -1, 0);
561 if (code_gen_buffer == MAP_FAILED) {
562 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
563 exit(1);
564 }
565 }
26a5f13b 566#else
7267c094 567 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
568 map_exec(code_gen_buffer, code_gen_buffer_size);
569#endif
4369415f 570#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 571 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
572 code_gen_buffer_max_size = code_gen_buffer_size -
573 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 574 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 575 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
576}
577
578/* Must be called before using the QEMU cpus. 'tb_size' is the size
579 (in bytes) allocated to the translation buffer. Zero means default
580 size. */
d5ab9713 581void tcg_exec_init(unsigned long tb_size)
26a5f13b 582{
26a5f13b
FB
583 cpu_gen_init();
584 code_gen_alloc(tb_size);
585 code_gen_ptr = code_gen_buffer;
4369415f 586 page_init();
9002ec79
RH
587#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
588 /* There's no guest base to take into account, so go ahead and
589 initialize the prologue now. */
590 tcg_prologue_init(&tcg_ctx);
591#endif
26a5f13b
FB
592}
593
d5ab9713
JK
594bool tcg_enabled(void)
595{
596 return code_gen_buffer != NULL;
597}
598
599void cpu_exec_init_all(void)
600{
601#if !defined(CONFIG_USER_ONLY)
602 memory_map_init();
603 io_mem_init();
604#endif
605}
606
9656f324
PB
607#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
608
e59fb374 609static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
610{
611 CPUState *env = opaque;
9656f324 612
3098dba0
AJ
613 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
614 version_id is increased. */
615 env->interrupt_request &= ~0x01;
9656f324
PB
616 tlb_flush(env, 1);
617
618 return 0;
619}
e7f4eff7
JQ
620
621static const VMStateDescription vmstate_cpu_common = {
622 .name = "cpu_common",
623 .version_id = 1,
624 .minimum_version_id = 1,
625 .minimum_version_id_old = 1,
e7f4eff7
JQ
626 .post_load = cpu_common_post_load,
627 .fields = (VMStateField []) {
628 VMSTATE_UINT32(halted, CPUState),
629 VMSTATE_UINT32(interrupt_request, CPUState),
630 VMSTATE_END_OF_LIST()
631 }
632};
9656f324
PB
633#endif
634
950f1472
GC
635CPUState *qemu_get_cpu(int cpu)
636{
637 CPUState *env = first_cpu;
638
639 while (env) {
640 if (env->cpu_index == cpu)
641 break;
642 env = env->next_cpu;
643 }
644
645 return env;
646}
647
6a00d601 648void cpu_exec_init(CPUState *env)
fd6ce8f6 649{
6a00d601
FB
650 CPUState **penv;
651 int cpu_index;
652
c2764719
PB
653#if defined(CONFIG_USER_ONLY)
654 cpu_list_lock();
655#endif
6a00d601
FB
656 env->next_cpu = NULL;
657 penv = &first_cpu;
658 cpu_index = 0;
659 while (*penv != NULL) {
1e9fa730 660 penv = &(*penv)->next_cpu;
6a00d601
FB
661 cpu_index++;
662 }
663 env->cpu_index = cpu_index;
268a362c 664 env->numa_node = 0;
72cf2d4f
BS
665 QTAILQ_INIT(&env->breakpoints);
666 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
667#ifndef CONFIG_USER_ONLY
668 env->thread_id = qemu_get_thread_id();
669#endif
6a00d601 670 *penv = env;
c2764719
PB
671#if defined(CONFIG_USER_ONLY)
672 cpu_list_unlock();
673#endif
b3c7724c 674#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
675 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
676 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
677 cpu_save, cpu_load, env);
678#endif
fd6ce8f6
FB
679}
680
d1a1eb74
TG
681/* Allocate a new translation block. Flush the translation buffer if
682 too many translation blocks or too much generated code. */
683static TranslationBlock *tb_alloc(target_ulong pc)
684{
685 TranslationBlock *tb;
686
687 if (nb_tbs >= code_gen_max_blocks ||
688 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
689 return NULL;
690 tb = &tbs[nb_tbs++];
691 tb->pc = pc;
692 tb->cflags = 0;
693 return tb;
694}
695
696void tb_free(TranslationBlock *tb)
697{
698 /* In practice this is mostly used for single use temporary TB
699 Ignore the hard cases and just back up if this TB happens to
700 be the last one generated. */
701 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
702 code_gen_ptr = tb->tc_ptr;
703 nb_tbs--;
704 }
705}
706
9fa3e853
FB
707static inline void invalidate_page_bitmap(PageDesc *p)
708{
709 if (p->code_bitmap) {
7267c094 710 g_free(p->code_bitmap);
9fa3e853
FB
711 p->code_bitmap = NULL;
712 }
713 p->code_write_count = 0;
714}
715
5cd2c5b6
RH
716/* Set to NULL all the 'first_tb' fields in all PageDescs. */
717
718static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 719{
5cd2c5b6 720 int i;
fd6ce8f6 721
5cd2c5b6
RH
722 if (*lp == NULL) {
723 return;
724 }
725 if (level == 0) {
726 PageDesc *pd = *lp;
7296abac 727 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
728 pd[i].first_tb = NULL;
729 invalidate_page_bitmap(pd + i);
fd6ce8f6 730 }
5cd2c5b6
RH
731 } else {
732 void **pp = *lp;
7296abac 733 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
734 page_flush_tb_1 (level - 1, pp + i);
735 }
736 }
737}
738
739static void page_flush_tb(void)
740{
741 int i;
742 for (i = 0; i < V_L1_SIZE; i++) {
743 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
744 }
745}
746
747/* flush all the translation blocks */
d4e8164f 748/* XXX: tb_flush is currently not thread safe */
6a00d601 749void tb_flush(CPUState *env1)
fd6ce8f6 750{
6a00d601 751 CPUState *env;
0124311e 752#if defined(DEBUG_FLUSH)
ab3d1727
BS
753 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
754 (unsigned long)(code_gen_ptr - code_gen_buffer),
755 nb_tbs, nb_tbs > 0 ?
756 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 757#endif
26a5f13b 758 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
759 cpu_abort(env1, "Internal error: code buffer overflow\n");
760
fd6ce8f6 761 nb_tbs = 0;
3b46e624 762
6a00d601
FB
763 for(env = first_cpu; env != NULL; env = env->next_cpu) {
764 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
765 }
9fa3e853 766
8a8a608f 767 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 768 page_flush_tb();
9fa3e853 769
fd6ce8f6 770 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
771 /* XXX: flush processor icache at this point if cache flush is
772 expensive */
e3db7226 773 tb_flush_count++;
fd6ce8f6
FB
774}
775
776#ifdef DEBUG_TB_CHECK
777
bc98a7ef 778static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
779{
780 TranslationBlock *tb;
781 int i;
782 address &= TARGET_PAGE_MASK;
99773bd4
PB
783 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
784 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
785 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
786 address >= tb->pc + tb->size)) {
0bf9e31a
BS
787 printf("ERROR invalidate: address=" TARGET_FMT_lx
788 " PC=%08lx size=%04x\n",
99773bd4 789 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
790 }
791 }
792 }
793}
794
795/* verify that all the pages have correct rights for code */
796static void tb_page_check(void)
797{
798 TranslationBlock *tb;
799 int i, flags1, flags2;
3b46e624 800
99773bd4
PB
801 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
802 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
803 flags1 = page_get_flags(tb->pc);
804 flags2 = page_get_flags(tb->pc + tb->size - 1);
805 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
806 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 807 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
808 }
809 }
810 }
811}
812
813#endif
814
815/* invalidate one TB */
816static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
817 int next_offset)
818{
819 TranslationBlock *tb1;
820 for(;;) {
821 tb1 = *ptb;
822 if (tb1 == tb) {
823 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
824 break;
825 }
826 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
827 }
828}
829
9fa3e853
FB
830static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
831{
832 TranslationBlock *tb1;
833 unsigned int n1;
834
835 for(;;) {
836 tb1 = *ptb;
837 n1 = (long)tb1 & 3;
838 tb1 = (TranslationBlock *)((long)tb1 & ~3);
839 if (tb1 == tb) {
840 *ptb = tb1->page_next[n1];
841 break;
842 }
843 ptb = &tb1->page_next[n1];
844 }
845}
846
d4e8164f
FB
847static inline void tb_jmp_remove(TranslationBlock *tb, int n)
848{
849 TranslationBlock *tb1, **ptb;
850 unsigned int n1;
851
852 ptb = &tb->jmp_next[n];
853 tb1 = *ptb;
854 if (tb1) {
855 /* find tb(n) in circular list */
856 for(;;) {
857 tb1 = *ptb;
858 n1 = (long)tb1 & 3;
859 tb1 = (TranslationBlock *)((long)tb1 & ~3);
860 if (n1 == n && tb1 == tb)
861 break;
862 if (n1 == 2) {
863 ptb = &tb1->jmp_first;
864 } else {
865 ptb = &tb1->jmp_next[n1];
866 }
867 }
868 /* now we can suppress tb(n) from the list */
869 *ptb = tb->jmp_next[n];
870
871 tb->jmp_next[n] = NULL;
872 }
873}
874
875/* reset the jump entry 'n' of a TB so that it is not chained to
876 another TB */
877static inline void tb_reset_jump(TranslationBlock *tb, int n)
878{
879 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
880}
881
41c1b1c9 882void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 883{
6a00d601 884 CPUState *env;
8a40a180 885 PageDesc *p;
d4e8164f 886 unsigned int h, n1;
41c1b1c9 887 tb_page_addr_t phys_pc;
8a40a180 888 TranslationBlock *tb1, *tb2;
3b46e624 889
8a40a180
FB
890 /* remove the TB from the hash list */
891 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
892 h = tb_phys_hash_func(phys_pc);
5fafdf24 893 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
894 offsetof(TranslationBlock, phys_hash_next));
895
896 /* remove the TB from the page list */
897 if (tb->page_addr[0] != page_addr) {
898 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
899 tb_page_remove(&p->first_tb, tb);
900 invalidate_page_bitmap(p);
901 }
902 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
903 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
904 tb_page_remove(&p->first_tb, tb);
905 invalidate_page_bitmap(p);
906 }
907
36bdbe54 908 tb_invalidated_flag = 1;
59817ccb 909
fd6ce8f6 910 /* remove the TB from the hash list */
8a40a180 911 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
912 for(env = first_cpu; env != NULL; env = env->next_cpu) {
913 if (env->tb_jmp_cache[h] == tb)
914 env->tb_jmp_cache[h] = NULL;
915 }
d4e8164f
FB
916
917 /* suppress this TB from the two jump lists */
918 tb_jmp_remove(tb, 0);
919 tb_jmp_remove(tb, 1);
920
921 /* suppress any remaining jumps to this TB */
922 tb1 = tb->jmp_first;
923 for(;;) {
924 n1 = (long)tb1 & 3;
925 if (n1 == 2)
926 break;
927 tb1 = (TranslationBlock *)((long)tb1 & ~3);
928 tb2 = tb1->jmp_next[n1];
929 tb_reset_jump(tb1, n1);
930 tb1->jmp_next[n1] = NULL;
931 tb1 = tb2;
932 }
933 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 934
e3db7226 935 tb_phys_invalidate_count++;
9fa3e853
FB
936}
937
938static inline void set_bits(uint8_t *tab, int start, int len)
939{
940 int end, mask, end1;
941
942 end = start + len;
943 tab += start >> 3;
944 mask = 0xff << (start & 7);
945 if ((start & ~7) == (end & ~7)) {
946 if (start < end) {
947 mask &= ~(0xff << (end & 7));
948 *tab |= mask;
949 }
950 } else {
951 *tab++ |= mask;
952 start = (start + 8) & ~7;
953 end1 = end & ~7;
954 while (start < end1) {
955 *tab++ = 0xff;
956 start += 8;
957 }
958 if (start < end) {
959 mask = ~(0xff << (end & 7));
960 *tab |= mask;
961 }
962 }
963}
964
965static void build_page_bitmap(PageDesc *p)
966{
967 int n, tb_start, tb_end;
968 TranslationBlock *tb;
3b46e624 969
7267c094 970 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
971
972 tb = p->first_tb;
973 while (tb != NULL) {
974 n = (long)tb & 3;
975 tb = (TranslationBlock *)((long)tb & ~3);
976 /* NOTE: this is subtle as a TB may span two physical pages */
977 if (n == 0) {
978 /* NOTE: tb_end may be after the end of the page, but
979 it is not a problem */
980 tb_start = tb->pc & ~TARGET_PAGE_MASK;
981 tb_end = tb_start + tb->size;
982 if (tb_end > TARGET_PAGE_SIZE)
983 tb_end = TARGET_PAGE_SIZE;
984 } else {
985 tb_start = 0;
986 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
987 }
988 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
989 tb = tb->page_next[n];
990 }
991}
992
2e70f6ef
PB
993TranslationBlock *tb_gen_code(CPUState *env,
994 target_ulong pc, target_ulong cs_base,
995 int flags, int cflags)
d720b93d
FB
996{
997 TranslationBlock *tb;
998 uint8_t *tc_ptr;
41c1b1c9
PB
999 tb_page_addr_t phys_pc, phys_page2;
1000 target_ulong virt_page2;
d720b93d
FB
1001 int code_gen_size;
1002
41c1b1c9 1003 phys_pc = get_page_addr_code(env, pc);
c27004ec 1004 tb = tb_alloc(pc);
d720b93d
FB
1005 if (!tb) {
1006 /* flush must be done */
1007 tb_flush(env);
1008 /* cannot fail at this point */
c27004ec 1009 tb = tb_alloc(pc);
2e70f6ef
PB
1010 /* Don't forget to invalidate previous TB info. */
1011 tb_invalidated_flag = 1;
d720b93d
FB
1012 }
1013 tc_ptr = code_gen_ptr;
1014 tb->tc_ptr = tc_ptr;
1015 tb->cs_base = cs_base;
1016 tb->flags = flags;
1017 tb->cflags = cflags;
d07bde88 1018 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1019 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1020
d720b93d 1021 /* check next page if needed */
c27004ec 1022 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1023 phys_page2 = -1;
c27004ec 1024 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1025 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1026 }
41c1b1c9 1027 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1028 return tb;
d720b93d 1029}
3b46e624 1030
9fa3e853
FB
1031/* invalidate all TBs which intersect with the target physical page
1032 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1033 the same physical page. 'is_cpu_write_access' should be true if called
1034 from a real cpu write access: the virtual CPU will exit the current
1035 TB if code is modified inside this TB. */
41c1b1c9 1036void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1037 int is_cpu_write_access)
1038{
6b917547 1039 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1040 CPUState *env = cpu_single_env;
41c1b1c9 1041 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1042 PageDesc *p;
1043 int n;
1044#ifdef TARGET_HAS_PRECISE_SMC
1045 int current_tb_not_found = is_cpu_write_access;
1046 TranslationBlock *current_tb = NULL;
1047 int current_tb_modified = 0;
1048 target_ulong current_pc = 0;
1049 target_ulong current_cs_base = 0;
1050 int current_flags = 0;
1051#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1052
1053 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1054 if (!p)
9fa3e853 1055 return;
5fafdf24 1056 if (!p->code_bitmap &&
d720b93d
FB
1057 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1058 is_cpu_write_access) {
9fa3e853
FB
1059 /* build code bitmap */
1060 build_page_bitmap(p);
1061 }
1062
1063 /* we remove all the TBs in the range [start, end[ */
1064 /* XXX: see if in some cases it could be faster to invalidate all the code */
1065 tb = p->first_tb;
1066 while (tb != NULL) {
1067 n = (long)tb & 3;
1068 tb = (TranslationBlock *)((long)tb & ~3);
1069 tb_next = tb->page_next[n];
1070 /* NOTE: this is subtle as a TB may span two physical pages */
1071 if (n == 0) {
1072 /* NOTE: tb_end may be after the end of the page, but
1073 it is not a problem */
1074 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1075 tb_end = tb_start + tb->size;
1076 } else {
1077 tb_start = tb->page_addr[1];
1078 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1079 }
1080 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1081#ifdef TARGET_HAS_PRECISE_SMC
1082 if (current_tb_not_found) {
1083 current_tb_not_found = 0;
1084 current_tb = NULL;
2e70f6ef 1085 if (env->mem_io_pc) {
d720b93d 1086 /* now we have a real cpu fault */
2e70f6ef 1087 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1088 }
1089 }
1090 if (current_tb == tb &&
2e70f6ef 1091 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1092 /* If we are modifying the current TB, we must stop
1093 its execution. We could be more precise by checking
1094 that the modification is after the current PC, but it
1095 would require a specialized function to partially
1096 restore the CPU state */
3b46e624 1097
d720b93d 1098 current_tb_modified = 1;
618ba8e6 1099 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1100 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1101 &current_flags);
d720b93d
FB
1102 }
1103#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1104 /* we need to do that to handle the case where a signal
1105 occurs while doing tb_phys_invalidate() */
1106 saved_tb = NULL;
1107 if (env) {
1108 saved_tb = env->current_tb;
1109 env->current_tb = NULL;
1110 }
9fa3e853 1111 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1112 if (env) {
1113 env->current_tb = saved_tb;
1114 if (env->interrupt_request && env->current_tb)
1115 cpu_interrupt(env, env->interrupt_request);
1116 }
9fa3e853
FB
1117 }
1118 tb = tb_next;
1119 }
1120#if !defined(CONFIG_USER_ONLY)
1121 /* if no code remaining, no need to continue to use slow writes */
1122 if (!p->first_tb) {
1123 invalidate_page_bitmap(p);
d720b93d 1124 if (is_cpu_write_access) {
2e70f6ef 1125 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1126 }
1127 }
1128#endif
1129#ifdef TARGET_HAS_PRECISE_SMC
1130 if (current_tb_modified) {
1131 /* we generate a block containing just the instruction
1132 modifying the memory. It will ensure that it cannot modify
1133 itself */
ea1c1802 1134 env->current_tb = NULL;
2e70f6ef 1135 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1136 cpu_resume_from_signal(env, NULL);
9fa3e853 1137 }
fd6ce8f6 1138#endif
9fa3e853 1139}
fd6ce8f6 1140
9fa3e853 1141/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1142static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1143{
1144 PageDesc *p;
1145 int offset, b;
59817ccb 1146#if 0
a4193c8a 1147 if (1) {
93fcfe39
AL
1148 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1149 cpu_single_env->mem_io_vaddr, len,
1150 cpu_single_env->eip,
1151 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1152 }
1153#endif
9fa3e853 1154 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1155 if (!p)
9fa3e853
FB
1156 return;
1157 if (p->code_bitmap) {
1158 offset = start & ~TARGET_PAGE_MASK;
1159 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1160 if (b & ((1 << len) - 1))
1161 goto do_invalidate;
1162 } else {
1163 do_invalidate:
d720b93d 1164 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1165 }
1166}
1167
9fa3e853 1168#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1169static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1170 unsigned long pc, void *puc)
9fa3e853 1171{
6b917547 1172 TranslationBlock *tb;
9fa3e853 1173 PageDesc *p;
6b917547 1174 int n;
d720b93d 1175#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1176 TranslationBlock *current_tb = NULL;
d720b93d 1177 CPUState *env = cpu_single_env;
6b917547
AL
1178 int current_tb_modified = 0;
1179 target_ulong current_pc = 0;
1180 target_ulong current_cs_base = 0;
1181 int current_flags = 0;
d720b93d 1182#endif
9fa3e853
FB
1183
1184 addr &= TARGET_PAGE_MASK;
1185 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1186 if (!p)
9fa3e853
FB
1187 return;
1188 tb = p->first_tb;
d720b93d
FB
1189#ifdef TARGET_HAS_PRECISE_SMC
1190 if (tb && pc != 0) {
1191 current_tb = tb_find_pc(pc);
1192 }
1193#endif
9fa3e853
FB
1194 while (tb != NULL) {
1195 n = (long)tb & 3;
1196 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1197#ifdef TARGET_HAS_PRECISE_SMC
1198 if (current_tb == tb &&
2e70f6ef 1199 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1200 /* If we are modifying the current TB, we must stop
1201 its execution. We could be more precise by checking
1202 that the modification is after the current PC, but it
1203 would require a specialized function to partially
1204 restore the CPU state */
3b46e624 1205
d720b93d 1206 current_tb_modified = 1;
618ba8e6 1207 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1208 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1209 &current_flags);
d720b93d
FB
1210 }
1211#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1212 tb_phys_invalidate(tb, addr);
1213 tb = tb->page_next[n];
1214 }
fd6ce8f6 1215 p->first_tb = NULL;
d720b93d
FB
1216#ifdef TARGET_HAS_PRECISE_SMC
1217 if (current_tb_modified) {
1218 /* we generate a block containing just the instruction
1219 modifying the memory. It will ensure that it cannot modify
1220 itself */
ea1c1802 1221 env->current_tb = NULL;
2e70f6ef 1222 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1223 cpu_resume_from_signal(env, puc);
1224 }
1225#endif
fd6ce8f6 1226}
9fa3e853 1227#endif
fd6ce8f6
FB
1228
1229/* add the tb in the target page and protect it if necessary */
5fafdf24 1230static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1231 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1232{
1233 PageDesc *p;
4429ab44
JQ
1234#ifndef CONFIG_USER_ONLY
1235 bool page_already_protected;
1236#endif
9fa3e853
FB
1237
1238 tb->page_addr[n] = page_addr;
5cd2c5b6 1239 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1240 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1241#ifndef CONFIG_USER_ONLY
1242 page_already_protected = p->first_tb != NULL;
1243#endif
9fa3e853
FB
1244 p->first_tb = (TranslationBlock *)((long)tb | n);
1245 invalidate_page_bitmap(p);
fd6ce8f6 1246
107db443 1247#if defined(TARGET_HAS_SMC) || 1
d720b93d 1248
9fa3e853 1249#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1250 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1251 target_ulong addr;
1252 PageDesc *p2;
9fa3e853
FB
1253 int prot;
1254
fd6ce8f6
FB
1255 /* force the host page as non writable (writes will have a
1256 page fault + mprotect overhead) */
53a5960a 1257 page_addr &= qemu_host_page_mask;
fd6ce8f6 1258 prot = 0;
53a5960a
PB
1259 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1260 addr += TARGET_PAGE_SIZE) {
1261
1262 p2 = page_find (addr >> TARGET_PAGE_BITS);
1263 if (!p2)
1264 continue;
1265 prot |= p2->flags;
1266 p2->flags &= ~PAGE_WRITE;
53a5960a 1267 }
5fafdf24 1268 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1269 (prot & PAGE_BITS) & ~PAGE_WRITE);
1270#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1271 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1272 page_addr);
fd6ce8f6 1273#endif
fd6ce8f6 1274 }
9fa3e853
FB
1275#else
1276 /* if some code is already present, then the pages are already
1277 protected. So we handle the case where only the first TB is
1278 allocated in a physical page */
4429ab44 1279 if (!page_already_protected) {
6a00d601 1280 tlb_protect_code(page_addr);
9fa3e853
FB
1281 }
1282#endif
d720b93d
FB
1283
1284#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1285}
1286
9fa3e853
FB
1287/* add a new TB and link it to the physical page tables. phys_page2 is
1288 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1289void tb_link_page(TranslationBlock *tb,
1290 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1291{
9fa3e853
FB
1292 unsigned int h;
1293 TranslationBlock **ptb;
1294
c8a706fe
PB
1295 /* Grab the mmap lock to stop another thread invalidating this TB
1296 before we are done. */
1297 mmap_lock();
9fa3e853
FB
1298 /* add in the physical hash table */
1299 h = tb_phys_hash_func(phys_pc);
1300 ptb = &tb_phys_hash[h];
1301 tb->phys_hash_next = *ptb;
1302 *ptb = tb;
fd6ce8f6
FB
1303
1304 /* add in the page list */
9fa3e853
FB
1305 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1306 if (phys_page2 != -1)
1307 tb_alloc_page(tb, 1, phys_page2);
1308 else
1309 tb->page_addr[1] = -1;
9fa3e853 1310
d4e8164f
FB
1311 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1312 tb->jmp_next[0] = NULL;
1313 tb->jmp_next[1] = NULL;
1314
1315 /* init original jump addresses */
1316 if (tb->tb_next_offset[0] != 0xffff)
1317 tb_reset_jump(tb, 0);
1318 if (tb->tb_next_offset[1] != 0xffff)
1319 tb_reset_jump(tb, 1);
8a40a180
FB
1320
1321#ifdef DEBUG_TB_CHECK
1322 tb_page_check();
1323#endif
c8a706fe 1324 mmap_unlock();
fd6ce8f6
FB
1325}
1326
9fa3e853
FB
1327/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1328 tb[1].tc_ptr. Return NULL if not found */
1329TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1330{
9fa3e853
FB
1331 int m_min, m_max, m;
1332 unsigned long v;
1333 TranslationBlock *tb;
a513fe19
FB
1334
1335 if (nb_tbs <= 0)
1336 return NULL;
1337 if (tc_ptr < (unsigned long)code_gen_buffer ||
1338 tc_ptr >= (unsigned long)code_gen_ptr)
1339 return NULL;
1340 /* binary search (cf Knuth) */
1341 m_min = 0;
1342 m_max = nb_tbs - 1;
1343 while (m_min <= m_max) {
1344 m = (m_min + m_max) >> 1;
1345 tb = &tbs[m];
1346 v = (unsigned long)tb->tc_ptr;
1347 if (v == tc_ptr)
1348 return tb;
1349 else if (tc_ptr < v) {
1350 m_max = m - 1;
1351 } else {
1352 m_min = m + 1;
1353 }
5fafdf24 1354 }
a513fe19
FB
1355 return &tbs[m_max];
1356}
7501267e 1357
ea041c0e
FB
1358static void tb_reset_jump_recursive(TranslationBlock *tb);
1359
1360static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1361{
1362 TranslationBlock *tb1, *tb_next, **ptb;
1363 unsigned int n1;
1364
1365 tb1 = tb->jmp_next[n];
1366 if (tb1 != NULL) {
1367 /* find head of list */
1368 for(;;) {
1369 n1 = (long)tb1 & 3;
1370 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1371 if (n1 == 2)
1372 break;
1373 tb1 = tb1->jmp_next[n1];
1374 }
1375 /* we are now sure now that tb jumps to tb1 */
1376 tb_next = tb1;
1377
1378 /* remove tb from the jmp_first list */
1379 ptb = &tb_next->jmp_first;
1380 for(;;) {
1381 tb1 = *ptb;
1382 n1 = (long)tb1 & 3;
1383 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1384 if (n1 == n && tb1 == tb)
1385 break;
1386 ptb = &tb1->jmp_next[n1];
1387 }
1388 *ptb = tb->jmp_next[n];
1389 tb->jmp_next[n] = NULL;
3b46e624 1390
ea041c0e
FB
1391 /* suppress the jump to next tb in generated code */
1392 tb_reset_jump(tb, n);
1393
0124311e 1394 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1395 tb_reset_jump_recursive(tb_next);
1396 }
1397}
1398
1399static void tb_reset_jump_recursive(TranslationBlock *tb)
1400{
1401 tb_reset_jump_recursive2(tb, 0);
1402 tb_reset_jump_recursive2(tb, 1);
1403}
1404
1fddef4b 1405#if defined(TARGET_HAS_ICE)
94df27fd
PB
1406#if defined(CONFIG_USER_ONLY)
1407static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1408{
1409 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1410}
1411#else
d720b93d
FB
1412static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1413{
c227f099 1414 target_phys_addr_t addr;
9b3c35e0 1415 target_ulong pd;
c227f099 1416 ram_addr_t ram_addr;
f1f6e3b8 1417 PhysPageDesc p;
d720b93d 1418
c2f07f81
PB
1419 addr = cpu_get_phys_page_debug(env, pc);
1420 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 1421 pd = p.phys_offset;
c2f07f81 1422 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1423 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1424}
c27004ec 1425#endif
94df27fd 1426#endif /* TARGET_HAS_ICE */
d720b93d 1427
c527ee8f
PB
1428#if defined(CONFIG_USER_ONLY)
1429void cpu_watchpoint_remove_all(CPUState *env, int mask)
1430
1431{
1432}
1433
1434int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1435 int flags, CPUWatchpoint **watchpoint)
1436{
1437 return -ENOSYS;
1438}
1439#else
6658ffb8 1440/* Add a watchpoint. */
a1d1bb31
AL
1441int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1442 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1443{
b4051334 1444 target_ulong len_mask = ~(len - 1);
c0ce998e 1445 CPUWatchpoint *wp;
6658ffb8 1446
b4051334
AL
1447 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1448 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1449 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1450 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1451 return -EINVAL;
1452 }
7267c094 1453 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1454
1455 wp->vaddr = addr;
b4051334 1456 wp->len_mask = len_mask;
a1d1bb31
AL
1457 wp->flags = flags;
1458
2dc9f411 1459 /* keep all GDB-injected watchpoints in front */
c0ce998e 1460 if (flags & BP_GDB)
72cf2d4f 1461 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1462 else
72cf2d4f 1463 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1464
6658ffb8 1465 tlb_flush_page(env, addr);
a1d1bb31
AL
1466
1467 if (watchpoint)
1468 *watchpoint = wp;
1469 return 0;
6658ffb8
PB
1470}
1471
a1d1bb31
AL
1472/* Remove a specific watchpoint. */
1473int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1474 int flags)
6658ffb8 1475{
b4051334 1476 target_ulong len_mask = ~(len - 1);
a1d1bb31 1477 CPUWatchpoint *wp;
6658ffb8 1478
72cf2d4f 1479 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1480 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1481 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1482 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1483 return 0;
1484 }
1485 }
a1d1bb31 1486 return -ENOENT;
6658ffb8
PB
1487}
1488
a1d1bb31
AL
1489/* Remove a specific watchpoint by reference. */
1490void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1491{
72cf2d4f 1492 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1493
a1d1bb31
AL
1494 tlb_flush_page(env, watchpoint->vaddr);
1495
7267c094 1496 g_free(watchpoint);
a1d1bb31
AL
1497}
1498
1499/* Remove all matching watchpoints. */
1500void cpu_watchpoint_remove_all(CPUState *env, int mask)
1501{
c0ce998e 1502 CPUWatchpoint *wp, *next;
a1d1bb31 1503
72cf2d4f 1504 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1505 if (wp->flags & mask)
1506 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1507 }
7d03f82f 1508}
c527ee8f 1509#endif
7d03f82f 1510
a1d1bb31
AL
1511/* Add a breakpoint. */
1512int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1513 CPUBreakpoint **breakpoint)
4c3a88a2 1514{
1fddef4b 1515#if defined(TARGET_HAS_ICE)
c0ce998e 1516 CPUBreakpoint *bp;
3b46e624 1517
7267c094 1518 bp = g_malloc(sizeof(*bp));
4c3a88a2 1519
a1d1bb31
AL
1520 bp->pc = pc;
1521 bp->flags = flags;
1522
2dc9f411 1523 /* keep all GDB-injected breakpoints in front */
c0ce998e 1524 if (flags & BP_GDB)
72cf2d4f 1525 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1526 else
72cf2d4f 1527 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1528
d720b93d 1529 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1530
1531 if (breakpoint)
1532 *breakpoint = bp;
4c3a88a2
FB
1533 return 0;
1534#else
a1d1bb31 1535 return -ENOSYS;
4c3a88a2
FB
1536#endif
1537}
1538
a1d1bb31
AL
1539/* Remove a specific breakpoint. */
1540int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1541{
7d03f82f 1542#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1543 CPUBreakpoint *bp;
1544
72cf2d4f 1545 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1546 if (bp->pc == pc && bp->flags == flags) {
1547 cpu_breakpoint_remove_by_ref(env, bp);
1548 return 0;
1549 }
7d03f82f 1550 }
a1d1bb31
AL
1551 return -ENOENT;
1552#else
1553 return -ENOSYS;
7d03f82f
EI
1554#endif
1555}
1556
a1d1bb31
AL
1557/* Remove a specific breakpoint by reference. */
1558void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1559{
1fddef4b 1560#if defined(TARGET_HAS_ICE)
72cf2d4f 1561 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1562
a1d1bb31
AL
1563 breakpoint_invalidate(env, breakpoint->pc);
1564
7267c094 1565 g_free(breakpoint);
a1d1bb31
AL
1566#endif
1567}
1568
1569/* Remove all matching breakpoints. */
1570void cpu_breakpoint_remove_all(CPUState *env, int mask)
1571{
1572#if defined(TARGET_HAS_ICE)
c0ce998e 1573 CPUBreakpoint *bp, *next;
a1d1bb31 1574
72cf2d4f 1575 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1576 if (bp->flags & mask)
1577 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1578 }
4c3a88a2
FB
1579#endif
1580}
1581
c33a346e
FB
1582/* enable or disable single step mode. EXCP_DEBUG is returned by the
1583 CPU loop after each instruction */
1584void cpu_single_step(CPUState *env, int enabled)
1585{
1fddef4b 1586#if defined(TARGET_HAS_ICE)
c33a346e
FB
1587 if (env->singlestep_enabled != enabled) {
1588 env->singlestep_enabled = enabled;
e22a25c9
AL
1589 if (kvm_enabled())
1590 kvm_update_guest_debug(env, 0);
1591 else {
ccbb4d44 1592 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1593 /* XXX: only flush what is necessary */
1594 tb_flush(env);
1595 }
c33a346e
FB
1596 }
1597#endif
1598}
1599
34865134
FB
1600/* enable or disable low levels log */
1601void cpu_set_log(int log_flags)
1602{
1603 loglevel = log_flags;
1604 if (loglevel && !logfile) {
11fcfab4 1605 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1606 if (!logfile) {
1607 perror(logfilename);
1608 _exit(1);
1609 }
9fa3e853
FB
1610#if !defined(CONFIG_SOFTMMU)
1611 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1612 {
b55266b5 1613 static char logfile_buf[4096];
9fa3e853
FB
1614 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1615 }
daf767b1
SW
1616#elif defined(_WIN32)
1617 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1618 setvbuf(logfile, NULL, _IONBF, 0);
1619#else
34865134 1620 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1621#endif
e735b91c
PB
1622 log_append = 1;
1623 }
1624 if (!loglevel && logfile) {
1625 fclose(logfile);
1626 logfile = NULL;
34865134
FB
1627 }
1628}
1629
1630void cpu_set_log_filename(const char *filename)
1631{
1632 logfilename = strdup(filename);
e735b91c
PB
1633 if (logfile) {
1634 fclose(logfile);
1635 logfile = NULL;
1636 }
1637 cpu_set_log(loglevel);
34865134 1638}
c33a346e 1639
3098dba0 1640static void cpu_unlink_tb(CPUState *env)
ea041c0e 1641{
3098dba0
AJ
1642 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1643 problem and hope the cpu will stop of its own accord. For userspace
1644 emulation this often isn't actually as bad as it sounds. Often
1645 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1646 TranslationBlock *tb;
c227f099 1647 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1648
cab1b4bd 1649 spin_lock(&interrupt_lock);
3098dba0
AJ
1650 tb = env->current_tb;
1651 /* if the cpu is currently executing code, we must unlink it and
1652 all the potentially executing TB */
f76cfe56 1653 if (tb) {
3098dba0
AJ
1654 env->current_tb = NULL;
1655 tb_reset_jump_recursive(tb);
be214e6c 1656 }
cab1b4bd 1657 spin_unlock(&interrupt_lock);
3098dba0
AJ
1658}
1659
97ffbd8d 1660#ifndef CONFIG_USER_ONLY
3098dba0 1661/* mask must never be zero, except for A20 change call */
ec6959d0 1662static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1663{
1664 int old_mask;
be214e6c 1665
2e70f6ef 1666 old_mask = env->interrupt_request;
68a79315 1667 env->interrupt_request |= mask;
3098dba0 1668
8edac960
AL
1669 /*
1670 * If called from iothread context, wake the target cpu in
1671 * case its halted.
1672 */
b7680cb6 1673 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1674 qemu_cpu_kick(env);
1675 return;
1676 }
8edac960 1677
2e70f6ef 1678 if (use_icount) {
266910c4 1679 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1680 if (!can_do_io(env)
be214e6c 1681 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1682 cpu_abort(env, "Raised interrupt while not in I/O function");
1683 }
2e70f6ef 1684 } else {
3098dba0 1685 cpu_unlink_tb(env);
ea041c0e
FB
1686 }
1687}
1688
ec6959d0
JK
1689CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1690
97ffbd8d
JK
1691#else /* CONFIG_USER_ONLY */
1692
1693void cpu_interrupt(CPUState *env, int mask)
1694{
1695 env->interrupt_request |= mask;
1696 cpu_unlink_tb(env);
1697}
1698#endif /* CONFIG_USER_ONLY */
1699
b54ad049
FB
1700void cpu_reset_interrupt(CPUState *env, int mask)
1701{
1702 env->interrupt_request &= ~mask;
1703}
1704
3098dba0
AJ
1705void cpu_exit(CPUState *env)
1706{
1707 env->exit_request = 1;
1708 cpu_unlink_tb(env);
1709}
1710
c7cd6a37 1711const CPULogItem cpu_log_items[] = {
5fafdf24 1712 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1713 "show generated host assembly code for each compiled TB" },
1714 { CPU_LOG_TB_IN_ASM, "in_asm",
1715 "show target assembly code for each compiled TB" },
5fafdf24 1716 { CPU_LOG_TB_OP, "op",
57fec1fe 1717 "show micro ops for each compiled TB" },
f193c797 1718 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1719 "show micro ops "
1720#ifdef TARGET_I386
1721 "before eflags optimization and "
f193c797 1722#endif
e01a1157 1723 "after liveness analysis" },
f193c797
FB
1724 { CPU_LOG_INT, "int",
1725 "show interrupts/exceptions in short format" },
1726 { CPU_LOG_EXEC, "exec",
1727 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1728 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1729 "show CPU state before block translation" },
f193c797
FB
1730#ifdef TARGET_I386
1731 { CPU_LOG_PCALL, "pcall",
1732 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1733 { CPU_LOG_RESET, "cpu_reset",
1734 "show CPU state before CPU resets" },
f193c797 1735#endif
8e3a9fd2 1736#ifdef DEBUG_IOPORT
fd872598
FB
1737 { CPU_LOG_IOPORT, "ioport",
1738 "show all i/o ports accesses" },
8e3a9fd2 1739#endif
f193c797
FB
1740 { 0, NULL, NULL },
1741};
1742
1743static int cmp1(const char *s1, int n, const char *s2)
1744{
1745 if (strlen(s2) != n)
1746 return 0;
1747 return memcmp(s1, s2, n) == 0;
1748}
3b46e624 1749
f193c797
FB
1750/* takes a comma separated list of log masks. Return 0 if error. */
1751int cpu_str_to_log_mask(const char *str)
1752{
c7cd6a37 1753 const CPULogItem *item;
f193c797
FB
1754 int mask;
1755 const char *p, *p1;
1756
1757 p = str;
1758 mask = 0;
1759 for(;;) {
1760 p1 = strchr(p, ',');
1761 if (!p1)
1762 p1 = p + strlen(p);
9742bf26
YT
1763 if(cmp1(p,p1-p,"all")) {
1764 for(item = cpu_log_items; item->mask != 0; item++) {
1765 mask |= item->mask;
1766 }
1767 } else {
1768 for(item = cpu_log_items; item->mask != 0; item++) {
1769 if (cmp1(p, p1 - p, item->name))
1770 goto found;
1771 }
1772 return 0;
f193c797 1773 }
f193c797
FB
1774 found:
1775 mask |= item->mask;
1776 if (*p1 != ',')
1777 break;
1778 p = p1 + 1;
1779 }
1780 return mask;
1781}
ea041c0e 1782
7501267e
FB
1783void cpu_abort(CPUState *env, const char *fmt, ...)
1784{
1785 va_list ap;
493ae1f0 1786 va_list ap2;
7501267e
FB
1787
1788 va_start(ap, fmt);
493ae1f0 1789 va_copy(ap2, ap);
7501267e
FB
1790 fprintf(stderr, "qemu: fatal: ");
1791 vfprintf(stderr, fmt, ap);
1792 fprintf(stderr, "\n");
1793#ifdef TARGET_I386
7fe48483
FB
1794 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1795#else
1796 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1797#endif
93fcfe39
AL
1798 if (qemu_log_enabled()) {
1799 qemu_log("qemu: fatal: ");
1800 qemu_log_vprintf(fmt, ap2);
1801 qemu_log("\n");
f9373291 1802#ifdef TARGET_I386
93fcfe39 1803 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1804#else
93fcfe39 1805 log_cpu_state(env, 0);
f9373291 1806#endif
31b1a7b4 1807 qemu_log_flush();
93fcfe39 1808 qemu_log_close();
924edcae 1809 }
493ae1f0 1810 va_end(ap2);
f9373291 1811 va_end(ap);
fd052bf6
RV
1812#if defined(CONFIG_USER_ONLY)
1813 {
1814 struct sigaction act;
1815 sigfillset(&act.sa_mask);
1816 act.sa_handler = SIG_DFL;
1817 sigaction(SIGABRT, &act, NULL);
1818 }
1819#endif
7501267e
FB
1820 abort();
1821}
1822
c5be9f08
TS
1823CPUState *cpu_copy(CPUState *env)
1824{
01ba9816 1825 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1826 CPUState *next_cpu = new_env->next_cpu;
1827 int cpu_index = new_env->cpu_index;
5a38f081
AL
1828#if defined(TARGET_HAS_ICE)
1829 CPUBreakpoint *bp;
1830 CPUWatchpoint *wp;
1831#endif
1832
c5be9f08 1833 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1834
1835 /* Preserve chaining and index. */
c5be9f08
TS
1836 new_env->next_cpu = next_cpu;
1837 new_env->cpu_index = cpu_index;
5a38f081
AL
1838
1839 /* Clone all break/watchpoints.
1840 Note: Once we support ptrace with hw-debug register access, make sure
1841 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1842 QTAILQ_INIT(&env->breakpoints);
1843 QTAILQ_INIT(&env->watchpoints);
5a38f081 1844#if defined(TARGET_HAS_ICE)
72cf2d4f 1845 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1846 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1847 }
72cf2d4f 1848 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1849 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1850 wp->flags, NULL);
1851 }
1852#endif
1853
c5be9f08
TS
1854 return new_env;
1855}
1856
0124311e
FB
1857#if !defined(CONFIG_USER_ONLY)
1858
5c751e99
EI
1859static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1860{
1861 unsigned int i;
1862
1863 /* Discard jump cache entries for any tb which might potentially
1864 overlap the flushed page. */
1865 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1866 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1867 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1868
1869 i = tb_jmp_cache_hash_page(addr);
1870 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1871 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1872}
1873
08738984
IK
1874static CPUTLBEntry s_cputlb_empty_entry = {
1875 .addr_read = -1,
1876 .addr_write = -1,
1877 .addr_code = -1,
1878 .addend = -1,
1879};
1880
ee8b7021
FB
1881/* NOTE: if flush_global is true, also flush global entries (not
1882 implemented yet) */
1883void tlb_flush(CPUState *env, int flush_global)
33417e70 1884{
33417e70 1885 int i;
0124311e 1886
9fa3e853
FB
1887#if defined(DEBUG_TLB)
1888 printf("tlb_flush:\n");
1889#endif
0124311e
FB
1890 /* must reset current TB so that interrupts cannot modify the
1891 links while we are modifying them */
1892 env->current_tb = NULL;
1893
33417e70 1894 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1895 int mmu_idx;
1896 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1897 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1898 }
33417e70 1899 }
9fa3e853 1900
8a40a180 1901 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1902
d4c430a8
PB
1903 env->tlb_flush_addr = -1;
1904 env->tlb_flush_mask = 0;
e3db7226 1905 tlb_flush_count++;
33417e70
FB
1906}
1907
274da6b2 1908static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1909{
5fafdf24 1910 if (addr == (tlb_entry->addr_read &
84b7b8e7 1911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1912 addr == (tlb_entry->addr_write &
84b7b8e7 1913 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1914 addr == (tlb_entry->addr_code &
84b7b8e7 1915 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1916 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1917 }
61382a50
FB
1918}
1919
2e12669a 1920void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1921{
8a40a180 1922 int i;
cfde4bd9 1923 int mmu_idx;
0124311e 1924
9fa3e853 1925#if defined(DEBUG_TLB)
108c49b8 1926 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1927#endif
d4c430a8
PB
1928 /* Check if we need to flush due to large pages. */
1929 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1930#if defined(DEBUG_TLB)
1931 printf("tlb_flush_page: forced full flush ("
1932 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1933 env->tlb_flush_addr, env->tlb_flush_mask);
1934#endif
1935 tlb_flush(env, 1);
1936 return;
1937 }
0124311e
FB
1938 /* must reset current TB so that interrupts cannot modify the
1939 links while we are modifying them */
1940 env->current_tb = NULL;
61382a50
FB
1941
1942 addr &= TARGET_PAGE_MASK;
1943 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1944 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1945 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1946
5c751e99 1947 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1948}
1949
9fa3e853
FB
1950/* update the TLBs so that writes to code in the virtual page 'addr'
1951 can be detected */
c227f099 1952static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1953{
5fafdf24 1954 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1955 ram_addr + TARGET_PAGE_SIZE,
1956 CODE_DIRTY_FLAG);
9fa3e853
FB
1957}
1958
9fa3e853 1959/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1960 tested for self modifying code */
c227f099 1961static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1962 target_ulong vaddr)
9fa3e853 1963{
f7c11b53 1964 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1965}
1966
5fafdf24 1967static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1968 unsigned long start, unsigned long length)
1969{
1970 unsigned long addr;
0e0df1e2 1971 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 1972 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1973 if ((addr - start) < length) {
0f459d16 1974 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1975 }
1976 }
1977}
1978
5579c7f3 1979/* Note: start and end must be within the same ram block. */
c227f099 1980void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1981 int dirty_flags)
1ccde1cb
FB
1982{
1983 CPUState *env;
4f2ac237 1984 unsigned long length, start1;
f7c11b53 1985 int i;
1ccde1cb
FB
1986
1987 start &= TARGET_PAGE_MASK;
1988 end = TARGET_PAGE_ALIGN(end);
1989
1990 length = end - start;
1991 if (length == 0)
1992 return;
f7c11b53 1993 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1994
1ccde1cb
FB
1995 /* we modify the TLB cache so that the dirty bit will be set again
1996 when accessing the range */
b2e0a138 1997 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1998 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1999 address comparisons below. */
b2e0a138 2000 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2001 != (end - 1) - start) {
2002 abort();
2003 }
2004
6a00d601 2005 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2006 int mmu_idx;
2007 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2008 for(i = 0; i < CPU_TLB_SIZE; i++)
2009 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2010 start1, length);
2011 }
6a00d601 2012 }
1ccde1cb
FB
2013}
2014
74576198
AL
2015int cpu_physical_memory_set_dirty_tracking(int enable)
2016{
f6f3fbca 2017 int ret = 0;
74576198 2018 in_migration = enable;
f6f3fbca 2019 return ret;
74576198
AL
2020}
2021
3a7d929e
FB
2022static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2023{
c227f099 2024 ram_addr_t ram_addr;
5579c7f3 2025 void *p;
3a7d929e 2026
0e0df1e2 2027 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2028 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2029 + tlb_entry->addend);
e890261f 2030 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2031 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2032 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2033 }
2034 }
2035}
2036
2037/* update the TLB according to the current state of the dirty bits */
2038void cpu_tlb_update_dirty(CPUState *env)
2039{
2040 int i;
cfde4bd9
IY
2041 int mmu_idx;
2042 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2043 for(i = 0; i < CPU_TLB_SIZE; i++)
2044 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2045 }
3a7d929e
FB
2046}
2047
0f459d16 2048static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2049{
0f459d16
PB
2050 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2051 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2052}
2053
0f459d16
PB
2054/* update the TLB corresponding to virtual page vaddr
2055 so that it is no longer dirty */
2056static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2057{
1ccde1cb 2058 int i;
cfde4bd9 2059 int mmu_idx;
1ccde1cb 2060
0f459d16 2061 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2062 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2063 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2064 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2065}
2066
d4c430a8
PB
2067/* Our TLB does not support large pages, so remember the area covered by
2068 large pages and trigger a full TLB flush if these are invalidated. */
2069static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2070 target_ulong size)
2071{
2072 target_ulong mask = ~(size - 1);
2073
2074 if (env->tlb_flush_addr == (target_ulong)-1) {
2075 env->tlb_flush_addr = vaddr & mask;
2076 env->tlb_flush_mask = mask;
2077 return;
2078 }
2079 /* Extend the existing region to include the new page.
2080 This is a compromise between unnecessary flushes and the cost
2081 of maintaining a full variable size TLB. */
2082 mask &= env->tlb_flush_mask;
2083 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2084 mask <<= 1;
2085 }
2086 env->tlb_flush_addr &= mask;
2087 env->tlb_flush_mask = mask;
2088}
2089
1d393fa2
AK
2090static bool is_ram_rom(ram_addr_t pd)
2091{
2092 pd &= ~TARGET_PAGE_MASK;
0e0df1e2 2093 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
1d393fa2
AK
2094}
2095
2096static bool is_ram_rom_romd(ram_addr_t pd)
2097{
2098 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2099}
2100
d4c430a8
PB
2101/* Add a new TLB entry. At most one entry for a given virtual address
2102 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2103 supplied size is only used by tlb_flush_page. */
2104void tlb_set_page(CPUState *env, target_ulong vaddr,
2105 target_phys_addr_t paddr, int prot,
2106 int mmu_idx, target_ulong size)
9fa3e853 2107{
f1f6e3b8 2108 PhysPageDesc p;
4f2ac237 2109 unsigned long pd;
9fa3e853 2110 unsigned int index;
4f2ac237 2111 target_ulong address;
0f459d16 2112 target_ulong code_address;
355b1943 2113 unsigned long addend;
84b7b8e7 2114 CPUTLBEntry *te;
a1d1bb31 2115 CPUWatchpoint *wp;
c227f099 2116 target_phys_addr_t iotlb;
9fa3e853 2117
d4c430a8
PB
2118 assert(size >= TARGET_PAGE_SIZE);
2119 if (size != TARGET_PAGE_SIZE) {
2120 tlb_add_large_page(env, vaddr, size);
2121 }
92e873b9 2122 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2123 pd = p.phys_offset;
9fa3e853 2124#if defined(DEBUG_TLB)
7fd3f494
SW
2125 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2126 " prot=%x idx=%d pd=0x%08lx\n",
2127 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2128#endif
2129
0f459d16 2130 address = vaddr;
1d393fa2 2131 if (!is_ram_rom_romd(pd)) {
0f459d16
PB
2132 /* IO memory case (romd handled later) */
2133 address |= TLB_MMIO;
2134 }
5579c7f3 2135 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1d393fa2 2136 if (is_ram_rom(pd)) {
0f459d16
PB
2137 /* Normal RAM. */
2138 iotlb = pd & TARGET_PAGE_MASK;
0e0df1e2
AK
2139 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2140 iotlb |= io_mem_notdirty.ram_addr;
0f459d16 2141 else
0e0df1e2 2142 iotlb |= io_mem_rom.ram_addr;
0f459d16 2143 } else {
ccbb4d44 2144 /* IO handlers are currently passed a physical address.
0f459d16
PB
2145 It would be nice to pass an offset from the base address
2146 of that region. This would avoid having to special case RAM,
2147 and avoid full address decoding in every device.
2148 We can't use the high bits of pd for this because
2149 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2150 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2151 iotlb += p.region_offset;
0f459d16
PB
2152 }
2153
2154 code_address = address;
2155 /* Make accesses to pages with watchpoints go via the
2156 watchpoint trap routines. */
72cf2d4f 2157 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2158 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2159 /* Avoid trapping reads of pages with a write breakpoint. */
2160 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1ec9b909 2161 iotlb = io_mem_watch.ram_addr + paddr;
bf298f83
JK
2162 address |= TLB_MMIO;
2163 break;
2164 }
6658ffb8 2165 }
0f459d16 2166 }
d79acba4 2167
0f459d16
PB
2168 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2169 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2170 te = &env->tlb_table[mmu_idx][index];
2171 te->addend = addend - vaddr;
2172 if (prot & PAGE_READ) {
2173 te->addr_read = address;
2174 } else {
2175 te->addr_read = -1;
2176 }
5c751e99 2177
0f459d16
PB
2178 if (prot & PAGE_EXEC) {
2179 te->addr_code = code_address;
2180 } else {
2181 te->addr_code = -1;
2182 }
2183 if (prot & PAGE_WRITE) {
0e0df1e2 2184 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
0f459d16
PB
2185 (pd & IO_MEM_ROMD)) {
2186 /* Write access calls the I/O callback. */
2187 te->addr_write = address | TLB_MMIO;
0e0df1e2 2188 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
0f459d16
PB
2189 !cpu_physical_memory_is_dirty(pd)) {
2190 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2191 } else {
0f459d16 2192 te->addr_write = address;
9fa3e853 2193 }
0f459d16
PB
2194 } else {
2195 te->addr_write = -1;
9fa3e853 2196 }
9fa3e853
FB
2197}
2198
0124311e
FB
2199#else
2200
ee8b7021 2201void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2202{
2203}
2204
2e12669a 2205void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2206{
2207}
2208
edf8e2af
MW
2209/*
2210 * Walks guest process memory "regions" one by one
2211 * and calls callback function 'fn' for each region.
2212 */
5cd2c5b6
RH
2213
2214struct walk_memory_regions_data
2215{
2216 walk_memory_regions_fn fn;
2217 void *priv;
2218 unsigned long start;
2219 int prot;
2220};
2221
2222static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2223 abi_ulong end, int new_prot)
5cd2c5b6
RH
2224{
2225 if (data->start != -1ul) {
2226 int rc = data->fn(data->priv, data->start, end, data->prot);
2227 if (rc != 0) {
2228 return rc;
2229 }
2230 }
2231
2232 data->start = (new_prot ? end : -1ul);
2233 data->prot = new_prot;
2234
2235 return 0;
2236}
2237
2238static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2239 abi_ulong base, int level, void **lp)
5cd2c5b6 2240{
b480d9b7 2241 abi_ulong pa;
5cd2c5b6
RH
2242 int i, rc;
2243
2244 if (*lp == NULL) {
2245 return walk_memory_regions_end(data, base, 0);
2246 }
2247
2248 if (level == 0) {
2249 PageDesc *pd = *lp;
7296abac 2250 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2251 int prot = pd[i].flags;
2252
2253 pa = base | (i << TARGET_PAGE_BITS);
2254 if (prot != data->prot) {
2255 rc = walk_memory_regions_end(data, pa, prot);
2256 if (rc != 0) {
2257 return rc;
9fa3e853 2258 }
9fa3e853 2259 }
5cd2c5b6
RH
2260 }
2261 } else {
2262 void **pp = *lp;
7296abac 2263 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2264 pa = base | ((abi_ulong)i <<
2265 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2266 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2267 if (rc != 0) {
2268 return rc;
2269 }
2270 }
2271 }
2272
2273 return 0;
2274}
2275
2276int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2277{
2278 struct walk_memory_regions_data data;
2279 unsigned long i;
2280
2281 data.fn = fn;
2282 data.priv = priv;
2283 data.start = -1ul;
2284 data.prot = 0;
2285
2286 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2287 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2288 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2289 if (rc != 0) {
2290 return rc;
9fa3e853 2291 }
33417e70 2292 }
5cd2c5b6
RH
2293
2294 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2295}
2296
b480d9b7
PB
2297static int dump_region(void *priv, abi_ulong start,
2298 abi_ulong end, unsigned long prot)
edf8e2af
MW
2299{
2300 FILE *f = (FILE *)priv;
2301
b480d9b7
PB
2302 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2303 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2304 start, end, end - start,
2305 ((prot & PAGE_READ) ? 'r' : '-'),
2306 ((prot & PAGE_WRITE) ? 'w' : '-'),
2307 ((prot & PAGE_EXEC) ? 'x' : '-'));
2308
2309 return (0);
2310}
2311
2312/* dump memory mappings */
2313void page_dump(FILE *f)
2314{
2315 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2316 "start", "end", "size", "prot");
2317 walk_memory_regions(f, dump_region);
33417e70
FB
2318}
2319
53a5960a 2320int page_get_flags(target_ulong address)
33417e70 2321{
9fa3e853
FB
2322 PageDesc *p;
2323
2324 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2325 if (!p)
9fa3e853
FB
2326 return 0;
2327 return p->flags;
2328}
2329
376a7909
RH
2330/* Modify the flags of a page and invalidate the code if necessary.
2331 The flag PAGE_WRITE_ORG is positioned automatically depending
2332 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2333void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2334{
376a7909
RH
2335 target_ulong addr, len;
2336
2337 /* This function should never be called with addresses outside the
2338 guest address space. If this assert fires, it probably indicates
2339 a missing call to h2g_valid. */
b480d9b7
PB
2340#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2341 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2342#endif
2343 assert(start < end);
9fa3e853
FB
2344
2345 start = start & TARGET_PAGE_MASK;
2346 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2347
2348 if (flags & PAGE_WRITE) {
9fa3e853 2349 flags |= PAGE_WRITE_ORG;
376a7909
RH
2350 }
2351
2352 for (addr = start, len = end - start;
2353 len != 0;
2354 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2355 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356
2357 /* If the write protection bit is set, then we invalidate
2358 the code inside. */
5fafdf24 2359 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2360 (flags & PAGE_WRITE) &&
2361 p->first_tb) {
d720b93d 2362 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2363 }
2364 p->flags = flags;
2365 }
33417e70
FB
2366}
2367
3d97b40b
TS
2368int page_check_range(target_ulong start, target_ulong len, int flags)
2369{
2370 PageDesc *p;
2371 target_ulong end;
2372 target_ulong addr;
2373
376a7909
RH
2374 /* This function should never be called with addresses outside the
2375 guest address space. If this assert fires, it probably indicates
2376 a missing call to h2g_valid. */
338e9e6c
BS
2377#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2378 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2379#endif
2380
3e0650a9
RH
2381 if (len == 0) {
2382 return 0;
2383 }
376a7909
RH
2384 if (start + len - 1 < start) {
2385 /* We've wrapped around. */
55f280c9 2386 return -1;
376a7909 2387 }
55f280c9 2388
3d97b40b
TS
2389 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2390 start = start & TARGET_PAGE_MASK;
2391
376a7909
RH
2392 for (addr = start, len = end - start;
2393 len != 0;
2394 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2395 p = page_find(addr >> TARGET_PAGE_BITS);
2396 if( !p )
2397 return -1;
2398 if( !(p->flags & PAGE_VALID) )
2399 return -1;
2400
dae3270c 2401 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2402 return -1;
dae3270c
FB
2403 if (flags & PAGE_WRITE) {
2404 if (!(p->flags & PAGE_WRITE_ORG))
2405 return -1;
2406 /* unprotect the page if it was put read-only because it
2407 contains translated code */
2408 if (!(p->flags & PAGE_WRITE)) {
2409 if (!page_unprotect(addr, 0, NULL))
2410 return -1;
2411 }
2412 return 0;
2413 }
3d97b40b
TS
2414 }
2415 return 0;
2416}
2417
9fa3e853 2418/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2419 page. Return TRUE if the fault was successfully handled. */
53a5960a 2420int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2421{
45d679d6
AJ
2422 unsigned int prot;
2423 PageDesc *p;
53a5960a 2424 target_ulong host_start, host_end, addr;
9fa3e853 2425
c8a706fe
PB
2426 /* Technically this isn't safe inside a signal handler. However we
2427 know this only ever happens in a synchronous SEGV handler, so in
2428 practice it seems to be ok. */
2429 mmap_lock();
2430
45d679d6
AJ
2431 p = page_find(address >> TARGET_PAGE_BITS);
2432 if (!p) {
c8a706fe 2433 mmap_unlock();
9fa3e853 2434 return 0;
c8a706fe 2435 }
45d679d6 2436
9fa3e853
FB
2437 /* if the page was really writable, then we change its
2438 protection back to writable */
45d679d6
AJ
2439 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2440 host_start = address & qemu_host_page_mask;
2441 host_end = host_start + qemu_host_page_size;
2442
2443 prot = 0;
2444 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2445 p = page_find(addr >> TARGET_PAGE_BITS);
2446 p->flags |= PAGE_WRITE;
2447 prot |= p->flags;
2448
9fa3e853
FB
2449 /* and since the content will be modified, we must invalidate
2450 the corresponding translated code. */
45d679d6 2451 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2452#ifdef DEBUG_TB_CHECK
45d679d6 2453 tb_invalidate_check(addr);
9fa3e853 2454#endif
9fa3e853 2455 }
45d679d6
AJ
2456 mprotect((void *)g2h(host_start), qemu_host_page_size,
2457 prot & PAGE_BITS);
2458
2459 mmap_unlock();
2460 return 1;
9fa3e853 2461 }
c8a706fe 2462 mmap_unlock();
9fa3e853
FB
2463 return 0;
2464}
2465
6a00d601
FB
2466static inline void tlb_set_dirty(CPUState *env,
2467 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2468{
2469}
9fa3e853
FB
2470#endif /* defined(CONFIG_USER_ONLY) */
2471
e2eef170 2472#if !defined(CONFIG_USER_ONLY)
8da3ff18 2473
c04b2b78
PB
2474#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2475typedef struct subpage_t {
70c68e44 2476 MemoryRegion iomem;
c04b2b78 2477 target_phys_addr_t base;
f6405247
RH
2478 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2479 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2480} subpage_t;
2481
c227f099
AL
2482static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2483 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2484static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2485 ram_addr_t orig_memory,
2486 ram_addr_t region_offset);
db7b5426
BS
2487#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2488 need_subpage) \
2489 do { \
2490 if (addr > start_addr) \
2491 start_addr2 = 0; \
2492 else { \
2493 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2494 if (start_addr2 > 0) \
2495 need_subpage = 1; \
2496 } \
2497 \
49e9fba2 2498 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2499 end_addr2 = TARGET_PAGE_SIZE - 1; \
2500 else { \
2501 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2502 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2503 need_subpage = 1; \
2504 } \
2505 } while (0)
2506
8f2498f9
MT
2507/* register physical memory.
2508 For RAM, 'size' must be a multiple of the target page size.
2509 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2510 io memory page. The address used when calling the IO function is
2511 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2512 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2513 before calculating this offset. This should not be a problem unless
2514 the low bits of start_addr and region_offset differ. */
dd81124b
AK
2515void cpu_register_physical_memory_log(MemoryRegionSection *section,
2516 bool readable, bool readonly)
33417e70 2517{
dd81124b
AK
2518 target_phys_addr_t start_addr = section->offset_within_address_space;
2519 ram_addr_t size = section->size;
2520 ram_addr_t phys_offset = section->mr->ram_addr;
2521 ram_addr_t region_offset = section->offset_within_region;
c227f099 2522 target_phys_addr_t addr, end_addr;
92e873b9 2523 PhysPageDesc *p;
9d42037b 2524 CPUState *env;
c227f099 2525 ram_addr_t orig_size = size;
f6405247 2526 subpage_t *subpage;
33417e70 2527
dd81124b
AK
2528 if (memory_region_is_ram(section->mr)) {
2529 phys_offset += region_offset;
2530 region_offset = 0;
2531 }
2532
2533 if (!readable) {
2534 phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
2535 }
2536
2537 if (readonly) {
2538 phys_offset |= io_mem_rom.ram_addr;
2539 }
2540
3b8e6a2d 2541 assert(size);
f6f3fbca 2542
0e0df1e2 2543 if (phys_offset == io_mem_unassigned.ram_addr) {
67c4d23c
PB
2544 region_offset = start_addr;
2545 }
8da3ff18 2546 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2547 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2548 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2549
2550 addr = start_addr;
2551 do {
f1f6e3b8 2552 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
0e0df1e2 2553 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
c227f099
AL
2554 ram_addr_t orig_memory = p->phys_offset;
2555 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2556 int need_subpage = 0;
2557
2558 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2559 need_subpage);
f6405247 2560 if (need_subpage) {
db7b5426
BS
2561 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2562 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2563 &p->phys_offset, orig_memory,
2564 p->region_offset);
db7b5426
BS
2565 } else {
2566 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2567 >> IO_MEM_SHIFT];
2568 }
8da3ff18
PB
2569 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2570 region_offset);
2571 p->region_offset = 0;
db7b5426
BS
2572 } else {
2573 p->phys_offset = phys_offset;
2774c6d0 2574 p->region_offset = region_offset;
1d393fa2 2575 if (is_ram_rom_romd(phys_offset))
db7b5426
BS
2576 phys_offset += TARGET_PAGE_SIZE;
2577 }
2578 } else {
2579 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2580 p->phys_offset = phys_offset;
8da3ff18 2581 p->region_offset = region_offset;
1d393fa2 2582 if (is_ram_rom_romd(phys_offset)) {
db7b5426 2583 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2584 } else {
c227f099 2585 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2586 int need_subpage = 0;
2587
2588 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2589 end_addr2, need_subpage);
2590
f6405247 2591 if (need_subpage) {
db7b5426 2592 subpage = subpage_init((addr & TARGET_PAGE_MASK),
0e0df1e2
AK
2593 &p->phys_offset,
2594 io_mem_unassigned.ram_addr,
67c4d23c 2595 addr & TARGET_PAGE_MASK);
db7b5426 2596 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2597 phys_offset, region_offset);
2598 p->region_offset = 0;
db7b5426
BS
2599 }
2600 }
2601 }
8da3ff18 2602 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2603 addr += TARGET_PAGE_SIZE;
2604 } while (addr != end_addr);
3b46e624 2605
9d42037b
FB
2606 /* since each CPU stores ram addresses in its TLB cache, we must
2607 reset the modified entries */
2608 /* XXX: slow ! */
2609 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2610 tlb_flush(env, 1);
2611 }
33417e70
FB
2612}
2613
c227f099 2614void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2615{
2616 if (kvm_enabled())
2617 kvm_coalesce_mmio_region(addr, size);
2618}
2619
c227f099 2620void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2621{
2622 if (kvm_enabled())
2623 kvm_uncoalesce_mmio_region(addr, size);
2624}
2625
62a2744c
SY
2626void qemu_flush_coalesced_mmio_buffer(void)
2627{
2628 if (kvm_enabled())
2629 kvm_flush_coalesced_mmio_buffer();
2630}
2631
c902760f
MT
2632#if defined(__linux__) && !defined(TARGET_S390X)
2633
2634#include <sys/vfs.h>
2635
2636#define HUGETLBFS_MAGIC 0x958458f6
2637
2638static long gethugepagesize(const char *path)
2639{
2640 struct statfs fs;
2641 int ret;
2642
2643 do {
9742bf26 2644 ret = statfs(path, &fs);
c902760f
MT
2645 } while (ret != 0 && errno == EINTR);
2646
2647 if (ret != 0) {
9742bf26
YT
2648 perror(path);
2649 return 0;
c902760f
MT
2650 }
2651
2652 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2653 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2654
2655 return fs.f_bsize;
2656}
2657
04b16653
AW
2658static void *file_ram_alloc(RAMBlock *block,
2659 ram_addr_t memory,
2660 const char *path)
c902760f
MT
2661{
2662 char *filename;
2663 void *area;
2664 int fd;
2665#ifdef MAP_POPULATE
2666 int flags;
2667#endif
2668 unsigned long hpagesize;
2669
2670 hpagesize = gethugepagesize(path);
2671 if (!hpagesize) {
9742bf26 2672 return NULL;
c902760f
MT
2673 }
2674
2675 if (memory < hpagesize) {
2676 return NULL;
2677 }
2678
2679 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2680 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2681 return NULL;
2682 }
2683
2684 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2685 return NULL;
c902760f
MT
2686 }
2687
2688 fd = mkstemp(filename);
2689 if (fd < 0) {
9742bf26
YT
2690 perror("unable to create backing store for hugepages");
2691 free(filename);
2692 return NULL;
c902760f
MT
2693 }
2694 unlink(filename);
2695 free(filename);
2696
2697 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2698
2699 /*
2700 * ftruncate is not supported by hugetlbfs in older
2701 * hosts, so don't bother bailing out on errors.
2702 * If anything goes wrong with it under other filesystems,
2703 * mmap will fail.
2704 */
2705 if (ftruncate(fd, memory))
9742bf26 2706 perror("ftruncate");
c902760f
MT
2707
2708#ifdef MAP_POPULATE
2709 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2710 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2711 * to sidestep this quirk.
2712 */
2713 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2714 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2715#else
2716 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2717#endif
2718 if (area == MAP_FAILED) {
9742bf26
YT
2719 perror("file_ram_alloc: can't mmap RAM pages");
2720 close(fd);
2721 return (NULL);
c902760f 2722 }
04b16653 2723 block->fd = fd;
c902760f
MT
2724 return area;
2725}
2726#endif
2727
d17b5288 2728static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2729{
2730 RAMBlock *block, *next_block;
3e837b2c 2731 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2732
2733 if (QLIST_EMPTY(&ram_list.blocks))
2734 return 0;
2735
2736 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2737 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2738
2739 end = block->offset + block->length;
2740
2741 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2742 if (next_block->offset >= end) {
2743 next = MIN(next, next_block->offset);
2744 }
2745 }
2746 if (next - end >= size && next - end < mingap) {
3e837b2c 2747 offset = end;
04b16653
AW
2748 mingap = next - end;
2749 }
2750 }
3e837b2c
AW
2751
2752 if (offset == RAM_ADDR_MAX) {
2753 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2754 (uint64_t)size);
2755 abort();
2756 }
2757
04b16653
AW
2758 return offset;
2759}
2760
2761static ram_addr_t last_ram_offset(void)
d17b5288
AW
2762{
2763 RAMBlock *block;
2764 ram_addr_t last = 0;
2765
2766 QLIST_FOREACH(block, &ram_list.blocks, next)
2767 last = MAX(last, block->offset + block->length);
2768
2769 return last;
2770}
2771
c5705a77 2772void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2773{
2774 RAMBlock *new_block, *block;
2775
c5705a77
AK
2776 new_block = NULL;
2777 QLIST_FOREACH(block, &ram_list.blocks, next) {
2778 if (block->offset == addr) {
2779 new_block = block;
2780 break;
2781 }
2782 }
2783 assert(new_block);
2784 assert(!new_block->idstr[0]);
84b89d78
CM
2785
2786 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2787 char *id = dev->parent_bus->info->get_dev_path(dev);
2788 if (id) {
2789 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2790 g_free(id);
84b89d78
CM
2791 }
2792 }
2793 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2794
2795 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2796 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2797 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2798 new_block->idstr);
2799 abort();
2800 }
2801 }
c5705a77
AK
2802}
2803
2804ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2805 MemoryRegion *mr)
2806{
2807 RAMBlock *new_block;
2808
2809 size = TARGET_PAGE_ALIGN(size);
2810 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2811
7c637366 2812 new_block->mr = mr;
432d268c 2813 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2814 if (host) {
2815 new_block->host = host;
cd19cfa2 2816 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2817 } else {
2818 if (mem_path) {
c902760f 2819#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2820 new_block->host = file_ram_alloc(new_block, size, mem_path);
2821 if (!new_block->host) {
2822 new_block->host = qemu_vmalloc(size);
e78815a5 2823 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2824 }
c902760f 2825#else
6977dfe6
YT
2826 fprintf(stderr, "-mem-path option unsupported\n");
2827 exit(1);
c902760f 2828#endif
6977dfe6 2829 } else {
6b02494d 2830#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2831 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2832 an system defined value, which is at least 256GB. Larger systems
2833 have larger values. We put the guest between the end of data
2834 segment (system break) and this value. We use 32GB as a base to
2835 have enough room for the system break to grow. */
2836 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2837 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2838 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2839 if (new_block->host == MAP_FAILED) {
2840 fprintf(stderr, "Allocating RAM failed\n");
2841 abort();
2842 }
6b02494d 2843#else
868bb33f 2844 if (xen_enabled()) {
fce537d4 2845 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2846 } else {
2847 new_block->host = qemu_vmalloc(size);
2848 }
6b02494d 2849#endif
e78815a5 2850 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2851 }
c902760f 2852 }
94a6b54f
PB
2853 new_block->length = size;
2854
f471a17e 2855 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2856
7267c094 2857 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2858 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2859 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2860 0xff, size >> TARGET_PAGE_BITS);
2861
6f0437e8
JK
2862 if (kvm_enabled())
2863 kvm_setup_guest_memory(new_block->host, size);
2864
94a6b54f
PB
2865 return new_block->offset;
2866}
e9a1ab19 2867
c5705a77 2868ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2869{
c5705a77 2870 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2871}
2872
1f2e98b6
AW
2873void qemu_ram_free_from_ptr(ram_addr_t addr)
2874{
2875 RAMBlock *block;
2876
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
2878 if (addr == block->offset) {
2879 QLIST_REMOVE(block, next);
7267c094 2880 g_free(block);
1f2e98b6
AW
2881 return;
2882 }
2883 }
2884}
2885
c227f099 2886void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2887{
04b16653
AW
2888 RAMBlock *block;
2889
2890 QLIST_FOREACH(block, &ram_list.blocks, next) {
2891 if (addr == block->offset) {
2892 QLIST_REMOVE(block, next);
cd19cfa2
HY
2893 if (block->flags & RAM_PREALLOC_MASK) {
2894 ;
2895 } else if (mem_path) {
04b16653
AW
2896#if defined (__linux__) && !defined(TARGET_S390X)
2897 if (block->fd) {
2898 munmap(block->host, block->length);
2899 close(block->fd);
2900 } else {
2901 qemu_vfree(block->host);
2902 }
fd28aa13
JK
2903#else
2904 abort();
04b16653
AW
2905#endif
2906 } else {
2907#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2908 munmap(block->host, block->length);
2909#else
868bb33f 2910 if (xen_enabled()) {
e41d7c69 2911 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2912 } else {
2913 qemu_vfree(block->host);
2914 }
04b16653
AW
2915#endif
2916 }
7267c094 2917 g_free(block);
04b16653
AW
2918 return;
2919 }
2920 }
2921
e9a1ab19
FB
2922}
2923
cd19cfa2
HY
2924#ifndef _WIN32
2925void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2926{
2927 RAMBlock *block;
2928 ram_addr_t offset;
2929 int flags;
2930 void *area, *vaddr;
2931
2932 QLIST_FOREACH(block, &ram_list.blocks, next) {
2933 offset = addr - block->offset;
2934 if (offset < block->length) {
2935 vaddr = block->host + offset;
2936 if (block->flags & RAM_PREALLOC_MASK) {
2937 ;
2938 } else {
2939 flags = MAP_FIXED;
2940 munmap(vaddr, length);
2941 if (mem_path) {
2942#if defined(__linux__) && !defined(TARGET_S390X)
2943 if (block->fd) {
2944#ifdef MAP_POPULATE
2945 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2946 MAP_PRIVATE;
2947#else
2948 flags |= MAP_PRIVATE;
2949#endif
2950 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2951 flags, block->fd, offset);
2952 } else {
2953 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2954 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2955 flags, -1, 0);
2956 }
fd28aa13
JK
2957#else
2958 abort();
cd19cfa2
HY
2959#endif
2960 } else {
2961#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2962 flags |= MAP_SHARED | MAP_ANONYMOUS;
2963 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2964 flags, -1, 0);
2965#else
2966 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2967 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2968 flags, -1, 0);
2969#endif
2970 }
2971 if (area != vaddr) {
f15fbc4b
AP
2972 fprintf(stderr, "Could not remap addr: "
2973 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2974 length, addr);
2975 exit(1);
2976 }
2977 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2978 }
2979 return;
2980 }
2981 }
2982}
2983#endif /* !_WIN32 */
2984
dc828ca1 2985/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2986 With the exception of the softmmu code in this file, this should
2987 only be used for local memory (e.g. video ram) that the device owns,
2988 and knows it isn't going to access beyond the end of the block.
2989
2990 It should not be used for general purpose DMA.
2991 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2992 */
c227f099 2993void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2994{
94a6b54f
PB
2995 RAMBlock *block;
2996
f471a17e
AW
2997 QLIST_FOREACH(block, &ram_list.blocks, next) {
2998 if (addr - block->offset < block->length) {
7d82af38
VP
2999 /* Move this entry to to start of the list. */
3000 if (block != QLIST_FIRST(&ram_list.blocks)) {
3001 QLIST_REMOVE(block, next);
3002 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3003 }
868bb33f 3004 if (xen_enabled()) {
432d268c
JN
3005 /* We need to check if the requested address is in the RAM
3006 * because we don't want to map the entire memory in QEMU.
712c2b41 3007 * In that case just map until the end of the page.
432d268c
JN
3008 */
3009 if (block->offset == 0) {
e41d7c69 3010 return xen_map_cache(addr, 0, 0);
432d268c 3011 } else if (block->host == NULL) {
e41d7c69
JK
3012 block->host =
3013 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3014 }
3015 }
f471a17e
AW
3016 return block->host + (addr - block->offset);
3017 }
94a6b54f 3018 }
f471a17e
AW
3019
3020 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3021 abort();
3022
3023 return NULL;
dc828ca1
PB
3024}
3025
b2e0a138
MT
3026/* Return a host pointer to ram allocated with qemu_ram_alloc.
3027 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3028 */
3029void *qemu_safe_ram_ptr(ram_addr_t addr)
3030{
3031 RAMBlock *block;
3032
3033 QLIST_FOREACH(block, &ram_list.blocks, next) {
3034 if (addr - block->offset < block->length) {
868bb33f 3035 if (xen_enabled()) {
432d268c
JN
3036 /* We need to check if the requested address is in the RAM
3037 * because we don't want to map the entire memory in QEMU.
712c2b41 3038 * In that case just map until the end of the page.
432d268c
JN
3039 */
3040 if (block->offset == 0) {
e41d7c69 3041 return xen_map_cache(addr, 0, 0);
432d268c 3042 } else if (block->host == NULL) {
e41d7c69
JK
3043 block->host =
3044 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3045 }
3046 }
b2e0a138
MT
3047 return block->host + (addr - block->offset);
3048 }
3049 }
3050
3051 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3052 abort();
3053
3054 return NULL;
3055}
3056
38bee5dc
SS
3057/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3058 * but takes a size argument */
8ab934f9 3059void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3060{
8ab934f9
SS
3061 if (*size == 0) {
3062 return NULL;
3063 }
868bb33f 3064 if (xen_enabled()) {
e41d7c69 3065 return xen_map_cache(addr, *size, 1);
868bb33f 3066 } else {
38bee5dc
SS
3067 RAMBlock *block;
3068
3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
3070 if (addr - block->offset < block->length) {
3071 if (addr - block->offset + *size > block->length)
3072 *size = block->length - addr + block->offset;
3073 return block->host + (addr - block->offset);
3074 }
3075 }
3076
3077 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3078 abort();
38bee5dc
SS
3079 }
3080}
3081
050a0ddf
AP
3082void qemu_put_ram_ptr(void *addr)
3083{
3084 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3085}
3086
e890261f 3087int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3088{
94a6b54f
PB
3089 RAMBlock *block;
3090 uint8_t *host = ptr;
3091
868bb33f 3092 if (xen_enabled()) {
e41d7c69 3093 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3094 return 0;
3095 }
3096
f471a17e 3097 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3098 /* This case append when the block is not mapped. */
3099 if (block->host == NULL) {
3100 continue;
3101 }
f471a17e 3102 if (host - block->host < block->length) {
e890261f
MT
3103 *ram_addr = block->offset + (host - block->host);
3104 return 0;
f471a17e 3105 }
94a6b54f 3106 }
432d268c 3107
e890261f
MT
3108 return -1;
3109}
f471a17e 3110
e890261f
MT
3111/* Some of the softmmu routines need to translate from a host pointer
3112 (typically a TLB entry) back to a ram offset. */
3113ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3114{
3115 ram_addr_t ram_addr;
f471a17e 3116
e890261f
MT
3117 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3118 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3119 abort();
3120 }
3121 return ram_addr;
5579c7f3
PB
3122}
3123
0e0df1e2
AK
3124static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3125 unsigned size)
e18231a3
BS
3126{
3127#ifdef DEBUG_UNASSIGNED
3128 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3129#endif
5b450407 3130#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3131 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3132#endif
3133 return 0;
3134}
3135
0e0df1e2
AK
3136static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3137 uint64_t val, unsigned size)
e18231a3
BS
3138{
3139#ifdef DEBUG_UNASSIGNED
0e0df1e2 3140 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3141#endif
5b450407 3142#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3143 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3144#endif
33417e70
FB
3145}
3146
0e0df1e2
AK
3147static const MemoryRegionOps unassigned_mem_ops = {
3148 .read = unassigned_mem_read,
3149 .write = unassigned_mem_write,
3150 .endianness = DEVICE_NATIVE_ENDIAN,
3151};
e18231a3 3152
0e0df1e2
AK
3153static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3154 unsigned size)
e18231a3 3155{
0e0df1e2 3156 abort();
e18231a3
BS
3157}
3158
0e0df1e2
AK
3159static void error_mem_write(void *opaque, target_phys_addr_t addr,
3160 uint64_t value, unsigned size)
e18231a3 3161{
0e0df1e2 3162 abort();
33417e70
FB
3163}
3164
0e0df1e2
AK
3165static const MemoryRegionOps error_mem_ops = {
3166 .read = error_mem_read,
3167 .write = error_mem_write,
3168 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3169};
3170
0e0df1e2
AK
3171static const MemoryRegionOps rom_mem_ops = {
3172 .read = error_mem_read,
3173 .write = unassigned_mem_write,
3174 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3175};
3176
0e0df1e2
AK
3177static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3178 uint64_t val, unsigned size)
9fa3e853 3179{
3a7d929e 3180 int dirty_flags;
f7c11b53 3181 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3182 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3183#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3184 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3185 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3186#endif
3a7d929e 3187 }
0e0df1e2
AK
3188 switch (size) {
3189 case 1:
3190 stb_p(qemu_get_ram_ptr(ram_addr), val);
3191 break;
3192 case 2:
3193 stw_p(qemu_get_ram_ptr(ram_addr), val);
3194 break;
3195 case 4:
3196 stl_p(qemu_get_ram_ptr(ram_addr), val);
3197 break;
3198 default:
3199 abort();
3a7d929e 3200 }
f23db169 3201 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3202 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3203 /* we remove the notdirty callback only if the code has been
3204 flushed */
3205 if (dirty_flags == 0xff)
2e70f6ef 3206 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3207}
3208
0e0df1e2
AK
3209static const MemoryRegionOps notdirty_mem_ops = {
3210 .read = error_mem_read,
3211 .write = notdirty_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3213};
3214
0f459d16 3215/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3216static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3217{
3218 CPUState *env = cpu_single_env;
06d55cc1
AL
3219 target_ulong pc, cs_base;
3220 TranslationBlock *tb;
0f459d16 3221 target_ulong vaddr;
a1d1bb31 3222 CPUWatchpoint *wp;
06d55cc1 3223 int cpu_flags;
0f459d16 3224
06d55cc1
AL
3225 if (env->watchpoint_hit) {
3226 /* We re-entered the check after replacing the TB. Now raise
3227 * the debug interrupt so that is will trigger after the
3228 * current instruction. */
3229 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3230 return;
3231 }
2e70f6ef 3232 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3233 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3234 if ((vaddr == (wp->vaddr & len_mask) ||
3235 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3236 wp->flags |= BP_WATCHPOINT_HIT;
3237 if (!env->watchpoint_hit) {
3238 env->watchpoint_hit = wp;
3239 tb = tb_find_pc(env->mem_io_pc);
3240 if (!tb) {
3241 cpu_abort(env, "check_watchpoint: could not find TB for "
3242 "pc=%p", (void *)env->mem_io_pc);
3243 }
618ba8e6 3244 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3245 tb_phys_invalidate(tb, -1);
3246 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3247 env->exception_index = EXCP_DEBUG;
3248 } else {
3249 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3250 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3251 }
3252 cpu_resume_from_signal(env, NULL);
06d55cc1 3253 }
6e140f28
AL
3254 } else {
3255 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3256 }
3257 }
3258}
3259
6658ffb8
PB
3260/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3261 so these check for a hit then pass through to the normal out-of-line
3262 phys routines. */
1ec9b909
AK
3263static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3264 unsigned size)
6658ffb8 3265{
1ec9b909
AK
3266 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3267 switch (size) {
3268 case 1: return ldub_phys(addr);
3269 case 2: return lduw_phys(addr);
3270 case 4: return ldl_phys(addr);
3271 default: abort();
3272 }
6658ffb8
PB
3273}
3274
1ec9b909
AK
3275static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3276 uint64_t val, unsigned size)
6658ffb8 3277{
1ec9b909
AK
3278 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3279 switch (size) {
3280 case 1: stb_phys(addr, val);
3281 case 2: stw_phys(addr, val);
3282 case 4: stl_phys(addr, val);
3283 default: abort();
3284 }
6658ffb8
PB
3285}
3286
1ec9b909
AK
3287static const MemoryRegionOps watch_mem_ops = {
3288 .read = watch_mem_read,
3289 .write = watch_mem_write,
3290 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3291};
6658ffb8 3292
70c68e44
AK
3293static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3294 unsigned len)
db7b5426 3295{
70c68e44 3296 subpage_t *mmio = opaque;
f6405247 3297 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3298#if defined(DEBUG_SUBPAGE)
3299 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3300 mmio, len, addr, idx);
3301#endif
db7b5426 3302
f6405247
RH
3303 addr += mmio->region_offset[idx];
3304 idx = mmio->sub_io_index[idx];
70c68e44 3305 return io_mem_read(idx, addr, len);
db7b5426
BS
3306}
3307
70c68e44
AK
3308static void subpage_write(void *opaque, target_phys_addr_t addr,
3309 uint64_t value, unsigned len)
db7b5426 3310{
70c68e44 3311 subpage_t *mmio = opaque;
f6405247 3312 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3313#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3314 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3315 " idx %d value %"PRIx64"\n",
f6405247 3316 __func__, mmio, len, addr, idx, value);
db7b5426 3317#endif
f6405247
RH
3318
3319 addr += mmio->region_offset[idx];
3320 idx = mmio->sub_io_index[idx];
70c68e44 3321 io_mem_write(idx, addr, value, len);
db7b5426
BS
3322}
3323
70c68e44
AK
3324static const MemoryRegionOps subpage_ops = {
3325 .read = subpage_read,
3326 .write = subpage_write,
3327 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3328};
3329
de712f94
AK
3330static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3331 unsigned size)
56384e8b
AF
3332{
3333 ram_addr_t raddr = addr;
3334 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3335 switch (size) {
3336 case 1: return ldub_p(ptr);
3337 case 2: return lduw_p(ptr);
3338 case 4: return ldl_p(ptr);
3339 default: abort();
3340 }
56384e8b
AF
3341}
3342
de712f94
AK
3343static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3344 uint64_t value, unsigned size)
56384e8b
AF
3345{
3346 ram_addr_t raddr = addr;
3347 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3348 switch (size) {
3349 case 1: return stb_p(ptr, value);
3350 case 2: return stw_p(ptr, value);
3351 case 4: return stl_p(ptr, value);
3352 default: abort();
3353 }
56384e8b
AF
3354}
3355
de712f94
AK
3356static const MemoryRegionOps subpage_ram_ops = {
3357 .read = subpage_ram_read,
3358 .write = subpage_ram_write,
3359 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3360};
3361
c227f099
AL
3362static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3363 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3364{
3365 int idx, eidx;
3366
3367 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3368 return -1;
3369 idx = SUBPAGE_IDX(start);
3370 eidx = SUBPAGE_IDX(end);
3371#if defined(DEBUG_SUBPAGE)
0bf9e31a 3372 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3373 mmio, start, end, idx, eidx, memory);
3374#endif
0e0df1e2 3375 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
de712f94 3376 memory = io_mem_subpage_ram.ram_addr;
56384e8b 3377 }
f6405247 3378 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3379 for (; idx <= eidx; idx++) {
f6405247
RH
3380 mmio->sub_io_index[idx] = memory;
3381 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3382 }
3383
3384 return 0;
3385}
3386
f6405247
RH
3387static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3388 ram_addr_t orig_memory,
3389 ram_addr_t region_offset)
db7b5426 3390{
c227f099 3391 subpage_t *mmio;
db7b5426
BS
3392 int subpage_memory;
3393
7267c094 3394 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3395
3396 mmio->base = base;
70c68e44
AK
3397 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3398 "subpage", TARGET_PAGE_SIZE);
3399 subpage_memory = mmio->iomem.ram_addr;
db7b5426 3400#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3401 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3402 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3403#endif
1eec614b 3404 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3405 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3406
3407 return mmio;
3408}
3409
88715657
AL
3410static int get_free_io_mem_idx(void)
3411{
3412 int i;
3413
3414 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3415 if (!io_mem_used[i]) {
3416 io_mem_used[i] = 1;
3417 return i;
3418 }
c6703b47 3419 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3420 return -1;
3421}
3422
33417e70
FB
3423/* mem_read and mem_write are arrays of functions containing the
3424 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3425 2). Functions can be omitted with a NULL function pointer.
3ee89922 3426 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3427 modified. If it is zero, a new io zone is allocated. The return
3428 value can be used with cpu_register_physical_memory(). (-1) is
3429 returned if error. */
1eed09cb 3430static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3431 CPUReadMemoryFunc * const *mem_read,
3432 CPUWriteMemoryFunc * const *mem_write,
be675c97 3433 void *opaque)
33417e70 3434{
3cab721d
RH
3435 int i;
3436
33417e70 3437 if (io_index <= 0) {
88715657
AL
3438 io_index = get_free_io_mem_idx();
3439 if (io_index == -1)
3440 return io_index;
33417e70 3441 } else {
1eed09cb 3442 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3443 if (io_index >= IO_MEM_NB_ENTRIES)
3444 return -1;
3445 }
b5ff1b31 3446
3cab721d 3447 for (i = 0; i < 3; ++i) {
0e0df1e2
AK
3448 assert(mem_read[i]);
3449 _io_mem_read[io_index][i] = mem_read[i];
3cab721d
RH
3450 }
3451 for (i = 0; i < 3; ++i) {
0e0df1e2
AK
3452 assert(mem_write[i]);
3453 _io_mem_write[io_index][i] = mem_write[i];
3cab721d 3454 }
a4193c8a 3455 io_mem_opaque[io_index] = opaque;
f6405247
RH
3456
3457 return (io_index << IO_MEM_SHIFT);
33417e70 3458}
61382a50 3459
d60efc6b
BS
3460int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3461 CPUWriteMemoryFunc * const *mem_write,
be675c97 3462 void *opaque)
1eed09cb 3463{
be675c97 3464 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1eed09cb
AK
3465}
3466
88715657
AL
3467void cpu_unregister_io_memory(int io_table_address)
3468{
3469 int i;
3470 int io_index = io_table_address >> IO_MEM_SHIFT;
3471
3472 for (i=0;i < 3; i++) {
0e0df1e2
AK
3473 _io_mem_read[io_index][i] = NULL;
3474 _io_mem_write[io_index][i] = NULL;
88715657
AL
3475 }
3476 io_mem_opaque[io_index] = NULL;
3477 io_mem_used[io_index] = 0;
3478}
3479
e9179ce1
AK
3480static void io_mem_init(void)
3481{
3482 int i;
3483
0e0df1e2
AK
3484 /* Must be first: */
3485 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3486 assert(io_mem_ram.ram_addr == 0);
3487 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3488 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3489 "unassigned", UINT64_MAX);
3490 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3491 "notdirty", UINT64_MAX);
de712f94
AK
3492 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3493 "subpage-ram", UINT64_MAX);
e9179ce1
AK
3494 for (i=0; i<5; i++)
3495 io_mem_used[i] = 1;
3496
1ec9b909
AK
3497 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3498 "watch", UINT64_MAX);
e9179ce1
AK
3499}
3500
62152b8a
AK
3501static void memory_map_init(void)
3502{
7267c094 3503 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3504 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3505 set_system_memory_map(system_memory);
309cb471 3506
7267c094 3507 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3508 memory_region_init(system_io, "io", 65536);
3509 set_system_io_map(system_io);
62152b8a
AK
3510}
3511
3512MemoryRegion *get_system_memory(void)
3513{
3514 return system_memory;
3515}
3516
309cb471
AK
3517MemoryRegion *get_system_io(void)
3518{
3519 return system_io;
3520}
3521
e2eef170
PB
3522#endif /* !defined(CONFIG_USER_ONLY) */
3523
13eb76e0
FB
3524/* physical memory access (slow version, mainly for debug) */
3525#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3526int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3527 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3528{
3529 int l, flags;
3530 target_ulong page;
53a5960a 3531 void * p;
13eb76e0
FB
3532
3533 while (len > 0) {
3534 page = addr & TARGET_PAGE_MASK;
3535 l = (page + TARGET_PAGE_SIZE) - addr;
3536 if (l > len)
3537 l = len;
3538 flags = page_get_flags(page);
3539 if (!(flags & PAGE_VALID))
a68fe89c 3540 return -1;
13eb76e0
FB
3541 if (is_write) {
3542 if (!(flags & PAGE_WRITE))
a68fe89c 3543 return -1;
579a97f7 3544 /* XXX: this code should not depend on lock_user */
72fb7daa 3545 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3546 return -1;
72fb7daa
AJ
3547 memcpy(p, buf, l);
3548 unlock_user(p, addr, l);
13eb76e0
FB
3549 } else {
3550 if (!(flags & PAGE_READ))
a68fe89c 3551 return -1;
579a97f7 3552 /* XXX: this code should not depend on lock_user */
72fb7daa 3553 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3554 return -1;
72fb7daa 3555 memcpy(buf, p, l);
5b257578 3556 unlock_user(p, addr, 0);
13eb76e0
FB
3557 }
3558 len -= l;
3559 buf += l;
3560 addr += l;
3561 }
a68fe89c 3562 return 0;
13eb76e0 3563}
8df1cd07 3564
13eb76e0 3565#else
c227f099 3566void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3567 int len, int is_write)
3568{
3569 int l, io_index;
3570 uint8_t *ptr;
3571 uint32_t val;
c227f099 3572 target_phys_addr_t page;
8ca5692d 3573 ram_addr_t pd;
f1f6e3b8 3574 PhysPageDesc p;
3b46e624 3575
13eb76e0
FB
3576 while (len > 0) {
3577 page = addr & TARGET_PAGE_MASK;
3578 l = (page + TARGET_PAGE_SIZE) - addr;
3579 if (l > len)
3580 l = len;
92e873b9 3581 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3582 pd = p.phys_offset;
3b46e624 3583
13eb76e0 3584 if (is_write) {
0e0df1e2 3585 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
f1f6e3b8 3586 target_phys_addr_t addr1;
13eb76e0 3587 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3588 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3589 /* XXX: could force cpu_single_env to NULL to avoid
3590 potential bugs */
6c2934db 3591 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3592 /* 32 bit write access */
c27004ec 3593 val = ldl_p(buf);
acbbec5d 3594 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3595 l = 4;
6c2934db 3596 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3597 /* 16 bit write access */
c27004ec 3598 val = lduw_p(buf);
acbbec5d 3599 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3600 l = 2;
3601 } else {
1c213d19 3602 /* 8 bit write access */
c27004ec 3603 val = ldub_p(buf);
acbbec5d 3604 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3605 l = 1;
3606 }
3607 } else {
8ca5692d 3608 ram_addr_t addr1;
b448f2f3 3609 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3610 /* RAM case */
5579c7f3 3611 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3612 memcpy(ptr, buf, l);
3a7d929e
FB
3613 if (!cpu_physical_memory_is_dirty(addr1)) {
3614 /* invalidate code */
3615 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3616 /* set dirty bit */
f7c11b53
YT
3617 cpu_physical_memory_set_dirty_flags(
3618 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3619 }
050a0ddf 3620 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3621 }
3622 } else {
1d393fa2 3623 if (!is_ram_rom_romd(pd)) {
f1f6e3b8 3624 target_phys_addr_t addr1;
13eb76e0
FB
3625 /* I/O case */
3626 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3627 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3628 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3629 /* 32 bit read access */
acbbec5d 3630 val = io_mem_read(io_index, addr1, 4);
c27004ec 3631 stl_p(buf, val);
13eb76e0 3632 l = 4;
6c2934db 3633 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3634 /* 16 bit read access */
acbbec5d 3635 val = io_mem_read(io_index, addr1, 2);
c27004ec 3636 stw_p(buf, val);
13eb76e0
FB
3637 l = 2;
3638 } else {
1c213d19 3639 /* 8 bit read access */
acbbec5d 3640 val = io_mem_read(io_index, addr1, 1);
c27004ec 3641 stb_p(buf, val);
13eb76e0
FB
3642 l = 1;
3643 }
3644 } else {
3645 /* RAM case */
050a0ddf
AP
3646 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3647 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3648 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3649 }
3650 }
3651 len -= l;
3652 buf += l;
3653 addr += l;
3654 }
3655}
8df1cd07 3656
d0ecd2aa 3657/* used for ROM loading : can write in RAM and ROM */
c227f099 3658void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3659 const uint8_t *buf, int len)
3660{
3661 int l;
3662 uint8_t *ptr;
c227f099 3663 target_phys_addr_t page;
d0ecd2aa 3664 unsigned long pd;
f1f6e3b8 3665 PhysPageDesc p;
3b46e624 3666
d0ecd2aa
FB
3667 while (len > 0) {
3668 page = addr & TARGET_PAGE_MASK;
3669 l = (page + TARGET_PAGE_SIZE) - addr;
3670 if (l > len)
3671 l = len;
3672 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3673 pd = p.phys_offset;
3b46e624 3674
1d393fa2 3675 if (!is_ram_rom_romd(pd)) {
d0ecd2aa
FB
3676 /* do nothing */
3677 } else {
3678 unsigned long addr1;
3679 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3680 /* ROM/RAM case */
5579c7f3 3681 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3682 memcpy(ptr, buf, l);
050a0ddf 3683 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3684 }
3685 len -= l;
3686 buf += l;
3687 addr += l;
3688 }
3689}
3690
6d16c2f8
AL
3691typedef struct {
3692 void *buffer;
c227f099
AL
3693 target_phys_addr_t addr;
3694 target_phys_addr_t len;
6d16c2f8
AL
3695} BounceBuffer;
3696
3697static BounceBuffer bounce;
3698
ba223c29
AL
3699typedef struct MapClient {
3700 void *opaque;
3701 void (*callback)(void *opaque);
72cf2d4f 3702 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3703} MapClient;
3704
72cf2d4f
BS
3705static QLIST_HEAD(map_client_list, MapClient) map_client_list
3706 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3707
3708void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3709{
7267c094 3710 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3711
3712 client->opaque = opaque;
3713 client->callback = callback;
72cf2d4f 3714 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3715 return client;
3716}
3717
3718void cpu_unregister_map_client(void *_client)
3719{
3720 MapClient *client = (MapClient *)_client;
3721
72cf2d4f 3722 QLIST_REMOVE(client, link);
7267c094 3723 g_free(client);
ba223c29
AL
3724}
3725
3726static void cpu_notify_map_clients(void)
3727{
3728 MapClient *client;
3729
72cf2d4f
BS
3730 while (!QLIST_EMPTY(&map_client_list)) {
3731 client = QLIST_FIRST(&map_client_list);
ba223c29 3732 client->callback(client->opaque);
34d5e948 3733 cpu_unregister_map_client(client);
ba223c29
AL
3734 }
3735}
3736
6d16c2f8
AL
3737/* Map a physical memory region into a host virtual address.
3738 * May map a subset of the requested range, given by and returned in *plen.
3739 * May return NULL if resources needed to perform the mapping are exhausted.
3740 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3741 * Use cpu_register_map_client() to know when retrying the map operation is
3742 * likely to succeed.
6d16c2f8 3743 */
c227f099
AL
3744void *cpu_physical_memory_map(target_phys_addr_t addr,
3745 target_phys_addr_t *plen,
6d16c2f8
AL
3746 int is_write)
3747{
c227f099 3748 target_phys_addr_t len = *plen;
38bee5dc 3749 target_phys_addr_t todo = 0;
6d16c2f8 3750 int l;
c227f099 3751 target_phys_addr_t page;
6d16c2f8 3752 unsigned long pd;
f1f6e3b8 3753 PhysPageDesc p;
f15fbc4b 3754 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3755 ram_addr_t rlen;
3756 void *ret;
6d16c2f8
AL
3757
3758 while (len > 0) {
3759 page = addr & TARGET_PAGE_MASK;
3760 l = (page + TARGET_PAGE_SIZE) - addr;
3761 if (l > len)
3762 l = len;
3763 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3764 pd = p.phys_offset;
6d16c2f8 3765
0e0df1e2 3766 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
38bee5dc 3767 if (todo || bounce.buffer) {
6d16c2f8
AL
3768 break;
3769 }
3770 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3771 bounce.addr = addr;
3772 bounce.len = l;
3773 if (!is_write) {
54f7b4a3 3774 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3775 }
38bee5dc
SS
3776
3777 *plen = l;
3778 return bounce.buffer;
6d16c2f8 3779 }
8ab934f9
SS
3780 if (!todo) {
3781 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3782 }
6d16c2f8
AL
3783
3784 len -= l;
3785 addr += l;
38bee5dc 3786 todo += l;
6d16c2f8 3787 }
8ab934f9
SS
3788 rlen = todo;
3789 ret = qemu_ram_ptr_length(raddr, &rlen);
3790 *plen = rlen;
3791 return ret;
6d16c2f8
AL
3792}
3793
3794/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3795 * Will also mark the memory as dirty if is_write == 1. access_len gives
3796 * the amount of memory that was actually read or written by the caller.
3797 */
c227f099
AL
3798void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3799 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3800{
3801 if (buffer != bounce.buffer) {
3802 if (is_write) {
e890261f 3803 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3804 while (access_len) {
3805 unsigned l;
3806 l = TARGET_PAGE_SIZE;
3807 if (l > access_len)
3808 l = access_len;
3809 if (!cpu_physical_memory_is_dirty(addr1)) {
3810 /* invalidate code */
3811 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3812 /* set dirty bit */
f7c11b53
YT
3813 cpu_physical_memory_set_dirty_flags(
3814 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3815 }
3816 addr1 += l;
3817 access_len -= l;
3818 }
3819 }
868bb33f 3820 if (xen_enabled()) {
e41d7c69 3821 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3822 }
6d16c2f8
AL
3823 return;
3824 }
3825 if (is_write) {
3826 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3827 }
f8a83245 3828 qemu_vfree(bounce.buffer);
6d16c2f8 3829 bounce.buffer = NULL;
ba223c29 3830 cpu_notify_map_clients();
6d16c2f8 3831}
d0ecd2aa 3832
8df1cd07 3833/* warning: addr must be aligned */
1e78bcc1
AG
3834static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3835 enum device_endian endian)
8df1cd07
FB
3836{
3837 int io_index;
3838 uint8_t *ptr;
3839 uint32_t val;
3840 unsigned long pd;
f1f6e3b8 3841 PhysPageDesc p;
8df1cd07
FB
3842
3843 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3844 pd = p.phys_offset;
3b46e624 3845
1d393fa2 3846 if (!is_ram_rom_romd(pd)) {
8df1cd07
FB
3847 /* I/O case */
3848 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3849 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3850 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3851#if defined(TARGET_WORDS_BIGENDIAN)
3852 if (endian == DEVICE_LITTLE_ENDIAN) {
3853 val = bswap32(val);
3854 }
3855#else
3856 if (endian == DEVICE_BIG_ENDIAN) {
3857 val = bswap32(val);
3858 }
3859#endif
8df1cd07
FB
3860 } else {
3861 /* RAM case */
5579c7f3 3862 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3863 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3864 switch (endian) {
3865 case DEVICE_LITTLE_ENDIAN:
3866 val = ldl_le_p(ptr);
3867 break;
3868 case DEVICE_BIG_ENDIAN:
3869 val = ldl_be_p(ptr);
3870 break;
3871 default:
3872 val = ldl_p(ptr);
3873 break;
3874 }
8df1cd07
FB
3875 }
3876 return val;
3877}
3878
1e78bcc1
AG
3879uint32_t ldl_phys(target_phys_addr_t addr)
3880{
3881 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3882}
3883
3884uint32_t ldl_le_phys(target_phys_addr_t addr)
3885{
3886 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3887}
3888
3889uint32_t ldl_be_phys(target_phys_addr_t addr)
3890{
3891 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3892}
3893
84b7b8e7 3894/* warning: addr must be aligned */
1e78bcc1
AG
3895static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3896 enum device_endian endian)
84b7b8e7
FB
3897{
3898 int io_index;
3899 uint8_t *ptr;
3900 uint64_t val;
3901 unsigned long pd;
f1f6e3b8 3902 PhysPageDesc p;
84b7b8e7
FB
3903
3904 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3905 pd = p.phys_offset;
3b46e624 3906
1d393fa2 3907 if (!is_ram_rom_romd(pd)) {
84b7b8e7
FB
3908 /* I/O case */
3909 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3910 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
3911
3912 /* XXX This is broken when device endian != cpu endian.
3913 Fix and add "endian" variable check */
84b7b8e7 3914#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
3915 val = io_mem_read(io_index, addr, 4) << 32;
3916 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 3917#else
acbbec5d
AK
3918 val = io_mem_read(io_index, addr, 4);
3919 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
3920#endif
3921 } else {
3922 /* RAM case */
5579c7f3 3923 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 3924 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3925 switch (endian) {
3926 case DEVICE_LITTLE_ENDIAN:
3927 val = ldq_le_p(ptr);
3928 break;
3929 case DEVICE_BIG_ENDIAN:
3930 val = ldq_be_p(ptr);
3931 break;
3932 default:
3933 val = ldq_p(ptr);
3934 break;
3935 }
84b7b8e7
FB
3936 }
3937 return val;
3938}
3939
1e78bcc1
AG
3940uint64_t ldq_phys(target_phys_addr_t addr)
3941{
3942 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3943}
3944
3945uint64_t ldq_le_phys(target_phys_addr_t addr)
3946{
3947 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3948}
3949
3950uint64_t ldq_be_phys(target_phys_addr_t addr)
3951{
3952 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3953}
3954
aab33094 3955/* XXX: optimize */
c227f099 3956uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3957{
3958 uint8_t val;
3959 cpu_physical_memory_read(addr, &val, 1);
3960 return val;
3961}
3962
733f0b02 3963/* warning: addr must be aligned */
1e78bcc1
AG
3964static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3965 enum device_endian endian)
aab33094 3966{
733f0b02
MT
3967 int io_index;
3968 uint8_t *ptr;
3969 uint64_t val;
3970 unsigned long pd;
f1f6e3b8 3971 PhysPageDesc p;
733f0b02
MT
3972
3973 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3974 pd = p.phys_offset;
733f0b02 3975
1d393fa2 3976 if (!is_ram_rom_romd(pd)) {
733f0b02
MT
3977 /* I/O case */
3978 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3979 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3980 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
3981#if defined(TARGET_WORDS_BIGENDIAN)
3982 if (endian == DEVICE_LITTLE_ENDIAN) {
3983 val = bswap16(val);
3984 }
3985#else
3986 if (endian == DEVICE_BIG_ENDIAN) {
3987 val = bswap16(val);
3988 }
3989#endif
733f0b02
MT
3990 } else {
3991 /* RAM case */
3992 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3993 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3994 switch (endian) {
3995 case DEVICE_LITTLE_ENDIAN:
3996 val = lduw_le_p(ptr);
3997 break;
3998 case DEVICE_BIG_ENDIAN:
3999 val = lduw_be_p(ptr);
4000 break;
4001 default:
4002 val = lduw_p(ptr);
4003 break;
4004 }
733f0b02
MT
4005 }
4006 return val;
aab33094
FB
4007}
4008
1e78bcc1
AG
4009uint32_t lduw_phys(target_phys_addr_t addr)
4010{
4011 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4012}
4013
4014uint32_t lduw_le_phys(target_phys_addr_t addr)
4015{
4016 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4017}
4018
4019uint32_t lduw_be_phys(target_phys_addr_t addr)
4020{
4021 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4022}
4023
8df1cd07
FB
4024/* warning: addr must be aligned. The ram page is not masked as dirty
4025 and the code inside is not invalidated. It is useful if the dirty
4026 bits are used to track modified PTEs */
c227f099 4027void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4028{
4029 int io_index;
4030 uint8_t *ptr;
4031 unsigned long pd;
f1f6e3b8 4032 PhysPageDesc p;
8df1cd07
FB
4033
4034 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4035 pd = p.phys_offset;
3b46e624 4036
0e0df1e2 4037 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
8df1cd07 4038 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4039 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4040 io_mem_write(io_index, addr, val, 4);
8df1cd07 4041 } else {
74576198 4042 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4043 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4044 stl_p(ptr, val);
74576198
AL
4045
4046 if (unlikely(in_migration)) {
4047 if (!cpu_physical_memory_is_dirty(addr1)) {
4048 /* invalidate code */
4049 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4050 /* set dirty bit */
f7c11b53
YT
4051 cpu_physical_memory_set_dirty_flags(
4052 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4053 }
4054 }
8df1cd07
FB
4055 }
4056}
4057
c227f099 4058void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4059{
4060 int io_index;
4061 uint8_t *ptr;
4062 unsigned long pd;
f1f6e3b8 4063 PhysPageDesc p;
bc98a7ef
JM
4064
4065 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4066 pd = p.phys_offset;
3b46e624 4067
0e0df1e2 4068 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
bc98a7ef 4069 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4070 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4071#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4072 io_mem_write(io_index, addr, val >> 32, 4);
4073 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4074#else
acbbec5d
AK
4075 io_mem_write(io_index, addr, (uint32_t)val, 4);
4076 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4077#endif
4078 } else {
5579c7f3 4079 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4080 (addr & ~TARGET_PAGE_MASK);
4081 stq_p(ptr, val);
4082 }
4083}
4084
8df1cd07 4085/* warning: addr must be aligned */
1e78bcc1
AG
4086static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4087 enum device_endian endian)
8df1cd07
FB
4088{
4089 int io_index;
4090 uint8_t *ptr;
4091 unsigned long pd;
f1f6e3b8 4092 PhysPageDesc p;
8df1cd07
FB
4093
4094 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4095 pd = p.phys_offset;
3b46e624 4096
0e0df1e2 4097 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
8df1cd07 4098 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4099 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4100#if defined(TARGET_WORDS_BIGENDIAN)
4101 if (endian == DEVICE_LITTLE_ENDIAN) {
4102 val = bswap32(val);
4103 }
4104#else
4105 if (endian == DEVICE_BIG_ENDIAN) {
4106 val = bswap32(val);
4107 }
4108#endif
acbbec5d 4109 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4110 } else {
4111 unsigned long addr1;
4112 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4113 /* RAM case */
5579c7f3 4114 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4115 switch (endian) {
4116 case DEVICE_LITTLE_ENDIAN:
4117 stl_le_p(ptr, val);
4118 break;
4119 case DEVICE_BIG_ENDIAN:
4120 stl_be_p(ptr, val);
4121 break;
4122 default:
4123 stl_p(ptr, val);
4124 break;
4125 }
3a7d929e
FB
4126 if (!cpu_physical_memory_is_dirty(addr1)) {
4127 /* invalidate code */
4128 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4129 /* set dirty bit */
f7c11b53
YT
4130 cpu_physical_memory_set_dirty_flags(addr1,
4131 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4132 }
8df1cd07
FB
4133 }
4134}
4135
1e78bcc1
AG
4136void stl_phys(target_phys_addr_t addr, uint32_t val)
4137{
4138 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4139}
4140
4141void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4142{
4143 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4144}
4145
4146void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4147{
4148 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4149}
4150
aab33094 4151/* XXX: optimize */
c227f099 4152void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4153{
4154 uint8_t v = val;
4155 cpu_physical_memory_write(addr, &v, 1);
4156}
4157
733f0b02 4158/* warning: addr must be aligned */
1e78bcc1
AG
4159static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4160 enum device_endian endian)
aab33094 4161{
733f0b02
MT
4162 int io_index;
4163 uint8_t *ptr;
4164 unsigned long pd;
f1f6e3b8 4165 PhysPageDesc p;
733f0b02
MT
4166
4167 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4168 pd = p.phys_offset;
733f0b02 4169
0e0df1e2 4170 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
733f0b02 4171 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4172 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4173#if defined(TARGET_WORDS_BIGENDIAN)
4174 if (endian == DEVICE_LITTLE_ENDIAN) {
4175 val = bswap16(val);
4176 }
4177#else
4178 if (endian == DEVICE_BIG_ENDIAN) {
4179 val = bswap16(val);
4180 }
4181#endif
acbbec5d 4182 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4183 } else {
4184 unsigned long addr1;
4185 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4186 /* RAM case */
4187 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4188 switch (endian) {
4189 case DEVICE_LITTLE_ENDIAN:
4190 stw_le_p(ptr, val);
4191 break;
4192 case DEVICE_BIG_ENDIAN:
4193 stw_be_p(ptr, val);
4194 break;
4195 default:
4196 stw_p(ptr, val);
4197 break;
4198 }
733f0b02
MT
4199 if (!cpu_physical_memory_is_dirty(addr1)) {
4200 /* invalidate code */
4201 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4202 /* set dirty bit */
4203 cpu_physical_memory_set_dirty_flags(addr1,
4204 (0xff & ~CODE_DIRTY_FLAG));
4205 }
4206 }
aab33094
FB
4207}
4208
1e78bcc1
AG
4209void stw_phys(target_phys_addr_t addr, uint32_t val)
4210{
4211 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4212}
4213
4214void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4215{
4216 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4217}
4218
4219void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4220{
4221 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4222}
4223
aab33094 4224/* XXX: optimize */
c227f099 4225void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4226{
4227 val = tswap64(val);
71d2b725 4228 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4229}
4230
1e78bcc1
AG
4231void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4232{
4233 val = cpu_to_le64(val);
4234 cpu_physical_memory_write(addr, &val, 8);
4235}
4236
4237void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4238{
4239 val = cpu_to_be64(val);
4240 cpu_physical_memory_write(addr, &val, 8);
4241}
4242
5e2972fd 4243/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4244int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4245 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4246{
4247 int l;
c227f099 4248 target_phys_addr_t phys_addr;
9b3c35e0 4249 target_ulong page;
13eb76e0
FB
4250
4251 while (len > 0) {
4252 page = addr & TARGET_PAGE_MASK;
4253 phys_addr = cpu_get_phys_page_debug(env, page);
4254 /* if no physical page mapped, return an error */
4255 if (phys_addr == -1)
4256 return -1;
4257 l = (page + TARGET_PAGE_SIZE) - addr;
4258 if (l > len)
4259 l = len;
5e2972fd 4260 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4261 if (is_write)
4262 cpu_physical_memory_write_rom(phys_addr, buf, l);
4263 else
5e2972fd 4264 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4265 len -= l;
4266 buf += l;
4267 addr += l;
4268 }
4269 return 0;
4270}
a68fe89c 4271#endif
13eb76e0 4272
2e70f6ef
PB
4273/* in deterministic execution mode, instructions doing device I/Os
4274 must be at the end of the TB */
4275void cpu_io_recompile(CPUState *env, void *retaddr)
4276{
4277 TranslationBlock *tb;
4278 uint32_t n, cflags;
4279 target_ulong pc, cs_base;
4280 uint64_t flags;
4281
4282 tb = tb_find_pc((unsigned long)retaddr);
4283 if (!tb) {
4284 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4285 retaddr);
4286 }
4287 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4288 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4289 /* Calculate how many instructions had been executed before the fault
bf20dc07 4290 occurred. */
2e70f6ef
PB
4291 n = n - env->icount_decr.u16.low;
4292 /* Generate a new TB ending on the I/O insn. */
4293 n++;
4294 /* On MIPS and SH, delay slot instructions can only be restarted if
4295 they were already the first instruction in the TB. If this is not
bf20dc07 4296 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4297 branch. */
4298#if defined(TARGET_MIPS)
4299 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4300 env->active_tc.PC -= 4;
4301 env->icount_decr.u16.low++;
4302 env->hflags &= ~MIPS_HFLAG_BMASK;
4303 }
4304#elif defined(TARGET_SH4)
4305 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4306 && n > 1) {
4307 env->pc -= 2;
4308 env->icount_decr.u16.low++;
4309 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4310 }
4311#endif
4312 /* This should never happen. */
4313 if (n > CF_COUNT_MASK)
4314 cpu_abort(env, "TB too big during recompile");
4315
4316 cflags = n | CF_LAST_IO;
4317 pc = tb->pc;
4318 cs_base = tb->cs_base;
4319 flags = tb->flags;
4320 tb_phys_invalidate(tb, -1);
4321 /* FIXME: In theory this could raise an exception. In practice
4322 we have already translated the block once so it's probably ok. */
4323 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4324 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4325 the first in the TB) then we end up generating a whole new TB and
4326 repeating the fault, which is horribly inefficient.
4327 Better would be to execute just this insn uncached, or generate a
4328 second new TB. */
4329 cpu_resume_from_signal(env, NULL);
4330}
4331
b3755a91
PB
4332#if !defined(CONFIG_USER_ONLY)
4333
055403b2 4334void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4335{
4336 int i, target_code_size, max_target_code_size;
4337 int direct_jmp_count, direct_jmp2_count, cross_page;
4338 TranslationBlock *tb;
3b46e624 4339
e3db7226
FB
4340 target_code_size = 0;
4341 max_target_code_size = 0;
4342 cross_page = 0;
4343 direct_jmp_count = 0;
4344 direct_jmp2_count = 0;
4345 for(i = 0; i < nb_tbs; i++) {
4346 tb = &tbs[i];
4347 target_code_size += tb->size;
4348 if (tb->size > max_target_code_size)
4349 max_target_code_size = tb->size;
4350 if (tb->page_addr[1] != -1)
4351 cross_page++;
4352 if (tb->tb_next_offset[0] != 0xffff) {
4353 direct_jmp_count++;
4354 if (tb->tb_next_offset[1] != 0xffff) {
4355 direct_jmp2_count++;
4356 }
4357 }
4358 }
4359 /* XXX: avoid using doubles ? */
57fec1fe 4360 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4361 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4362 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4363 cpu_fprintf(f, "TB count %d/%d\n",
4364 nb_tbs, code_gen_max_blocks);
5fafdf24 4365 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4366 nb_tbs ? target_code_size / nb_tbs : 0,
4367 max_target_code_size);
055403b2 4368 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4369 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4370 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4371 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4372 cross_page,
e3db7226
FB
4373 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4374 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4375 direct_jmp_count,
e3db7226
FB
4376 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4377 direct_jmp2_count,
4378 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4379 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4380 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4381 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4382 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4383 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4384}
4385
d39e8222
AK
4386/* NOTE: this function can trigger an exception */
4387/* NOTE2: the returned address is not exactly the physical address: it
4388 is the offset relative to phys_ram_base */
4389tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4390{
4391 int mmu_idx, page_index, pd;
4392 void *p;
4393
4394 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4395 mmu_idx = cpu_mmu_index(env1);
4396 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4397 (addr & TARGET_PAGE_MASK))) {
4398 ldub_code(addr);
4399 }
4400 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
0e0df1e2
AK
4401 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4402 && !(pd & IO_MEM_ROMD)) {
d39e8222
AK
4403#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4404 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4405#else
4406 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4407#endif
4408 }
4409 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4410 return qemu_ram_addr_from_host_nofail(p);
4411}
4412
61382a50 4413#define MMUSUFFIX _cmmu
3917149d 4414#undef GETPC
61382a50
FB
4415#define GETPC() NULL
4416#define env cpu_single_env
b769d8fe 4417#define SOFTMMU_CODE_ACCESS
61382a50
FB
4418
4419#define SHIFT 0
4420#include "softmmu_template.h"
4421
4422#define SHIFT 1
4423#include "softmmu_template.h"
4424
4425#define SHIFT 2
4426#include "softmmu_template.h"
4427
4428#define SHIFT 3
4429#include "softmmu_template.h"
4430
4431#undef env
4432
4433#endif