/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

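/* Make the host memory range [addr, addr + size) executable:
   VirtualProtect on Win32, mprotect (rounded out to host page
   boundaries) elsewhere. */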
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

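/* Walk the multi-level l1_map table and return the PageDesc for the
   given page index, allocating any missing intermediate levels when
   'alloc' is non zero.  Returns NULL if the entry is absent and
   !alloc. */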
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
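/* Same walk as page_find_alloc(), but over the physical address map
   l1_phys_map; freshly allocated leaves point to io_mem_unassigned. */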
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

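/* Allocate the translated code buffer and the TB array.  On several
   hosts the buffer placement is constrained so that generated code can
   reach the prologue and other TBs with direct branches. */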
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

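/* Return the CPUState whose cpu_index equals 'cpu', or NULL if there
   is none. */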
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

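/* Add 'env' to the tail of the global CPU list, give it the next free
   cpu_index, initialize its breakpoint/watchpoint lists and, for
   system emulation, register its savevm state. */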
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

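/* Remove 'tb' from the physical hash table, from the page descriptor
   lists and from every CPU's tb_jmp_cache, then unchain all TBs that
   jump to it. */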
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

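/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab'. */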
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

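/* Translate a new TB for 'pc'/'cs_base'/'flags'.  If the TB array or
   the code buffer is full, flush everything and retry; the second
   tb_alloc() cannot fail. */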
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
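/* User-mode only: invalidate every TB on the page containing 'addr',
   typically after a write to a page that holds translated code. */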
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
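/* Invalidate the TB(s) covering 'pc' so that the breakpoint is seen
   the next time that address is translated and executed. */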
94df27fd
PB
1404#if defined(CONFIG_USER_ONLY)
1405static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406{
1407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408}
1409#else
d720b93d
FB
1410static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411{
c227f099 1412 target_phys_addr_t addr;
9b3c35e0 1413 target_ulong pd;
c227f099 1414 ram_addr_t ram_addr;
f1f6e3b8 1415 PhysPageDesc p;
d720b93d 1416
c2f07f81
PB
1417 addr = cpu_get_phys_page_debug(env, pc);
1418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 1419 pd = p.phys_offset;
c2f07f81 1420 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1421 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1422}
c27004ec 1423#endif
94df27fd 1424#endif /* TARGET_HAS_ICE */
d720b93d 1425
c527ee8f
PB
1426#if defined(CONFIG_USER_ONLY)
1427void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428
1429{
1430}
1431
1432int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 int flags, CPUWatchpoint **watchpoint)
1434{
1435 return -ENOSYS;
1436}
1437#else
6658ffb8 1438/* Add a watchpoint. */
a1d1bb31
AL
1439int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1441{
b4051334 1442 target_ulong len_mask = ~(len - 1);
c0ce998e 1443 CPUWatchpoint *wp;
6658ffb8 1444
b4051334
AL
1445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 return -EINVAL;
1450 }
7267c094 1451 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1452
1453 wp->vaddr = addr;
b4051334 1454 wp->len_mask = len_mask;
a1d1bb31
AL
1455 wp->flags = flags;
1456
2dc9f411 1457 /* keep all GDB-injected watchpoints in front */
c0ce998e 1458 if (flags & BP_GDB)
72cf2d4f 1459 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1460 else
72cf2d4f 1461 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1462
6658ffb8 1463 tlb_flush_page(env, addr);
a1d1bb31
AL
1464
1465 if (watchpoint)
1466 *watchpoint = wp;
1467 return 0;
6658ffb8
PB
1468}
1469
a1d1bb31
AL
1470/* Remove a specific watchpoint. */
1471int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags)
6658ffb8 1473{
b4051334 1474 target_ulong len_mask = ~(len - 1);
a1d1bb31 1475 CPUWatchpoint *wp;
6658ffb8 1476
72cf2d4f 1477 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1478 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1479 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1480 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1481 return 0;
1482 }
1483 }
a1d1bb31 1484 return -ENOENT;
6658ffb8
PB
1485}
1486
a1d1bb31
AL
1487/* Remove a specific watchpoint by reference. */
1488void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489{
72cf2d4f 1490 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1491
a1d1bb31
AL
1492 tlb_flush_page(env, watchpoint->vaddr);
1493
7267c094 1494 g_free(watchpoint);
a1d1bb31
AL
1495}
1496
1497/* Remove all matching watchpoints. */
1498void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499{
c0ce998e 1500 CPUWatchpoint *wp, *next;
a1d1bb31 1501
72cf2d4f 1502 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1503 if (wp->flags & mask)
1504 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1505 }
7d03f82f 1506}
c527ee8f 1507#endif
7d03f82f 1508
a1d1bb31
AL
1509/* Add a breakpoint. */
1510int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511 CPUBreakpoint **breakpoint)
4c3a88a2 1512{
1fddef4b 1513#if defined(TARGET_HAS_ICE)
c0ce998e 1514 CPUBreakpoint *bp;
3b46e624 1515
7267c094 1516 bp = g_malloc(sizeof(*bp));
4c3a88a2 1517
a1d1bb31
AL
1518 bp->pc = pc;
1519 bp->flags = flags;
1520
2dc9f411 1521 /* keep all GDB-injected breakpoints in front */
c0ce998e 1522 if (flags & BP_GDB)
72cf2d4f 1523 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1524 else
72cf2d4f 1525 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1526
d720b93d 1527 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1528
1529 if (breakpoint)
1530 *breakpoint = bp;
4c3a88a2
FB
1531 return 0;
1532#else
a1d1bb31 1533 return -ENOSYS;
4c3a88a2
FB
1534#endif
1535}
1536
a1d1bb31
AL
1537/* Remove a specific breakpoint. */
1538int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539{
7d03f82f 1540#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1541 CPUBreakpoint *bp;
1542
72cf2d4f 1543 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1544 if (bp->pc == pc && bp->flags == flags) {
1545 cpu_breakpoint_remove_by_ref(env, bp);
1546 return 0;
1547 }
7d03f82f 1548 }
a1d1bb31
AL
1549 return -ENOENT;
1550#else
1551 return -ENOSYS;
7d03f82f
EI
1552#endif
1553}
1554
a1d1bb31
AL
1555/* Remove a specific breakpoint by reference. */
1556void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1557{
1fddef4b 1558#if defined(TARGET_HAS_ICE)
72cf2d4f 1559 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1560
a1d1bb31
AL
1561 breakpoint_invalidate(env, breakpoint->pc);
1562
7267c094 1563 g_free(breakpoint);
a1d1bb31
AL
1564#endif
1565}
1566
1567/* Remove all matching breakpoints. */
1568void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569{
1570#if defined(TARGET_HAS_ICE)
c0ce998e 1571 CPUBreakpoint *bp, *next;
a1d1bb31 1572
72cf2d4f 1573 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1574 if (bp->flags & mask)
1575 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1576 }
4c3a88a2
FB
1577#endif
1578}
1579
c33a346e
FB
1580/* enable or disable single step mode. EXCP_DEBUG is returned by the
1581 CPU loop after each instruction */
1582void cpu_single_step(CPUState *env, int enabled)
1583{
1fddef4b 1584#if defined(TARGET_HAS_ICE)
c33a346e
FB
1585 if (env->singlestep_enabled != enabled) {
1586 env->singlestep_enabled = enabled;
e22a25c9
AL
1587 if (kvm_enabled())
1588 kvm_update_guest_debug(env, 0);
1589 else {
ccbb4d44 1590 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1591 /* XXX: only flush what is necessary */
1592 tb_flush(env);
1593 }
c33a346e
FB
1594 }
1595#endif
1596}
1597
34865134
FB
1598/* enable or disable low levels log */
1599void cpu_set_log(int log_flags)
1600{
1601 loglevel = log_flags;
1602 if (loglevel && !logfile) {
11fcfab4 1603 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1604 if (!logfile) {
1605 perror(logfilename);
1606 _exit(1);
1607 }
9fa3e853
FB
1608#if !defined(CONFIG_SOFTMMU)
1609 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1610 {
b55266b5 1611 static char logfile_buf[4096];
9fa3e853
FB
1612 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 }
daf767b1
SW
1614#elif defined(_WIN32)
1615 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616 setvbuf(logfile, NULL, _IONBF, 0);
1617#else
34865134 1618 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1619#endif
e735b91c
PB
1620 log_append = 1;
1621 }
1622 if (!loglevel && logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
34865134
FB
1625 }
1626}
1627
1628void cpu_set_log_filename(const char *filename)
1629{
1630 logfilename = strdup(filename);
e735b91c
PB
1631 if (logfile) {
1632 fclose(logfile);
1633 logfile = NULL;
1634 }
1635 cpu_set_log(loglevel);
34865134 1636}
c33a346e 1637
3098dba0 1638static void cpu_unlink_tb(CPUState *env)
ea041c0e 1639{
3098dba0
AJ
1640 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1641 problem and hope the cpu will stop of its own accord. For userspace
1642 emulation this often isn't actually as bad as it sounds. Often
1643 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1644 TranslationBlock *tb;
c227f099 1645 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1646
cab1b4bd 1647 spin_lock(&interrupt_lock);
3098dba0
AJ
1648 tb = env->current_tb;
1649 /* if the cpu is currently executing code, we must unlink it and
1650 all the potentially executing TB */
f76cfe56 1651 if (tb) {
3098dba0
AJ
1652 env->current_tb = NULL;
1653 tb_reset_jump_recursive(tb);
be214e6c 1654 }
cab1b4bd 1655 spin_unlock(&interrupt_lock);
3098dba0
AJ
1656}
1657
97ffbd8d 1658#ifndef CONFIG_USER_ONLY
3098dba0 1659/* mask must never be zero, except for A20 change call */
ec6959d0 1660static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1661{
1662 int old_mask;
be214e6c 1663
2e70f6ef 1664 old_mask = env->interrupt_request;
68a79315 1665 env->interrupt_request |= mask;
3098dba0 1666
8edac960
AL
1667 /*
1668 * If called from iothread context, wake the target cpu in
1669 * case its halted.
1670 */
b7680cb6 1671 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1672 qemu_cpu_kick(env);
1673 return;
1674 }
8edac960 1675
2e70f6ef 1676 if (use_icount) {
266910c4 1677 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1678 if (!can_do_io(env)
be214e6c 1679 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1680 cpu_abort(env, "Raised interrupt while not in I/O function");
1681 }
2e70f6ef 1682 } else {
3098dba0 1683 cpu_unlink_tb(env);
ea041c0e
FB
1684 }
1685}
1686
ec6959d0
JK
1687CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688
97ffbd8d
JK
1689#else /* CONFIG_USER_ONLY */
1690
1691void cpu_interrupt(CPUState *env, int mask)
1692{
1693 env->interrupt_request |= mask;
1694 cpu_unlink_tb(env);
1695}
1696#endif /* CONFIG_USER_ONLY */
1697
b54ad049
FB
1698void cpu_reset_interrupt(CPUState *env, int mask)
1699{
1700 env->interrupt_request &= ~mask;
1701}
1702
3098dba0
AJ
1703void cpu_exit(CPUState *env)
1704{
1705 env->exit_request = 1;
1706 cpu_unlink_tb(env);
1707}
1708
c7cd6a37 1709const CPULogItem cpu_log_items[] = {
5fafdf24 1710 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1711 "show generated host assembly code for each compiled TB" },
1712 { CPU_LOG_TB_IN_ASM, "in_asm",
1713 "show target assembly code for each compiled TB" },
5fafdf24 1714 { CPU_LOG_TB_OP, "op",
57fec1fe 1715 "show micro ops for each compiled TB" },
f193c797 1716 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1717 "show micro ops "
1718#ifdef TARGET_I386
1719 "before eflags optimization and "
f193c797 1720#endif
e01a1157 1721 "after liveness analysis" },
f193c797
FB
1722 { CPU_LOG_INT, "int",
1723 "show interrupts/exceptions in short format" },
1724 { CPU_LOG_EXEC, "exec",
1725 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1726 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1727 "show CPU state before block translation" },
f193c797
FB
1728#ifdef TARGET_I386
1729 { CPU_LOG_PCALL, "pcall",
1730 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1731 { CPU_LOG_RESET, "cpu_reset",
1732 "show CPU state before CPU resets" },
f193c797 1733#endif
8e3a9fd2 1734#ifdef DEBUG_IOPORT
fd872598
FB
1735 { CPU_LOG_IOPORT, "ioport",
1736 "show all i/o ports accesses" },
8e3a9fd2 1737#endif
f193c797
FB
1738 { 0, NULL, NULL },
1739};
1740
1741static int cmp1(const char *s1, int n, const char *s2)
1742{
1743 if (strlen(s2) != n)
1744 return 0;
1745 return memcmp(s1, s2, n) == 0;
1746}
3b46e624 1747
f193c797
FB
1748/* takes a comma separated list of log masks. Return 0 if error. */
1749int cpu_str_to_log_mask(const char *str)
1750{
c7cd6a37 1751 const CPULogItem *item;
f193c797
FB
1752 int mask;
1753 const char *p, *p1;
1754
1755 p = str;
1756 mask = 0;
1757 for(;;) {
1758 p1 = strchr(p, ',');
1759 if (!p1)
1760 p1 = p + strlen(p);
9742bf26
YT
1761 if(cmp1(p,p1-p,"all")) {
1762 for(item = cpu_log_items; item->mask != 0; item++) {
1763 mask |= item->mask;
1764 }
1765 } else {
1766 for(item = cpu_log_items; item->mask != 0; item++) {
1767 if (cmp1(p, p1 - p, item->name))
1768 goto found;
1769 }
1770 return 0;
f193c797 1771 }
f193c797
FB
1772 found:
1773 mask |= item->mask;
1774 if (*p1 != ',')
1775 break;
1776 p = p1 + 1;
1777 }
1778 return mask;
1779}
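/* Illustrative usage sketch (not part of exec.c): the parser above simply
 * ORs together the masks of the comma-separated item names, so with the
 * standard table something like the following holds:
 *
 *   int mask = cpu_str_to_log_mask("in_asm,exec");
 *   assert(mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC));
 *   assert(cpu_str_to_log_mask("bogus") == 0);   // unknown name -> error
 */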
ea041c0e 1780
7501267e
FB
1781void cpu_abort(CPUState *env, const char *fmt, ...)
1782{
1783 va_list ap;
493ae1f0 1784 va_list ap2;
7501267e
FB
1785
1786 va_start(ap, fmt);
493ae1f0 1787 va_copy(ap2, ap);
7501267e
FB
1788 fprintf(stderr, "qemu: fatal: ");
1789 vfprintf(stderr, fmt, ap);
1790 fprintf(stderr, "\n");
1791#ifdef TARGET_I386
7fe48483
FB
1792 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793#else
1794 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1795#endif
93fcfe39
AL
1796 if (qemu_log_enabled()) {
1797 qemu_log("qemu: fatal: ");
1798 qemu_log_vprintf(fmt, ap2);
1799 qemu_log("\n");
f9373291 1800#ifdef TARGET_I386
93fcfe39 1801 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1802#else
93fcfe39 1803 log_cpu_state(env, 0);
f9373291 1804#endif
31b1a7b4 1805 qemu_log_flush();
93fcfe39 1806 qemu_log_close();
924edcae 1807 }
493ae1f0 1808 va_end(ap2);
f9373291 1809 va_end(ap);
fd052bf6
RV
1810#if defined(CONFIG_USER_ONLY)
1811 {
1812 struct sigaction act;
1813 sigfillset(&act.sa_mask);
1814 act.sa_handler = SIG_DFL;
1815 sigaction(SIGABRT, &act, NULL);
1816 }
1817#endif
7501267e
FB
1818 abort();
1819}
1820
c5be9f08
TS
1821CPUState *cpu_copy(CPUState *env)
1822{
01ba9816 1823 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1824 CPUState *next_cpu = new_env->next_cpu;
1825 int cpu_index = new_env->cpu_index;
5a38f081
AL
1826#if defined(TARGET_HAS_ICE)
1827 CPUBreakpoint *bp;
1828 CPUWatchpoint *wp;
1829#endif
1830
c5be9f08 1831 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1832
1833 /* Preserve chaining and index. */
c5be9f08
TS
1834 new_env->next_cpu = next_cpu;
1835 new_env->cpu_index = cpu_index;
5a38f081
AL
1836
1837 /* Clone all break/watchpoints.
1838 Note: Once we support ptrace with hw-debug register access, make sure
1839 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1840 QTAILQ_INIT(&env->breakpoints);
1841 QTAILQ_INIT(&env->watchpoints);
5a38f081 1842#if defined(TARGET_HAS_ICE)
72cf2d4f 1843 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1844 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 }
72cf2d4f 1846 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1847 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1848 wp->flags, NULL);
1849 }
1850#endif
1851
c5be9f08
TS
1852 return new_env;
1853}
1854
0124311e
FB
1855#if !defined(CONFIG_USER_ONLY)
1856
5c751e99
EI
1857static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1858{
1859 unsigned int i;
1860
1861 /* Discard jump cache entries for any tb which might potentially
1862 overlap the flushed page. */
1863 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1864 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1865 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1866
1867 i = tb_jmp_cache_hash_page(addr);
1868 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1869 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1870}
1871
08738984
IK
1872static CPUTLBEntry s_cputlb_empty_entry = {
1873 .addr_read = -1,
1874 .addr_write = -1,
1875 .addr_code = -1,
1876 .addend = -1,
1877};
1878
771124e1
PM
1879/* NOTE:
1880 * If flush_global is true (the usual case), flush all tlb entries.
1881 * If flush_global is false, flush (at least) all tlb entries not
1882 * marked global.
1883 *
1884 * Since QEMU doesn't currently implement a global/not-global flag
1885 * for tlb entries, at the moment tlb_flush() will also flush all
1886 * tlb entries in the flush_global == false case. This is OK because
1887 * CPU architectures generally permit an implementation to drop
1888 * entries from the TLB at any time, so flushing more entries than
1889 * required is only an efficiency issue, not a correctness issue.
1890 */
ee8b7021 1891void tlb_flush(CPUState *env, int flush_global)
33417e70 1892{
33417e70 1893 int i;
0124311e 1894
9fa3e853
FB
1895#if defined(DEBUG_TLB)
1896 printf("tlb_flush:\n");
1897#endif
0124311e
FB
1898 /* must reset current TB so that interrupts cannot modify the
1899 links while we are modifying them */
1900 env->current_tb = NULL;
1901
33417e70 1902 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1903 int mmu_idx;
1904 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1905 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1906 }
33417e70 1907 }
9fa3e853 1908
8a40a180 1909 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1910
d4c430a8
PB
1911 env->tlb_flush_addr = -1;
1912 env->tlb_flush_mask = 0;
e3db7226 1913 tlb_flush_count++;
33417e70
FB
1914}
1915
274da6b2 1916static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1917{
5fafdf24 1918 if (addr == (tlb_entry->addr_read &
84b7b8e7 1919 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1920 addr == (tlb_entry->addr_write &
84b7b8e7 1921 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1922 addr == (tlb_entry->addr_code &
84b7b8e7 1923 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1924 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1925 }
61382a50
FB
1926}
1927
2e12669a 1928void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1929{
8a40a180 1930 int i;
cfde4bd9 1931 int mmu_idx;
0124311e 1932
9fa3e853 1933#if defined(DEBUG_TLB)
108c49b8 1934 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1935#endif
d4c430a8
PB
1936 /* Check if we need to flush due to large pages. */
1937 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1938#if defined(DEBUG_TLB)
1939 printf("tlb_flush_page: forced full flush ("
1940 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1941 env->tlb_flush_addr, env->tlb_flush_mask);
1942#endif
1943 tlb_flush(env, 1);
1944 return;
1945 }
0124311e
FB
1946 /* must reset current TB so that interrupts cannot modify the
1947 links while we are modifying them */
1948 env->current_tb = NULL;
61382a50
FB
1949
1950 addr &= TARGET_PAGE_MASK;
1951 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1952 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1953 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1954
5c751e99 1955 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1956}
1957
9fa3e853
FB
1958/* update the TLBs so that writes to code in the virtual page 'addr'
1959 can be detected */
c227f099 1960static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1961{
5fafdf24 1962 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1963 ram_addr + TARGET_PAGE_SIZE,
1964 CODE_DIRTY_FLAG);
9fa3e853
FB
1965}
1966
9fa3e853 1967/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1968 tested for self-modifying code */
c227f099 1969static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1970 target_ulong vaddr)
9fa3e853 1971{
f7c11b53 1972 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1973}
1974
5fafdf24 1975static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1976 unsigned long start, unsigned long length)
1977{
1978 unsigned long addr;
0e0df1e2 1979 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 1980 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1981 if ((addr - start) < length) {
0f459d16 1982 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1983 }
1984 }
1985}
1986
5579c7f3 1987/* Note: start and end must be within the same ram block. */
c227f099 1988void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1989 int dirty_flags)
1ccde1cb
FB
1990{
1991 CPUState *env;
4f2ac237 1992 unsigned long length, start1;
f7c11b53 1993 int i;
1ccde1cb
FB
1994
1995 start &= TARGET_PAGE_MASK;
1996 end = TARGET_PAGE_ALIGN(end);
1997
1998 length = end - start;
1999 if (length == 0)
2000 return;
f7c11b53 2001 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2002
1ccde1cb
FB
2003 /* we modify the TLB cache so that the dirty bit will be set again
2004 when accessing the range */
b2e0a138 2005 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2006 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2007 address comparisons below. */
b2e0a138 2008 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2009 != (end - 1) - start) {
2010 abort();
2011 }
2012
6a00d601 2013 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2014 int mmu_idx;
2015 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2016 for(i = 0; i < CPU_TLB_SIZE; i++)
2017 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2018 start1, length);
2019 }
6a00d601 2020 }
1ccde1cb
FB
2021}
2022
74576198
AL
2023int cpu_physical_memory_set_dirty_tracking(int enable)
2024{
f6f3fbca 2025 int ret = 0;
74576198 2026 in_migration = enable;
f6f3fbca 2027 return ret;
74576198
AL
2028}
2029
3a7d929e
FB
2030static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2031{
c227f099 2032 ram_addr_t ram_addr;
5579c7f3 2033 void *p;
3a7d929e 2034
0e0df1e2 2035 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2036 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2037 + tlb_entry->addend);
e890261f 2038 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2039 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2040 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2041 }
2042 }
2043}
2044
2045/* update the TLB according to the current state of the dirty bits */
2046void cpu_tlb_update_dirty(CPUState *env)
2047{
2048 int i;
cfde4bd9
IY
2049 int mmu_idx;
2050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2051 for(i = 0; i < CPU_TLB_SIZE; i++)
2052 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2053 }
3a7d929e
FB
2054}
2055
0f459d16 2056static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2057{
0f459d16
PB
2058 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2059 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2060}
2061
0f459d16
PB
2062/* update the TLB corresponding to virtual page vaddr
2063 so that it is no longer dirty */
2064static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2065{
1ccde1cb 2066 int i;
cfde4bd9 2067 int mmu_idx;
1ccde1cb 2068
0f459d16 2069 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2070 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2071 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2072 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2073}
2074
d4c430a8
PB
2075/* Our TLB does not support large pages, so remember the area covered by
2076 large pages and trigger a full TLB flush if these are invalidated. */
2077static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2078 target_ulong size)
2079{
2080 target_ulong mask = ~(size - 1);
2081
2082 if (env->tlb_flush_addr == (target_ulong)-1) {
2083 env->tlb_flush_addr = vaddr & mask;
2084 env->tlb_flush_mask = mask;
2085 return;
2086 }
2087 /* Extend the existing region to include the new page.
2088 This is a compromise between unnecessary flushes and the cost
2089 of maintaining a full variable size TLB. */
2090 mask &= env->tlb_flush_mask;
2091 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2092 mask <<= 1;
2093 }
2094 env->tlb_flush_addr &= mask;
2095 env->tlb_flush_mask = mask;
2096}
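/* Worked example (illustrative only, assuming a 32-bit target_ulong): after
 * a 64 KB page is added at 0x10000 we have tlb_flush_addr = 0x10000 and
 * tlb_flush_mask = 0xffff0000.  Adding another 64 KB page at 0x30000 widens
 * the mask until both addresses agree under it: 0x10000 ^ 0x30000 = 0x20000,
 * so the mask is shifted to 0xfffc0000 and tlb_flush_addr drops to 0x00000.
 * From then on a tlb_flush_page() anywhere in [0x00000, 0x3ffff] forces a
 * full tlb_flush(). */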
2097
1d393fa2
AK
2098static bool is_ram_rom(ram_addr_t pd)
2099{
2100 pd &= ~TARGET_PAGE_MASK;
0e0df1e2 2101 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
1d393fa2
AK
2102}
2103
75c578dc
AK
2104static bool is_romd(ram_addr_t pd)
2105{
2106 MemoryRegion *mr;
2107
2108 pd &= ~TARGET_PAGE_MASK;
11c7ef0c 2109 mr = io_mem_region[pd];
75c578dc
AK
2110 return mr->rom_device && mr->readable;
2111}
2112
1d393fa2
AK
2113static bool is_ram_rom_romd(ram_addr_t pd)
2114{
75c578dc 2115 return is_ram_rom(pd) || is_romd(pd);
1d393fa2
AK
2116}
2117
d4c430a8
PB
2118/* Add a new TLB entry. At most one entry for a given virtual address
2119 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2120 supplied size is only used by tlb_flush_page. */
2121void tlb_set_page(CPUState *env, target_ulong vaddr,
2122 target_phys_addr_t paddr, int prot,
2123 int mmu_idx, target_ulong size)
9fa3e853 2124{
f1f6e3b8 2125 PhysPageDesc p;
4f2ac237 2126 unsigned long pd;
9fa3e853 2127 unsigned int index;
4f2ac237 2128 target_ulong address;
0f459d16 2129 target_ulong code_address;
355b1943 2130 unsigned long addend;
84b7b8e7 2131 CPUTLBEntry *te;
a1d1bb31 2132 CPUWatchpoint *wp;
c227f099 2133 target_phys_addr_t iotlb;
9fa3e853 2134
d4c430a8
PB
2135 assert(size >= TARGET_PAGE_SIZE);
2136 if (size != TARGET_PAGE_SIZE) {
2137 tlb_add_large_page(env, vaddr, size);
2138 }
92e873b9 2139 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2140 pd = p.phys_offset;
9fa3e853 2141#if defined(DEBUG_TLB)
7fd3f494
SW
2142 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2143 " prot=%x idx=%d pd=0x%08lx\n",
2144 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2145#endif
2146
0f459d16 2147 address = vaddr;
1d393fa2 2148 if (!is_ram_rom_romd(pd)) {
0f459d16
PB
2149 /* IO memory case (romd handled later) */
2150 address |= TLB_MMIO;
2151 }
5579c7f3 2152 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1d393fa2 2153 if (is_ram_rom(pd)) {
0f459d16
PB
2154 /* Normal RAM. */
2155 iotlb = pd & TARGET_PAGE_MASK;
0e0df1e2
AK
2156 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2157 iotlb |= io_mem_notdirty.ram_addr;
0f459d16 2158 else
0e0df1e2 2159 iotlb |= io_mem_rom.ram_addr;
0f459d16 2160 } else {
ccbb4d44 2161 /* IO handlers are currently passed a physical address.
0f459d16
PB
2162 It would be nice to pass an offset from the base address
2163 of that region. This would avoid having to special case RAM,
2164 and avoid full address decoding in every device.
2165 We can't use the high bits of pd for this because
2166 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2167 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2168 iotlb += p.region_offset;
0f459d16
PB
2169 }
2170
2171 code_address = address;
2172 /* Make accesses to pages with watchpoints go via the
2173 watchpoint trap routines. */
72cf2d4f 2174 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2175 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2176 /* Avoid trapping reads of pages with a write breakpoint. */
2177 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1ec9b909 2178 iotlb = io_mem_watch.ram_addr + paddr;
bf298f83
JK
2179 address |= TLB_MMIO;
2180 break;
2181 }
6658ffb8 2182 }
0f459d16 2183 }
d79acba4 2184
0f459d16
PB
2185 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2186 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2187 te = &env->tlb_table[mmu_idx][index];
2188 te->addend = addend - vaddr;
2189 if (prot & PAGE_READ) {
2190 te->addr_read = address;
2191 } else {
2192 te->addr_read = -1;
2193 }
5c751e99 2194
0f459d16
PB
2195 if (prot & PAGE_EXEC) {
2196 te->addr_code = code_address;
2197 } else {
2198 te->addr_code = -1;
2199 }
2200 if (prot & PAGE_WRITE) {
75c578dc 2201 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
0f459d16
PB
2202 /* Write access calls the I/O callback. */
2203 te->addr_write = address | TLB_MMIO;
0e0df1e2 2204 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
0f459d16
PB
2205 !cpu_physical_memory_is_dirty(pd)) {
2206 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2207 } else {
0f459d16 2208 te->addr_write = address;
9fa3e853 2209 }
0f459d16
PB
2210 } else {
2211 te->addr_write = -1;
9fa3e853 2212 }
9fa3e853
FB
2213}
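/* Minimal sketch of how the fast path consumes the entry filled in above
 * (a simplification, not the exact softmmu template code):
 *
 *   index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 *   te = &env->tlb_table[mmu_idx][index];
 *   if ((vaddr & TARGET_PAGE_MASK) == te->addr_read) {
 *       // plain RAM: no TLB_MMIO/TLB_NOTDIRTY/TLB_INVALID bits are set
 *       val = ldl_p((void *)(unsigned long)(vaddr + te->addend));
 *   } else {
 *       // miss or flagged entry: refill via tlb_set_page() or go to MMIO
 *   }
 */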
2214
0124311e
FB
2215#else
2216
ee8b7021 2217void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2218{
2219}
2220
2e12669a 2221void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2222{
2223}
2224
edf8e2af
MW
2225/*
2226 * Walks guest process memory "regions" one by one
2227 * and calls callback function 'fn' for each region.
2228 */
5cd2c5b6
RH
2229
2230struct walk_memory_regions_data
2231{
2232 walk_memory_regions_fn fn;
2233 void *priv;
2234 unsigned long start;
2235 int prot;
2236};
2237
2238static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2239 abi_ulong end, int new_prot)
5cd2c5b6
RH
2240{
2241 if (data->start != -1ul) {
2242 int rc = data->fn(data->priv, data->start, end, data->prot);
2243 if (rc != 0) {
2244 return rc;
2245 }
2246 }
2247
2248 data->start = (new_prot ? end : -1ul);
2249 data->prot = new_prot;
2250
2251 return 0;
2252}
2253
2254static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2255 abi_ulong base, int level, void **lp)
5cd2c5b6 2256{
b480d9b7 2257 abi_ulong pa;
5cd2c5b6
RH
2258 int i, rc;
2259
2260 if (*lp == NULL) {
2261 return walk_memory_regions_end(data, base, 0);
2262 }
2263
2264 if (level == 0) {
2265 PageDesc *pd = *lp;
7296abac 2266 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2267 int prot = pd[i].flags;
2268
2269 pa = base | (i << TARGET_PAGE_BITS);
2270 if (prot != data->prot) {
2271 rc = walk_memory_regions_end(data, pa, prot);
2272 if (rc != 0) {
2273 return rc;
9fa3e853 2274 }
9fa3e853 2275 }
5cd2c5b6
RH
2276 }
2277 } else {
2278 void **pp = *lp;
7296abac 2279 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2280 pa = base | ((abi_ulong)i <<
2281 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2282 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2283 if (rc != 0) {
2284 return rc;
2285 }
2286 }
2287 }
2288
2289 return 0;
2290}
2291
2292int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2293{
2294 struct walk_memory_regions_data data;
2295 unsigned long i;
2296
2297 data.fn = fn;
2298 data.priv = priv;
2299 data.start = -1ul;
2300 data.prot = 0;
2301
2302 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2303 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2304 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2305 if (rc != 0) {
2306 return rc;
9fa3e853 2307 }
33417e70 2308 }
5cd2c5b6
RH
2309
2310 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2311}
2312
b480d9b7
PB
2313static int dump_region(void *priv, abi_ulong start,
2314 abi_ulong end, unsigned long prot)
edf8e2af
MW
2315{
2316 FILE *f = (FILE *)priv;
2317
b480d9b7
PB
2318 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2319 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2320 start, end, end - start,
2321 ((prot & PAGE_READ) ? 'r' : '-'),
2322 ((prot & PAGE_WRITE) ? 'w' : '-'),
2323 ((prot & PAGE_EXEC) ? 'x' : '-'));
2324
2325 return (0);
2326}
2327
2328/* dump memory mappings */
2329void page_dump(FILE *f)
2330{
2331 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2332 "start", "end", "size", "prot");
2333 walk_memory_regions(f, dump_region);
33417e70
FB
2334}
2335
53a5960a 2336int page_get_flags(target_ulong address)
33417e70 2337{
9fa3e853
FB
2338 PageDesc *p;
2339
2340 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2341 if (!p)
9fa3e853
FB
2342 return 0;
2343 return p->flags;
2344}
2345
376a7909
RH
2346/* Modify the flags of a page and invalidate the code if necessary.
2347 The flag PAGE_WRITE_ORG is positioned automatically depending
2348 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2349void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2350{
376a7909
RH
2351 target_ulong addr, len;
2352
2353 /* This function should never be called with addresses outside the
2354 guest address space. If this assert fires, it probably indicates
2355 a missing call to h2g_valid. */
b480d9b7
PB
2356#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2357 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2358#endif
2359 assert(start < end);
9fa3e853
FB
2360
2361 start = start & TARGET_PAGE_MASK;
2362 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2363
2364 if (flags & PAGE_WRITE) {
9fa3e853 2365 flags |= PAGE_WRITE_ORG;
376a7909
RH
2366 }
2367
2368 for (addr = start, len = end - start;
2369 len != 0;
2370 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2371 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2372
2373 /* If the write protection bit is set, then we invalidate
2374 the code inside. */
5fafdf24 2375 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2376 (flags & PAGE_WRITE) &&
2377 p->first_tb) {
d720b93d 2378 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2379 }
2380 p->flags = flags;
2381 }
33417e70
FB
2382}
2383
3d97b40b
TS
2384int page_check_range(target_ulong start, target_ulong len, int flags)
2385{
2386 PageDesc *p;
2387 target_ulong end;
2388 target_ulong addr;
2389
376a7909
RH
2390 /* This function should never be called with addresses outside the
2391 guest address space. If this assert fires, it probably indicates
2392 a missing call to h2g_valid. */
338e9e6c
BS
2393#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2394 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2395#endif
2396
3e0650a9
RH
2397 if (len == 0) {
2398 return 0;
2399 }
376a7909
RH
2400 if (start + len - 1 < start) {
2401 /* We've wrapped around. */
55f280c9 2402 return -1;
376a7909 2403 }
55f280c9 2404
3d97b40b
TS
2405 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2406 start = start & TARGET_PAGE_MASK;
2407
376a7909
RH
2408 for (addr = start, len = end - start;
2409 len != 0;
2410 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2411 p = page_find(addr >> TARGET_PAGE_BITS);
2412 if( !p )
2413 return -1;
2414 if( !(p->flags & PAGE_VALID) )
2415 return -1;
2416
dae3270c 2417 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2418 return -1;
dae3270c
FB
2419 if (flags & PAGE_WRITE) {
2420 if (!(p->flags & PAGE_WRITE_ORG))
2421 return -1;
2422 /* unprotect the page if it was put read-only because it
2423 contains translated code */
2424 if (!(p->flags & PAGE_WRITE)) {
2425 if (!page_unprotect(addr, 0, NULL))
2426 return -1;
2427 }
2428 return 0;
2429 }
3d97b40b
TS
2430 }
2431 return 0;
2432}
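/* Usage sketch (illustrative, callers live outside this file): user-mode
 * emulation code validates a guest buffer before touching it, e.g.
 *
 *   if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
 *       return -TARGET_EFAULT;   // hypothetical caller's error path
 *   }
 *
 * 0 means the checked pages are present with the requested protection
 * (write-protected translated-code pages are unprotected on the way),
 * -1 means the access would fault. */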
2433
9fa3e853 2434/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2435 page. Return TRUE if the fault was successfully handled. */
53a5960a 2436int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2437{
45d679d6
AJ
2438 unsigned int prot;
2439 PageDesc *p;
53a5960a 2440 target_ulong host_start, host_end, addr;
9fa3e853 2441
c8a706fe
PB
2442 /* Technically this isn't safe inside a signal handler. However we
2443 know this only ever happens in a synchronous SEGV handler, so in
2444 practice it seems to be ok. */
2445 mmap_lock();
2446
45d679d6
AJ
2447 p = page_find(address >> TARGET_PAGE_BITS);
2448 if (!p) {
c8a706fe 2449 mmap_unlock();
9fa3e853 2450 return 0;
c8a706fe 2451 }
45d679d6 2452
9fa3e853
FB
2453 /* if the page was really writable, then we change its
2454 protection back to writable */
45d679d6
AJ
2455 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2456 host_start = address & qemu_host_page_mask;
2457 host_end = host_start + qemu_host_page_size;
2458
2459 prot = 0;
2460 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2461 p = page_find(addr >> TARGET_PAGE_BITS);
2462 p->flags |= PAGE_WRITE;
2463 prot |= p->flags;
2464
9fa3e853
FB
2465 /* and since the content will be modified, we must invalidate
2466 the corresponding translated code. */
45d679d6 2467 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2468#ifdef DEBUG_TB_CHECK
45d679d6 2469 tb_invalidate_check(addr);
9fa3e853 2470#endif
9fa3e853 2471 }
45d679d6
AJ
2472 mprotect((void *)g2h(host_start), qemu_host_page_size,
2473 prot & PAGE_BITS);
2474
2475 mmap_unlock();
2476 return 1;
9fa3e853 2477 }
c8a706fe 2478 mmap_unlock();
9fa3e853
FB
2479 return 0;
2480}
2481
6a00d601
FB
2482static inline void tlb_set_dirty(CPUState *env,
2483 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2484{
2485}
9fa3e853
FB
2486#endif /* defined(CONFIG_USER_ONLY) */
2487
e2eef170 2488#if !defined(CONFIG_USER_ONLY)
8da3ff18 2489
c04b2b78
PB
2490#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2491typedef struct subpage_t {
70c68e44 2492 MemoryRegion iomem;
c04b2b78 2493 target_phys_addr_t base;
f6405247
RH
2494 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2495 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2496} subpage_t;
2497
c227f099
AL
2498static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2499 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2500static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2501 ram_addr_t orig_memory,
2502 ram_addr_t region_offset);
db7b5426
BS
2503#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2504 need_subpage) \
2505 do { \
2506 if (addr > start_addr) \
2507 start_addr2 = 0; \
2508 else { \
2509 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2510 if (start_addr2 > 0) \
2511 need_subpage = 1; \
2512 } \
2513 \
49e9fba2 2514 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2515 end_addr2 = TARGET_PAGE_SIZE - 1; \
2516 else { \
2517 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2518 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2519 need_subpage = 1; \
2520 } \
2521 } while (0)
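/* Example of the macro above (illustrative, TARGET_PAGE_SIZE == 4096): for
 * the first page of a registration with start_addr = 0x1080 and
 * orig_size = 0x100 (so addr == start_addr on the first iteration), it
 * yields start_addr2 = 0x080, end_addr2 = 0x17f and need_subpage = 1,
 * i.e. only bytes 0x080..0x17f of that page are routed to the new handler
 * and the rest stays with the existing subpage mapping. */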
2522
8f2498f9
MT
2523/* register physical memory.
2524 For RAM, 'size' must be a multiple of the target page size.
2525 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2526 io memory page. The address used when calling the IO function is
2527 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2528 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2529 before calculating this offset. This should not be a problem unless
2530 the low bits of start_addr and region_offset differ. */
dd81124b
AK
2531void cpu_register_physical_memory_log(MemoryRegionSection *section,
2532 bool readable, bool readonly)
33417e70 2533{
dd81124b
AK
2534 target_phys_addr_t start_addr = section->offset_within_address_space;
2535 ram_addr_t size = section->size;
2536 ram_addr_t phys_offset = section->mr->ram_addr;
2537 ram_addr_t region_offset = section->offset_within_region;
c227f099 2538 target_phys_addr_t addr, end_addr;
92e873b9 2539 PhysPageDesc *p;
9d42037b 2540 CPUState *env;
c227f099 2541 ram_addr_t orig_size = size;
f6405247 2542 subpage_t *subpage;
33417e70 2543
dd81124b
AK
2544 if (memory_region_is_ram(section->mr)) {
2545 phys_offset += region_offset;
2546 region_offset = 0;
2547 }
2548
dd81124b
AK
2549 if (readonly) {
2550 phys_offset |= io_mem_rom.ram_addr;
2551 }
2552
3b8e6a2d 2553 assert(size);
f6f3fbca 2554
0e0df1e2 2555 if (phys_offset == io_mem_unassigned.ram_addr) {
67c4d23c
PB
2556 region_offset = start_addr;
2557 }
8da3ff18 2558 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2559 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2560 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2561
2562 addr = start_addr;
2563 do {
f1f6e3b8 2564 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
0e0df1e2 2565 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
c227f099
AL
2566 ram_addr_t orig_memory = p->phys_offset;
2567 target_phys_addr_t start_addr2, end_addr2;
db7b5426 2568 int need_subpage = 0;
11c7ef0c 2569 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
db7b5426
BS
2570
2571 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2572 need_subpage);
f6405247 2573 if (need_subpage) {
b3b00c78 2574 if (!(mr->subpage)) {
db7b5426 2575 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2576 &p->phys_offset, orig_memory,
2577 p->region_offset);
db7b5426 2578 } else {
a621f38d 2579 subpage = container_of(mr, subpage_t, iomem);
db7b5426 2580 }
8da3ff18
PB
2581 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2582 region_offset);
2583 p->region_offset = 0;
db7b5426
BS
2584 } else {
2585 p->phys_offset = phys_offset;
2774c6d0 2586 p->region_offset = region_offset;
1d393fa2 2587 if (is_ram_rom_romd(phys_offset))
db7b5426
BS
2588 phys_offset += TARGET_PAGE_SIZE;
2589 }
2590 } else {
2591 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2592 p->phys_offset = phys_offset;
8da3ff18 2593 p->region_offset = region_offset;
1d393fa2 2594 if (is_ram_rom_romd(phys_offset)) {
db7b5426 2595 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2596 } else {
c227f099 2597 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2598 int need_subpage = 0;
2599
2600 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2601 end_addr2, need_subpage);
2602
f6405247 2603 if (need_subpage) {
db7b5426 2604 subpage = subpage_init((addr & TARGET_PAGE_MASK),
0e0df1e2
AK
2605 &p->phys_offset,
2606 io_mem_unassigned.ram_addr,
67c4d23c 2607 addr & TARGET_PAGE_MASK);
db7b5426 2608 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2609 phys_offset, region_offset);
2610 p->region_offset = 0;
db7b5426
BS
2611 }
2612 }
2613 }
8da3ff18 2614 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2615 addr += TARGET_PAGE_SIZE;
2616 } while (addr != end_addr);
3b46e624 2617
9d42037b
FB
2618 /* since each CPU stores ram addresses in its TLB cache, we must
2619 reset the modified entries */
2620 /* XXX: slow ! */
2621 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2622 tlb_flush(env, 1);
2623 }
33417e70
FB
2624}
2625
c227f099 2626void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2627{
2628 if (kvm_enabled())
2629 kvm_coalesce_mmio_region(addr, size);
2630}
2631
c227f099 2632void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2633{
2634 if (kvm_enabled())
2635 kvm_uncoalesce_mmio_region(addr, size);
2636}
2637
62a2744c
SY
2638void qemu_flush_coalesced_mmio_buffer(void)
2639{
2640 if (kvm_enabled())
2641 kvm_flush_coalesced_mmio_buffer();
2642}
2643
c902760f
MT
2644#if defined(__linux__) && !defined(TARGET_S390X)
2645
2646#include <sys/vfs.h>
2647
2648#define HUGETLBFS_MAGIC 0x958458f6
2649
2650static long gethugepagesize(const char *path)
2651{
2652 struct statfs fs;
2653 int ret;
2654
2655 do {
9742bf26 2656 ret = statfs(path, &fs);
c902760f
MT
2657 } while (ret != 0 && errno == EINTR);
2658
2659 if (ret != 0) {
9742bf26
YT
2660 perror(path);
2661 return 0;
c902760f
MT
2662 }
2663
2664 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2665 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2666
2667 return fs.f_bsize;
2668}
2669
04b16653
AW
2670static void *file_ram_alloc(RAMBlock *block,
2671 ram_addr_t memory,
2672 const char *path)
c902760f
MT
2673{
2674 char *filename;
2675 void *area;
2676 int fd;
2677#ifdef MAP_POPULATE
2678 int flags;
2679#endif
2680 unsigned long hpagesize;
2681
2682 hpagesize = gethugepagesize(path);
2683 if (!hpagesize) {
9742bf26 2684 return NULL;
c902760f
MT
2685 }
2686
2687 if (memory < hpagesize) {
2688 return NULL;
2689 }
2690
2691 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2692 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2693 return NULL;
2694 }
2695
2696 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2697 return NULL;
c902760f
MT
2698 }
2699
2700 fd = mkstemp(filename);
2701 if (fd < 0) {
9742bf26
YT
2702 perror("unable to create backing store for hugepages");
2703 free(filename);
2704 return NULL;
c902760f
MT
2705 }
2706 unlink(filename);
2707 free(filename);
2708
2709 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2710
2711 /*
2712 * ftruncate is not supported by hugetlbfs in older
2713 * hosts, so don't bother bailing out on errors.
2714 * If anything goes wrong with it under other filesystems,
2715 * mmap will fail.
2716 */
2717 if (ftruncate(fd, memory))
9742bf26 2718 perror("ftruncate");
c902760f
MT
2719
2720#ifdef MAP_POPULATE
2721 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2722 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2723 * to sidestep this quirk.
2724 */
2725 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2726 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2727#else
2728 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2729#endif
2730 if (area == MAP_FAILED) {
9742bf26
YT
2731 perror("file_ram_alloc: can't mmap RAM pages");
2732 close(fd);
2733 return (NULL);
c902760f 2734 }
04b16653 2735 block->fd = fd;
c902760f
MT
2736 return area;
2737}
2738#endif
2739
d17b5288 2740static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2741{
2742 RAMBlock *block, *next_block;
3e837b2c 2743 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2744
2745 if (QLIST_EMPTY(&ram_list.blocks))
2746 return 0;
2747
2748 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2749 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2750
2751 end = block->offset + block->length;
2752
2753 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2754 if (next_block->offset >= end) {
2755 next = MIN(next, next_block->offset);
2756 }
2757 }
2758 if (next - end >= size && next - end < mingap) {
3e837b2c 2759 offset = end;
04b16653
AW
2760 mingap = next - end;
2761 }
2762 }
3e837b2c
AW
2763
2764 if (offset == RAM_ADDR_MAX) {
2765 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2766 (uint64_t)size);
2767 abort();
2768 }
2769
04b16653
AW
2770 return offset;
2771}
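/* Illustrative example: with existing blocks covering [0, 0x1000) and
 * [0x3000, 0x4000), a request for size 0x1000 sees a 0x2000-byte gap after
 * the first block and an unbounded gap after the second; the smallest gap
 * that fits wins, so the function returns offset 0x1000. */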
2772
2773static ram_addr_t last_ram_offset(void)
d17b5288
AW
2774{
2775 RAMBlock *block;
2776 ram_addr_t last = 0;
2777
2778 QLIST_FOREACH(block, &ram_list.blocks, next)
2779 last = MAX(last, block->offset + block->length);
2780
2781 return last;
2782}
2783
c5705a77 2784void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2785{
2786 RAMBlock *new_block, *block;
2787
c5705a77
AK
2788 new_block = NULL;
2789 QLIST_FOREACH(block, &ram_list.blocks, next) {
2790 if (block->offset == addr) {
2791 new_block = block;
2792 break;
2793 }
2794 }
2795 assert(new_block);
2796 assert(!new_block->idstr[0]);
84b89d78
CM
2797
2798 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2799 char *id = dev->parent_bus->info->get_dev_path(dev);
2800 if (id) {
2801 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2802 g_free(id);
84b89d78
CM
2803 }
2804 }
2805 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2806
2807 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2808 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2809 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2810 new_block->idstr);
2811 abort();
2812 }
2813 }
c5705a77
AK
2814}
2815
2816ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2817 MemoryRegion *mr)
2818{
2819 RAMBlock *new_block;
2820
2821 size = TARGET_PAGE_ALIGN(size);
2822 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2823
7c637366 2824 new_block->mr = mr;
432d268c 2825 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2826 if (host) {
2827 new_block->host = host;
cd19cfa2 2828 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2829 } else {
2830 if (mem_path) {
c902760f 2831#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2832 new_block->host = file_ram_alloc(new_block, size, mem_path);
2833 if (!new_block->host) {
2834 new_block->host = qemu_vmalloc(size);
e78815a5 2835 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2836 }
c902760f 2837#else
6977dfe6
YT
2838 fprintf(stderr, "-mem-path option unsupported\n");
2839 exit(1);
c902760f 2840#endif
6977dfe6 2841 } else {
6b02494d 2842#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2843 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2844 a system-defined value, which is at least 256GB. Larger systems
2845 have larger values. We put the guest between the end of data
2846 segment (system break) and this value. We use 32GB as a base to
2847 have enough room for the system break to grow. */
2848 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2849 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2850 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2851 if (new_block->host == MAP_FAILED) {
2852 fprintf(stderr, "Allocating RAM failed\n");
2853 abort();
2854 }
6b02494d 2855#else
868bb33f 2856 if (xen_enabled()) {
fce537d4 2857 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2858 } else {
2859 new_block->host = qemu_vmalloc(size);
2860 }
6b02494d 2861#endif
e78815a5 2862 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2863 }
c902760f 2864 }
94a6b54f
PB
2865 new_block->length = size;
2866
f471a17e 2867 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2868
7267c094 2869 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2870 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2871 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2872 0xff, size >> TARGET_PAGE_BITS);
2873
6f0437e8
JK
2874 if (kvm_enabled())
2875 kvm_setup_guest_memory(new_block->host, size);
2876
94a6b54f
PB
2877 return new_block->offset;
2878}
e9a1ab19 2879
c5705a77 2880ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2881{
c5705a77 2882 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2883}
2884
1f2e98b6
AW
2885void qemu_ram_free_from_ptr(ram_addr_t addr)
2886{
2887 RAMBlock *block;
2888
2889 QLIST_FOREACH(block, &ram_list.blocks, next) {
2890 if (addr == block->offset) {
2891 QLIST_REMOVE(block, next);
7267c094 2892 g_free(block);
1f2e98b6
AW
2893 return;
2894 }
2895 }
2896}
2897
c227f099 2898void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2899{
04b16653
AW
2900 RAMBlock *block;
2901
2902 QLIST_FOREACH(block, &ram_list.blocks, next) {
2903 if (addr == block->offset) {
2904 QLIST_REMOVE(block, next);
cd19cfa2
HY
2905 if (block->flags & RAM_PREALLOC_MASK) {
2906 ;
2907 } else if (mem_path) {
04b16653
AW
2908#if defined (__linux__) && !defined(TARGET_S390X)
2909 if (block->fd) {
2910 munmap(block->host, block->length);
2911 close(block->fd);
2912 } else {
2913 qemu_vfree(block->host);
2914 }
fd28aa13
JK
2915#else
2916 abort();
04b16653
AW
2917#endif
2918 } else {
2919#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2920 munmap(block->host, block->length);
2921#else
868bb33f 2922 if (xen_enabled()) {
e41d7c69 2923 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2924 } else {
2925 qemu_vfree(block->host);
2926 }
04b16653
AW
2927#endif
2928 }
7267c094 2929 g_free(block);
04b16653
AW
2930 return;
2931 }
2932 }
2933
e9a1ab19
FB
2934}
2935
cd19cfa2
HY
2936#ifndef _WIN32
2937void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2938{
2939 RAMBlock *block;
2940 ram_addr_t offset;
2941 int flags;
2942 void *area, *vaddr;
2943
2944 QLIST_FOREACH(block, &ram_list.blocks, next) {
2945 offset = addr - block->offset;
2946 if (offset < block->length) {
2947 vaddr = block->host + offset;
2948 if (block->flags & RAM_PREALLOC_MASK) {
2949 ;
2950 } else {
2951 flags = MAP_FIXED;
2952 munmap(vaddr, length);
2953 if (mem_path) {
2954#if defined(__linux__) && !defined(TARGET_S390X)
2955 if (block->fd) {
2956#ifdef MAP_POPULATE
2957 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2958 MAP_PRIVATE;
2959#else
2960 flags |= MAP_PRIVATE;
2961#endif
2962 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2963 flags, block->fd, offset);
2964 } else {
2965 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2966 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2967 flags, -1, 0);
2968 }
fd28aa13
JK
2969#else
2970 abort();
cd19cfa2
HY
2971#endif
2972 } else {
2973#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2974 flags |= MAP_SHARED | MAP_ANONYMOUS;
2975 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2976 flags, -1, 0);
2977#else
2978 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2979 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2980 flags, -1, 0);
2981#endif
2982 }
2983 if (area != vaddr) {
f15fbc4b
AP
2984 fprintf(stderr, "Could not remap addr: "
2985 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2986 length, addr);
2987 exit(1);
2988 }
2989 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2990 }
2991 return;
2992 }
2993 }
2994}
2995#endif /* !_WIN32 */
2996
dc828ca1 2997/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2998 With the exception of the softmmu code in this file, this should
2999 only be used for local memory (e.g. video ram) that the device owns,
3000 and knows it isn't going to access beyond the end of the block.
3001
3002 It should not be used for general purpose DMA.
3003 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3004 */
c227f099 3005void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3006{
94a6b54f
PB
3007 RAMBlock *block;
3008
f471a17e
AW
3009 QLIST_FOREACH(block, &ram_list.blocks, next) {
3010 if (addr - block->offset < block->length) {
7d82af38
VP
3011 /* Move this entry to the start of the list. */
3012 if (block != QLIST_FIRST(&ram_list.blocks)) {
3013 QLIST_REMOVE(block, next);
3014 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3015 }
868bb33f 3016 if (xen_enabled()) {
432d268c
JN
3017 /* We need to check if the requested address is in the RAM
3018 * because we don't want to map the entire memory in QEMU.
712c2b41 3019 * In that case just map until the end of the page.
432d268c
JN
3020 */
3021 if (block->offset == 0) {
e41d7c69 3022 return xen_map_cache(addr, 0, 0);
432d268c 3023 } else if (block->host == NULL) {
e41d7c69
JK
3024 block->host =
3025 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3026 }
3027 }
f471a17e
AW
3028 return block->host + (addr - block->offset);
3029 }
94a6b54f 3030 }
f471a17e
AW
3031
3032 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3033 abort();
3034
3035 return NULL;
dc828ca1
PB
3036}
3037
b2e0a138
MT
3038/* Return a host pointer to ram allocated with qemu_ram_alloc.
3039 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3040 */
3041void *qemu_safe_ram_ptr(ram_addr_t addr)
3042{
3043 RAMBlock *block;
3044
3045 QLIST_FOREACH(block, &ram_list.blocks, next) {
3046 if (addr - block->offset < block->length) {
868bb33f 3047 if (xen_enabled()) {
432d268c
JN
3048 /* We need to check if the requested address is in the RAM
3049 * because we don't want to map the entire memory in QEMU.
712c2b41 3050 * In that case just map until the end of the page.
432d268c
JN
3051 */
3052 if (block->offset == 0) {
e41d7c69 3053 return xen_map_cache(addr, 0, 0);
432d268c 3054 } else if (block->host == NULL) {
e41d7c69
JK
3055 block->host =
3056 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3057 }
3058 }
b2e0a138
MT
3059 return block->host + (addr - block->offset);
3060 }
3061 }
3062
3063 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3064 abort();
3065
3066 return NULL;
3067}
3068
38bee5dc
SS
3069/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3070 * but takes a size argument */
8ab934f9 3071void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3072{
8ab934f9
SS
3073 if (*size == 0) {
3074 return NULL;
3075 }
868bb33f 3076 if (xen_enabled()) {
e41d7c69 3077 return xen_map_cache(addr, *size, 1);
868bb33f 3078 } else {
38bee5dc
SS
3079 RAMBlock *block;
3080
3081 QLIST_FOREACH(block, &ram_list.blocks, next) {
3082 if (addr - block->offset < block->length) {
3083 if (addr - block->offset + *size > block->length)
3084 *size = block->length - addr + block->offset;
3085 return block->host + (addr - block->offset);
3086 }
3087 }
3088
3089 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3090 abort();
38bee5dc
SS
3091 }
3092}
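/* Usage sketch (illustrative): a caller mapping guest RAM asks for the
 * length it wants and must honour the possibly smaller length returned:
 *
 *   ram_addr_t sz = len;
 *   void *host = qemu_ram_ptr_length(addr, &sz);  // sz may shrink to the
 *   memcpy(host, buf, sz);                        // end of the RAM block
 */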
3093
050a0ddf
AP
3094void qemu_put_ram_ptr(void *addr)
3095{
3096 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3097}
3098
e890261f 3099int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3100{
94a6b54f
PB
3101 RAMBlock *block;
3102 uint8_t *host = ptr;
3103
868bb33f 3104 if (xen_enabled()) {
e41d7c69 3105 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3106 return 0;
3107 }
3108
f471a17e 3109 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3110 /* This case happens when the block is not mapped. */
3111 if (block->host == NULL) {
3112 continue;
3113 }
f471a17e 3114 if (host - block->host < block->length) {
e890261f
MT
3115 *ram_addr = block->offset + (host - block->host);
3116 return 0;
f471a17e 3117 }
94a6b54f 3118 }
432d268c 3119
e890261f
MT
3120 return -1;
3121}
f471a17e 3122
e890261f
MT
3123/* Some of the softmmu routines need to translate from a host pointer
3124 (typically a TLB entry) back to a ram offset. */
3125ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3126{
3127 ram_addr_t ram_addr;
f471a17e 3128
e890261f
MT
3129 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3130 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3131 abort();
3132 }
3133 return ram_addr;
5579c7f3
PB
3134}
3135
0e0df1e2
AK
3136static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3137 unsigned size)
e18231a3
BS
3138{
3139#ifdef DEBUG_UNASSIGNED
3140 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3141#endif
5b450407 3142#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3143 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3144#endif
3145 return 0;
3146}
3147
0e0df1e2
AK
3148static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3149 uint64_t val, unsigned size)
e18231a3
BS
3150{
3151#ifdef DEBUG_UNASSIGNED
0e0df1e2 3152 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3153#endif
5b450407 3154#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3155 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3156#endif
33417e70
FB
3157}
3158
0e0df1e2
AK
3159static const MemoryRegionOps unassigned_mem_ops = {
3160 .read = unassigned_mem_read,
3161 .write = unassigned_mem_write,
3162 .endianness = DEVICE_NATIVE_ENDIAN,
3163};
e18231a3 3164
0e0df1e2
AK
3165static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3166 unsigned size)
e18231a3 3167{
0e0df1e2 3168 abort();
e18231a3
BS
3169}
3170
0e0df1e2
AK
3171static void error_mem_write(void *opaque, target_phys_addr_t addr,
3172 uint64_t value, unsigned size)
e18231a3 3173{
0e0df1e2 3174 abort();
33417e70
FB
3175}
3176
0e0df1e2
AK
3177static const MemoryRegionOps error_mem_ops = {
3178 .read = error_mem_read,
3179 .write = error_mem_write,
3180 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3181};
3182
0e0df1e2
AK
3183static const MemoryRegionOps rom_mem_ops = {
3184 .read = error_mem_read,
3185 .write = unassigned_mem_write,
3186 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3187};
3188
0e0df1e2
AK
3189static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3190 uint64_t val, unsigned size)
9fa3e853 3191{
3a7d929e 3192 int dirty_flags;
f7c11b53 3193 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3194 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3195#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3196 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3197 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3198#endif
3a7d929e 3199 }
0e0df1e2
AK
3200 switch (size) {
3201 case 1:
3202 stb_p(qemu_get_ram_ptr(ram_addr), val);
3203 break;
3204 case 2:
3205 stw_p(qemu_get_ram_ptr(ram_addr), val);
3206 break;
3207 case 4:
3208 stl_p(qemu_get_ram_ptr(ram_addr), val);
3209 break;
3210 default:
3211 abort();
3a7d929e 3212 }
f23db169 3213 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3214 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3215 /* we remove the notdirty callback only if the code has been
3216 flushed */
3217 if (dirty_flags == 0xff)
2e70f6ef 3218 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3219}
3220
0e0df1e2
AK
3221static const MemoryRegionOps notdirty_mem_ops = {
3222 .read = error_mem_read,
3223 .write = notdirty_mem_write,
3224 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3225};
3226
0f459d16 3227/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3228static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3229{
3230 CPUState *env = cpu_single_env;
06d55cc1
AL
3231 target_ulong pc, cs_base;
3232 TranslationBlock *tb;
0f459d16 3233 target_ulong vaddr;
a1d1bb31 3234 CPUWatchpoint *wp;
06d55cc1 3235 int cpu_flags;
0f459d16 3236
06d55cc1
AL
3237 if (env->watchpoint_hit) {
3238 /* We re-entered the check after replacing the TB. Now raise
3239 * the debug interrupt so that is will trigger after the
3240 * current instruction. */
3241 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3242 return;
3243 }
2e70f6ef 3244 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3245 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3246 if ((vaddr == (wp->vaddr & len_mask) ||
3247 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3248 wp->flags |= BP_WATCHPOINT_HIT;
3249 if (!env->watchpoint_hit) {
3250 env->watchpoint_hit = wp;
3251 tb = tb_find_pc(env->mem_io_pc);
3252 if (!tb) {
3253 cpu_abort(env, "check_watchpoint: could not find TB for "
3254 "pc=%p", (void *)env->mem_io_pc);
3255 }
618ba8e6 3256 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3257 tb_phys_invalidate(tb, -1);
3258 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3259 env->exception_index = EXCP_DEBUG;
3260 } else {
3261 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3262 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3263 }
3264 cpu_resume_from_signal(env, NULL);
06d55cc1 3265 }
6e140f28
AL
3266 } else {
3267 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3268 }
3269 }
3270}
3271
6658ffb8
PB
3272/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3273 so these check for a hit then pass through to the normal out-of-line
3274 phys routines. */
1ec9b909
AK
3275static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3276 unsigned size)
6658ffb8 3277{
1ec9b909
AK
3278 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3279 switch (size) {
3280 case 1: return ldub_phys(addr);
3281 case 2: return lduw_phys(addr);
3282 case 4: return ldl_phys(addr);
3283 default: abort();
3284 }
6658ffb8
PB
3285}
3286
1ec9b909
AK
3287static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3288 uint64_t val, unsigned size)
6658ffb8 3289{
1ec9b909
AK
3290 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3291 switch (size) {
67364150
MF
3292 case 1:
3293 stb_phys(addr, val);
3294 break;
3295 case 2:
3296 stw_phys(addr, val);
3297 break;
3298 case 4:
3299 stl_phys(addr, val);
3300 break;
1ec9b909
AK
3301 default: abort();
3302 }
6658ffb8
PB
3303}
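/* Note on the switch above: C cases fall through by default, so the break
 * after each st*_phys() call is required; without it a 1-byte watched store
 * would also execute stw_phys() and stl_phys() and clobber the bytes that
 * follow the watched location. */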
3304
1ec9b909
AK
3305static const MemoryRegionOps watch_mem_ops = {
3306 .read = watch_mem_read,
3307 .write = watch_mem_write,
3308 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3309};
6658ffb8 3310
70c68e44
AK
3311static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3312 unsigned len)
db7b5426 3313{
70c68e44 3314 subpage_t *mmio = opaque;
f6405247 3315 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3316#if defined(DEBUG_SUBPAGE)
3317 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3318 mmio, len, addr, idx);
3319#endif
db7b5426 3320
f6405247
RH
3321 addr += mmio->region_offset[idx];
3322 idx = mmio->sub_io_index[idx];
70c68e44 3323 return io_mem_read(idx, addr, len);
db7b5426
BS
3324}
3325
70c68e44
AK
3326static void subpage_write(void *opaque, target_phys_addr_t addr,
3327 uint64_t value, unsigned len)
db7b5426 3328{
70c68e44 3329 subpage_t *mmio = opaque;
f6405247 3330 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3331#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3332 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3333 " idx %d value %"PRIx64"\n",
f6405247 3334 __func__, mmio, len, addr, idx, value);
db7b5426 3335#endif
f6405247
RH
3336
3337 addr += mmio->region_offset[idx];
3338 idx = mmio->sub_io_index[idx];
70c68e44 3339 io_mem_write(idx, addr, value, len);
db7b5426
BS
3340}
3341
70c68e44
AK
3342static const MemoryRegionOps subpage_ops = {
3343 .read = subpage_read,
3344 .write = subpage_write,
3345 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3346};
3347
de712f94
AK
3348static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3349 unsigned size)
56384e8b
AF
3350{
3351 ram_addr_t raddr = addr;
3352 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3353 switch (size) {
3354 case 1: return ldub_p(ptr);
3355 case 2: return lduw_p(ptr);
3356 case 4: return ldl_p(ptr);
3357 default: abort();
3358 }
56384e8b
AF
3359}
3360
de712f94
AK
3361static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3362 uint64_t value, unsigned size)
56384e8b
AF
3363{
3364 ram_addr_t raddr = addr;
3365 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3366 switch (size) {
3367 case 1: return stb_p(ptr, value);
3368 case 2: return stw_p(ptr, value);
3369 case 4: return stl_p(ptr, value);
3370 default: abort();
3371 }
56384e8b
AF
3372}
3373
de712f94
AK
3374static const MemoryRegionOps subpage_ram_ops = {
3375 .read = subpage_ram_read,
3376 .write = subpage_ram_write,
3377 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3378};
3379
c227f099
AL
3380static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3381 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3382{
3383 int idx, eidx;
3384
3385 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3386 return -1;
3387 idx = SUBPAGE_IDX(start);
3388 eidx = SUBPAGE_IDX(end);
3389#if defined(DEBUG_SUBPAGE)
0bf9e31a 3390 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3391 mmio, start, end, idx, eidx, memory);
3392#endif
0e0df1e2 3393 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
de712f94 3394 memory = io_mem_subpage_ram.ram_addr;
56384e8b 3395 }
11c7ef0c 3396 memory &= IO_MEM_NB_ENTRIES - 1;
db7b5426 3397 for (; idx <= eidx; idx++) {
f6405247
RH
3398 mmio->sub_io_index[idx] = memory;
3399 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3400 }
3401
3402 return 0;
3403}
3404
f6405247
RH
3405static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3406 ram_addr_t orig_memory,
3407 ram_addr_t region_offset)
db7b5426 3408{
c227f099 3409 subpage_t *mmio;
db7b5426
BS
3410 int subpage_memory;
3411
7267c094 3412 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3413
3414 mmio->base = base;
70c68e44
AK
3415 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3416 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3417 mmio->iomem.subpage = true;
70c68e44 3418 subpage_memory = mmio->iomem.ram_addr;
db7b5426 3419#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3420 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3421 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3422#endif
b3b00c78 3423 *phys = subpage_memory;
f6405247 3424 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3425
3426 return mmio;
3427}
3428
88715657
AL
3429static int get_free_io_mem_idx(void)
3430{
3431 int i;
3432
3433 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3434 if (!io_mem_used[i]) {
3435 io_mem_used[i] = 1;
3436 return i;
3437 }
c6703b47 3438 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3439 return -1;
3440}
3441
33417e70
FB
3442/* mem_read and mem_write are arrays of functions containing the
3443 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3444 2). Functions can be omitted with a NULL function pointer.
3ee89922 3445 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3446 modified. If it is zero, a new io zone is allocated. The return
3447 value can be used with cpu_register_physical_memory(). (-1) is
3448 returned if error. */
a621f38d 3449static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
33417e70 3450{
33417e70 3451 if (io_index <= 0) {
88715657
AL
3452 io_index = get_free_io_mem_idx();
3453 if (io_index == -1)
3454 return io_index;
33417e70
FB
3455 } else {
3456 if (io_index >= IO_MEM_NB_ENTRIES)
3457 return -1;
3458 }
b5ff1b31 3459
a621f38d 3460 io_mem_region[io_index] = mr;
f6405247 3461
11c7ef0c 3462 return io_index;
33417e70 3463}
61382a50 3464
a621f38d 3465int cpu_register_io_memory(MemoryRegion *mr)
1eed09cb 3466{
a621f38d 3467 return cpu_register_io_memory_fixed(0, mr);
1eed09cb
AK
3468}
3469
11c7ef0c 3470void cpu_unregister_io_memory(int io_index)
88715657 3471{
a621f38d 3472 io_mem_region[io_index] = NULL;
88715657
AL
3473 io_mem_used[io_index] = 0;
3474}
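Register/unregister above are the transitional hooks between the MemoryRegion API and the old io_mem_*[] tables: the caller gets back a small integer index, and the physical-memory access code later in this file recovers that index from the low bits of a page's phys_offset. A hedged sketch of how the hypothetical mydev region from the earlier example could be attached (error handling minimal, names illustrative):

static int example_attach_iomem(MemoryRegion *mr)
{
    int io_index = cpu_register_io_memory(mr);

    if (io_index < 0) {
        return -1;   /* all IO_MEM_NB_ENTRIES slots are already in use */
    }
    /* ... the index would then be handed to cpu_register_physical_memory();
     *     cpu_unregister_io_memory(io_index) releases the slot again ... */
    return io_index;
}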
3475
e9179ce1
AK
3476static void io_mem_init(void)
3477{
3478 int i;
3479
0e0df1e2
AK
3480 /* Must be first: */
3481 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3482 assert(io_mem_ram.ram_addr == 0);
3483 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3484 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3485 "unassigned", UINT64_MAX);
3486 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3487 "notdirty", UINT64_MAX);
de712f94
AK
3488 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3489 "subpage-ram", UINT64_MAX);
e9179ce1
AK
3490 for (i=0; i<5; i++)
3491 io_mem_used[i] = 1;
3492
1ec9b909
AK
3493 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3494 "watch", UINT64_MAX);
e9179ce1
AK
3495}
3496
62152b8a
AK
3497static void memory_map_init(void)
3498{
7267c094 3499 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3500 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3501 set_system_memory_map(system_memory);
309cb471 3502
7267c094 3503 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3504 memory_region_init(system_io, "io", 65536);
3505 set_system_io_map(system_io);
62152b8a
AK
3506}
3507
3508MemoryRegion *get_system_memory(void)
3509{
3510 return system_memory;
3511}
3512
309cb471
AK
3513MemoryRegion *get_system_io(void)
3514{
3515 return system_io;
3516}
3517
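get_system_memory() is how board and device code obtains the root MemoryRegion that all the physical-address accessors further down resolve against. As a sketch, mapping an I/O region into the guest's physical address space would look roughly like the following; it assumes the usual memory_region_add_subregion(parent, offset, child) helper from memory.h, and the 0xfe000000 base address is an arbitrary illustrative choice.

static void example_map_mmio(MemoryRegion *iomem)
{
    /* Make the device region guest-visible at a fixed physical base. */
    memory_region_add_subregion(get_system_memory(), 0xfe000000, iomem);
}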
e2eef170
PB
3518#endif /* !defined(CONFIG_USER_ONLY) */
3519
13eb76e0
FB
3520/* physical memory access (slow version, mainly for debug) */
3521#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3522int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3523 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3524{
3525 int l, flags;
3526 target_ulong page;
53a5960a 3527 void * p;
13eb76e0
FB
3528
3529 while (len > 0) {
3530 page = addr & TARGET_PAGE_MASK;
3531 l = (page + TARGET_PAGE_SIZE) - addr;
3532 if (l > len)
3533 l = len;
3534 flags = page_get_flags(page);
3535 if (!(flags & PAGE_VALID))
a68fe89c 3536 return -1;
13eb76e0
FB
3537 if (is_write) {
3538 if (!(flags & PAGE_WRITE))
a68fe89c 3539 return -1;
579a97f7 3540 /* XXX: this code should not depend on lock_user */
72fb7daa 3541 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3542 return -1;
72fb7daa
AJ
3543 memcpy(p, buf, l);
3544 unlock_user(p, addr, l);
13eb76e0
FB
3545 } else {
3546 if (!(flags & PAGE_READ))
a68fe89c 3547 return -1;
579a97f7 3548 /* XXX: this code should not depend on lock_user */
72fb7daa 3549 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3550 return -1;
72fb7daa 3551 memcpy(buf, p, l);
5b257578 3552 unlock_user(p, addr, 0);
13eb76e0
FB
3553 }
3554 len -= l;
3555 buf += l;
3556 addr += l;
3557 }
a68fe89c 3558 return 0;
13eb76e0 3559}
8df1cd07 3560
13eb76e0 3561#else
c227f099 3562void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3563 int len, int is_write)
3564{
3565 int l, io_index;
3566 uint8_t *ptr;
3567 uint32_t val;
c227f099 3568 target_phys_addr_t page;
8ca5692d 3569 ram_addr_t pd;
f1f6e3b8 3570 PhysPageDesc p;
3b46e624 3571
13eb76e0
FB
3572 while (len > 0) {
3573 page = addr & TARGET_PAGE_MASK;
3574 l = (page + TARGET_PAGE_SIZE) - addr;
3575 if (l > len)
3576 l = len;
92e873b9 3577 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3578 pd = p.phys_offset;
3b46e624 3579
13eb76e0 3580 if (is_write) {
0e0df1e2 3581 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
f1f6e3b8 3582 target_phys_addr_t addr1;
11c7ef0c 3583 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3584 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3585 /* XXX: could force cpu_single_env to NULL to avoid
3586 potential bugs */
6c2934db 3587 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3588 /* 32 bit write access */
c27004ec 3589 val = ldl_p(buf);
acbbec5d 3590 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3591 l = 4;
6c2934db 3592 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3593 /* 16 bit write access */
c27004ec 3594 val = lduw_p(buf);
acbbec5d 3595 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3596 l = 2;
3597 } else {
1c213d19 3598 /* 8 bit write access */
c27004ec 3599 val = ldub_p(buf);
acbbec5d 3600 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3601 l = 1;
3602 }
3603 } else {
8ca5692d 3604 ram_addr_t addr1;
b448f2f3 3605 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3606 /* RAM case */
5579c7f3 3607 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3608 memcpy(ptr, buf, l);
3a7d929e
FB
3609 if (!cpu_physical_memory_is_dirty(addr1)) {
3610 /* invalidate code */
3611 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3612 /* set dirty bit */
f7c11b53
YT
3613 cpu_physical_memory_set_dirty_flags(
3614 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3615 }
050a0ddf 3616 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3617 }
3618 } else {
1d393fa2 3619 if (!is_ram_rom_romd(pd)) {
f1f6e3b8 3620 target_phys_addr_t addr1;
13eb76e0 3621 /* I/O case */
11c7ef0c 3622 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3623 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3624 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3625 /* 32 bit read access */
acbbec5d 3626 val = io_mem_read(io_index, addr1, 4);
c27004ec 3627 stl_p(buf, val);
13eb76e0 3628 l = 4;
6c2934db 3629 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3630 /* 16 bit read access */
acbbec5d 3631 val = io_mem_read(io_index, addr1, 2);
c27004ec 3632 stw_p(buf, val);
13eb76e0
FB
3633 l = 2;
3634 } else {
1c213d19 3635 /* 8 bit read access */
acbbec5d 3636 val = io_mem_read(io_index, addr1, 1);
c27004ec 3637 stb_p(buf, val);
13eb76e0
FB
3638 l = 1;
3639 }
3640 } else {
3641 /* RAM case */
050a0ddf
AP
3642 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3643 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3644 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3645 }
3646 }
3647 len -= l;
3648 buf += l;
3649 addr += l;
3650 }
3651}
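cpu_physical_memory_rw() above is the generic slow path: it splits the transfer at page boundaries and dispatches each fragment either through io_mem_read()/io_mem_write() or to a RAM memcpy with dirty tracking and TB invalidation. A small usage sketch (the guest physical address and payload are made up; the example_ function is illustrative, not a QEMU API):

static void example_poke_guest(target_phys_addr_t gpa)
{
    uint8_t magic[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t check[4];

    /* Write four bytes into guest physical memory, then read them back. */
    cpu_physical_memory_rw(gpa, magic, sizeof(magic), 1);
    cpu_physical_memory_rw(gpa, check, sizeof(check), 0);
}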
8df1cd07 3652
d0ecd2aa 3653/* used for ROM loading: can write in RAM and ROM */
c227f099 3654void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3655 const uint8_t *buf, int len)
3656{
3657 int l;
3658 uint8_t *ptr;
c227f099 3659 target_phys_addr_t page;
d0ecd2aa 3660 unsigned long pd;
f1f6e3b8 3661 PhysPageDesc p;
3b46e624 3662
d0ecd2aa
FB
3663 while (len > 0) {
3664 page = addr & TARGET_PAGE_MASK;
3665 l = (page + TARGET_PAGE_SIZE) - addr;
3666 if (l > len)
3667 l = len;
3668 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3669 pd = p.phys_offset;
3b46e624 3670
1d393fa2 3671 if (!is_ram_rom_romd(pd)) {
d0ecd2aa
FB
3672 /* do nothing */
3673 } else {
3674 unsigned long addr1;
3675 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3676 /* ROM/RAM case */
5579c7f3 3677 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3678 memcpy(ptr, buf, l);
050a0ddf 3679 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3680 }
3681 len -= l;
3682 buf += l;
3683 addr += l;
3684 }
3685}
3686
6d16c2f8
AL
3687typedef struct {
3688 void *buffer;
c227f099
AL
3689 target_phys_addr_t addr;
3690 target_phys_addr_t len;
6d16c2f8
AL
3691} BounceBuffer;
3692
3693static BounceBuffer bounce;
3694
ba223c29
AL
3695typedef struct MapClient {
3696 void *opaque;
3697 void (*callback)(void *opaque);
72cf2d4f 3698 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3699} MapClient;
3700
72cf2d4f
BS
3701static QLIST_HEAD(map_client_list, MapClient) map_client_list
3702 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3703
3704void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3705{
7267c094 3706 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3707
3708 client->opaque = opaque;
3709 client->callback = callback;
72cf2d4f 3710 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3711 return client;
3712}
3713
3714void cpu_unregister_map_client(void *_client)
3715{
3716 MapClient *client = (MapClient *)_client;
3717
72cf2d4f 3718 QLIST_REMOVE(client, link);
7267c094 3719 g_free(client);
ba223c29
AL
3720}
3721
3722static void cpu_notify_map_clients(void)
3723{
3724 MapClient *client;
3725
72cf2d4f
BS
3726 while (!QLIST_EMPTY(&map_client_list)) {
3727 client = QLIST_FIRST(&map_client_list);
ba223c29 3728 client->callback(client->opaque);
34d5e948 3729 cpu_unregister_map_client(client);
ba223c29
AL
3730 }
3731}
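The map-client list gives DMA users a way to be re-driven once the single bounce buffer frees up: cpu_physical_memory_map() below can fail transiently, and cpu_notify_map_clients() fires each registered callback once and then drops it. A sketch of how a caller might hook in (MyDMAState and dma_retry_cb are illustrative names, not QEMU APIs):

typedef struct MyDMAState {
    target_phys_addr_t addr;
    target_phys_addr_t len;
} MyDMAState;

static void dma_retry_cb(void *opaque)
{
    MyDMAState *s = opaque;
    target_phys_addr_t plen = s->len;
    void *p;

    /* The bounce buffer may be free by now; retry the mapping. */
    p = cpu_physical_memory_map(s->addr, &plen, 1);
    if (p) {
        /* ... do the transfer, then cpu_physical_memory_unmap() ... */
    }
}

/* After a failed map, the device registers itself and waits:
 *     cpu_register_map_client(s, dma_retry_cb);
 */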
3732
6d16c2f8
AL
3733/* Map a physical memory region into a host virtual address.
3734 * May map a subset of the requested range, given by and returned in *plen.
3735 * May return NULL if resources needed to perform the mapping are exhausted.
3736 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3737 * Use cpu_register_map_client() to know when retrying the map operation is
3738 * likely to succeed.
6d16c2f8 3739 */
c227f099
AL
3740void *cpu_physical_memory_map(target_phys_addr_t addr,
3741 target_phys_addr_t *plen,
6d16c2f8
AL
3742 int is_write)
3743{
c227f099 3744 target_phys_addr_t len = *plen;
38bee5dc 3745 target_phys_addr_t todo = 0;
6d16c2f8 3746 int l;
c227f099 3747 target_phys_addr_t page;
6d16c2f8 3748 unsigned long pd;
f1f6e3b8 3749 PhysPageDesc p;
f15fbc4b 3750 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3751 ram_addr_t rlen;
3752 void *ret;
6d16c2f8
AL
3753
3754 while (len > 0) {
3755 page = addr & TARGET_PAGE_MASK;
3756 l = (page + TARGET_PAGE_SIZE) - addr;
3757 if (l > len)
3758 l = len;
3759 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3760 pd = p.phys_offset;
6d16c2f8 3761
0e0df1e2 3762 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
38bee5dc 3763 if (todo || bounce.buffer) {
6d16c2f8
AL
3764 break;
3765 }
3766 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3767 bounce.addr = addr;
3768 bounce.len = l;
3769 if (!is_write) {
54f7b4a3 3770 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3771 }
38bee5dc
SS
3772
3773 *plen = l;
3774 return bounce.buffer;
6d16c2f8 3775 }
8ab934f9
SS
3776 if (!todo) {
3777 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3778 }
6d16c2f8
AL
3779
3780 len -= l;
3781 addr += l;
38bee5dc 3782 todo += l;
6d16c2f8 3783 }
8ab934f9
SS
3784 rlen = todo;
3785 ret = qemu_ram_ptr_length(raddr, &rlen);
3786 *plen = rlen;
3787 return ret;
6d16c2f8
AL
3788}
3789
3790/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3791 * Will also mark the memory as dirty if is_write == 1. access_len gives
3792 * the amount of memory that was actually read or written by the caller.
3793 */
c227f099
AL
3794void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3795 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3796{
3797 if (buffer != bounce.buffer) {
3798 if (is_write) {
e890261f 3799 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3800 while (access_len) {
3801 unsigned l;
3802 l = TARGET_PAGE_SIZE;
3803 if (l > access_len)
3804 l = access_len;
3805 if (!cpu_physical_memory_is_dirty(addr1)) {
3806 /* invalidate code */
3807 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3808 /* set dirty bit */
f7c11b53
YT
3809 cpu_physical_memory_set_dirty_flags(
3810 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3811 }
3812 addr1 += l;
3813 access_len -= l;
3814 }
3815 }
868bb33f 3816 if (xen_enabled()) {
e41d7c69 3817 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3818 }
6d16c2f8
AL
3819 return;
3820 }
3821 if (is_write) {
3822 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3823 }
f8a83245 3824 qemu_vfree(bounce.buffer);
6d16c2f8 3825 bounce.buffer = NULL;
ba223c29 3826 cpu_notify_map_clients();
6d16c2f8 3827}
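Together, map and unmap implement the zero-copy DMA path: map as much of the guest range as possible (falling back to the single bounce buffer for non-RAM pages), operate on the returned host pointer, then unmap with the length actually touched so the dirty bitmap and TB invalidation stay correct. A hedged sketch of the canonical calling pattern (error handling trimmed, example_ name illustrative):

static int example_dma_write(target_phys_addr_t gpa, const uint8_t *data,
                             target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

        if (!host) {
            /* Bounce buffer busy: a real device would call
               cpu_register_map_client() and retry from the callback. */
            return -1;
        }
        memcpy(host, data, plen);
        /* Report how much was really written so dirty bits get set. */
        cpu_physical_memory_unmap(host, plen, 1, plen);

        gpa += plen;
        data += plen;
        size -= plen;
    }
    return 0;
}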
d0ecd2aa 3828
8df1cd07 3829/* warning: addr must be aligned */
1e78bcc1
AG
3830static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3831 enum device_endian endian)
8df1cd07
FB
3832{
3833 int io_index;
3834 uint8_t *ptr;
3835 uint32_t val;
3836 unsigned long pd;
f1f6e3b8 3837 PhysPageDesc p;
8df1cd07
FB
3838
3839 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3840 pd = p.phys_offset;
3b46e624 3841
1d393fa2 3842 if (!is_ram_rom_romd(pd)) {
8df1cd07 3843 /* I/O case */
11c7ef0c 3844 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3845 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3846 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3847#if defined(TARGET_WORDS_BIGENDIAN)
3848 if (endian == DEVICE_LITTLE_ENDIAN) {
3849 val = bswap32(val);
3850 }
3851#else
3852 if (endian == DEVICE_BIG_ENDIAN) {
3853 val = bswap32(val);
3854 }
3855#endif
8df1cd07
FB
3856 } else {
3857 /* RAM case */
5579c7f3 3858 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3859 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3860 switch (endian) {
3861 case DEVICE_LITTLE_ENDIAN:
3862 val = ldl_le_p(ptr);
3863 break;
3864 case DEVICE_BIG_ENDIAN:
3865 val = ldl_be_p(ptr);
3866 break;
3867 default:
3868 val = ldl_p(ptr);
3869 break;
3870 }
8df1cd07
FB
3871 }
3872 return val;
3873}
3874
1e78bcc1
AG
3875uint32_t ldl_phys(target_phys_addr_t addr)
3876{
3877 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3878}
3879
3880uint32_t ldl_le_phys(target_phys_addr_t addr)
3881{
3882 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3883}
3884
3885uint32_t ldl_be_phys(target_phys_addr_t addr)
3886{
3887 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3888}
3889
84b7b8e7 3890/* warning: addr must be aligned */
1e78bcc1
AG
3891static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3892 enum device_endian endian)
84b7b8e7
FB
3893{
3894 int io_index;
3895 uint8_t *ptr;
3896 uint64_t val;
3897 unsigned long pd;
f1f6e3b8 3898 PhysPageDesc p;
84b7b8e7
FB
3899
3900 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3901 pd = p.phys_offset;
3b46e624 3902
1d393fa2 3903 if (!is_ram_rom_romd(pd)) {
84b7b8e7 3904 /* I/O case */
11c7ef0c 3905 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3906 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
3907
3908 /* XXX This is broken when device endian != cpu endian.
3909 Fix and add "endian" variable check */
84b7b8e7 3910#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
3911 val = io_mem_read(io_index, addr, 4) << 32;
3912 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 3913#else
acbbec5d
AK
3914 val = io_mem_read(io_index, addr, 4);
3915 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
3916#endif
3917 } else {
3918 /* RAM case */
5579c7f3 3919 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 3920 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3921 switch (endian) {
3922 case DEVICE_LITTLE_ENDIAN:
3923 val = ldq_le_p(ptr);
3924 break;
3925 case DEVICE_BIG_ENDIAN:
3926 val = ldq_be_p(ptr);
3927 break;
3928 default:
3929 val = ldq_p(ptr);
3930 break;
3931 }
84b7b8e7
FB
3932 }
3933 return val;
3934}
3935
1e78bcc1
AG
3936uint64_t ldq_phys(target_phys_addr_t addr)
3937{
3938 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3939}
3940
3941uint64_t ldq_le_phys(target_phys_addr_t addr)
3942{
3943 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3944}
3945
3946uint64_t ldq_be_phys(target_phys_addr_t addr)
3947{
3948 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3949}
3950
aab33094 3951/* XXX: optimize */
c227f099 3952uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3953{
3954 uint8_t val;
3955 cpu_physical_memory_read(addr, &val, 1);
3956 return val;
3957}
3958
733f0b02 3959/* warning: addr must be aligned */
1e78bcc1
AG
3960static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3961 enum device_endian endian)
aab33094 3962{
733f0b02
MT
3963 int io_index;
3964 uint8_t *ptr;
3965 uint64_t val;
3966 unsigned long pd;
f1f6e3b8 3967 PhysPageDesc p;
733f0b02
MT
3968
3969 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3970 pd = p.phys_offset;
733f0b02 3971
1d393fa2 3972 if (!is_ram_rom_romd(pd)) {
733f0b02 3973 /* I/O case */
11c7ef0c 3974 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3975 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3976 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
3977#if defined(TARGET_WORDS_BIGENDIAN)
3978 if (endian == DEVICE_LITTLE_ENDIAN) {
3979 val = bswap16(val);
3980 }
3981#else
3982 if (endian == DEVICE_BIG_ENDIAN) {
3983 val = bswap16(val);
3984 }
3985#endif
733f0b02
MT
3986 } else {
3987 /* RAM case */
3988 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3989 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3990 switch (endian) {
3991 case DEVICE_LITTLE_ENDIAN:
3992 val = lduw_le_p(ptr);
3993 break;
3994 case DEVICE_BIG_ENDIAN:
3995 val = lduw_be_p(ptr);
3996 break;
3997 default:
3998 val = lduw_p(ptr);
3999 break;
4000 }
733f0b02
MT
4001 }
4002 return val;
aab33094
FB
4003}
4004
1e78bcc1
AG
4005uint32_t lduw_phys(target_phys_addr_t addr)
4006{
4007 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4008}
4009
4010uint32_t lduw_le_phys(target_phys_addr_t addr)
4011{
4012 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4013}
4014
4015uint32_t lduw_be_phys(target_phys_addr_t addr)
4016{
4017 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4018}
4019
8df1cd07
FB
4020/* warning: addr must be aligned. The ram page is not marked as dirty
4021 and the code inside is not invalidated. It is useful if the dirty
4022 bits are used to track modified PTEs */
c227f099 4023void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4024{
4025 int io_index;
4026 uint8_t *ptr;
4027 unsigned long pd;
f1f6e3b8 4028 PhysPageDesc p;
8df1cd07
FB
4029
4030 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4031 pd = p.phys_offset;
3b46e624 4032
0e0df1e2 4033 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4034 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4035 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4036 io_mem_write(io_index, addr, val, 4);
8df1cd07 4037 } else {
74576198 4038 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4039 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4040 stl_p(ptr, val);
74576198
AL
4041
4042 if (unlikely(in_migration)) {
4043 if (!cpu_physical_memory_is_dirty(addr1)) {
4044 /* invalidate code */
4045 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4046 /* set dirty bit */
f7c11b53
YT
4047 cpu_physical_memory_set_dirty_flags(
4048 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4049 }
4050 }
8df1cd07
FB
4051 }
4052}
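The notdirty store above exists for one pattern: a target MMU helper updating a page-table entry during a walk, where dirtying the page (and invalidating translated code on it) for that bookkeeping write would be wasted work. A sketch of the intended use; the 0x20/0x40 accessed/dirty bits are an illustrative PTE layout, not any particular target's format:

static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    /* Set the accessed bit, and the dirty bit on writes, without marking
       the RAM page dirty for TB-invalidation purposes. */
    pte |= 0x20;
    if (is_write) {
        pte |= 0x40;
    }
    stl_phys_notdirty(pte_addr, pte);
}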
4053
c227f099 4054void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4055{
4056 int io_index;
4057 uint8_t *ptr;
4058 unsigned long pd;
f1f6e3b8 4059 PhysPageDesc p;
bc98a7ef
JM
4060
4061 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4062 pd = p.phys_offset;
3b46e624 4063
0e0df1e2 4064 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4065 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4066 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4067#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4068 io_mem_write(io_index, addr, val >> 32, 4);
4069 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4070#else
acbbec5d
AK
4071 io_mem_write(io_index, addr, (uint32_t)val, 4);
4072 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4073#endif
4074 } else {
5579c7f3 4075 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4076 (addr & ~TARGET_PAGE_MASK);
4077 stq_p(ptr, val);
4078 }
4079}
4080
8df1cd07 4081/* warning: addr must be aligned */
1e78bcc1
AG
4082static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4083 enum device_endian endian)
8df1cd07
FB
4084{
4085 int io_index;
4086 uint8_t *ptr;
4087 unsigned long pd;
f1f6e3b8 4088 PhysPageDesc p;
8df1cd07
FB
4089
4090 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4091 pd = p.phys_offset;
3b46e624 4092
0e0df1e2 4093 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4094 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4095 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4096#if defined(TARGET_WORDS_BIGENDIAN)
4097 if (endian == DEVICE_LITTLE_ENDIAN) {
4098 val = bswap32(val);
4099 }
4100#else
4101 if (endian == DEVICE_BIG_ENDIAN) {
4102 val = bswap32(val);
4103 }
4104#endif
acbbec5d 4105 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4106 } else {
4107 unsigned long addr1;
4108 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4109 /* RAM case */
5579c7f3 4110 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4111 switch (endian) {
4112 case DEVICE_LITTLE_ENDIAN:
4113 stl_le_p(ptr, val);
4114 break;
4115 case DEVICE_BIG_ENDIAN:
4116 stl_be_p(ptr, val);
4117 break;
4118 default:
4119 stl_p(ptr, val);
4120 break;
4121 }
3a7d929e
FB
4122 if (!cpu_physical_memory_is_dirty(addr1)) {
4123 /* invalidate code */
4124 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4125 /* set dirty bit */
f7c11b53
YT
4126 cpu_physical_memory_set_dirty_flags(addr1,
4127 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4128 }
8df1cd07
FB
4129 }
4130}
4131
1e78bcc1
AG
4132void stl_phys(target_phys_addr_t addr, uint32_t val)
4133{
4134 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4135}
4136
4137void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4138{
4139 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4140}
4141
4142void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4143{
4144 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4145}
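The _le/_be variants pin the byte order of the access regardless of TARGET_WORDS_BIGENDIAN, which is what device models with a fixed register endianness want. For illustration (the register address is an arbitrary made-up MMIO location):

static void example_le_register_rmw(target_phys_addr_t reg_gpa)
{
    /* Read-modify-write a 32-bit little-endian device register. */
    uint32_t v = ldl_le_phys(reg_gpa);
    v |= 0x1;                 /* set an enable bit */
    stl_le_phys(reg_gpa, v);
}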
4146
aab33094 4147/* XXX: optimize */
c227f099 4148void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4149{
4150 uint8_t v = val;
4151 cpu_physical_memory_write(addr, &v, 1);
4152}
4153
733f0b02 4154/* warning: addr must be aligned */
1e78bcc1
AG
4155static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4156 enum device_endian endian)
aab33094 4157{
733f0b02
MT
4158 int io_index;
4159 uint8_t *ptr;
4160 unsigned long pd;
f1f6e3b8 4161 PhysPageDesc p;
733f0b02
MT
4162
4163 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4164 pd = p.phys_offset;
733f0b02 4165
0e0df1e2 4166 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4167 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4168 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4169#if defined(TARGET_WORDS_BIGENDIAN)
4170 if (endian == DEVICE_LITTLE_ENDIAN) {
4171 val = bswap16(val);
4172 }
4173#else
4174 if (endian == DEVICE_BIG_ENDIAN) {
4175 val = bswap16(val);
4176 }
4177#endif
acbbec5d 4178 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4179 } else {
4180 unsigned long addr1;
4181 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4182 /* RAM case */
4183 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4184 switch (endian) {
4185 case DEVICE_LITTLE_ENDIAN:
4186 stw_le_p(ptr, val);
4187 break;
4188 case DEVICE_BIG_ENDIAN:
4189 stw_be_p(ptr, val);
4190 break;
4191 default:
4192 stw_p(ptr, val);
4193 break;
4194 }
733f0b02
MT
4195 if (!cpu_physical_memory_is_dirty(addr1)) {
4196 /* invalidate code */
4197 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4198 /* set dirty bit */
4199 cpu_physical_memory_set_dirty_flags(addr1,
4200 (0xff & ~CODE_DIRTY_FLAG));
4201 }
4202 }
aab33094
FB
4203}
4204
1e78bcc1
AG
4205void stw_phys(target_phys_addr_t addr, uint32_t val)
4206{
4207 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4208}
4209
4210void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4211{
4212 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4213}
4214
4215void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4216{
4217 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4218}
4219
aab33094 4220/* XXX: optimize */
c227f099 4221void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4222{
4223 val = tswap64(val);
71d2b725 4224 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4225}
4226
1e78bcc1
AG
4227void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4228{
4229 val = cpu_to_le64(val);
4230 cpu_physical_memory_write(addr, &val, 8);
4231}
4232
4233void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4234{
4235 val = cpu_to_be64(val);
4236 cpu_physical_memory_write(addr, &val, 8);
4237}
4238
5e2972fd 4239/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4240int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4241 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4242{
4243 int l;
c227f099 4244 target_phys_addr_t phys_addr;
9b3c35e0 4245 target_ulong page;
13eb76e0
FB
4246
4247 while (len > 0) {
4248 page = addr & TARGET_PAGE_MASK;
4249 phys_addr = cpu_get_phys_page_debug(env, page);
4250 /* if no physical page mapped, return an error */
4251 if (phys_addr == -1)
4252 return -1;
4253 l = (page + TARGET_PAGE_SIZE) - addr;
4254 if (l > len)
4255 l = len;
5e2972fd 4256 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4257 if (is_write)
4258 cpu_physical_memory_write_rom(phys_addr, buf, l);
4259 else
5e2972fd 4260 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4261 len -= l;
4262 buf += l;
4263 addr += l;
4264 }
4265 return 0;
4266}
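cpu_memory_rw_debug() is the entry point debuggers such as the gdb stub use: it translates one page at a time with cpu_get_phys_page_debug(), so it works on guest virtual addresses and can even patch ROM when writing breakpoints. A sketch of a debugger-style read (the example_ wrapper is illustrative):

static int example_debug_read_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;            /* no mapping for this virtual address */
    }
    *out = ldl_p(buf);        /* interpret the bytes in target byte order */
    return 0;
}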
a68fe89c 4267#endif
13eb76e0 4268
2e70f6ef
PB
4269/* in deterministic execution mode, instructions doing device I/Os
4270 must be at the end of the TB */
4271void cpu_io_recompile(CPUState *env, void *retaddr)
4272{
4273 TranslationBlock *tb;
4274 uint32_t n, cflags;
4275 target_ulong pc, cs_base;
4276 uint64_t flags;
4277
4278 tb = tb_find_pc((unsigned long)retaddr);
4279 if (!tb) {
4280 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4281 retaddr);
4282 }
4283 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4284 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4285 /* Calculate how many instructions had been executed before the fault
bf20dc07 4286 occurred. */
2e70f6ef
PB
4287 n = n - env->icount_decr.u16.low;
4288 /* Generate a new TB ending on the I/O insn. */
4289 n++;
4290 /* On MIPS and SH, delay slot instructions can only be restarted if
4291 they were already the first instruction in the TB. If this is not
bf20dc07 4292 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4293 branch. */
4294#if defined(TARGET_MIPS)
4295 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4296 env->active_tc.PC -= 4;
4297 env->icount_decr.u16.low++;
4298 env->hflags &= ~MIPS_HFLAG_BMASK;
4299 }
4300#elif defined(TARGET_SH4)
4301 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4302 && n > 1) {
4303 env->pc -= 2;
4304 env->icount_decr.u16.low++;
4305 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4306 }
4307#endif
4308 /* This should never happen. */
4309 if (n > CF_COUNT_MASK)
4310 cpu_abort(env, "TB too big during recompile");
4311
4312 cflags = n | CF_LAST_IO;
4313 pc = tb->pc;
4314 cs_base = tb->cs_base;
4315 flags = tb->flags;
4316 tb_phys_invalidate(tb, -1);
4317 /* FIXME: In theory this could raise an exception. In practice
4318 we have already translated the block once so it's probably ok. */
4319 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4320 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4321 the first in the TB) then we end up generating a whole new TB and
4322 repeating the fault, which is horribly inefficient.
4323 Better would be to execute just this insn uncached, or generate a
4324 second new TB. */
4325 cpu_resume_from_signal(env, NULL);
4326}
4327
b3755a91
PB
4328#if !defined(CONFIG_USER_ONLY)
4329
055403b2 4330void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4331{
4332 int i, target_code_size, max_target_code_size;
4333 int direct_jmp_count, direct_jmp2_count, cross_page;
4334 TranslationBlock *tb;
3b46e624 4335
e3db7226
FB
4336 target_code_size = 0;
4337 max_target_code_size = 0;
4338 cross_page = 0;
4339 direct_jmp_count = 0;
4340 direct_jmp2_count = 0;
4341 for(i = 0; i < nb_tbs; i++) {
4342 tb = &tbs[i];
4343 target_code_size += tb->size;
4344 if (tb->size > max_target_code_size)
4345 max_target_code_size = tb->size;
4346 if (tb->page_addr[1] != -1)
4347 cross_page++;
4348 if (tb->tb_next_offset[0] != 0xffff) {
4349 direct_jmp_count++;
4350 if (tb->tb_next_offset[1] != 0xffff) {
4351 direct_jmp2_count++;
4352 }
4353 }
4354 }
4355 /* XXX: avoid using doubles ? */
57fec1fe 4356 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4357 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4358 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4359 cpu_fprintf(f, "TB count %d/%d\n",
4360 nb_tbs, code_gen_max_blocks);
5fafdf24 4361 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4362 nb_tbs ? target_code_size / nb_tbs : 0,
4363 max_target_code_size);
055403b2 4364 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4365 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4366 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4367 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4368 cross_page,
e3db7226
FB
4369 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4370 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4371 direct_jmp_count,
e3db7226
FB
4372 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4373 direct_jmp2_count,
4374 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4375 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4376 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4377 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4378 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4379 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4380}
4381
d39e8222
AK
4382/* NOTE: this function can trigger an exception */
4383/* NOTE2: the returned address is not exactly the physical address: it
4384 is the offset relative to phys_ram_base */
4385tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4386{
4387 int mmu_idx, page_index, pd;
4388 void *p;
4389
4390 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4391 mmu_idx = cpu_mmu_index(env1);
4392 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4393 (addr & TARGET_PAGE_MASK))) {
4394 ldub_code(addr);
4395 }
4396 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
0e0df1e2 4397 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
75c578dc 4398 && !is_romd(pd)) {
d39e8222
AK
4399#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4400 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4401#else
4402 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4403#endif
4404 }
4405 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4406 return qemu_ram_addr_from_host_nofail(p);
4407}
4408
82afa586
BH
4409/*
4410 * A helper function for the _utterly broken_ virtio device model to find out if
4411 * it's running on a big endian machine. Don't do this at home kids!
4412 */
4413bool virtio_is_big_endian(void);
4414bool virtio_is_big_endian(void)
4415{
4416#if defined(TARGET_WORDS_BIGENDIAN)
4417 return true;
4418#else
4419 return false;
4420#endif
4421}
4422
61382a50 4423#define MMUSUFFIX _cmmu
3917149d 4424#undef GETPC
61382a50
FB
4425#define GETPC() NULL
4426#define env cpu_single_env
b769d8fe 4427#define SOFTMMU_CODE_ACCESS
61382a50
FB
4428
4429#define SHIFT 0
4430#include "softmmu_template.h"
4431
4432#define SHIFT 1
4433#include "softmmu_template.h"
4434
4435#define SHIFT 2
4436#include "softmmu_template.h"
4437
4438#define SHIFT 3
4439#include "softmmu_template.h"
4440
4441#undef env
4442
4443#endif