54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c 20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c 24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a 36#include "memory.h"
37#include "exec-memory.h"
53a5960a 38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1 40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c 55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
67d95c15 60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
fd6ce8f6 67
68/* make various TB consistency checks */
5fafdf24 69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4 75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853
FB
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468
BS
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b 92 section close to the code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
f8e2af11
SW
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
d03d860b
BS
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
0e0df1e2 121MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 122static MemoryRegion io_mem_subpage_ram;
0e0df1e2 123
e2eef170 124#endif
9fa3e853 125
6a00d601
FB
126CPUState *first_cpu;
127/* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
b3c4bbe5 129DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 130/* 0 = Do not count executed instructions.
bf20dc07 131 1 = Precise instruction counting.
2e70f6ef
PB
132 2 = Adaptive rate instruction counting. */
133int use_icount = 0;
6a00d601 134
54936004 135typedef struct PageDesc {
92e873b9 136 /* list of TBs intersecting this ram page */
fd6ce8f6 137 TranslationBlock *first_tb;
9fa3e853
FB
138 /* in order to optimize self modifying code, we count the number
139 of lookups we do to a given page to use a bitmap */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142#if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144#endif
54936004
FB
145} PageDesc;
146
41c1b1c9 147/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
148 while in user mode we want it to be based on virtual addresses. */
149#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
150#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 154#endif
bedb69ea 155#else
5cd2c5b6 156# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 157#endif
54936004 158
5cd2c5b6
RH
159/* Size of the L2 (and L3, etc) page tables. */
160#define L2_BITS 10
54936004
FB
161#define L2_SIZE (1 << L2_BITS)
162
5cd2c5b6
RH
163/* The bits remaining after N lower levels of page tables. */
164#define P_L1_BITS_REM \
165 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
166#define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168
169/* Size of the L1 page table. Avoid silly small sizes. */
170#if P_L1_BITS_REM < 4
171#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
172#else
173#define P_L1_BITS P_L1_BITS_REM
174#endif
175
176#if V_L1_BITS_REM < 4
177#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
178#else
179#define V_L1_BITS V_L1_BITS_REM
180#endif
181
182#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
183#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
184
185#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
186#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
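/* Example: with 12-bit pages and, say, a 36-bit physical address space,
   24 bits remain to be mapped; 24 % 10 = 4, so P_L1_BITS is 4 and
   P_L1_SHIFT is 20, giving a 16-entry top level followed by two 10-bit
   (1024-entry) levels down to the PhysPageDesc leaves. */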
187
83fb7adf 188unsigned long qemu_real_host_page_size;
83fb7adf
FB
189unsigned long qemu_host_page_size;
190unsigned long qemu_host_page_mask;
54936004 191
5cd2c5b6
RH
192/* This is a multi-level map on the virtual address space.
193 The bottom level has pointers to PageDesc. */
194static void *l1_map[V_L1_SIZE];
54936004 195
e2eef170 196#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
197typedef struct PhysPageDesc {
198 /* offset in host memory of the page + io_index in the low bits */
199 ram_addr_t phys_offset;
200 ram_addr_t region_offset;
201} PhysPageDesc;
202
5cd2c5b6
RH
203/* This is a multi-level map on the physical address space.
204 The bottom level has pointers to PhysPageDesc. */
205static void *l1_phys_map[P_L1_SIZE];
6d9a1304 206
e2eef170 207static void io_mem_init(void);
62152b8a 208static void memory_map_init(void);
e2eef170 209
33417e70 210/* io memory support */
a621f38d 211MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
511d2b14 212static char io_mem_used[IO_MEM_NB_ENTRIES];
1ec9b909 213static MemoryRegion io_mem_watch;
6658ffb8 214#endif
33417e70 215
34865134 216/* log support */
1e8b27ca
JR
217#ifdef WIN32
218static const char *logfilename = "qemu.log";
219#else
d9b630fd 220static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 221#endif
34865134
FB
222FILE *logfile;
223int loglevel;
e735b91c 224static int log_append = 0;
34865134 225
e3db7226 226/* statistics */
b3755a91 227#if !defined(CONFIG_USER_ONLY)
e3db7226 228static int tlb_flush_count;
b3755a91 229#endif
e3db7226
FB
230static int tb_flush_count;
231static int tb_phys_invalidate_count;
232
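/* Make a host address range executable: VirtualProtect() on Win32,
   mprotect() rounded out to host page boundaries elsewhere. */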
7cb69cae
FB
233#ifdef _WIN32
234static void map_exec(void *addr, long size)
235{
236 DWORD old_protect;
237 VirtualProtect(addr, size,
238 PAGE_EXECUTE_READWRITE, &old_protect);
239
240}
241#else
242static void map_exec(void *addr, long size)
243{
4369415f 244 unsigned long start, end, page_size;
7cb69cae 245
4369415f 246 page_size = getpagesize();
7cb69cae 247 start = (unsigned long)addr;
4369415f 248 start &= ~(page_size - 1);
7cb69cae
FB
249
250 end = (unsigned long)addr + size;
4369415f
FB
251 end += page_size - 1;
252 end &= ~(page_size - 1);
7cb69cae
FB
253
254 mprotect((void *)start, end - start,
255 PROT_READ | PROT_WRITE | PROT_EXEC);
256}
257#endif
258
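/* Determine the host page size and, for BSD user-mode emulation, mark
   the already-mapped host regions as PAGE_RESERVED so the guest cannot
   map over them. */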
b346ff46 259static void page_init(void)
54936004 260{
83fb7adf 261 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 262 TARGET_PAGE_SIZE */
c2b48b69
AL
263#ifdef _WIN32
264 {
265 SYSTEM_INFO system_info;
266
267 GetSystemInfo(&system_info);
268 qemu_real_host_page_size = system_info.dwPageSize;
269 }
270#else
271 qemu_real_host_page_size = getpagesize();
272#endif
83fb7adf
FB
273 if (qemu_host_page_size == 0)
274 qemu_host_page_size = qemu_real_host_page_size;
275 if (qemu_host_page_size < TARGET_PAGE_SIZE)
276 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 277 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 278
2e9a5713 279#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 280 {
f01576f1
JL
281#ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry *freep;
283 int i, cnt;
284
285 freep = kinfo_getvmmap(getpid(), &cnt);
286 if (freep) {
287 mmap_lock();
288 for (i = 0; i < cnt; i++) {
289 unsigned long startaddr, endaddr;
290
291 startaddr = freep[i].kve_start;
292 endaddr = freep[i].kve_end;
293 if (h2g_valid(startaddr)) {
294 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
295
296 if (h2g_valid(endaddr)) {
297 endaddr = h2g(endaddr);
fd436907 298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
299 } else {
300#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 endaddr = ~0ul;
fd436907 302 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
303#endif
304 }
305 }
306 }
307 free(freep);
308 mmap_unlock();
309 }
310#else
50a9569b 311 FILE *f;
50a9569b 312
0776590d 313 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 314
fd436907 315 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 316 if (f) {
5cd2c5b6
RH
317 mmap_lock();
318
50a9569b 319 do {
5cd2c5b6
RH
320 unsigned long startaddr, endaddr;
321 int n;
322
323 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
324
325 if (n == 2 && h2g_valid(startaddr)) {
326 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
327
328 if (h2g_valid(endaddr)) {
329 endaddr = h2g(endaddr);
330 } else {
331 endaddr = ~0ul;
332 }
333 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
334 }
335 } while (!feof(f));
5cd2c5b6 336
50a9569b 337 fclose(f);
5cd2c5b6 338 mmap_unlock();
50a9569b 339 }
f01576f1 340#endif
50a9569b
AZ
341 }
342#endif
54936004
FB
343}
344
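/* Walk the l1_map radix tree down to the PageDesc for the given page
   index, allocating missing intermediate levels along the way when
   'alloc' is set; return NULL otherwise. */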
41c1b1c9 345static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 346{
41c1b1c9
PB
347 PageDesc *pd;
348 void **lp;
349 int i;
350
5cd2c5b6 351#if defined(CONFIG_USER_ONLY)
7267c094 352 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
353# define ALLOC(P, SIZE) \
354 do { \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
357 } while (0)
358#else
359# define ALLOC(P, SIZE) \
7267c094 360 do { P = g_malloc0(SIZE); } while (0)
17e2377a 361#endif
434929bf 362
5cd2c5b6
RH
363 /* Level 1. Always allocated. */
364 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
365
366 /* Level 2..N-1. */
367 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
368 void **p = *lp;
369
370 if (p == NULL) {
371 if (!alloc) {
372 return NULL;
373 }
374 ALLOC(p, sizeof(void *) * L2_SIZE);
375 *lp = p;
17e2377a 376 }
5cd2c5b6
RH
377
378 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
379 }
380
381 pd = *lp;
382 if (pd == NULL) {
383 if (!alloc) {
384 return NULL;
385 }
386 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
387 *lp = pd;
54936004 388 }
5cd2c5b6
RH
389
390#undef ALLOC
5cd2c5b6
RH
391
392 return pd + (index & (L2_SIZE - 1));
54936004
FB
393}
394
41c1b1c9 395static inline PageDesc *page_find(tb_page_addr_t index)
54936004 396{
5cd2c5b6 397 return page_find_alloc(index, 0);
fd6ce8f6
FB
398}
399
6d9a1304 400#if !defined(CONFIG_USER_ONLY)
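/* Same walk as page_find_alloc(), but over l1_phys_map and keyed by
   physical page index; freshly allocated leaves default to
   io_mem_unassigned. */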
c227f099 401static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 402{
e3f4e2a4 403 PhysPageDesc *pd;
5cd2c5b6
RH
404 void **lp;
405 int i;
92e873b9 406
5cd2c5b6
RH
407 /* Level 1. Always allocated. */
408 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 409
5cd2c5b6
RH
410 /* Level 2..N-1. */
411 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
412 void **p = *lp;
413 if (p == NULL) {
414 if (!alloc) {
415 return NULL;
416 }
7267c094 417 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
418 }
419 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 420 }
5cd2c5b6 421
e3f4e2a4 422 pd = *lp;
5cd2c5b6 423 if (pd == NULL) {
e3f4e2a4 424 int i;
5ab97b7f 425 int first_index = index & ~(L2_SIZE - 1);
5cd2c5b6
RH
426
427 if (!alloc) {
108c49b8 428 return NULL;
5cd2c5b6
RH
429 }
430
7267c094 431 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 432
67c4d23c 433 for (i = 0; i < L2_SIZE; i++) {
0e0df1e2 434 pd[i].phys_offset = io_mem_unassigned.ram_addr;
5ab97b7f 435 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
67c4d23c 436 }
92e873b9 437 }
5cd2c5b6
RH
438
439 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
440}
441
f1f6e3b8 442static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
92e873b9 443{
f1f6e3b8
AK
444 PhysPageDesc *p = phys_page_find_alloc(index, 0);
445
446 if (p) {
447 return *p;
448 } else {
449 return (PhysPageDesc) {
0e0df1e2 450 .phys_offset = io_mem_unassigned.ram_addr,
f1f6e3b8
AK
451 .region_offset = index << TARGET_PAGE_BITS,
452 };
453 }
92e873b9
FB
454}
455
c227f099
AL
456static void tlb_protect_code(ram_addr_t ram_addr);
457static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 458 target_ulong vaddr);
c8a706fe
PB
459#define mmap_lock() do { } while(0)
460#define mmap_unlock() do { } while(0)
9fa3e853 461#endif
fd6ce8f6 462
4369415f
FB
463#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
464
465#if defined(CONFIG_USER_ONLY)
ccbb4d44 466/* Currently it is not recommended to allocate big chunks of data in
4369415f 467 user mode. It will change when a dedicated libc is used. */
468#define USE_STATIC_CODE_GEN_BUFFER
469#endif
470
471#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
472static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
473 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
474#endif
475
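/* Allocate the translated-code buffer: the static buffer in user-mode
   builds, otherwise an mmap()ed region placed to satisfy host-specific
   branch-range constraints (e.g. MAP_32BIT on x86-64). It also sizes
   the TB array and sets the flush threshold. */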
8fcd3692 476static void code_gen_alloc(unsigned long tb_size)
26a5f13b 477{
4369415f
FB
478#ifdef USE_STATIC_CODE_GEN_BUFFER
479 code_gen_buffer = static_code_gen_buffer;
480 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482#else
26a5f13b
FB
483 code_gen_buffer_size = tb_size;
484 if (code_gen_buffer_size == 0) {
4369415f 485#if defined(CONFIG_USER_ONLY)
4369415f
FB
486 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487#else
ccbb4d44 488 /* XXX: needs adjustments */
94a6b54f 489 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 490#endif
26a5f13b
FB
491 }
492 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
493 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
494 /* The code gen buffer location may have constraints depending on
495 the host cpu and OS */
496#if defined(__linux__)
497 {
498 int flags;
141ac468
BS
499 void *start = NULL;
500
26a5f13b
FB
501 flags = MAP_PRIVATE | MAP_ANONYMOUS;
502#if defined(__x86_64__)
503 flags |= MAP_32BIT;
504 /* Cannot map more than that */
505 if (code_gen_buffer_size > (800 * 1024 * 1024))
506 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
507#elif defined(__sparc_v9__)
508 // Map the buffer below 2G, so we can use direct calls and branches
509 flags |= MAP_FIXED;
510 start = (void *) 0x60000000UL;
511 if (code_gen_buffer_size > (512 * 1024 * 1024))
512 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 513#elif defined(__arm__)
222f23f5 514 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
515 if (code_gen_buffer_size > 16 * 1024 * 1024)
516 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
517#elif defined(__s390x__)
518 /* Map the buffer so that we can use direct calls and branches. */
519 /* We have a +- 4GB range on the branches; leave some slop. */
520 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
521 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
522 }
523 start = (void *)0x90000000UL;
26a5f13b 524#endif
141ac468
BS
525 code_gen_buffer = mmap(start, code_gen_buffer_size,
526 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
527 flags, -1, 0);
528 if (code_gen_buffer == MAP_FAILED) {
529 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
530 exit(1);
531 }
532 }
cbb608a5 533#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
534 || defined(__DragonFly__) || defined(__OpenBSD__) \
535 || defined(__NetBSD__)
06e67a82
AL
536 {
537 int flags;
538 void *addr = NULL;
539 flags = MAP_PRIVATE | MAP_ANONYMOUS;
540#if defined(__x86_64__)
541 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 * 0x40000000 is free */
543 flags |= MAP_FIXED;
544 addr = (void *)0x40000000;
545 /* Cannot map more than that */
546 if (code_gen_buffer_size > (800 * 1024 * 1024))
547 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
548#elif defined(__sparc_v9__)
549 // Map the buffer below 2G, so we can use direct calls and branches
550 flags |= MAP_FIXED;
551 addr = (void *) 0x60000000UL;
552 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
553 code_gen_buffer_size = (512 * 1024 * 1024);
554 }
06e67a82
AL
555#endif
556 code_gen_buffer = mmap(addr, code_gen_buffer_size,
557 PROT_WRITE | PROT_READ | PROT_EXEC,
558 flags, -1, 0);
559 if (code_gen_buffer == MAP_FAILED) {
560 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
561 exit(1);
562 }
563 }
26a5f13b 564#else
7267c094 565 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
566 map_exec(code_gen_buffer, code_gen_buffer_size);
567#endif
4369415f 568#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 569 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
570 code_gen_buffer_max_size = code_gen_buffer_size -
571 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 572 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 573 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
574}
575
576/* Must be called before using the QEMU cpus. 'tb_size' is the size
577 (in bytes) allocated to the translation buffer. Zero means default
578 size. */
d5ab9713 579void tcg_exec_init(unsigned long tb_size)
26a5f13b 580{
26a5f13b
FB
581 cpu_gen_init();
582 code_gen_alloc(tb_size);
583 code_gen_ptr = code_gen_buffer;
4369415f 584 page_init();
9002ec79
RH
585#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
586 /* There's no guest base to take into account, so go ahead and
587 initialize the prologue now. */
588 tcg_prologue_init(&tcg_ctx);
589#endif
26a5f13b
FB
590}
591
d5ab9713
JK
592bool tcg_enabled(void)
593{
594 return code_gen_buffer != NULL;
595}
596
597void cpu_exec_init_all(void)
598{
599#if !defined(CONFIG_USER_ONLY)
600 memory_map_init();
601 io_mem_init();
602#endif
603}
604
9656f324
PB
605#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
606
e59fb374 607static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
608{
609 CPUState *env = opaque;
9656f324 610
3098dba0
AJ
611 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
612 version_id is increased. */
613 env->interrupt_request &= ~0x01;
9656f324
PB
614 tlb_flush(env, 1);
615
616 return 0;
617}
e7f4eff7
JQ
618
619static const VMStateDescription vmstate_cpu_common = {
620 .name = "cpu_common",
621 .version_id = 1,
622 .minimum_version_id = 1,
623 .minimum_version_id_old = 1,
e7f4eff7
JQ
624 .post_load = cpu_common_post_load,
625 .fields = (VMStateField []) {
626 VMSTATE_UINT32(halted, CPUState),
627 VMSTATE_UINT32(interrupt_request, CPUState),
628 VMSTATE_END_OF_LIST()
629 }
630};
9656f324
PB
631#endif
632
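/* Return the CPUState whose cpu_index matches 'cpu', or NULL if none. */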
950f1472
GC
633CPUState *qemu_get_cpu(int cpu)
634{
635 CPUState *env = first_cpu;
636
637 while (env) {
638 if (env->cpu_index == cpu)
639 break;
640 env = env->next_cpu;
641 }
642
643 return env;
644}
645
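/* Append a new CPU to the global list, assign it the next cpu_index and,
   in system-mode builds that define CPU_SAVE_VERSION, register its
   common vmstate/savevm handlers. */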
6a00d601 646void cpu_exec_init(CPUState *env)
fd6ce8f6 647{
6a00d601
FB
648 CPUState **penv;
649 int cpu_index;
650
c2764719
PB
651#if defined(CONFIG_USER_ONLY)
652 cpu_list_lock();
653#endif
6a00d601
FB
654 env->next_cpu = NULL;
655 penv = &first_cpu;
656 cpu_index = 0;
657 while (*penv != NULL) {
1e9fa730 658 penv = &(*penv)->next_cpu;
6a00d601
FB
659 cpu_index++;
660 }
661 env->cpu_index = cpu_index;
268a362c 662 env->numa_node = 0;
72cf2d4f
BS
663 QTAILQ_INIT(&env->breakpoints);
664 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
665#ifndef CONFIG_USER_ONLY
666 env->thread_id = qemu_get_thread_id();
667#endif
6a00d601 668 *penv = env;
c2764719
PB
669#if defined(CONFIG_USER_ONLY)
670 cpu_list_unlock();
671#endif
b3c7724c 672#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
673 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
674 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
675 cpu_save, cpu_load, env);
676#endif
fd6ce8f6
FB
677}
678
d1a1eb74
TG
679/* Allocate a new translation block. Flush the translation buffer if
680 too many translation blocks or too much generated code. */
681static TranslationBlock *tb_alloc(target_ulong pc)
682{
683 TranslationBlock *tb;
684
685 if (nb_tbs >= code_gen_max_blocks ||
686 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
687 return NULL;
688 tb = &tbs[nb_tbs++];
689 tb->pc = pc;
690 tb->cflags = 0;
691 return tb;
692}
693
694void tb_free(TranslationBlock *tb)
695{
696 /* In practice this is mostly used for single-use temporary TBs.
697 Ignore the hard cases and just back up if this TB happens to
698 be the last one generated. */
699 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
700 code_gen_ptr = tb->tc_ptr;
701 nb_tbs--;
702 }
703}
704
9fa3e853
FB
705static inline void invalidate_page_bitmap(PageDesc *p)
706{
707 if (p->code_bitmap) {
7267c094 708 g_free(p->code_bitmap);
9fa3e853
FB
709 p->code_bitmap = NULL;
710 }
711 p->code_write_count = 0;
712}
713
5cd2c5b6
RH
714/* Set to NULL all the 'first_tb' fields in all PageDescs. */
715
716static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 717{
5cd2c5b6 718 int i;
fd6ce8f6 719
5cd2c5b6
RH
720 if (*lp == NULL) {
721 return;
722 }
723 if (level == 0) {
724 PageDesc *pd = *lp;
7296abac 725 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
726 pd[i].first_tb = NULL;
727 invalidate_page_bitmap(pd + i);
fd6ce8f6 728 }
5cd2c5b6
RH
729 } else {
730 void **pp = *lp;
7296abac 731 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
732 page_flush_tb_1 (level - 1, pp + i);
733 }
734 }
735}
736
737static void page_flush_tb(void)
738{
739 int i;
740 for (i = 0; i < V_L1_SIZE; i++) {
741 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
742 }
743}
744
745/* flush all the translation blocks */
d4e8164f 746/* XXX: tb_flush is currently not thread safe */
6a00d601 747void tb_flush(CPUState *env1)
fd6ce8f6 748{
6a00d601 749 CPUState *env;
0124311e 750#if defined(DEBUG_FLUSH)
ab3d1727
BS
751 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
752 (unsigned long)(code_gen_ptr - code_gen_buffer),
753 nb_tbs, nb_tbs > 0 ?
754 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 755#endif
26a5f13b 756 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
757 cpu_abort(env1, "Internal error: code buffer overflow\n");
758
fd6ce8f6 759 nb_tbs = 0;
3b46e624 760
6a00d601
FB
761 for(env = first_cpu; env != NULL; env = env->next_cpu) {
762 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
763 }
9fa3e853 764
8a8a608f 765 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 766 page_flush_tb();
9fa3e853 767
fd6ce8f6 768 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
769 /* XXX: flush processor icache at this point if cache flush is
770 expensive */
e3db7226 771 tb_flush_count++;
fd6ce8f6
FB
772}
773
774#ifdef DEBUG_TB_CHECK
775
bc98a7ef 776static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
777{
778 TranslationBlock *tb;
779 int i;
780 address &= TARGET_PAGE_MASK;
99773bd4
PB
781 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
782 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
783 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
784 address >= tb->pc + tb->size)) {
0bf9e31a
BS
785 printf("ERROR invalidate: address=" TARGET_FMT_lx
786 " PC=%08lx size=%04x\n",
99773bd4 787 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
788 }
789 }
790 }
791}
792
793/* verify that all the pages have correct rights for code */
794static void tb_page_check(void)
795{
796 TranslationBlock *tb;
797 int i, flags1, flags2;
3b46e624 798
99773bd4
PB
799 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
800 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
801 flags1 = page_get_flags(tb->pc);
802 flags2 = page_get_flags(tb->pc + tb->size - 1);
803 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
804 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 805 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
806 }
807 }
808 }
809}
810
811#endif
812
813/* invalidate one TB */
814static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
815 int next_offset)
816{
817 TranslationBlock *tb1;
818 for(;;) {
819 tb1 = *ptb;
820 if (tb1 == tb) {
821 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
822 break;
823 }
824 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
825 }
826}
827
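/* Unlink 'tb' from a per-page TB list; the low two bits of each list
   pointer encode which of the TB's two possible pages the link belongs
   to. */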
9fa3e853
FB
828static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
829{
830 TranslationBlock *tb1;
831 unsigned int n1;
832
833 for(;;) {
834 tb1 = *ptb;
835 n1 = (long)tb1 & 3;
836 tb1 = (TranslationBlock *)((long)tb1 & ~3);
837 if (tb1 == tb) {
838 *ptb = tb1->page_next[n1];
839 break;
840 }
841 ptb = &tb1->page_next[n1];
842 }
843}
844
d4e8164f
FB
845static inline void tb_jmp_remove(TranslationBlock *tb, int n)
846{
847 TranslationBlock *tb1, **ptb;
848 unsigned int n1;
849
850 ptb = &tb->jmp_next[n];
851 tb1 = *ptb;
852 if (tb1) {
853 /* find tb(n) in circular list */
854 for(;;) {
855 tb1 = *ptb;
856 n1 = (long)tb1 & 3;
857 tb1 = (TranslationBlock *)((long)tb1 & ~3);
858 if (n1 == n && tb1 == tb)
859 break;
860 if (n1 == 2) {
861 ptb = &tb1->jmp_first;
862 } else {
863 ptb = &tb1->jmp_next[n1];
864 }
865 }
866 /* now we can suppress tb(n) from the list */
867 *ptb = tb->jmp_next[n];
868
869 tb->jmp_next[n] = NULL;
870 }
871}
872
873/* reset the jump entry 'n' of a TB so that it is not chained to
874 another TB */
875static inline void tb_reset_jump(TranslationBlock *tb, int n)
876{
877 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
878}
879
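/* Remove a TB completely: unhook it from the physical hash table and the
   per-page lists, break its two outgoing jump links, reset any jumps
   that point into it, and purge it from every CPU's tb_jmp_cache. */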
41c1b1c9 880void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 881{
6a00d601 882 CPUState *env;
8a40a180 883 PageDesc *p;
d4e8164f 884 unsigned int h, n1;
41c1b1c9 885 tb_page_addr_t phys_pc;
8a40a180 886 TranslationBlock *tb1, *tb2;
3b46e624 887
8a40a180
FB
888 /* remove the TB from the hash list */
889 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
890 h = tb_phys_hash_func(phys_pc);
5fafdf24 891 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
892 offsetof(TranslationBlock, phys_hash_next));
893
894 /* remove the TB from the page list */
895 if (tb->page_addr[0] != page_addr) {
896 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
897 tb_page_remove(&p->first_tb, tb);
898 invalidate_page_bitmap(p);
899 }
900 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
901 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
902 tb_page_remove(&p->first_tb, tb);
903 invalidate_page_bitmap(p);
904 }
905
36bdbe54 906 tb_invalidated_flag = 1;
59817ccb 907
fd6ce8f6 908 /* remove the TB from the hash list */
8a40a180 909 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
910 for(env = first_cpu; env != NULL; env = env->next_cpu) {
911 if (env->tb_jmp_cache[h] == tb)
912 env->tb_jmp_cache[h] = NULL;
913 }
d4e8164f
FB
914
915 /* suppress this TB from the two jump lists */
916 tb_jmp_remove(tb, 0);
917 tb_jmp_remove(tb, 1);
918
919 /* suppress any remaining jumps to this TB */
920 tb1 = tb->jmp_first;
921 for(;;) {
922 n1 = (long)tb1 & 3;
923 if (n1 == 2)
924 break;
925 tb1 = (TranslationBlock *)((long)tb1 & ~3);
926 tb2 = tb1->jmp_next[n1];
927 tb_reset_jump(tb1, n1);
928 tb1->jmp_next[n1] = NULL;
929 tb1 = tb2;
930 }
931 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 932
e3db7226 933 tb_phys_invalidate_count++;
9fa3e853
FB
934}
935
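/* Set bits [start, start + len) in the byte-array bitmap 'tab'. */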
936static inline void set_bits(uint8_t *tab, int start, int len)
937{
938 int end, mask, end1;
939
940 end = start + len;
941 tab += start >> 3;
942 mask = 0xff << (start & 7);
943 if ((start & ~7) == (end & ~7)) {
944 if (start < end) {
945 mask &= ~(0xff << (end & 7));
946 *tab |= mask;
947 }
948 } else {
949 *tab++ |= mask;
950 start = (start + 8) & ~7;
951 end1 = end & ~7;
952 while (start < end1) {
953 *tab++ = 0xff;
954 start += 8;
955 }
956 if (start < end) {
957 mask = ~(0xff << (end & 7));
958 *tab |= mask;
959 }
960 }
961}
962
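/* Build the page's code bitmap: mark the byte ranges covered by every TB
   on this page, so tb_invalidate_phys_page_fast() can tell whether a
   write actually hits translated code. */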
963static void build_page_bitmap(PageDesc *p)
964{
965 int n, tb_start, tb_end;
966 TranslationBlock *tb;
3b46e624 967
7267c094 968 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
969
970 tb = p->first_tb;
971 while (tb != NULL) {
972 n = (long)tb & 3;
973 tb = (TranslationBlock *)((long)tb & ~3);
974 /* NOTE: this is subtle as a TB may span two physical pages */
975 if (n == 0) {
976 /* NOTE: tb_end may be after the end of the page, but
977 it is not a problem */
978 tb_start = tb->pc & ~TARGET_PAGE_MASK;
979 tb_end = tb_start + tb->size;
980 if (tb_end > TARGET_PAGE_SIZE)
981 tb_end = TARGET_PAGE_SIZE;
982 } else {
983 tb_start = 0;
984 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
985 }
986 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
987 tb = tb->page_next[n];
988 }
989}
990
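/* Translate one block starting at 'pc'. If the TB pool or code buffer is
   exhausted, flush everything and retry; the second tb_alloc() cannot
   fail. The new TB is then linked into the hash table and page lists. */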
2e70f6ef
PB
991TranslationBlock *tb_gen_code(CPUState *env,
992 target_ulong pc, target_ulong cs_base,
993 int flags, int cflags)
d720b93d
FB
994{
995 TranslationBlock *tb;
996 uint8_t *tc_ptr;
41c1b1c9
PB
997 tb_page_addr_t phys_pc, phys_page2;
998 target_ulong virt_page2;
d720b93d
FB
999 int code_gen_size;
1000
41c1b1c9 1001 phys_pc = get_page_addr_code(env, pc);
c27004ec 1002 tb = tb_alloc(pc);
d720b93d
FB
1003 if (!tb) {
1004 /* flush must be done */
1005 tb_flush(env);
1006 /* cannot fail at this point */
c27004ec 1007 tb = tb_alloc(pc);
2e70f6ef
PB
1008 /* Don't forget to invalidate previous TB info. */
1009 tb_invalidated_flag = 1;
d720b93d
FB
1010 }
1011 tc_ptr = code_gen_ptr;
1012 tb->tc_ptr = tc_ptr;
1013 tb->cs_base = cs_base;
1014 tb->flags = flags;
1015 tb->cflags = cflags;
d07bde88 1016 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1017 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1018
d720b93d 1019 /* check next page if needed */
c27004ec 1020 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1021 phys_page2 = -1;
c27004ec 1022 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1023 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1024 }
41c1b1c9 1025 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1026 return tb;
d720b93d 1027}
3b46e624 1028
9fa3e853
FB
1029/* invalidate all TBs which intersect with the target physical page
1030 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1031 the same physical page. 'is_cpu_write_access' should be true if called
1032 from a real cpu write access: the virtual CPU will exit the current
1033 TB if code is modified inside this TB. */
41c1b1c9 1034void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1035 int is_cpu_write_access)
1036{
6b917547 1037 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1038 CPUState *env = cpu_single_env;
41c1b1c9 1039 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1040 PageDesc *p;
1041 int n;
1042#ifdef TARGET_HAS_PRECISE_SMC
1043 int current_tb_not_found = is_cpu_write_access;
1044 TranslationBlock *current_tb = NULL;
1045 int current_tb_modified = 0;
1046 target_ulong current_pc = 0;
1047 target_ulong current_cs_base = 0;
1048 int current_flags = 0;
1049#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1050
1051 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1052 if (!p)
9fa3e853 1053 return;
5fafdf24 1054 if (!p->code_bitmap &&
d720b93d
FB
1055 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1056 is_cpu_write_access) {
9fa3e853
FB
1057 /* build code bitmap */
1058 build_page_bitmap(p);
1059 }
1060
1061 /* we remove all the TBs in the range [start, end[ */
1062 /* XXX: see if in some cases it could be faster to invalidate all the code */
1063 tb = p->first_tb;
1064 while (tb != NULL) {
1065 n = (long)tb & 3;
1066 tb = (TranslationBlock *)((long)tb & ~3);
1067 tb_next = tb->page_next[n];
1068 /* NOTE: this is subtle as a TB may span two physical pages */
1069 if (n == 0) {
1070 /* NOTE: tb_end may be after the end of the page, but
1071 it is not a problem */
1072 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1073 tb_end = tb_start + tb->size;
1074 } else {
1075 tb_start = tb->page_addr[1];
1076 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1077 }
1078 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1079#ifdef TARGET_HAS_PRECISE_SMC
1080 if (current_tb_not_found) {
1081 current_tb_not_found = 0;
1082 current_tb = NULL;
2e70f6ef 1083 if (env->mem_io_pc) {
d720b93d 1084 /* now we have a real cpu fault */
2e70f6ef 1085 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1086 }
1087 }
1088 if (current_tb == tb &&
2e70f6ef 1089 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1090 /* If we are modifying the current TB, we must stop
1091 its execution. We could be more precise by checking
1092 that the modification is after the current PC, but it
1093 would require a specialized function to partially
1094 restore the CPU state */
3b46e624 1095
d720b93d 1096 current_tb_modified = 1;
618ba8e6 1097 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1098 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1099 &current_flags);
d720b93d
FB
1100 }
1101#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1102 /* we need to do that to handle the case where a signal
1103 occurs while doing tb_phys_invalidate() */
1104 saved_tb = NULL;
1105 if (env) {
1106 saved_tb = env->current_tb;
1107 env->current_tb = NULL;
1108 }
9fa3e853 1109 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1110 if (env) {
1111 env->current_tb = saved_tb;
1112 if (env->interrupt_request && env->current_tb)
1113 cpu_interrupt(env, env->interrupt_request);
1114 }
9fa3e853
FB
1115 }
1116 tb = tb_next;
1117 }
1118#if !defined(CONFIG_USER_ONLY)
1119 /* if no code remaining, no need to continue to use slow writes */
1120 if (!p->first_tb) {
1121 invalidate_page_bitmap(p);
d720b93d 1122 if (is_cpu_write_access) {
2e70f6ef 1123 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1124 }
1125 }
1126#endif
1127#ifdef TARGET_HAS_PRECISE_SMC
1128 if (current_tb_modified) {
1129 /* we generate a block containing just the instruction
1130 modifying the memory. It will ensure that it cannot modify
1131 itself */
ea1c1802 1132 env->current_tb = NULL;
2e70f6ef 1133 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1134 cpu_resume_from_signal(env, NULL);
9fa3e853 1135 }
fd6ce8f6 1136#endif
9fa3e853 1137}
fd6ce8f6 1138
9fa3e853 1139/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1140static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1141{
1142 PageDesc *p;
1143 int offset, b;
59817ccb 1144#if 0
a4193c8a 1145 if (1) {
93fcfe39
AL
1146 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1147 cpu_single_env->mem_io_vaddr, len,
1148 cpu_single_env->eip,
1149 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1150 }
1151#endif
9fa3e853 1152 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1153 if (!p)
9fa3e853
FB
1154 return;
1155 if (p->code_bitmap) {
1156 offset = start & ~TARGET_PAGE_MASK;
1157 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1158 if (b & ((1 << len) - 1))
1159 goto do_invalidate;
1160 } else {
1161 do_invalidate:
d720b93d 1162 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1163 }
1164}
1165
9fa3e853 1166#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1167static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1168 unsigned long pc, void *puc)
9fa3e853 1169{
6b917547 1170 TranslationBlock *tb;
9fa3e853 1171 PageDesc *p;
6b917547 1172 int n;
d720b93d 1173#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1174 TranslationBlock *current_tb = NULL;
d720b93d 1175 CPUState *env = cpu_single_env;
6b917547
AL
1176 int current_tb_modified = 0;
1177 target_ulong current_pc = 0;
1178 target_ulong current_cs_base = 0;
1179 int current_flags = 0;
d720b93d 1180#endif
9fa3e853
FB
1181
1182 addr &= TARGET_PAGE_MASK;
1183 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1184 if (!p)
9fa3e853
FB
1185 return;
1186 tb = p->first_tb;
d720b93d
FB
1187#ifdef TARGET_HAS_PRECISE_SMC
1188 if (tb && pc != 0) {
1189 current_tb = tb_find_pc(pc);
1190 }
1191#endif
9fa3e853
FB
1192 while (tb != NULL) {
1193 n = (long)tb & 3;
1194 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1195#ifdef TARGET_HAS_PRECISE_SMC
1196 if (current_tb == tb &&
2e70f6ef 1197 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1198 /* If we are modifying the current TB, we must stop
1199 its execution. We could be more precise by checking
1200 that the modification is after the current PC, but it
1201 would require a specialized function to partially
1202 restore the CPU state */
3b46e624 1203
d720b93d 1204 current_tb_modified = 1;
618ba8e6 1205 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1206 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1207 &current_flags);
d720b93d
FB
1208 }
1209#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1210 tb_phys_invalidate(tb, addr);
1211 tb = tb->page_next[n];
1212 }
fd6ce8f6 1213 p->first_tb = NULL;
d720b93d
FB
1214#ifdef TARGET_HAS_PRECISE_SMC
1215 if (current_tb_modified) {
1216 /* we generate a block containing just the instruction
1217 modifying the memory. It will ensure that it cannot modify
1218 itself */
ea1c1802 1219 env->current_tb = NULL;
2e70f6ef 1220 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1221 cpu_resume_from_signal(env, puc);
1222 }
1223#endif
fd6ce8f6 1224}
9fa3e853 1225#endif
fd6ce8f6
FB
1226
1227/* add the tb in the target page and protect it if necessary */
5fafdf24 1228static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1229 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1230{
1231 PageDesc *p;
4429ab44
JQ
1232#ifndef CONFIG_USER_ONLY
1233 bool page_already_protected;
1234#endif
9fa3e853
FB
1235
1236 tb->page_addr[n] = page_addr;
5cd2c5b6 1237 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1238 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1239#ifndef CONFIG_USER_ONLY
1240 page_already_protected = p->first_tb != NULL;
1241#endif
9fa3e853
FB
1242 p->first_tb = (TranslationBlock *)((long)tb | n);
1243 invalidate_page_bitmap(p);
fd6ce8f6 1244
107db443 1245#if defined(TARGET_HAS_SMC) || 1
d720b93d 1246
9fa3e853 1247#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1248 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1249 target_ulong addr;
1250 PageDesc *p2;
9fa3e853
FB
1251 int prot;
1252
fd6ce8f6
FB
1253 /* force the host page as non-writable (writes will have a
1254 page fault + mprotect overhead) */
53a5960a 1255 page_addr &= qemu_host_page_mask;
fd6ce8f6 1256 prot = 0;
53a5960a
PB
1257 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1258 addr += TARGET_PAGE_SIZE) {
1259
1260 p2 = page_find (addr >> TARGET_PAGE_BITS);
1261 if (!p2)
1262 continue;
1263 prot |= p2->flags;
1264 p2->flags &= ~PAGE_WRITE;
53a5960a 1265 }
5fafdf24 1266 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1267 (prot & PAGE_BITS) & ~PAGE_WRITE);
1268#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1269 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1270 page_addr);
fd6ce8f6 1271#endif
fd6ce8f6 1272 }
9fa3e853
FB
1273#else
1274 /* if some code is already present, then the pages are already
1275 protected. So we handle the case where only the first TB is
1276 allocated in a physical page */
4429ab44 1277 if (!page_already_protected) {
6a00d601 1278 tlb_protect_code(page_addr);
9fa3e853
FB
1279 }
1280#endif
d720b93d
FB
1281
1282#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1283}
1284
9fa3e853
FB
1285/* add a new TB and link it to the physical page tables. phys_page2 is
1286 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1287void tb_link_page(TranslationBlock *tb,
1288 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1289{
9fa3e853
FB
1290 unsigned int h;
1291 TranslationBlock **ptb;
1292
c8a706fe
PB
1293 /* Grab the mmap lock to stop another thread invalidating this TB
1294 before we are done. */
1295 mmap_lock();
9fa3e853
FB
1296 /* add in the physical hash table */
1297 h = tb_phys_hash_func(phys_pc);
1298 ptb = &tb_phys_hash[h];
1299 tb->phys_hash_next = *ptb;
1300 *ptb = tb;
fd6ce8f6
FB
1301
1302 /* add in the page list */
9fa3e853
FB
1303 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1304 if (phys_page2 != -1)
1305 tb_alloc_page(tb, 1, phys_page2);
1306 else
1307 tb->page_addr[1] = -1;
9fa3e853 1308
d4e8164f
FB
1309 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1310 tb->jmp_next[0] = NULL;
1311 tb->jmp_next[1] = NULL;
1312
1313 /* init original jump addresses */
1314 if (tb->tb_next_offset[0] != 0xffff)
1315 tb_reset_jump(tb, 0);
1316 if (tb->tb_next_offset[1] != 0xffff)
1317 tb_reset_jump(tb, 1);
8a40a180
FB
1318
1319#ifdef DEBUG_TB_CHECK
1320 tb_page_check();
1321#endif
c8a706fe 1322 mmap_unlock();
fd6ce8f6
FB
1323}
1324
9fa3e853
FB
1325/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1326 tb[1].tc_ptr. Return NULL if not found */
1327TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1328{
9fa3e853
FB
1329 int m_min, m_max, m;
1330 unsigned long v;
1331 TranslationBlock *tb;
a513fe19
FB
1332
1333 if (nb_tbs <= 0)
1334 return NULL;
1335 if (tc_ptr < (unsigned long)code_gen_buffer ||
1336 tc_ptr >= (unsigned long)code_gen_ptr)
1337 return NULL;
1338 /* binary search (cf Knuth) */
1339 m_min = 0;
1340 m_max = nb_tbs - 1;
1341 while (m_min <= m_max) {
1342 m = (m_min + m_max) >> 1;
1343 tb = &tbs[m];
1344 v = (unsigned long)tb->tc_ptr;
1345 if (v == tc_ptr)
1346 return tb;
1347 else if (tc_ptr < v) {
1348 m_max = m - 1;
1349 } else {
1350 m_min = m + 1;
1351 }
5fafdf24 1352 }
a513fe19
FB
1353 return &tbs[m_max];
1354}
7501267e 1355
ea041c0e
FB
1356static void tb_reset_jump_recursive(TranslationBlock *tb);
1357
1358static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359{
1360 TranslationBlock *tb1, *tb_next, **ptb;
1361 unsigned int n1;
1362
1363 tb1 = tb->jmp_next[n];
1364 if (tb1 != NULL) {
1365 /* find head of list */
1366 for(;;) {
1367 n1 = (long)tb1 & 3;
1368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 if (n1 == 2)
1370 break;
1371 tb1 = tb1->jmp_next[n1];
1372 }
1373 /* we are now sure that tb jumps to tb1 */
1374 tb_next = tb1;
1375
1376 /* remove tb from the jmp_first list */
1377 ptb = &tb_next->jmp_first;
1378 for(;;) {
1379 tb1 = *ptb;
1380 n1 = (long)tb1 & 3;
1381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 if (n1 == n && tb1 == tb)
1383 break;
1384 ptb = &tb1->jmp_next[n1];
1385 }
1386 *ptb = tb->jmp_next[n];
1387 tb->jmp_next[n] = NULL;
3b46e624 1388
ea041c0e
FB
1389 /* suppress the jump to next tb in generated code */
1390 tb_reset_jump(tb, n);
1391
0124311e 1392 /* suppress jumps in the tb we could have jumped to */
ea041c0e
FB
1393 tb_reset_jump_recursive(tb_next);
1394 }
1395}
1396
1397static void tb_reset_jump_recursive(TranslationBlock *tb)
1398{
1399 tb_reset_jump_recursive2(tb, 0);
1400 tb_reset_jump_recursive2(tb, 1);
1401}
1402
1fddef4b 1403#if defined(TARGET_HAS_ICE)
94df27fd
PB
1404#if defined(CONFIG_USER_ONLY)
1405static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406{
1407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408}
1409#else
d720b93d
FB
1410static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411{
c227f099 1412 target_phys_addr_t addr;
9b3c35e0 1413 target_ulong pd;
c227f099 1414 ram_addr_t ram_addr;
f1f6e3b8 1415 PhysPageDesc p;
d720b93d 1416
c2f07f81
PB
1417 addr = cpu_get_phys_page_debug(env, pc);
1418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 1419 pd = p.phys_offset;
c2f07f81 1420 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1421 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1422}
c27004ec 1423#endif
94df27fd 1424#endif /* TARGET_HAS_ICE */
d720b93d 1425
c527ee8f
PB
1426#if defined(CONFIG_USER_ONLY)
1427void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428
1429{
1430}
1431
1432int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 int flags, CPUWatchpoint **watchpoint)
1434{
1435 return -ENOSYS;
1436}
1437#else
6658ffb8 1438/* Add a watchpoint. */
a1d1bb31
AL
1439int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1441{
b4051334 1442 target_ulong len_mask = ~(len - 1);
c0ce998e 1443 CPUWatchpoint *wp;
6658ffb8 1444
b4051334
AL
1445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 return -EINVAL;
1450 }
7267c094 1451 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1452
1453 wp->vaddr = addr;
b4051334 1454 wp->len_mask = len_mask;
a1d1bb31
AL
1455 wp->flags = flags;
1456
2dc9f411 1457 /* keep all GDB-injected watchpoints in front */
c0ce998e 1458 if (flags & BP_GDB)
72cf2d4f 1459 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1460 else
72cf2d4f 1461 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1462
6658ffb8 1463 tlb_flush_page(env, addr);
a1d1bb31
AL
1464
1465 if (watchpoint)
1466 *watchpoint = wp;
1467 return 0;
6658ffb8
PB
1468}
1469
a1d1bb31
AL
1470/* Remove a specific watchpoint. */
1471int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags)
6658ffb8 1473{
b4051334 1474 target_ulong len_mask = ~(len - 1);
a1d1bb31 1475 CPUWatchpoint *wp;
6658ffb8 1476
72cf2d4f 1477 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1478 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1479 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1480 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1481 return 0;
1482 }
1483 }
a1d1bb31 1484 return -ENOENT;
6658ffb8
PB
1485}
1486
a1d1bb31
AL
1487/* Remove a specific watchpoint by reference. */
1488void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489{
72cf2d4f 1490 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1491
a1d1bb31
AL
1492 tlb_flush_page(env, watchpoint->vaddr);
1493
7267c094 1494 g_free(watchpoint);
a1d1bb31
AL
1495}
1496
1497/* Remove all matching watchpoints. */
1498void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499{
c0ce998e 1500 CPUWatchpoint *wp, *next;
a1d1bb31 1501
72cf2d4f 1502 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1503 if (wp->flags & mask)
1504 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1505 }
7d03f82f 1506}
c527ee8f 1507#endif
7d03f82f 1508
a1d1bb31
AL
1509/* Add a breakpoint. */
1510int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511 CPUBreakpoint **breakpoint)
4c3a88a2 1512{
1fddef4b 1513#if defined(TARGET_HAS_ICE)
c0ce998e 1514 CPUBreakpoint *bp;
3b46e624 1515
7267c094 1516 bp = g_malloc(sizeof(*bp));
4c3a88a2 1517
a1d1bb31
AL
1518 bp->pc = pc;
1519 bp->flags = flags;
1520
2dc9f411 1521 /* keep all GDB-injected breakpoints in front */
c0ce998e 1522 if (flags & BP_GDB)
72cf2d4f 1523 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1524 else
72cf2d4f 1525 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1526
d720b93d 1527 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1528
1529 if (breakpoint)
1530 *breakpoint = bp;
4c3a88a2
FB
1531 return 0;
1532#else
a1d1bb31 1533 return -ENOSYS;
4c3a88a2
FB
1534#endif
1535}
1536
a1d1bb31
AL
1537/* Remove a specific breakpoint. */
1538int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539{
7d03f82f 1540#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1541 CPUBreakpoint *bp;
1542
72cf2d4f 1543 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1544 if (bp->pc == pc && bp->flags == flags) {
1545 cpu_breakpoint_remove_by_ref(env, bp);
1546 return 0;
1547 }
7d03f82f 1548 }
a1d1bb31
AL
1549 return -ENOENT;
1550#else
1551 return -ENOSYS;
7d03f82f
EI
1552#endif
1553}
1554
a1d1bb31
AL
1555/* Remove a specific breakpoint by reference. */
1556void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1557{
1fddef4b 1558#if defined(TARGET_HAS_ICE)
72cf2d4f 1559 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1560
a1d1bb31
AL
1561 breakpoint_invalidate(env, breakpoint->pc);
1562
7267c094 1563 g_free(breakpoint);
a1d1bb31
AL
1564#endif
1565}
1566
1567/* Remove all matching breakpoints. */
1568void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569{
1570#if defined(TARGET_HAS_ICE)
c0ce998e 1571 CPUBreakpoint *bp, *next;
a1d1bb31 1572
72cf2d4f 1573 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1574 if (bp->flags & mask)
1575 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1576 }
4c3a88a2
FB
1577#endif
1578}
1579
c33a346e
FB
1580/* enable or disable single step mode. EXCP_DEBUG is returned by the
1581 CPU loop after each instruction */
1582void cpu_single_step(CPUState *env, int enabled)
1583{
1fddef4b 1584#if defined(TARGET_HAS_ICE)
c33a346e
FB
1585 if (env->singlestep_enabled != enabled) {
1586 env->singlestep_enabled = enabled;
e22a25c9
AL
1587 if (kvm_enabled())
1588 kvm_update_guest_debug(env, 0);
1589 else {
ccbb4d44 1590 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1591 /* XXX: only flush what is necessary */
1592 tb_flush(env);
1593 }
c33a346e
FB
1594 }
1595#endif
1596}
1597
34865134 1598/* enable or disable low-level logging */
1599void cpu_set_log(int log_flags)
1600{
1601 loglevel = log_flags;
1602 if (loglevel && !logfile) {
11fcfab4 1603 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1604 if (!logfile) {
1605 perror(logfilename);
1606 _exit(1);
1607 }
9fa3e853
FB
1608#if !defined(CONFIG_SOFTMMU)
1609 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1610 {
b55266b5 1611 static char logfile_buf[4096];
9fa3e853
FB
1612 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 }
daf767b1
SW
1614#elif defined(_WIN32)
1615 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616 setvbuf(logfile, NULL, _IONBF, 0);
1617#else
34865134 1618 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1619#endif
e735b91c
PB
1620 log_append = 1;
1621 }
1622 if (!loglevel && logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
34865134
FB
1625 }
1626}
1627
1628void cpu_set_log_filename(const char *filename)
1629{
1630 logfilename = strdup(filename);
e735b91c
PB
1631 if (logfile) {
1632 fclose(logfile);
1633 logfile = NULL;
1634 }
1635 cpu_set_log(loglevel);
34865134 1636}
c33a346e 1637
3098dba0 1638static void cpu_unlink_tb(CPUState *env)
ea041c0e 1639{
3098dba0
AJ
1640 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1641 problem and hope the cpu will stop of its own accord. For userspace
1642 emulation this often isn't actually as bad as it sounds. Often
1643 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1644 TranslationBlock *tb;
c227f099 1645 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1646
cab1b4bd 1647 spin_lock(&interrupt_lock);
3098dba0
AJ
1648 tb = env->current_tb;
1649 /* if the cpu is currently executing code, we must unlink it and
1650 all the potentially executing TB */
f76cfe56 1651 if (tb) {
3098dba0
AJ
1652 env->current_tb = NULL;
1653 tb_reset_jump_recursive(tb);
be214e6c 1654 }
cab1b4bd 1655 spin_unlock(&interrupt_lock);
3098dba0
AJ
1656}
1657
97ffbd8d 1658#ifndef CONFIG_USER_ONLY
3098dba0 1659/* mask must never be zero, except for A20 change call */
ec6959d0 1660static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1661{
1662 int old_mask;
be214e6c 1663
2e70f6ef 1664 old_mask = env->interrupt_request;
68a79315 1665 env->interrupt_request |= mask;
3098dba0 1666
8edac960
AL
1667 /*
1668 * If called from iothread context, wake the target cpu in
1669 * case its halted.
1670 */
b7680cb6 1671 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1672 qemu_cpu_kick(env);
1673 return;
1674 }
8edac960 1675
2e70f6ef 1676 if (use_icount) {
266910c4 1677 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1678 if (!can_do_io(env)
be214e6c 1679 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1680 cpu_abort(env, "Raised interrupt while not in I/O function");
1681 }
2e70f6ef 1682 } else {
3098dba0 1683 cpu_unlink_tb(env);
ea041c0e
FB
1684 }
1685}
1686
ec6959d0
JK
1687CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688
97ffbd8d
JK
1689#else /* CONFIG_USER_ONLY */
1690
1691void cpu_interrupt(CPUState *env, int mask)
1692{
1693 env->interrupt_request |= mask;
1694 cpu_unlink_tb(env);
1695}
1696#endif /* CONFIG_USER_ONLY */
1697
b54ad049
FB
1698void cpu_reset_interrupt(CPUState *env, int mask)
1699{
1700 env->interrupt_request &= ~mask;
1701}
1702
3098dba0
AJ
1703void cpu_exit(CPUState *env)
1704{
1705 env->exit_request = 1;
1706 cpu_unlink_tb(env);
1707}
1708
c7cd6a37 1709const CPULogItem cpu_log_items[] = {
5fafdf24 1710 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1711 "show generated host assembly code for each compiled TB" },
1712 { CPU_LOG_TB_IN_ASM, "in_asm",
1713 "show target assembly code for each compiled TB" },
5fafdf24 1714 { CPU_LOG_TB_OP, "op",
57fec1fe 1715 "show micro ops for each compiled TB" },
f193c797 1716 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1717 "show micro ops "
1718#ifdef TARGET_I386
1719 "before eflags optimization and "
f193c797 1720#endif
e01a1157 1721 "after liveness analysis" },
f193c797
FB
1722 { CPU_LOG_INT, "int",
1723 "show interrupts/exceptions in short format" },
1724 { CPU_LOG_EXEC, "exec",
1725 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1726 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1727 "show CPU state before block translation" },
f193c797
FB
1728#ifdef TARGET_I386
1729 { CPU_LOG_PCALL, "pcall",
1730 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1731 { CPU_LOG_RESET, "cpu_reset",
1732 "show CPU state before CPU resets" },
f193c797 1733#endif
8e3a9fd2 1734#ifdef DEBUG_IOPORT
fd872598
FB
1735 { CPU_LOG_IOPORT, "ioport",
1736 "show all i/o ports accesses" },
8e3a9fd2 1737#endif
f193c797
FB
1738 { 0, NULL, NULL },
1739};
1740
1741static int cmp1(const char *s1, int n, const char *s2)
1742{
1743 if (strlen(s2) != n)
1744 return 0;
1745 return memcmp(s1, s2, n) == 0;
1746}
3b46e624 1747
f193c797
FB
1748/* takes a comma separated list of log masks. Return 0 if error. */
1749int cpu_str_to_log_mask(const char *str)
1750{
c7cd6a37 1751 const CPULogItem *item;
f193c797
FB
1752 int mask;
1753 const char *p, *p1;
1754
1755 p = str;
1756 mask = 0;
1757 for(;;) {
1758 p1 = strchr(p, ',');
1759 if (!p1)
1760 p1 = p + strlen(p);
9742bf26
YT
1761 if(cmp1(p,p1-p,"all")) {
1762 for(item = cpu_log_items; item->mask != 0; item++) {
1763 mask |= item->mask;
1764 }
1765 } else {
1766 for(item = cpu_log_items; item->mask != 0; item++) {
1767 if (cmp1(p, p1 - p, item->name))
1768 goto found;
1769 }
1770 return 0;
f193c797 1771 }
f193c797
FB
1772 found:
1773 mask |= item->mask;
1774 if (*p1 != ',')
1775 break;
1776 p = p1 + 1;
1777 }
1778 return mask;
1779}
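/* Illustrative sketch, not part of exec.c: this mirrors how "-d" option
 * handling elsewhere in QEMU uses the parser above.  cpu_set_log() is
 * assumed to be available from the logging code; the function name is
 * hypothetical. */
static void example_enable_logging(const char *items)
{
    int mask = cpu_str_to_log_mask(items);   /* e.g. "in_asm,cpu" */

    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", items);
        return;
    }
    cpu_set_log(mask);
}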
ea041c0e 1780
7501267e
FB
1781void cpu_abort(CPUState *env, const char *fmt, ...)
1782{
1783 va_list ap;
493ae1f0 1784 va_list ap2;
7501267e
FB
1785
1786 va_start(ap, fmt);
493ae1f0 1787 va_copy(ap2, ap);
7501267e
FB
1788 fprintf(stderr, "qemu: fatal: ");
1789 vfprintf(stderr, fmt, ap);
1790 fprintf(stderr, "\n");
1791#ifdef TARGET_I386
7fe48483
FB
1792 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793#else
1794 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1795#endif
93fcfe39
AL
1796 if (qemu_log_enabled()) {
1797 qemu_log("qemu: fatal: ");
1798 qemu_log_vprintf(fmt, ap2);
1799 qemu_log("\n");
f9373291 1800#ifdef TARGET_I386
93fcfe39 1801 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1802#else
93fcfe39 1803 log_cpu_state(env, 0);
f9373291 1804#endif
31b1a7b4 1805 qemu_log_flush();
93fcfe39 1806 qemu_log_close();
924edcae 1807 }
493ae1f0 1808 va_end(ap2);
f9373291 1809 va_end(ap);
fd052bf6
RV
1810#if defined(CONFIG_USER_ONLY)
1811 {
1812 struct sigaction act;
1813 sigfillset(&act.sa_mask);
1814 act.sa_handler = SIG_DFL;
1815 sigaction(SIGABRT, &act, NULL);
1816 }
1817#endif
7501267e
FB
1818 abort();
1819}
1820
c5be9f08
TS
1821CPUState *cpu_copy(CPUState *env)
1822{
01ba9816 1823 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1824 CPUState *next_cpu = new_env->next_cpu;
1825 int cpu_index = new_env->cpu_index;
5a38f081
AL
1826#if defined(TARGET_HAS_ICE)
1827 CPUBreakpoint *bp;
1828 CPUWatchpoint *wp;
1829#endif
1830
c5be9f08 1831 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1832
1833 /* Preserve chaining and index. */
c5be9f08
TS
1834 new_env->next_cpu = next_cpu;
1835 new_env->cpu_index = cpu_index;
5a38f081
AL
1836
1837 /* Clone all break/watchpoints.
1838 Note: Once we support ptrace with hw-debug register access, make sure
1839 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1840 QTAILQ_INIT(&env->breakpoints);
1841 QTAILQ_INIT(&env->watchpoints);
5a38f081 1842#if defined(TARGET_HAS_ICE)
72cf2d4f 1843 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1844 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 }
72cf2d4f 1846 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1847 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1848 wp->flags, NULL);
1849 }
1850#endif
1851
c5be9f08
TS
1852 return new_env;
1853}
1854
0124311e
FB
1855#if !defined(CONFIG_USER_ONLY)
1856
5c751e99
EI
1857static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1858{
1859 unsigned int i;
1860
1861 /* Discard jump cache entries for any tb which might potentially
1862 overlap the flushed page. */
1863 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1864 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1865 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1866
1867 i = tb_jmp_cache_hash_page(addr);
1868 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1869 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1870}
1871
08738984
IK
1872static CPUTLBEntry s_cputlb_empty_entry = {
1873 .addr_read = -1,
1874 .addr_write = -1,
1875 .addr_code = -1,
1876 .addend = -1,
1877};
1878
ee8b7021
FB
1879/* NOTE: if flush_global is true, also flush global entries (not
1880 implemented yet) */
1881void tlb_flush(CPUState *env, int flush_global)
33417e70 1882{
33417e70 1883 int i;
0124311e 1884
9fa3e853
FB
1885#if defined(DEBUG_TLB)
1886 printf("tlb_flush:\n");
1887#endif
0124311e
FB
1888 /* must reset current TB so that interrupts cannot modify the
1889 links while we are modifying them */
1890 env->current_tb = NULL;
1891
33417e70 1892 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1893 int mmu_idx;
1894 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1895 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1896 }
33417e70 1897 }
9fa3e853 1898
8a40a180 1899 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1900
d4c430a8
PB
1901 env->tlb_flush_addr = -1;
1902 env->tlb_flush_mask = 0;
e3db7226 1903 tlb_flush_count++;
33417e70
FB
1904}
1905
274da6b2 1906static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1907{
5fafdf24 1908 if (addr == (tlb_entry->addr_read &
84b7b8e7 1909 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1910 addr == (tlb_entry->addr_write &
84b7b8e7 1911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1912 addr == (tlb_entry->addr_code &
84b7b8e7 1913 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1914 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1915 }
61382a50
FB
1916}
1917
2e12669a 1918void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1919{
8a40a180 1920 int i;
cfde4bd9 1921 int mmu_idx;
0124311e 1922
9fa3e853 1923#if defined(DEBUG_TLB)
108c49b8 1924 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1925#endif
d4c430a8
PB
1926 /* Check if we need to flush due to large pages. */
1927 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1928#if defined(DEBUG_TLB)
1929 printf("tlb_flush_page: forced full flush ("
1930 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1931 env->tlb_flush_addr, env->tlb_flush_mask);
1932#endif
1933 tlb_flush(env, 1);
1934 return;
1935 }
0124311e
FB
1936 /* must reset current TB so that interrupts cannot modify the
1937 links while we are modifying them */
1938 env->current_tb = NULL;
61382a50
FB
1939
1940 addr &= TARGET_PAGE_MASK;
1941 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1942 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1943 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1944
5c751e99 1945 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1946}
1947
9fa3e853
FB
1948/* update the TLBs so that writes to code in the virtual page 'addr'
1949 can be detected */
c227f099 1950static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1951{
5fafdf24 1952 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1953 ram_addr + TARGET_PAGE_SIZE,
1954 CODE_DIRTY_FLAG);
9fa3e853
FB
1955}
1956
9fa3e853 1957/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1958 tested for self-modifying code */
c227f099 1959static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1960 target_ulong vaddr)
9fa3e853 1961{
f7c11b53 1962 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1963}
1964
5fafdf24 1965static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1966 unsigned long start, unsigned long length)
1967{
1968 unsigned long addr;
0e0df1e2 1969 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 1970 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1971 if ((addr - start) < length) {
0f459d16 1972 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1973 }
1974 }
1975}
1976
5579c7f3 1977/* Note: start and end must be within the same ram block. */
c227f099 1978void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1979 int dirty_flags)
1ccde1cb
FB
1980{
1981 CPUState *env;
4f2ac237 1982 unsigned long length, start1;
f7c11b53 1983 int i;
1ccde1cb
FB
1984
1985 start &= TARGET_PAGE_MASK;
1986 end = TARGET_PAGE_ALIGN(end);
1987
1988 length = end - start;
1989 if (length == 0)
1990 return;
f7c11b53 1991 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1992
1ccde1cb
FB
1993 /* we modify the TLB cache so that the dirty bit will be set again
1994 when accessing the range */
b2e0a138 1995 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1996 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1997 address comparisons below. */
b2e0a138 1998 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1999 != (end - 1) - start) {
2000 abort();
2001 }
2002
6a00d601 2003 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2004 int mmu_idx;
2005 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2006 for(i = 0; i < CPU_TLB_SIZE; i++)
2007 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2008 start1, length);
2009 }
6a00d601 2010 }
1ccde1cb
FB
2011}
2012
74576198
AL
2013int cpu_physical_memory_set_dirty_tracking(int enable)
2014{
f6f3fbca 2015 int ret = 0;
74576198 2016 in_migration = enable;
f6f3fbca 2017 return ret;
74576198
AL
2018}
2019
3a7d929e
FB
2020static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2021{
c227f099 2022 ram_addr_t ram_addr;
5579c7f3 2023 void *p;
3a7d929e 2024
0e0df1e2 2025 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2026 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2027 + tlb_entry->addend);
e890261f 2028 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2029 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2030 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2031 }
2032 }
2033}
2034
2035/* update the TLB according to the current state of the dirty bits */
2036void cpu_tlb_update_dirty(CPUState *env)
2037{
2038 int i;
cfde4bd9
IY
2039 int mmu_idx;
2040 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2041 for(i = 0; i < CPU_TLB_SIZE; i++)
2042 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2043 }
3a7d929e
FB
2044}
2045
0f459d16 2046static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2047{
0f459d16
PB
2048 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2049 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2050}
2051
0f459d16
PB
2052/* update the TLB corresponding to virtual page vaddr
2053 so that it is no longer dirty */
2054static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2055{
1ccde1cb 2056 int i;
cfde4bd9 2057 int mmu_idx;
1ccde1cb 2058
0f459d16 2059 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2060 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2061 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2062 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2063}
2064
d4c430a8
PB
2065/* Our TLB does not support large pages, so remember the area covered by
2066 large pages and trigger a full TLB flush if these are invalidated. */
2067static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2068 target_ulong size)
2069{
2070 target_ulong mask = ~(size - 1);
2071
2072 if (env->tlb_flush_addr == (target_ulong)-1) {
2073 env->tlb_flush_addr = vaddr & mask;
2074 env->tlb_flush_mask = mask;
2075 return;
2076 }
2077 /* Extend the existing region to include the new page.
2078 This is a compromise between unnecessary flushes and the cost
2079 of maintaining a full variable size TLB. */
2080 mask &= env->tlb_flush_mask;
2081 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2082 mask <<= 1;
2083 }
2084 env->tlb_flush_addr &= mask;
2085 env->tlb_flush_mask = mask;
2086}
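/* Worked example, not part of exec.c.  With 2 MB pages: inserting one at
 * vaddr 0x00200000 records tlb_flush_addr = 0x00200000 and tlb_flush_mask =
 * ~(target_ulong)0x1fffff.  Inserting a second one at 0x40000000 makes the
 * loop above widen the mask until its lowest set bit is above bit 30, so
 * tlb_flush_addr is rounded down to 0 and any subsequent tlb_flush_page()
 * in the low 2 GB degenerates to a full flush; that is the stated
 * compromise of occasional extra flushes instead of tracking each large
 * page individually. */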
2087
1d393fa2
AK
2088static bool is_ram_rom(ram_addr_t pd)
2089{
2090 pd &= ~TARGET_PAGE_MASK;
0e0df1e2 2091 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
1d393fa2
AK
2092}
2093
75c578dc
AK
2094static bool is_romd(ram_addr_t pd)
2095{
2096 MemoryRegion *mr;
2097
2098 pd &= ~TARGET_PAGE_MASK;
11c7ef0c 2099 mr = io_mem_region[pd];
75c578dc
AK
2100 return mr->rom_device && mr->readable;
2101}
2102
1d393fa2
AK
2103static bool is_ram_rom_romd(ram_addr_t pd)
2104{
75c578dc 2105 return is_ram_rom(pd) || is_romd(pd);
1d393fa2
AK
2106}
2107
d4c430a8
PB
2108/* Add a new TLB entry. At most one entry for a given virtual address
2109 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2110 supplied size is only used by tlb_flush_page. */
2111void tlb_set_page(CPUState *env, target_ulong vaddr,
2112 target_phys_addr_t paddr, int prot,
2113 int mmu_idx, target_ulong size)
9fa3e853 2114{
f1f6e3b8 2115 PhysPageDesc p;
4f2ac237 2116 unsigned long pd;
9fa3e853 2117 unsigned int index;
4f2ac237 2118 target_ulong address;
0f459d16 2119 target_ulong code_address;
355b1943 2120 unsigned long addend;
84b7b8e7 2121 CPUTLBEntry *te;
a1d1bb31 2122 CPUWatchpoint *wp;
c227f099 2123 target_phys_addr_t iotlb;
9fa3e853 2124
d4c430a8
PB
2125 assert(size >= TARGET_PAGE_SIZE);
2126 if (size != TARGET_PAGE_SIZE) {
2127 tlb_add_large_page(env, vaddr, size);
2128 }
92e873b9 2129 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2130 pd = p.phys_offset;
9fa3e853 2131#if defined(DEBUG_TLB)
7fd3f494
SW
2132 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2133 " prot=%x idx=%d pd=0x%08lx\n",
2134 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2135#endif
2136
0f459d16 2137 address = vaddr;
1d393fa2 2138 if (!is_ram_rom_romd(pd)) {
0f459d16
PB
2139 /* IO memory case (romd handled later) */
2140 address |= TLB_MMIO;
2141 }
5579c7f3 2142 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1d393fa2 2143 if (is_ram_rom(pd)) {
0f459d16
PB
2144 /* Normal RAM. */
2145 iotlb = pd & TARGET_PAGE_MASK;
0e0df1e2
AK
2146 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2147 iotlb |= io_mem_notdirty.ram_addr;
0f459d16 2148 else
0e0df1e2 2149 iotlb |= io_mem_rom.ram_addr;
0f459d16 2150 } else {
ccbb4d44 2151 /* IO handlers are currently passed a physical address.
0f459d16
PB
2152 It would be nice to pass an offset from the base address
2153 of that region. This would avoid having to special case RAM,
2154 and avoid full address decoding in every device.
2155 We can't use the high bits of pd for this because
2156 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2157 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2158 iotlb += p.region_offset;
0f459d16
PB
2159 }
2160
2161 code_address = address;
2162 /* Make accesses to pages with watchpoints go via the
2163 watchpoint trap routines. */
72cf2d4f 2164 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2165 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2166 /* Avoid trapping reads of pages with a write breakpoint. */
2167 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1ec9b909 2168 iotlb = io_mem_watch.ram_addr + paddr;
bf298f83
JK
2169 address |= TLB_MMIO;
2170 break;
2171 }
6658ffb8 2172 }
0f459d16 2173 }
d79acba4 2174
0f459d16
PB
2175 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2176 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2177 te = &env->tlb_table[mmu_idx][index];
2178 te->addend = addend - vaddr;
2179 if (prot & PAGE_READ) {
2180 te->addr_read = address;
2181 } else {
2182 te->addr_read = -1;
2183 }
5c751e99 2184
0f459d16
PB
2185 if (prot & PAGE_EXEC) {
2186 te->addr_code = code_address;
2187 } else {
2188 te->addr_code = -1;
2189 }
2190 if (prot & PAGE_WRITE) {
75c578dc 2191 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
0f459d16
PB
2192 /* Write access calls the I/O callback. */
2193 te->addr_write = address | TLB_MMIO;
0e0df1e2 2194 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
0f459d16
PB
2195 !cpu_physical_memory_is_dirty(pd)) {
2196 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2197 } else {
0f459d16 2198 te->addr_write = address;
9fa3e853 2199 }
0f459d16
PB
2200 } else {
2201 te->addr_write = -1;
9fa3e853 2202 }
9fa3e853
FB
2203}
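/* Illustrative sketch, not part of exec.c: a target's MMU fault handler
 * ends a successful page walk with a call like the one below.  The identity
 * mapping and the function name are hypothetical. */
static void example_map_identity_page(CPUState *env, target_ulong vaddr,
                                      int mmu_idx)
{
    target_ulong page = vaddr & TARGET_PAGE_MASK;

    /* one read/write/execute page, guest-virtual == guest-physical */
    tlb_set_page(env, page, (target_phys_addr_t)page,
                 PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                 mmu_idx, TARGET_PAGE_SIZE);
}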
2204
0124311e
FB
2205#else
2206
ee8b7021 2207void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2208{
2209}
2210
2e12669a 2211void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2212{
2213}
2214
edf8e2af
MW
2215/*
2216 * Walks guest process memory "regions" one by one
2217 * and calls callback function 'fn' for each region.
2218 */
5cd2c5b6
RH
2219
2220struct walk_memory_regions_data
2221{
2222 walk_memory_regions_fn fn;
2223 void *priv;
2224 unsigned long start;
2225 int prot;
2226};
2227
2228static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2229 abi_ulong end, int new_prot)
5cd2c5b6
RH
2230{
2231 if (data->start != -1ul) {
2232 int rc = data->fn(data->priv, data->start, end, data->prot);
2233 if (rc != 0) {
2234 return rc;
2235 }
2236 }
2237
2238 data->start = (new_prot ? end : -1ul);
2239 data->prot = new_prot;
2240
2241 return 0;
2242}
2243
2244static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2245 abi_ulong base, int level, void **lp)
5cd2c5b6 2246{
b480d9b7 2247 abi_ulong pa;
5cd2c5b6
RH
2248 int i, rc;
2249
2250 if (*lp == NULL) {
2251 return walk_memory_regions_end(data, base, 0);
2252 }
2253
2254 if (level == 0) {
2255 PageDesc *pd = *lp;
7296abac 2256 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2257 int prot = pd[i].flags;
2258
2259 pa = base | (i << TARGET_PAGE_BITS);
2260 if (prot != data->prot) {
2261 rc = walk_memory_regions_end(data, pa, prot);
2262 if (rc != 0) {
2263 return rc;
9fa3e853 2264 }
9fa3e853 2265 }
5cd2c5b6
RH
2266 }
2267 } else {
2268 void **pp = *lp;
7296abac 2269 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2270 pa = base | ((abi_ulong)i <<
2271 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2272 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2273 if (rc != 0) {
2274 return rc;
2275 }
2276 }
2277 }
2278
2279 return 0;
2280}
2281
2282int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2283{
2284 struct walk_memory_regions_data data;
2285 unsigned long i;
2286
2287 data.fn = fn;
2288 data.priv = priv;
2289 data.start = -1ul;
2290 data.prot = 0;
2291
2292 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2293 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2294 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2295 if (rc != 0) {
2296 return rc;
9fa3e853 2297 }
33417e70 2298 }
5cd2c5b6
RH
2299
2300 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2301}
2302
b480d9b7
PB
2303static int dump_region(void *priv, abi_ulong start,
2304 abi_ulong end, unsigned long prot)
edf8e2af
MW
2305{
2306 FILE *f = (FILE *)priv;
2307
b480d9b7
PB
2308 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2309 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2310 start, end, end - start,
2311 ((prot & PAGE_READ) ? 'r' : '-'),
2312 ((prot & PAGE_WRITE) ? 'w' : '-'),
2313 ((prot & PAGE_EXEC) ? 'x' : '-'));
2314
2315 return (0);
2316}
2317
2318/* dump memory mappings */
2319void page_dump(FILE *f)
2320{
2321 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2322 "start", "end", "size", "prot");
2323 walk_memory_regions(f, dump_region);
33417e70
FB
2324}
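/* Illustrative sketch, not part of exec.c: besides dump_region() above,
 * walk_memory_regions() accepts any callback with this signature.  The
 * hypothetical example below just totals the executable bytes mapped in
 * the guest. */
static int example_count_exec_region(void *priv, abi_ulong start,
                                     abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;   /* returning non-zero would stop the walk */
}

static abi_ulong example_exec_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, example_count_exec_region);
    return total;
}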
2325
53a5960a 2326int page_get_flags(target_ulong address)
33417e70 2327{
9fa3e853
FB
2328 PageDesc *p;
2329
2330 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2331 if (!p)
9fa3e853
FB
2332 return 0;
2333 return p->flags;
2334}
2335
376a7909
RH
2336/* Modify the flags of a page and invalidate the code if necessary.
2337 The flag PAGE_WRITE_ORG is positioned automatically depending
2338 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2339void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2340{
376a7909
RH
2341 target_ulong addr, len;
2342
2343 /* This function should never be called with addresses outside the
2344 guest address space. If this assert fires, it probably indicates
2345 a missing call to h2g_valid. */
b480d9b7
PB
2346#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2347 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2348#endif
2349 assert(start < end);
9fa3e853
FB
2350
2351 start = start & TARGET_PAGE_MASK;
2352 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2353
2354 if (flags & PAGE_WRITE) {
9fa3e853 2355 flags |= PAGE_WRITE_ORG;
376a7909
RH
2356 }
2357
2358 for (addr = start, len = end - start;
2359 len != 0;
2360 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2361 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2362
2363 /* If the write protection bit is set, then we invalidate
2364 the code inside. */
5fafdf24 2365 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2366 (flags & PAGE_WRITE) &&
2367 p->first_tb) {
d720b93d 2368 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2369 }
2370 p->flags = flags;
2371 }
33417e70
FB
2372}
2373
3d97b40b
TS
2374int page_check_range(target_ulong start, target_ulong len, int flags)
2375{
2376 PageDesc *p;
2377 target_ulong end;
2378 target_ulong addr;
2379
376a7909
RH
2380 /* This function should never be called with addresses outside the
2381 guest address space. If this assert fires, it probably indicates
2382 a missing call to h2g_valid. */
338e9e6c
BS
2383#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2384 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2385#endif
2386
3e0650a9
RH
2387 if (len == 0) {
2388 return 0;
2389 }
376a7909
RH
2390 if (start + len - 1 < start) {
2391 /* We've wrapped around. */
55f280c9 2392 return -1;
376a7909 2393 }
55f280c9 2394
3d97b40b
TS
2395 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2396 start = start & TARGET_PAGE_MASK;
2397
376a7909
RH
2398 for (addr = start, len = end - start;
2399 len != 0;
2400 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2401 p = page_find(addr >> TARGET_PAGE_BITS);
2402 if( !p )
2403 return -1;
2404 if( !(p->flags & PAGE_VALID) )
2405 return -1;
2406
dae3270c 2407 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2408 return -1;
dae3270c
FB
2409 if (flags & PAGE_WRITE) {
2410 if (!(p->flags & PAGE_WRITE_ORG))
2411 return -1;
2412 /* unprotect the page if it was put read-only because it
2413 contains translated code */
2414 if (!(p->flags & PAGE_WRITE)) {
2415 if (!page_unprotect(addr, 0, NULL))
2416 return -1;
2417 }
2418 return 0;
2419 }
3d97b40b
TS
2420 }
2421 return 0;
2422}
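/* Illustrative sketch, not part of exec.c: syscall emulation normally
 * validates a guest buffer with page_check_range() before locking or
 * copying it.  The helper name is hypothetical. */
static int example_guest_buffer_writable(target_ulong guest_addr,
                                         target_ulong len)
{
    /* page_check_range() returns 0 when every page in the range is
       mapped with the requested protection, -1 otherwise */
    return page_check_range(guest_addr, len, PAGE_WRITE) == 0;
}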
2423
9fa3e853 2424/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2425 page. Return TRUE if the fault was successfully handled. */
53a5960a 2426int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2427{
45d679d6
AJ
2428 unsigned int prot;
2429 PageDesc *p;
53a5960a 2430 target_ulong host_start, host_end, addr;
9fa3e853 2431
c8a706fe
PB
2432 /* Technically this isn't safe inside a signal handler. However we
2433 know this only ever happens in a synchronous SEGV handler, so in
2434 practice it seems to be ok. */
2435 mmap_lock();
2436
45d679d6
AJ
2437 p = page_find(address >> TARGET_PAGE_BITS);
2438 if (!p) {
c8a706fe 2439 mmap_unlock();
9fa3e853 2440 return 0;
c8a706fe 2441 }
45d679d6 2442
9fa3e853
FB
2443 /* if the page was really writable, then we change its
2444 protection back to writable */
45d679d6
AJ
2445 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2446 host_start = address & qemu_host_page_mask;
2447 host_end = host_start + qemu_host_page_size;
2448
2449 prot = 0;
2450 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2451 p = page_find(addr >> TARGET_PAGE_BITS);
2452 p->flags |= PAGE_WRITE;
2453 prot |= p->flags;
2454
9fa3e853
FB
2455 /* and since the content will be modified, we must invalidate
2456 the corresponding translated code. */
45d679d6 2457 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2458#ifdef DEBUG_TB_CHECK
45d679d6 2459 tb_invalidate_check(addr);
9fa3e853 2460#endif
9fa3e853 2461 }
45d679d6
AJ
2462 mprotect((void *)g2h(host_start), qemu_host_page_size,
2463 prot & PAGE_BITS);
2464
2465 mmap_unlock();
2466 return 1;
9fa3e853 2467 }
c8a706fe 2468 mmap_unlock();
9fa3e853
FB
2469 return 0;
2470}
2471
6a00d601
FB
2472static inline void tlb_set_dirty(CPUState *env,
2473 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2474{
2475}
9fa3e853
FB
2476#endif /* defined(CONFIG_USER_ONLY) */
2477
e2eef170 2478#if !defined(CONFIG_USER_ONLY)
8da3ff18 2479
c04b2b78
PB
2480#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2481typedef struct subpage_t {
70c68e44 2482 MemoryRegion iomem;
c04b2b78 2483 target_phys_addr_t base;
f6405247
RH
2484 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2485 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2486} subpage_t;
2487
c227f099
AL
2488static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2489 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2490static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2491 ram_addr_t orig_memory,
2492 ram_addr_t region_offset);
db7b5426
BS
2493#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2494 need_subpage) \
2495 do { \
2496 if (addr > start_addr) \
2497 start_addr2 = 0; \
2498 else { \
2499 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2500 if (start_addr2 > 0) \
2501 need_subpage = 1; \
2502 } \
2503 \
49e9fba2 2504 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2505 end_addr2 = TARGET_PAGE_SIZE - 1; \
2506 else { \
2507 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2508 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2509 need_subpage = 1; \
2510 } \
2511 } while (0)
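/* Worked example, not part of exec.c.  With 4 KB target pages, registering
 * a region with start_addr = 0x1000200 and orig_size = 0x300 gives, on the
 * first iteration of the registration loop below (addr == start_addr):
 * start_addr2 = 0x200, end_addr2 = 0x4ff and need_subpage = 1, i.e. only
 * bytes 0x200..0x4ff of the page at 0x1000000 are routed to the new
 * handler, so a subpage structure is required for that page. */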
2512
8f2498f9
MT
2513/* register physical memory.
2514 For RAM, 'size' must be a multiple of the target page size.
2515 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2516 io memory page. The address used when calling the IO function is
2517 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2518 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2519 before calculating this offset. This should not be a problem unless
2520 the low bits of start_addr and region_offset differ. */
dd81124b
AK
2521void cpu_register_physical_memory_log(MemoryRegionSection *section,
2522 bool readable, bool readonly)
33417e70 2523{
dd81124b
AK
2524 target_phys_addr_t start_addr = section->offset_within_address_space;
2525 ram_addr_t size = section->size;
2526 ram_addr_t phys_offset = section->mr->ram_addr;
2527 ram_addr_t region_offset = section->offset_within_region;
c227f099 2528 target_phys_addr_t addr, end_addr;
92e873b9 2529 PhysPageDesc *p;
9d42037b 2530 CPUState *env;
c227f099 2531 ram_addr_t orig_size = size;
f6405247 2532 subpage_t *subpage;
33417e70 2533
dd81124b
AK
2534 if (memory_region_is_ram(section->mr)) {
2535 phys_offset += region_offset;
2536 region_offset = 0;
2537 }
2538
dd81124b
AK
2539 if (readonly) {
2540 phys_offset |= io_mem_rom.ram_addr;
2541 }
2542
3b8e6a2d 2543 assert(size);
f6f3fbca 2544
0e0df1e2 2545 if (phys_offset == io_mem_unassigned.ram_addr) {
67c4d23c
PB
2546 region_offset = start_addr;
2547 }
8da3ff18 2548 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2549 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2550 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2551
2552 addr = start_addr;
2553 do {
f1f6e3b8 2554 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
0e0df1e2 2555 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
c227f099
AL
2556 ram_addr_t orig_memory = p->phys_offset;
2557 target_phys_addr_t start_addr2, end_addr2;
db7b5426 2558 int need_subpage = 0;
11c7ef0c 2559 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
db7b5426
BS
2560
2561 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2562 need_subpage);
f6405247 2563 if (need_subpage) {
b3b00c78 2564 if (!(mr->subpage)) {
db7b5426 2565 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2566 &p->phys_offset, orig_memory,
2567 p->region_offset);
db7b5426 2568 } else {
a621f38d 2569 subpage = container_of(mr, subpage_t, iomem);
db7b5426 2570 }
8da3ff18
PB
2571 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2572 region_offset);
2573 p->region_offset = 0;
db7b5426
BS
2574 } else {
2575 p->phys_offset = phys_offset;
2774c6d0 2576 p->region_offset = region_offset;
1d393fa2 2577 if (is_ram_rom_romd(phys_offset))
db7b5426
BS
2578 phys_offset += TARGET_PAGE_SIZE;
2579 }
2580 } else {
2581 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2582 p->phys_offset = phys_offset;
8da3ff18 2583 p->region_offset = region_offset;
1d393fa2 2584 if (is_ram_rom_romd(phys_offset)) {
db7b5426 2585 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2586 } else {
c227f099 2587 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2588 int need_subpage = 0;
2589
2590 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2591 end_addr2, need_subpage);
2592
f6405247 2593 if (need_subpage) {
db7b5426 2594 subpage = subpage_init((addr & TARGET_PAGE_MASK),
0e0df1e2
AK
2595 &p->phys_offset,
2596 io_mem_unassigned.ram_addr,
67c4d23c 2597 addr & TARGET_PAGE_MASK);
db7b5426 2598 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2599 phys_offset, region_offset);
2600 p->region_offset = 0;
db7b5426
BS
2601 }
2602 }
2603 }
8da3ff18 2604 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2605 addr += TARGET_PAGE_SIZE;
2606 } while (addr != end_addr);
3b46e624 2607
9d42037b
FB
2608 /* since each CPU stores ram addresses in its TLB cache, we must
2609 reset the modified entries */
2610 /* XXX: slow ! */
2611 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2612 tlb_flush(env, 1);
2613 }
33417e70
FB
2614}
2615
c227f099 2616void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2617{
2618 if (kvm_enabled())
2619 kvm_coalesce_mmio_region(addr, size);
2620}
2621
c227f099 2622void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2623{
2624 if (kvm_enabled())
2625 kvm_uncoalesce_mmio_region(addr, size);
2626}
2627
62a2744c
SY
2628void qemu_flush_coalesced_mmio_buffer(void)
2629{
2630 if (kvm_enabled())
2631 kvm_flush_coalesced_mmio_buffer();
2632}
2633
c902760f
MT
2634#if defined(__linux__) && !defined(TARGET_S390X)
2635
2636#include <sys/vfs.h>
2637
2638#define HUGETLBFS_MAGIC 0x958458f6
2639
2640static long gethugepagesize(const char *path)
2641{
2642 struct statfs fs;
2643 int ret;
2644
2645 do {
9742bf26 2646 ret = statfs(path, &fs);
c902760f
MT
2647 } while (ret != 0 && errno == EINTR);
2648
2649 if (ret != 0) {
9742bf26
YT
2650 perror(path);
2651 return 0;
c902760f
MT
2652 }
2653
2654 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2655 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2656
2657 return fs.f_bsize;
2658}
2659
04b16653
AW
2660static void *file_ram_alloc(RAMBlock *block,
2661 ram_addr_t memory,
2662 const char *path)
c902760f
MT
2663{
2664 char *filename;
2665 void *area;
2666 int fd;
2667#ifdef MAP_POPULATE
2668 int flags;
2669#endif
2670 unsigned long hpagesize;
2671
2672 hpagesize = gethugepagesize(path);
2673 if (!hpagesize) {
9742bf26 2674 return NULL;
c902760f
MT
2675 }
2676
2677 if (memory < hpagesize) {
2678 return NULL;
2679 }
2680
2681 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2682 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2683 return NULL;
2684 }
2685
2686 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2687 return NULL;
c902760f
MT
2688 }
2689
2690 fd = mkstemp(filename);
2691 if (fd < 0) {
9742bf26
YT
2692 perror("unable to create backing store for hugepages");
2693 free(filename);
2694 return NULL;
c902760f
MT
2695 }
2696 unlink(filename);
2697 free(filename);
2698
2699 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2700
2701 /*
2702 * ftruncate is not supported by hugetlbfs in older
2703 * hosts, so don't bother bailing out on errors.
2704 * If anything goes wrong with it under other filesystems,
2705 * mmap will fail.
2706 */
2707 if (ftruncate(fd, memory))
9742bf26 2708 perror("ftruncate");
c902760f
MT
2709
2710#ifdef MAP_POPULATE
2711 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2712 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2713 * to sidestep this quirk.
2714 */
2715 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2716 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2717#else
2718 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2719#endif
2720 if (area == MAP_FAILED) {
9742bf26
YT
2721 perror("file_ram_alloc: can't mmap RAM pages");
2722 close(fd);
2723 return (NULL);
c902760f 2724 }
04b16653 2725 block->fd = fd;
c902760f
MT
2726 return area;
2727}
2728#endif
2729
d17b5288 2730static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2731{
2732 RAMBlock *block, *next_block;
3e837b2c 2733 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2734
2735 if (QLIST_EMPTY(&ram_list.blocks))
2736 return 0;
2737
2738 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2739 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2740
2741 end = block->offset + block->length;
2742
2743 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2744 if (next_block->offset >= end) {
2745 next = MIN(next, next_block->offset);
2746 }
2747 }
2748 if (next - end >= size && next - end < mingap) {
3e837b2c 2749 offset = end;
04b16653
AW
2750 mingap = next - end;
2751 }
2752 }
3e837b2c
AW
2753
2754 if (offset == RAM_ADDR_MAX) {
2755 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2756 (uint64_t)size);
2757 abort();
2758 }
2759
04b16653
AW
2760 return offset;
2761}
2762
2763static ram_addr_t last_ram_offset(void)
d17b5288
AW
2764{
2765 RAMBlock *block;
2766 ram_addr_t last = 0;
2767
2768 QLIST_FOREACH(block, &ram_list.blocks, next)
2769 last = MAX(last, block->offset + block->length);
2770
2771 return last;
2772}
2773
c5705a77 2774void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2775{
2776 RAMBlock *new_block, *block;
2777
c5705a77
AK
2778 new_block = NULL;
2779 QLIST_FOREACH(block, &ram_list.blocks, next) {
2780 if (block->offset == addr) {
2781 new_block = block;
2782 break;
2783 }
2784 }
2785 assert(new_block);
2786 assert(!new_block->idstr[0]);
84b89d78
CM
2787
2788 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2789 char *id = dev->parent_bus->info->get_dev_path(dev);
2790 if (id) {
2791 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2792 g_free(id);
84b89d78
CM
2793 }
2794 }
2795 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2796
2797 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2798 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2799 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2800 new_block->idstr);
2801 abort();
2802 }
2803 }
c5705a77
AK
2804}
2805
2806ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2807 MemoryRegion *mr)
2808{
2809 RAMBlock *new_block;
2810
2811 size = TARGET_PAGE_ALIGN(size);
2812 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2813
7c637366 2814 new_block->mr = mr;
432d268c 2815 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2816 if (host) {
2817 new_block->host = host;
cd19cfa2 2818 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2819 } else {
2820 if (mem_path) {
c902760f 2821#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2822 new_block->host = file_ram_alloc(new_block, size, mem_path);
2823 if (!new_block->host) {
2824 new_block->host = qemu_vmalloc(size);
e78815a5 2825 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2826 }
c902760f 2827#else
6977dfe6
YT
2828 fprintf(stderr, "-mem-path option unsupported\n");
2829 exit(1);
c902760f 2830#endif
6977dfe6 2831 } else {
6b02494d 2832#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2833 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2834 a system-defined value, which is at least 256GB. Larger systems
2835 have larger values. We put the guest between the end of the data
2836 segment (system break) and this value. We use 32GB as a base to
2837 have enough room for the system break to grow. */
2838 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2839 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2840 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2841 if (new_block->host == MAP_FAILED) {
2842 fprintf(stderr, "Allocating RAM failed\n");
2843 abort();
2844 }
6b02494d 2845#else
868bb33f 2846 if (xen_enabled()) {
fce537d4 2847 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2848 } else {
2849 new_block->host = qemu_vmalloc(size);
2850 }
6b02494d 2851#endif
e78815a5 2852 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2853 }
c902760f 2854 }
94a6b54f
PB
2855 new_block->length = size;
2856
f471a17e 2857 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2858
7267c094 2859 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2860 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2861 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2862 0xff, size >> TARGET_PAGE_BITS);
2863
6f0437e8
JK
2864 if (kvm_enabled())
2865 kvm_setup_guest_memory(new_block->host, size);
2866
94a6b54f
PB
2867 return new_block->offset;
2868}
e9a1ab19 2869
c5705a77 2870ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2871{
c5705a77 2872 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2873}
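/* Illustrative sketch, not part of exec.c: most device code reaches
 * qemu_ram_alloc() indirectly through the memory API when a RAM-backed
 * MemoryRegion is created; a minimal direct use (hypothetical helper name)
 * would look like this. */
static ram_addr_t example_alloc_vram(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);   /* page-aligned block */
    void *host = qemu_get_ram_ptr(offset);          /* host view of it */

    memset(host, 0, size);                          /* start zeroed */
    return offset;
}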
2874
1f2e98b6
AW
2875void qemu_ram_free_from_ptr(ram_addr_t addr)
2876{
2877 RAMBlock *block;
2878
2879 QLIST_FOREACH(block, &ram_list.blocks, next) {
2880 if (addr == block->offset) {
2881 QLIST_REMOVE(block, next);
7267c094 2882 g_free(block);
1f2e98b6
AW
2883 return;
2884 }
2885 }
2886}
2887
c227f099 2888void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2889{
04b16653
AW
2890 RAMBlock *block;
2891
2892 QLIST_FOREACH(block, &ram_list.blocks, next) {
2893 if (addr == block->offset) {
2894 QLIST_REMOVE(block, next);
cd19cfa2
HY
2895 if (block->flags & RAM_PREALLOC_MASK) {
2896 ;
2897 } else if (mem_path) {
04b16653
AW
2898#if defined (__linux__) && !defined(TARGET_S390X)
2899 if (block->fd) {
2900 munmap(block->host, block->length);
2901 close(block->fd);
2902 } else {
2903 qemu_vfree(block->host);
2904 }
fd28aa13
JK
2905#else
2906 abort();
04b16653
AW
2907#endif
2908 } else {
2909#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2910 munmap(block->host, block->length);
2911#else
868bb33f 2912 if (xen_enabled()) {
e41d7c69 2913 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2914 } else {
2915 qemu_vfree(block->host);
2916 }
04b16653
AW
2917#endif
2918 }
7267c094 2919 g_free(block);
04b16653
AW
2920 return;
2921 }
2922 }
2923
e9a1ab19
FB
2924}
2925
cd19cfa2
HY
2926#ifndef _WIN32
2927void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2928{
2929 RAMBlock *block;
2930 ram_addr_t offset;
2931 int flags;
2932 void *area, *vaddr;
2933
2934 QLIST_FOREACH(block, &ram_list.blocks, next) {
2935 offset = addr - block->offset;
2936 if (offset < block->length) {
2937 vaddr = block->host + offset;
2938 if (block->flags & RAM_PREALLOC_MASK) {
2939 ;
2940 } else {
2941 flags = MAP_FIXED;
2942 munmap(vaddr, length);
2943 if (mem_path) {
2944#if defined(__linux__) && !defined(TARGET_S390X)
2945 if (block->fd) {
2946#ifdef MAP_POPULATE
2947 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2948 MAP_PRIVATE;
2949#else
2950 flags |= MAP_PRIVATE;
2951#endif
2952 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2953 flags, block->fd, offset);
2954 } else {
2955 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2956 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2957 flags, -1, 0);
2958 }
fd28aa13
JK
2959#else
2960 abort();
cd19cfa2
HY
2961#endif
2962 } else {
2963#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2964 flags |= MAP_SHARED | MAP_ANONYMOUS;
2965 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2966 flags, -1, 0);
2967#else
2968 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2969 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2970 flags, -1, 0);
2971#endif
2972 }
2973 if (area != vaddr) {
f15fbc4b
AP
2974 fprintf(stderr, "Could not remap addr: "
2975 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2976 length, addr);
2977 exit(1);
2978 }
2979 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2980 }
2981 return;
2982 }
2983 }
2984}
2985#endif /* !_WIN32 */
2986
dc828ca1 2987/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2988 With the exception of the softmmu code in this file, this should
2989 only be used for local memory (e.g. video ram) that the device owns,
2990 and knows it isn't going to access beyond the end of the block.
2991
2992 It should not be used for general purpose DMA.
2993 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2994 */
c227f099 2995void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2996{
94a6b54f
PB
2997 RAMBlock *block;
2998
f471a17e
AW
2999 QLIST_FOREACH(block, &ram_list.blocks, next) {
3000 if (addr - block->offset < block->length) {
7d82af38
VP
3001 /* Move this entry to the start of the list. */
3002 if (block != QLIST_FIRST(&ram_list.blocks)) {
3003 QLIST_REMOVE(block, next);
3004 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3005 }
868bb33f 3006 if (xen_enabled()) {
432d268c
JN
3007 /* We need to check if the requested address is in the RAM
3008 * because we don't want to map the entire memory in QEMU.
712c2b41 3009 * In that case just map until the end of the page.
432d268c
JN
3010 */
3011 if (block->offset == 0) {
e41d7c69 3012 return xen_map_cache(addr, 0, 0);
432d268c 3013 } else if (block->host == NULL) {
e41d7c69
JK
3014 block->host =
3015 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3016 }
3017 }
f471a17e
AW
3018 return block->host + (addr - block->offset);
3019 }
94a6b54f 3020 }
f471a17e
AW
3021
3022 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3023 abort();
3024
3025 return NULL;
dc828ca1
PB
3026}
3027
b2e0a138
MT
3028/* Return a host pointer to ram allocated with qemu_ram_alloc.
3029 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3030 */
3031void *qemu_safe_ram_ptr(ram_addr_t addr)
3032{
3033 RAMBlock *block;
3034
3035 QLIST_FOREACH(block, &ram_list.blocks, next) {
3036 if (addr - block->offset < block->length) {
868bb33f 3037 if (xen_enabled()) {
432d268c
JN
3038 /* We need to check if the requested address is in the RAM
3039 * because we don't want to map the entire memory in QEMU.
712c2b41 3040 * In that case just map until the end of the page.
432d268c
JN
3041 */
3042 if (block->offset == 0) {
e41d7c69 3043 return xen_map_cache(addr, 0, 0);
432d268c 3044 } else if (block->host == NULL) {
e41d7c69
JK
3045 block->host =
3046 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3047 }
3048 }
b2e0a138
MT
3049 return block->host + (addr - block->offset);
3050 }
3051 }
3052
3053 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3054 abort();
3055
3056 return NULL;
3057}
3058
38bee5dc
SS
3059/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3060 * but takes a size argument */
8ab934f9 3061void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3062{
8ab934f9
SS
3063 if (*size == 0) {
3064 return NULL;
3065 }
868bb33f 3066 if (xen_enabled()) {
e41d7c69 3067 return xen_map_cache(addr, *size, 1);
868bb33f 3068 } else {
38bee5dc
SS
3069 RAMBlock *block;
3070
3071 QLIST_FOREACH(block, &ram_list.blocks, next) {
3072 if (addr - block->offset < block->length) {
3073 if (addr - block->offset + *size > block->length)
3074 *size = block->length - addr + block->offset;
3075 return block->host + (addr - block->offset);
3076 }
3077 }
3078
3079 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3080 abort();
38bee5dc
SS
3081 }
3082}
3083
050a0ddf
AP
3084void qemu_put_ram_ptr(void *addr)
3085{
3086 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3087}
3088
e890261f 3089int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3090{
94a6b54f
PB
3091 RAMBlock *block;
3092 uint8_t *host = ptr;
3093
868bb33f 3094 if (xen_enabled()) {
e41d7c69 3095 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3096 return 0;
3097 }
3098
f471a17e 3099 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3100 /* This case happens when the block is not mapped. */
3101 if (block->host == NULL) {
3102 continue;
3103 }
f471a17e 3104 if (host - block->host < block->length) {
e890261f
MT
3105 *ram_addr = block->offset + (host - block->host);
3106 return 0;
f471a17e 3107 }
94a6b54f 3108 }
432d268c 3109
e890261f
MT
3110 return -1;
3111}
f471a17e 3112
e890261f
MT
3113/* Some of the softmmu routines need to translate from a host pointer
3114 (typically a TLB entry) back to a ram offset. */
3115ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3116{
3117 ram_addr_t ram_addr;
f471a17e 3118
e890261f
MT
3119 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3120 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3121 abort();
3122 }
3123 return ram_addr;
5579c7f3
PB
3124}
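/* Illustrative sketch, not part of exec.c: code that only has a host
 * pointer obtained from a TLB entry can recover the ram_addr_t and update
 * the dirty bitmap like this.  The helper name is hypothetical; 0xff simply
 * sets all dirty flag bits, as done elsewhere in this file. */
static void example_mark_host_ptr_dirty(void *host_ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(host_ptr, &ram_addr) == 0) {
        cpu_physical_memory_set_dirty_flags(ram_addr, 0xff);
    }
}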
3125
0e0df1e2
AK
3126static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3127 unsigned size)
e18231a3
BS
3128{
3129#ifdef DEBUG_UNASSIGNED
3130 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3131#endif
5b450407 3132#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3133 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3134#endif
3135 return 0;
3136}
3137
0e0df1e2
AK
3138static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3139 uint64_t val, unsigned size)
e18231a3
BS
3140{
3141#ifdef DEBUG_UNASSIGNED
0e0df1e2 3142 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3143#endif
5b450407 3144#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3145 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3146#endif
33417e70
FB
3147}
3148
0e0df1e2
AK
3149static const MemoryRegionOps unassigned_mem_ops = {
3150 .read = unassigned_mem_read,
3151 .write = unassigned_mem_write,
3152 .endianness = DEVICE_NATIVE_ENDIAN,
3153};
e18231a3 3154
0e0df1e2
AK
3155static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3156 unsigned size)
e18231a3 3157{
0e0df1e2 3158 abort();
e18231a3
BS
3159}
3160
0e0df1e2
AK
3161static void error_mem_write(void *opaque, target_phys_addr_t addr,
3162 uint64_t value, unsigned size)
e18231a3 3163{
0e0df1e2 3164 abort();
33417e70
FB
3165}
3166
0e0df1e2
AK
3167static const MemoryRegionOps error_mem_ops = {
3168 .read = error_mem_read,
3169 .write = error_mem_write,
3170 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3171};
3172
0e0df1e2
AK
3173static const MemoryRegionOps rom_mem_ops = {
3174 .read = error_mem_read,
3175 .write = unassigned_mem_write,
3176 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3177};
3178
0e0df1e2
AK
3179static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3180 uint64_t val, unsigned size)
9fa3e853 3181{
3a7d929e 3182 int dirty_flags;
f7c11b53 3183 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3184 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3185#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3186 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3187 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3188#endif
3a7d929e 3189 }
0e0df1e2
AK
3190 switch (size) {
3191 case 1:
3192 stb_p(qemu_get_ram_ptr(ram_addr), val);
3193 break;
3194 case 2:
3195 stw_p(qemu_get_ram_ptr(ram_addr), val);
3196 break;
3197 case 4:
3198 stl_p(qemu_get_ram_ptr(ram_addr), val);
3199 break;
3200 default:
3201 abort();
3a7d929e 3202 }
f23db169 3203 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3204 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3205 /* we remove the notdirty callback only if the code has been
3206 flushed */
3207 if (dirty_flags == 0xff)
2e70f6ef 3208 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3209}
3210
0e0df1e2
AK
3211static const MemoryRegionOps notdirty_mem_ops = {
3212 .read = error_mem_read,
3213 .write = notdirty_mem_write,
3214 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3215};
3216
0f459d16 3217/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3218static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3219{
3220 CPUState *env = cpu_single_env;
06d55cc1
AL
3221 target_ulong pc, cs_base;
3222 TranslationBlock *tb;
0f459d16 3223 target_ulong vaddr;
a1d1bb31 3224 CPUWatchpoint *wp;
06d55cc1 3225 int cpu_flags;
0f459d16 3226
06d55cc1
AL
3227 if (env->watchpoint_hit) {
3228 /* We re-entered the check after replacing the TB. Now raise
3229 * the debug interrupt so that it will trigger after the
3230 * current instruction. */
3231 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3232 return;
3233 }
2e70f6ef 3234 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3235 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3236 if ((vaddr == (wp->vaddr & len_mask) ||
3237 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3238 wp->flags |= BP_WATCHPOINT_HIT;
3239 if (!env->watchpoint_hit) {
3240 env->watchpoint_hit = wp;
3241 tb = tb_find_pc(env->mem_io_pc);
3242 if (!tb) {
3243 cpu_abort(env, "check_watchpoint: could not find TB for "
3244 "pc=%p", (void *)env->mem_io_pc);
3245 }
618ba8e6 3246 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3247 tb_phys_invalidate(tb, -1);
3248 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3249 env->exception_index = EXCP_DEBUG;
3250 } else {
3251 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3252 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3253 }
3254 cpu_resume_from_signal(env, NULL);
06d55cc1 3255 }
6e140f28
AL
3256 } else {
3257 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3258 }
3259 }
3260}
3261
6658ffb8
PB
3262/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3263 so these check for a hit then pass through to the normal out-of-line
3264 phys routines. */
1ec9b909
AK
3265static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3266 unsigned size)
6658ffb8 3267{
1ec9b909
AK
3268 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3269 switch (size) {
3270 case 1: return ldub_phys(addr);
3271 case 2: return lduw_phys(addr);
3272 case 4: return ldl_phys(addr);
3273 default: abort();
3274 }
6658ffb8
PB
3275}
3276
1ec9b909
AK
3277static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3278 uint64_t val, unsigned size)
6658ffb8 3279{
1ec9b909
AK
3280 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3281 switch (size) {
3282 case 1: stb_phys(addr, val); break;
3283 case 2: stw_phys(addr, val); break;
3284 case 4: stl_phys(addr, val); break;
3285 default: abort();
3286 }
6658ffb8
PB
3287}
3288
1ec9b909
AK
3289static const MemoryRegionOps watch_mem_ops = {
3290 .read = watch_mem_read,
3291 .write = watch_mem_write,
3292 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3293};
6658ffb8 3294
70c68e44
AK
3295static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3296 unsigned len)
db7b5426 3297{
70c68e44 3298 subpage_t *mmio = opaque;
f6405247 3299 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3300#if defined(DEBUG_SUBPAGE)
3301 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3302 mmio, len, addr, idx);
3303#endif
db7b5426 3304
f6405247
RH
3305 addr += mmio->region_offset[idx];
3306 idx = mmio->sub_io_index[idx];
70c68e44 3307 return io_mem_read(idx, addr, len);
db7b5426
BS
3308}
3309
70c68e44
AK
3310static void subpage_write(void *opaque, target_phys_addr_t addr,
3311 uint64_t value, unsigned len)
db7b5426 3312{
70c68e44 3313 subpage_t *mmio = opaque;
f6405247 3314 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3315#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3316 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3317 " idx %d value %"PRIx64"\n",
f6405247 3318 __func__, mmio, len, addr, idx, value);
db7b5426 3319#endif
f6405247
RH
3320
3321 addr += mmio->region_offset[idx];
3322 idx = mmio->sub_io_index[idx];
70c68e44 3323 io_mem_write(idx, addr, value, len);
db7b5426
BS
3324}
3325
70c68e44
AK
3326static const MemoryRegionOps subpage_ops = {
3327 .read = subpage_read,
3328 .write = subpage_write,
3329 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3330};
3331
de712f94
AK
3332static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3333 unsigned size)
56384e8b
AF
3334{
3335 ram_addr_t raddr = addr;
3336 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3337 switch (size) {
3338 case 1: return ldub_p(ptr);
3339 case 2: return lduw_p(ptr);
3340 case 4: return ldl_p(ptr);
3341 default: abort();
3342 }
56384e8b
AF
3343}
3344
de712f94
AK
3345static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3346 uint64_t value, unsigned size)
56384e8b
AF
3347{
3348 ram_addr_t raddr = addr;
3349 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3350 switch (size) {
3351 case 1: return stb_p(ptr, value);
3352 case 2: return stw_p(ptr, value);
3353 case 4: return stl_p(ptr, value);
3354 default: abort();
3355 }
56384e8b
AF
3356}
3357
de712f94
AK
3358static const MemoryRegionOps subpage_ram_ops = {
3359 .read = subpage_ram_read,
3360 .write = subpage_ram_write,
3361 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3362};
3363
c227f099
AL
3364static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3365 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3366{
3367 int idx, eidx;
3368
3369 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3370 return -1;
3371 idx = SUBPAGE_IDX(start);
3372 eidx = SUBPAGE_IDX(end);
3373#if defined(DEBUG_SUBPAGE)
0bf9e31a 3374 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3375 mmio, start, end, idx, eidx, memory);
3376#endif
0e0df1e2 3377 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
de712f94 3378 memory = io_mem_subpage_ram.ram_addr;
56384e8b 3379 }
11c7ef0c 3380 memory &= IO_MEM_NB_ENTRIES - 1;
db7b5426 3381 for (; idx <= eidx; idx++) {
f6405247
RH
3382 mmio->sub_io_index[idx] = memory;
3383 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3384 }
3385
3386 return 0;
3387}
3388
f6405247
RH
3389static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3390 ram_addr_t orig_memory,
3391 ram_addr_t region_offset)
db7b5426 3392{
c227f099 3393 subpage_t *mmio;
db7b5426
BS
3394 int subpage_memory;
3395
7267c094 3396 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3397
3398 mmio->base = base;
70c68e44
AK
3399 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3400 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3401 mmio->iomem.subpage = true;
70c68e44 3402 subpage_memory = mmio->iomem.ram_addr;
db7b5426 3403#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3404 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3405 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3406#endif
b3b00c78 3407 *phys = subpage_memory;
f6405247 3408 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3409
3410 return mmio;
3411}
3412
88715657
AL
3413static int get_free_io_mem_idx(void)
3414{
3415 int i;
3416
3417 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3418 if (!io_mem_used[i]) {
3419 io_mem_used[i] = 1;
3420 return i;
3421 }
c6703b47 3422 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3423 return -1;
3424}
3425
33417e70
FB
3426/* Register a MemoryRegion as an io_mem entry. If io_index is non-zero,
3427 the corresponding io zone is modified. If it is zero, a new io zone is
3428 allocated. The return value can be used with
3429 cpu_register_physical_memory(); (-1) is returned on error. */
a621f38d 3433static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
33417e70 3434{
33417e70 3435 if (io_index <= 0) {
88715657
AL
3436 io_index = get_free_io_mem_idx();
3437 if (io_index == -1)
3438 return io_index;
33417e70
FB
3439 } else {
3440 if (io_index >= IO_MEM_NB_ENTRIES)
3441 return -1;
3442 }
b5ff1b31 3443
a621f38d 3444 io_mem_region[io_index] = mr;
f6405247 3445
11c7ef0c 3446 return io_index;
33417e70 3447}
61382a50 3448
a621f38d 3449int cpu_register_io_memory(MemoryRegion *mr)
1eed09cb 3450{
a621f38d 3451 return cpu_register_io_memory_fixed(0, mr);
1eed09cb
AK
3452}
3453
11c7ef0c 3454void cpu_unregister_io_memory(int io_index)
88715657 3455{
a621f38d 3456 io_mem_region[io_index] = NULL;
88715657
AL
3457 io_mem_used[io_index] = 0;
3458}
3459
e9179ce1
AK
3460static void io_mem_init(void)
3461{
3462 int i;
3463
0e0df1e2
AK
3464 /* Must be first: */
3465 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3466 assert(io_mem_ram.ram_addr == 0);
3467 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3468 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3469 "unassigned", UINT64_MAX);
3470 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3471 "notdirty", UINT64_MAX);
de712f94
AK
3472 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3473 "subpage-ram", UINT64_MAX);
e9179ce1
AK
3474 for (i=0; i<5; i++)
3475 io_mem_used[i] = 1;
3476
1ec9b909
AK
3477 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3478 "watch", UINT64_MAX);
e9179ce1
AK
3479}
3480
62152b8a
AK
3481static void memory_map_init(void)
3482{
7267c094 3483 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3484 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3485 set_system_memory_map(system_memory);
309cb471 3486
7267c094 3487 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3488 memory_region_init(system_io, "io", 65536);
3489 set_system_io_map(system_io);
62152b8a
AK
3490}
3491
3492MemoryRegion *get_system_memory(void)
3493{
3494 return system_memory;
3495}
3496
309cb471
AK
3497MemoryRegion *get_system_io(void)
3498{
3499 return system_io;
3500}
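/* Illustrative sketch: boards hook their regions into the address space
   returned by get_system_memory().  "dev_mr" is assumed to have been
   initialised elsewhere (e.g. with memory_region_init_io(), as is done
   for the subpage region above), and "base" is a hypothetical guest
   physical base address. */

static void example_map_device(MemoryRegion *dev_mr, target_phys_addr_t base)
{
    memory_region_add_subregion(get_system_memory(), base, dev_mr);
}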
3501
e2eef170
PB
3502#endif /* !defined(CONFIG_USER_ONLY) */
3503
13eb76e0
FB
3504/* physical memory access (slow version, mainly for debug) */
3505#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3506int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3507 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3508{
3509 int l, flags;
3510 target_ulong page;
53a5960a 3511 void * p;
13eb76e0
FB
3512
3513 while (len > 0) {
3514 page = addr & TARGET_PAGE_MASK;
3515 l = (page + TARGET_PAGE_SIZE) - addr;
3516 if (l > len)
3517 l = len;
3518 flags = page_get_flags(page);
3519 if (!(flags & PAGE_VALID))
a68fe89c 3520 return -1;
13eb76e0
FB
3521 if (is_write) {
3522 if (!(flags & PAGE_WRITE))
a68fe89c 3523 return -1;
579a97f7 3524 /* XXX: this code should not depend on lock_user */
72fb7daa 3525 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3526 return -1;
72fb7daa
AJ
3527 memcpy(p, buf, l);
3528 unlock_user(p, addr, l);
13eb76e0
FB
3529 } else {
3530 if (!(flags & PAGE_READ))
a68fe89c 3531 return -1;
579a97f7 3532 /* XXX: this code should not depend on lock_user */
72fb7daa 3533 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3534 return -1;
72fb7daa 3535 memcpy(buf, p, l);
5b257578 3536 unlock_user(p, addr, 0);
13eb76e0
FB
3537 }
3538 len -= l;
3539 buf += l;
3540 addr += l;
3541 }
a68fe89c 3542 return 0;
13eb76e0 3543}
8df1cd07 3544
13eb76e0 3545#else
c227f099 3546void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3547 int len, int is_write)
3548{
3549 int l, io_index;
3550 uint8_t *ptr;
3551 uint32_t val;
c227f099 3552 target_phys_addr_t page;
8ca5692d 3553 ram_addr_t pd;
f1f6e3b8 3554 PhysPageDesc p;
3b46e624 3555
13eb76e0
FB
3556 while (len > 0) {
3557 page = addr & TARGET_PAGE_MASK;
3558 l = (page + TARGET_PAGE_SIZE) - addr;
3559 if (l > len)
3560 l = len;
92e873b9 3561 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3562 pd = p.phys_offset;
3b46e624 3563
13eb76e0 3564 if (is_write) {
0e0df1e2 3565 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
f1f6e3b8 3566 target_phys_addr_t addr1;
11c7ef0c 3567 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3568 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3569 /* XXX: could force cpu_single_env to NULL to avoid
3570 potential bugs */
6c2934db 3571 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3572 /* 32 bit write access */
c27004ec 3573 val = ldl_p(buf);
acbbec5d 3574 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3575 l = 4;
6c2934db 3576 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3577 /* 16 bit write access */
c27004ec 3578 val = lduw_p(buf);
acbbec5d 3579 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3580 l = 2;
3581 } else {
1c213d19 3582 /* 8 bit write access */
c27004ec 3583 val = ldub_p(buf);
acbbec5d 3584 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3585 l = 1;
3586 }
3587 } else {
8ca5692d 3588 ram_addr_t addr1;
b448f2f3 3589 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3590 /* RAM case */
5579c7f3 3591 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3592 memcpy(ptr, buf, l);
3a7d929e
FB
3593 if (!cpu_physical_memory_is_dirty(addr1)) {
3594 /* invalidate code */
3595 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3596 /* set dirty bit */
f7c11b53
YT
3597 cpu_physical_memory_set_dirty_flags(
3598 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3599 }
050a0ddf 3600 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3601 }
3602 } else {
1d393fa2 3603 if (!is_ram_rom_romd(pd)) {
f1f6e3b8 3604 target_phys_addr_t addr1;
13eb76e0 3605 /* I/O case */
11c7ef0c 3606 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3607 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3608 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3609 /* 32 bit read access */
acbbec5d 3610 val = io_mem_read(io_index, addr1, 4);
c27004ec 3611 stl_p(buf, val);
13eb76e0 3612 l = 4;
6c2934db 3613 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3614 /* 16 bit read access */
acbbec5d 3615 val = io_mem_read(io_index, addr1, 2);
c27004ec 3616 stw_p(buf, val);
13eb76e0
FB
3617 l = 2;
3618 } else {
1c213d19 3619 /* 8 bit read access */
acbbec5d 3620 val = io_mem_read(io_index, addr1, 1);
c27004ec 3621 stb_p(buf, val);
13eb76e0
FB
3622 l = 1;
3623 }
3624 } else {
3625 /* RAM case */
050a0ddf
AP
3626 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3627 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3628 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3629 }
3630 }
3631 len -= l;
3632 buf += l;
3633 addr += l;
3634 }
3635}
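/* Illustrative sketch: typical use of the slow-path accessor above from a
   device model that copies a guest descriptor in and out of guest physical
   memory.  "desc_gpa" and the 16-byte descriptor are hypothetical. */

static void example_copy_descriptor(target_phys_addr_t desc_gpa)
{
    uint8_t desc[16];

    /* pull the descriptor out of guest memory; I/O-backed pages are
       handled transparently, access by access, by cpu_physical_memory_rw */
    cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));

    /* ... update a status byte ... */
    desc[0] |= 1;

    /* push it back; RAM-backed pages get their dirty flags updated and any
       overlapping translated code is invalidated */
    cpu_physical_memory_write(desc_gpa, desc, sizeof(desc));
}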
8df1cd07 3636
d0ecd2aa 3637/* used for ROM loading: can write in RAM and ROM */
c227f099 3638void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3639 const uint8_t *buf, int len)
3640{
3641 int l;
3642 uint8_t *ptr;
c227f099 3643 target_phys_addr_t page;
d0ecd2aa 3644 unsigned long pd;
f1f6e3b8 3645 PhysPageDesc p;
3b46e624 3646
d0ecd2aa
FB
3647 while (len > 0) {
3648 page = addr & TARGET_PAGE_MASK;
3649 l = (page + TARGET_PAGE_SIZE) - addr;
3650 if (l > len)
3651 l = len;
3652 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3653 pd = p.phys_offset;
3b46e624 3654
1d393fa2 3655 if (!is_ram_rom_romd(pd)) {
d0ecd2aa
FB
3656 /* do nothing */
3657 } else {
3658 unsigned long addr1;
3659 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3660 /* ROM/RAM case */
5579c7f3 3661 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3662 memcpy(ptr, buf, l);
050a0ddf 3663 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3664 }
3665 len -= l;
3666 buf += l;
3667 addr += l;
3668 }
3669}
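/* Illustrative sketch: firmware images are installed with the _write_rom
   variant, since the normal write path above does not modify ROM-typed
   memory.  "blob" and "blob_size" are hypothetical. */

static void example_install_firmware(target_phys_addr_t rom_base,
                                     const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}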
3670
6d16c2f8
AL
3671typedef struct {
3672 void *buffer;
c227f099
AL
3673 target_phys_addr_t addr;
3674 target_phys_addr_t len;
6d16c2f8
AL
3675} BounceBuffer;
3676
3677static BounceBuffer bounce;
3678
ba223c29
AL
3679typedef struct MapClient {
3680 void *opaque;
3681 void (*callback)(void *opaque);
72cf2d4f 3682 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3683} MapClient;
3684
72cf2d4f
BS
3685static QLIST_HEAD(map_client_list, MapClient) map_client_list
3686 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3687
3688void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3689{
7267c094 3690 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3691
3692 client->opaque = opaque;
3693 client->callback = callback;
72cf2d4f 3694 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3695 return client;
3696}
3697
3698void cpu_unregister_map_client(void *_client)
3699{
3700 MapClient *client = (MapClient *)_client;
3701
72cf2d4f 3702 QLIST_REMOVE(client, link);
7267c094 3703 g_free(client);
ba223c29
AL
3704}
3705
3706static void cpu_notify_map_clients(void)
3707{
3708 MapClient *client;
3709
72cf2d4f
BS
3710 while (!QLIST_EMPTY(&map_client_list)) {
3711 client = QLIST_FIRST(&map_client_list);
ba223c29 3712 client->callback(client->opaque);
34d5e948 3713 cpu_unregister_map_client(client);
ba223c29
AL
3714 }
3715}
3716
6d16c2f8
AL
3717/* Map a physical memory region into a host virtual address.
3718 * May map a subset of the requested range, given by and returned in *plen.
3719 * May return NULL if resources needed to perform the mapping are exhausted.
3720 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3721 * Use cpu_register_map_client() to know when retrying the map operation is
3722 * likely to succeed.
6d16c2f8 3723 */
c227f099
AL
3724void *cpu_physical_memory_map(target_phys_addr_t addr,
3725 target_phys_addr_t *plen,
6d16c2f8
AL
3726 int is_write)
3727{
c227f099 3728 target_phys_addr_t len = *plen;
38bee5dc 3729 target_phys_addr_t todo = 0;
6d16c2f8 3730 int l;
c227f099 3731 target_phys_addr_t page;
6d16c2f8 3732 unsigned long pd;
f1f6e3b8 3733 PhysPageDesc p;
f15fbc4b 3734 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3735 ram_addr_t rlen;
3736 void *ret;
6d16c2f8
AL
3737
3738 while (len > 0) {
3739 page = addr & TARGET_PAGE_MASK;
3740 l = (page + TARGET_PAGE_SIZE) - addr;
3741 if (l > len)
3742 l = len;
3743 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3744 pd = p.phys_offset;
6d16c2f8 3745
0e0df1e2 3746 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
38bee5dc 3747 if (todo || bounce.buffer) {
6d16c2f8
AL
3748 break;
3749 }
3750 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3751 bounce.addr = addr;
3752 bounce.len = l;
3753 if (!is_write) {
54f7b4a3 3754 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3755 }
38bee5dc
SS
3756
3757 *plen = l;
3758 return bounce.buffer;
6d16c2f8 3759 }
8ab934f9
SS
3760 if (!todo) {
3761 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3762 }
6d16c2f8
AL
3763
3764 len -= l;
3765 addr += l;
38bee5dc 3766 todo += l;
6d16c2f8 3767 }
8ab934f9
SS
3768 rlen = todo;
3769 ret = qemu_ram_ptr_length(raddr, &rlen);
3770 *plen = rlen;
3771 return ret;
6d16c2f8
AL
3772}
3773
3774/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3775 * Will also mark the memory as dirty if is_write == 1. access_len gives
3776 * the amount of memory that was actually read or written by the caller.
3777 */
c227f099
AL
3778void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3779 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3780{
3781 if (buffer != bounce.buffer) {
3782 if (is_write) {
e890261f 3783 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3784 while (access_len) {
3785 unsigned l;
3786 l = TARGET_PAGE_SIZE;
3787 if (l > access_len)
3788 l = access_len;
3789 if (!cpu_physical_memory_is_dirty(addr1)) {
3790 /* invalidate code */
3791 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3792 /* set dirty bit */
f7c11b53
YT
3793 cpu_physical_memory_set_dirty_flags(
3794 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3795 }
3796 addr1 += l;
3797 access_len -= l;
3798 }
3799 }
868bb33f 3800 if (xen_enabled()) {
e41d7c69 3801 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3802 }
6d16c2f8
AL
3803 return;
3804 }
3805 if (is_write) {
3806 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3807 }
f8a83245 3808 qemu_vfree(bounce.buffer);
6d16c2f8 3809 bounce.buffer = NULL;
ba223c29 3810 cpu_notify_map_clients();
6d16c2f8 3811}
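/* Illustrative sketch: the intended map/use/unmap pattern for zero-copy
   DMA.  The memset() stands in for the real work done on the host pointer;
   because the mapping may be shortened or fall back to the single bounce
   buffer, the returned length must be honoured on every pass. */

static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!buf) {
            /* resources exhausted (e.g. the bounce buffer is busy): a real
               caller would register a callback with
               cpu_register_map_client() and retry from there */
            break;
        }
        memset(buf, 0, plen);
        cpu_physical_memory_unmap(buf, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}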
d0ecd2aa 3812
8df1cd07 3813/* warning: addr must be aligned */
1e78bcc1
AG
3814static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3815 enum device_endian endian)
8df1cd07
FB
3816{
3817 int io_index;
3818 uint8_t *ptr;
3819 uint32_t val;
3820 unsigned long pd;
f1f6e3b8 3821 PhysPageDesc p;
8df1cd07
FB
3822
3823 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3824 pd = p.phys_offset;
3b46e624 3825
1d393fa2 3826 if (!is_ram_rom_romd(pd)) {
8df1cd07 3827 /* I/O case */
11c7ef0c 3828 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3829 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3830 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3831#if defined(TARGET_WORDS_BIGENDIAN)
3832 if (endian == DEVICE_LITTLE_ENDIAN) {
3833 val = bswap32(val);
3834 }
3835#else
3836 if (endian == DEVICE_BIG_ENDIAN) {
3837 val = bswap32(val);
3838 }
3839#endif
8df1cd07
FB
3840 } else {
3841 /* RAM case */
5579c7f3 3842 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3843 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3844 switch (endian) {
3845 case DEVICE_LITTLE_ENDIAN:
3846 val = ldl_le_p(ptr);
3847 break;
3848 case DEVICE_BIG_ENDIAN:
3849 val = ldl_be_p(ptr);
3850 break;
3851 default:
3852 val = ldl_p(ptr);
3853 break;
3854 }
8df1cd07
FB
3855 }
3856 return val;
3857}
3858
1e78bcc1
AG
3859uint32_t ldl_phys(target_phys_addr_t addr)
3860{
3861 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3862}
3863
3864uint32_t ldl_le_phys(target_phys_addr_t addr)
3865{
3866 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3867}
3868
3869uint32_t ldl_be_phys(target_phys_addr_t addr)
3870{
3871 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3872}
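/* Illustrative sketch: a device whose registers are specified as
   little-endian regardless of the target CPU reads them with the _le_
   accessor, while ldl_phys() keeps the target's native byte order.  The
   0x04 status-register offset is hypothetical. */

static uint32_t example_read_le_status(target_phys_addr_t mmio_base)
{
    return ldl_le_phys(mmio_base + 0x04);
}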
3873
84b7b8e7 3874/* warning: addr must be aligned */
1e78bcc1
AG
3875static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3876 enum device_endian endian)
84b7b8e7
FB
3877{
3878 int io_index;
3879 uint8_t *ptr;
3880 uint64_t val;
3881 unsigned long pd;
f1f6e3b8 3882 PhysPageDesc p;
84b7b8e7
FB
3883
3884 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3885 pd = p.phys_offset;
3b46e624 3886
1d393fa2 3887 if (!is_ram_rom_romd(pd)) {
84b7b8e7 3888 /* I/O case */
11c7ef0c 3889 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3890 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
3891
3892 /* XXX This is broken when device endian != cpu endian.
3893 Fix and add "endian" variable check */
84b7b8e7 3894#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
3895 val = io_mem_read(io_index, addr, 4) << 32;
3896 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 3897#else
acbbec5d
AK
3898 val = io_mem_read(io_index, addr, 4);
3899 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
3900#endif
3901 } else {
3902 /* RAM case */
5579c7f3 3903 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 3904 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3905 switch (endian) {
3906 case DEVICE_LITTLE_ENDIAN:
3907 val = ldq_le_p(ptr);
3908 break;
3909 case DEVICE_BIG_ENDIAN:
3910 val = ldq_be_p(ptr);
3911 break;
3912 default:
3913 val = ldq_p(ptr);
3914 break;
3915 }
84b7b8e7
FB
3916 }
3917 return val;
3918}
3919
1e78bcc1
AG
3920uint64_t ldq_phys(target_phys_addr_t addr)
3921{
3922 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3923}
3924
3925uint64_t ldq_le_phys(target_phys_addr_t addr)
3926{
3927 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3928}
3929
3930uint64_t ldq_be_phys(target_phys_addr_t addr)
3931{
3932 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3933}
3934
aab33094 3935/* XXX: optimize */
c227f099 3936uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3937{
3938 uint8_t val;
3939 cpu_physical_memory_read(addr, &val, 1);
3940 return val;
3941}
3942
733f0b02 3943/* warning: addr must be aligned */
1e78bcc1
AG
3944static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3945 enum device_endian endian)
aab33094 3946{
733f0b02
MT
3947 int io_index;
3948 uint8_t *ptr;
3949 uint64_t val;
3950 unsigned long pd;
f1f6e3b8 3951 PhysPageDesc p;
733f0b02
MT
3952
3953 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3954 pd = p.phys_offset;
733f0b02 3955
1d393fa2 3956 if (!is_ram_rom_romd(pd)) {
733f0b02 3957 /* I/O case */
11c7ef0c 3958 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3959 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3960 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
3961#if defined(TARGET_WORDS_BIGENDIAN)
3962 if (endian == DEVICE_LITTLE_ENDIAN) {
3963 val = bswap16(val);
3964 }
3965#else
3966 if (endian == DEVICE_BIG_ENDIAN) {
3967 val = bswap16(val);
3968 }
3969#endif
733f0b02
MT
3970 } else {
3971 /* RAM case */
3972 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3973 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3974 switch (endian) {
3975 case DEVICE_LITTLE_ENDIAN:
3976 val = lduw_le_p(ptr);
3977 break;
3978 case DEVICE_BIG_ENDIAN:
3979 val = lduw_be_p(ptr);
3980 break;
3981 default:
3982 val = lduw_p(ptr);
3983 break;
3984 }
733f0b02
MT
3985 }
3986 return val;
aab33094
FB
3987}
3988
1e78bcc1
AG
3989uint32_t lduw_phys(target_phys_addr_t addr)
3990{
3991 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3992}
3993
3994uint32_t lduw_le_phys(target_phys_addr_t addr)
3995{
3996 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3997}
3998
3999uint32_t lduw_be_phys(target_phys_addr_t addr)
4000{
4001 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4002}
4003
8df1cd07
FB
 4004/* warning: addr must be aligned. The ram page is not marked as dirty
4005 and the code inside is not invalidated. It is useful if the dirty
4006 bits are used to track modified PTEs */
c227f099 4007void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4008{
4009 int io_index;
4010 uint8_t *ptr;
4011 unsigned long pd;
f1f6e3b8 4012 PhysPageDesc p;
8df1cd07
FB
4013
4014 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4015 pd = p.phys_offset;
3b46e624 4016
0e0df1e2 4017 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4018 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4019 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4020 io_mem_write(io_index, addr, val, 4);
8df1cd07 4021 } else {
74576198 4022 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4023 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4024 stl_p(ptr, val);
74576198
AL
4025
4026 if (unlikely(in_migration)) {
4027 if (!cpu_physical_memory_is_dirty(addr1)) {
4028 /* invalidate code */
4029 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4030 /* set dirty bit */
f7c11b53
YT
4031 cpu_physical_memory_set_dirty_flags(
4032 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4033 }
4034 }
8df1cd07
FB
4035 }
4036}
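/* Illustrative sketch: the typical caller of stl_phys_notdirty() is a
   target MMU helper that sets accessed/dirty bits in a guest page-table
   entry without having the page-table page itself treated as modified
   code.  The bit layout (bit 5 = accessed) is only an example, loosely
   modelled on x86. */

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & (1u << 5))) {
        stl_phys_notdirty(pte_addr, pte | (1u << 5));
    }
}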
4037
c227f099 4038void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4039{
4040 int io_index;
4041 uint8_t *ptr;
4042 unsigned long pd;
f1f6e3b8 4043 PhysPageDesc p;
bc98a7ef
JM
4044
4045 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4046 pd = p.phys_offset;
3b46e624 4047
0e0df1e2 4048 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4049 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4050 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4051#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4052 io_mem_write(io_index, addr, val >> 32, 4);
4053 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4054#else
acbbec5d
AK
4055 io_mem_write(io_index, addr, (uint32_t)val, 4);
4056 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4057#endif
4058 } else {
5579c7f3 4059 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4060 (addr & ~TARGET_PAGE_MASK);
4061 stq_p(ptr, val);
4062 }
4063}
4064
8df1cd07 4065/* warning: addr must be aligned */
1e78bcc1
AG
4066static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4067 enum device_endian endian)
8df1cd07
FB
4068{
4069 int io_index;
4070 uint8_t *ptr;
4071 unsigned long pd;
f1f6e3b8 4072 PhysPageDesc p;
8df1cd07
FB
4073
4074 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4075 pd = p.phys_offset;
3b46e624 4076
0e0df1e2 4077 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4078 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4079 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4080#if defined(TARGET_WORDS_BIGENDIAN)
4081 if (endian == DEVICE_LITTLE_ENDIAN) {
4082 val = bswap32(val);
4083 }
4084#else
4085 if (endian == DEVICE_BIG_ENDIAN) {
4086 val = bswap32(val);
4087 }
4088#endif
acbbec5d 4089 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4090 } else {
4091 unsigned long addr1;
4092 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4093 /* RAM case */
5579c7f3 4094 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4095 switch (endian) {
4096 case DEVICE_LITTLE_ENDIAN:
4097 stl_le_p(ptr, val);
4098 break;
4099 case DEVICE_BIG_ENDIAN:
4100 stl_be_p(ptr, val);
4101 break;
4102 default:
4103 stl_p(ptr, val);
4104 break;
4105 }
3a7d929e
FB
4106 if (!cpu_physical_memory_is_dirty(addr1)) {
4107 /* invalidate code */
4108 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4109 /* set dirty bit */
f7c11b53
YT
4110 cpu_physical_memory_set_dirty_flags(addr1,
4111 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4112 }
8df1cd07
FB
4113 }
4114}
4115
1e78bcc1
AG
4116void stl_phys(target_phys_addr_t addr, uint32_t val)
4117{
4118 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4119}
4120
4121void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4122{
4123 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4124}
4125
4126void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4127{
4128 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4129}
4130
aab33094 4131/* XXX: optimize */
c227f099 4132void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4133{
4134 uint8_t v = val;
4135 cpu_physical_memory_write(addr, &v, 1);
4136}
4137
733f0b02 4138/* warning: addr must be aligned */
1e78bcc1
AG
4139static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4140 enum device_endian endian)
aab33094 4141{
733f0b02
MT
4142 int io_index;
4143 uint8_t *ptr;
4144 unsigned long pd;
f1f6e3b8 4145 PhysPageDesc p;
733f0b02
MT
4146
4147 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4148 pd = p.phys_offset;
733f0b02 4149
0e0df1e2 4150 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
11c7ef0c 4151 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4152 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4153#if defined(TARGET_WORDS_BIGENDIAN)
4154 if (endian == DEVICE_LITTLE_ENDIAN) {
4155 val = bswap16(val);
4156 }
4157#else
4158 if (endian == DEVICE_BIG_ENDIAN) {
4159 val = bswap16(val);
4160 }
4161#endif
acbbec5d 4162 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4163 } else {
4164 unsigned long addr1;
4165 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4166 /* RAM case */
4167 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4168 switch (endian) {
4169 case DEVICE_LITTLE_ENDIAN:
4170 stw_le_p(ptr, val);
4171 break;
4172 case DEVICE_BIG_ENDIAN:
4173 stw_be_p(ptr, val);
4174 break;
4175 default:
4176 stw_p(ptr, val);
4177 break;
4178 }
733f0b02
MT
4179 if (!cpu_physical_memory_is_dirty(addr1)) {
4180 /* invalidate code */
4181 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4182 /* set dirty bit */
4183 cpu_physical_memory_set_dirty_flags(addr1,
4184 (0xff & ~CODE_DIRTY_FLAG));
4185 }
4186 }
aab33094
FB
4187}
4188
1e78bcc1
AG
4189void stw_phys(target_phys_addr_t addr, uint32_t val)
4190{
4191 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4192}
4193
4194void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4195{
4196 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4197}
4198
4199void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4200{
4201 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4202}
4203
aab33094 4204/* XXX: optimize */
c227f099 4205void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4206{
4207 val = tswap64(val);
71d2b725 4208 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4209}
4210
1e78bcc1
AG
4211void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4212{
4213 val = cpu_to_le64(val);
4214 cpu_physical_memory_write(addr, &val, 8);
4215}
4216
4217void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4218{
4219 val = cpu_to_be64(val);
4220 cpu_physical_memory_write(addr, &val, 8);
4221}
4222
5e2972fd 4223/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4224int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4225 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4226{
4227 int l;
c227f099 4228 target_phys_addr_t phys_addr;
9b3c35e0 4229 target_ulong page;
13eb76e0
FB
4230
4231 while (len > 0) {
4232 page = addr & TARGET_PAGE_MASK;
4233 phys_addr = cpu_get_phys_page_debug(env, page);
4234 /* if no physical page mapped, return an error */
4235 if (phys_addr == -1)
4236 return -1;
4237 l = (page + TARGET_PAGE_SIZE) - addr;
4238 if (l > len)
4239 l = len;
5e2972fd 4240 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4241 if (is_write)
4242 cpu_physical_memory_write_rom(phys_addr, buf, l);
4243 else
5e2972fd 4244 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4245 len -= l;
4246 buf += l;
4247 addr += l;
4248 }
4249 return 0;
4250}
a68fe89c 4251#endif
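/* Illustrative sketch: a debugger front end (e.g. a gdb-stub style
   consumer) peeks at guest virtual memory through the accessor above,
   which resolves each page with cpu_get_phys_page_debug() and is allowed
   to write into ROM. */

static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}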
13eb76e0 4252
2e70f6ef
PB
4253/* in deterministic execution mode, instructions doing device I/Os
4254 must be at the end of the TB */
4255void cpu_io_recompile(CPUState *env, void *retaddr)
4256{
4257 TranslationBlock *tb;
4258 uint32_t n, cflags;
4259 target_ulong pc, cs_base;
4260 uint64_t flags;
4261
4262 tb = tb_find_pc((unsigned long)retaddr);
4263 if (!tb) {
4264 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4265 retaddr);
4266 }
4267 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4268 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4269 /* Calculate how many instructions had been executed before the fault
bf20dc07 4270 occurred. */
2e70f6ef
PB
4271 n = n - env->icount_decr.u16.low;
4272 /* Generate a new TB ending on the I/O insn. */
4273 n++;
4274 /* On MIPS and SH, delay slot instructions can only be restarted if
4275 they were already the first instruction in the TB. If this is not
bf20dc07 4276 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4277 branch. */
4278#if defined(TARGET_MIPS)
4279 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4280 env->active_tc.PC -= 4;
4281 env->icount_decr.u16.low++;
4282 env->hflags &= ~MIPS_HFLAG_BMASK;
4283 }
4284#elif defined(TARGET_SH4)
4285 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4286 && n > 1) {
4287 env->pc -= 2;
4288 env->icount_decr.u16.low++;
4289 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4290 }
4291#endif
4292 /* This should never happen. */
4293 if (n > CF_COUNT_MASK)
4294 cpu_abort(env, "TB too big during recompile");
4295
4296 cflags = n | CF_LAST_IO;
4297 pc = tb->pc;
4298 cs_base = tb->cs_base;
4299 flags = tb->flags;
4300 tb_phys_invalidate(tb, -1);
4301 /* FIXME: In theory this could raise an exception. In practice
4302 we have already translated the block once so it's probably ok. */
4303 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4304 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4305 the first in the TB) then we end up generating a whole new TB and
4306 repeating the fault, which is horribly inefficient.
4307 Better would be to execute just this insn uncached, or generate a
4308 second new TB. */
4309 cpu_resume_from_signal(env, NULL);
4310}
4311
b3755a91
PB
4312#if !defined(CONFIG_USER_ONLY)
4313
055403b2 4314void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4315{
4316 int i, target_code_size, max_target_code_size;
4317 int direct_jmp_count, direct_jmp2_count, cross_page;
4318 TranslationBlock *tb;
3b46e624 4319
e3db7226
FB
4320 target_code_size = 0;
4321 max_target_code_size = 0;
4322 cross_page = 0;
4323 direct_jmp_count = 0;
4324 direct_jmp2_count = 0;
4325 for(i = 0; i < nb_tbs; i++) {
4326 tb = &tbs[i];
4327 target_code_size += tb->size;
4328 if (tb->size > max_target_code_size)
4329 max_target_code_size = tb->size;
4330 if (tb->page_addr[1] != -1)
4331 cross_page++;
4332 if (tb->tb_next_offset[0] != 0xffff) {
4333 direct_jmp_count++;
4334 if (tb->tb_next_offset[1] != 0xffff) {
4335 direct_jmp2_count++;
4336 }
4337 }
4338 }
 4339 /* XXX: avoid using doubles? */
57fec1fe 4340 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4341 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4342 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4343 cpu_fprintf(f, "TB count %d/%d\n",
4344 nb_tbs, code_gen_max_blocks);
5fafdf24 4345 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4346 nb_tbs ? target_code_size / nb_tbs : 0,
4347 max_target_code_size);
055403b2 4348 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4349 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4350 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4351 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4352 cross_page,
e3db7226
FB
4353 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4354 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4355 direct_jmp_count,
e3db7226
FB
4356 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4357 direct_jmp2_count,
4358 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4359 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4360 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4361 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4362 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4363 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4364}
4365
d39e8222
AK
4366/* NOTE: this function can trigger an exception */
4367/* NOTE2: the returned address is not exactly the physical address: it
4368 is the offset relative to phys_ram_base */
4369tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4370{
4371 int mmu_idx, page_index, pd;
4372 void *p;
4373
4374 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4375 mmu_idx = cpu_mmu_index(env1);
4376 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4377 (addr & TARGET_PAGE_MASK))) {
4378 ldub_code(addr);
4379 }
4380 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
0e0df1e2 4381 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
75c578dc 4382 && !is_romd(pd)) {
d39e8222
AK
4383#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4384 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4385#else
4386 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4387#endif
4388 }
4389 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4390 return qemu_ram_addr_from_host_nofail(p);
4391}
4392
61382a50 4393#define MMUSUFFIX _cmmu
3917149d 4394#undef GETPC
61382a50
FB
4395#define GETPC() NULL
4396#define env cpu_single_env
b769d8fe 4397#define SOFTMMU_CODE_ACCESS
61382a50
FB
4398
4399#define SHIFT 0
4400#include "softmmu_template.h"
4401
4402#define SHIFT 1
4403#include "softmmu_template.h"
4404
4405#define SHIFT 2
4406#include "softmmu_template.h"
4407
4408#define SHIFT 3
4409#include "softmmu_template.h"
4410
4411#undef env
4412
4413#endif