/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

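/* Mark the host pages containing [addr, addr + size) as readable, writable
   and executable. */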
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

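/* Look up the PageDesc for a target page index in the multi-level l1_map.
   If 'alloc' is non-zero, missing intermediate levels and the leaf table
   are allocated on the way down. */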
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
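/* Look up the PhysPageDesc for a physical page index in l1_phys_map,
   allocating missing levels when 'alloc' is non-zero.  Newly allocated
   leaf entries start out as IO_MEM_UNASSIGNED. */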
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

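/* Allocate the translated-code buffer (either the static buffer or an
   mmap'ed region, subject to host CPU/OS placement constraints) and the
   array of TranslationBlock descriptors. */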
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

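/* Return the CPUState with the given index, or NULL if none exists. */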
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

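/* Register a new CPU: append it to the global CPU list, assign its index,
   and (in system mode) hook up its vmstate/savevm handlers. */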
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

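/* Remove a TB from the physical hash table, the per-page TB lists and the
   per-CPU jump caches, and unlink all jumps to and from it. */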
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

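/* Set bits [start, start + len) in the bitmap 'tab'. */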
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

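/* Build the bitmap of bytes in this page that are covered by translated
   code; it is consulted later to decide whether a write actually touches
   code. */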
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

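/* Translate the block starting at 'pc' into the code buffer and link it
   into the physical page tables, flushing the whole buffer first if it is
   full. */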
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

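/* Unchain the TB the CPU is currently executing so that it drops back to
   the main execution loop as soon as possible. */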
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

1815CPUState *cpu_copy(CPUState *env)
1816{
01ba9816 1817 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1818 CPUState *next_cpu = new_env->next_cpu;
1819 int cpu_index = new_env->cpu_index;
5a38f081
AL
1820#if defined(TARGET_HAS_ICE)
1821 CPUBreakpoint *bp;
1822 CPUWatchpoint *wp;
1823#endif
1824
c5be9f08 1825 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1826
1827 /* Preserve chaining and index. */
c5be9f08
TS
1828 new_env->next_cpu = next_cpu;
1829 new_env->cpu_index = cpu_index;
5a38f081
AL
1830
1831 /* Clone all break/watchpoints.
1832 Note: Once we support ptrace with hw-debug register access, make sure
1833 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1834 QTAILQ_INIT(&env->breakpoints);
1835 QTAILQ_INIT(&env->watchpoints);
5a38f081 1836#if defined(TARGET_HAS_ICE)
72cf2d4f 1837 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1838 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1839 }
72cf2d4f 1840 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1841 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1842 wp->flags, NULL);
1843 }
1844#endif
1845
c5be9f08
TS
1846 return new_env;
1847}
1848
0124311e
FB
1849#if !defined(CONFIG_USER_ONLY)
1850
5c751e99
EI
1851static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1852{
1853 unsigned int i;
1854
1855 /* Discard jump cache entries for any tb which might potentially
1856 overlap the flushed page. */
1857 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1858 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1859 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1860
1861 i = tb_jmp_cache_hash_page(addr);
1862 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1863 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1864}
1865
08738984
IK
1866static CPUTLBEntry s_cputlb_empty_entry = {
1867 .addr_read = -1,
1868 .addr_write = -1,
1869 .addr_code = -1,
1870 .addend = -1,
1871};
1872
ee8b7021
FB
1873/* NOTE: if flush_global is true, also flush global entries (not
1874 implemented yet) */
1875void tlb_flush(CPUState *env, int flush_global)
33417e70 1876{
33417e70 1877 int i;
0124311e 1878
9fa3e853
FB
1879#if defined(DEBUG_TLB)
1880 printf("tlb_flush:\n");
1881#endif
0124311e
FB
1882 /* must reset current TB so that interrupts cannot modify the
1883 links while we are modifying them */
1884 env->current_tb = NULL;
1885
33417e70 1886 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1887 int mmu_idx;
1888 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1889 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1890 }
33417e70 1891 }
9fa3e853 1892
8a40a180 1893 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1894
d4c430a8
PB
1895 env->tlb_flush_addr = -1;
1896 env->tlb_flush_mask = 0;
e3db7226 1897 tlb_flush_count++;
33417e70
FB
1898}
1899
274da6b2 1900static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1901{
5fafdf24 1902 if (addr == (tlb_entry->addr_read &
84b7b8e7 1903 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1904 addr == (tlb_entry->addr_write &
84b7b8e7 1905 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1906 addr == (tlb_entry->addr_code &
84b7b8e7 1907 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1908 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1909 }
61382a50
FB
1910}
1911
2e12669a 1912void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1913{
8a40a180 1914 int i;
cfde4bd9 1915 int mmu_idx;
0124311e 1916
9fa3e853 1917#if defined(DEBUG_TLB)
108c49b8 1918 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1919#endif
d4c430a8
PB
1920 /* Check if we need to flush due to large pages. */
1921 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1922#if defined(DEBUG_TLB)
1923 printf("tlb_flush_page: forced full flush ("
1924 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1925 env->tlb_flush_addr, env->tlb_flush_mask);
1926#endif
1927 tlb_flush(env, 1);
1928 return;
1929 }
0124311e
FB
1930 /* must reset current TB so that interrupts cannot modify the
1931 links while we are modifying them */
1932 env->current_tb = NULL;
61382a50
FB
1933
1934 addr &= TARGET_PAGE_MASK;
1935 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1936 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1937 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1938
5c751e99 1939 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1940}
1941
9fa3e853
FB
1942/* update the TLBs so that writes to code in the virtual page 'addr'
1943 can be detected */
c227f099 1944static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1945{
5fafdf24 1946 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1947 ram_addr + TARGET_PAGE_SIZE,
1948 CODE_DIRTY_FLAG);
9fa3e853
FB
1949}
1950
9fa3e853 1951/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1952 tested for self modifying code */
c227f099 1953static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1954 target_ulong vaddr)
9fa3e853 1955{
f7c11b53 1956 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1957}
1958
5fafdf24 1959static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1960 unsigned long start, unsigned long length)
1961{
1962 unsigned long addr;
84b7b8e7
FB
1963 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1964 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1965 if ((addr - start) < length) {
0f459d16 1966 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1967 }
1968 }
1969}
1970
5579c7f3 1971/* Note: start and end must be within the same ram block. */
c227f099 1972void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1973 int dirty_flags)
1ccde1cb
FB
1974{
1975 CPUState *env;
4f2ac237 1976 unsigned long length, start1;
f7c11b53 1977 int i;
1ccde1cb
FB
1978
1979 start &= TARGET_PAGE_MASK;
1980 end = TARGET_PAGE_ALIGN(end);
1981
1982 length = end - start;
1983 if (length == 0)
1984 return;
f7c11b53 1985 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1986
1ccde1cb
FB
1987 /* we modify the TLB cache so that the dirty bit will be set again
1988 when accessing the range */
b2e0a138 1989 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1990 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1991 address comparisons below. */
b2e0a138 1992 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1993 != (end - 1) - start) {
1994 abort();
1995 }
1996
6a00d601 1997 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
1998 int mmu_idx;
1999 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2000 for(i = 0; i < CPU_TLB_SIZE; i++)
2001 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2002 start1, length);
2003 }
6a00d601 2004 }
1ccde1cb
FB
2005}
2006
74576198
AL
2007int cpu_physical_memory_set_dirty_tracking(int enable)
2008{
f6f3fbca 2009 int ret = 0;
74576198 2010 in_migration = enable;
dcd97e33
AK
2011 if (enable) {
2012 memory_global_dirty_log_start();
2013 } else {
2014 memory_global_dirty_log_stop();
2015 }
f6f3fbca 2016 return ret;
74576198
AL
2017}
2018
2019int cpu_physical_memory_get_dirty_tracking(void)
2020{
2021 return in_migration;
2022}
2023
3a7d929e
FB
2024static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2025{
c227f099 2026 ram_addr_t ram_addr;
5579c7f3 2027 void *p;
3a7d929e 2028
84b7b8e7 2029 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2030 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2031 + tlb_entry->addend);
e890261f 2032 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2033 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2034 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2035 }
2036 }
2037}
2038
2039/* update the TLB according to the current state of the dirty bits */
2040void cpu_tlb_update_dirty(CPUState *env)
2041{
2042 int i;
cfde4bd9
IY
2043 int mmu_idx;
2044 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2045 for(i = 0; i < CPU_TLB_SIZE; i++)
2046 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2047 }
3a7d929e
FB
2048}
2049
0f459d16 2050static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2051{
0f459d16
PB
2052 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2053 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2054}
2055
0f459d16
PB
2056/* update the TLB corresponding to virtual page vaddr
2057 so that it is no longer dirty */
2058static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2059{
1ccde1cb 2060 int i;
cfde4bd9 2061 int mmu_idx;
1ccde1cb 2062
0f459d16 2063 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2064 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2065 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2066 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2067}
2068
d4c430a8
PB
2069/* Our TLB does not support large pages, so remember the area covered by
2070 large pages and trigger a full TLB flush if these are invalidated. */
2071static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2072 target_ulong size)
2073{
2074 target_ulong mask = ~(size - 1);
2075
2076 if (env->tlb_flush_addr == (target_ulong)-1) {
2077 env->tlb_flush_addr = vaddr & mask;
2078 env->tlb_flush_mask = mask;
2079 return;
2080 }
2081 /* Extend the existing region to include the new page.
2082 This is a compromise between unnecessary flushes and the cost
2083 of maintaining a full variable size TLB. */
2084 mask &= env->tlb_flush_mask;
2085 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2086 mask <<= 1;
2087 }
2088 env->tlb_flush_addr &= mask;
2089 env->tlb_flush_mask = mask;
2090}
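/* Worked example (illustrative comment, not part of the original source):
   assume a 32-bit target_ulong and 2MB large pages.  With an existing
   tracked region 0x00200000/0xffe00000, adding a 2MB page at vaddr
   0x00600000 shifts the mask left until both addresses share the
   remaining prefix, ending with 0x00000000/0xff800000 - i.e. a single
   8MB region covering both pages, at the price of coarser flushing. */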
2091
2092/* Add a new TLB entry. At most one entry for a given virtual address
2093 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2094 supplied size is only used by tlb_flush_page. */
2095void tlb_set_page(CPUState *env, target_ulong vaddr,
2096 target_phys_addr_t paddr, int prot,
2097 int mmu_idx, target_ulong size)
9fa3e853 2098{
92e873b9 2099 PhysPageDesc *p;
4f2ac237 2100 unsigned long pd;
9fa3e853 2101 unsigned int index;
4f2ac237 2102 target_ulong address;
0f459d16 2103 target_ulong code_address;
355b1943 2104 unsigned long addend;
84b7b8e7 2105 CPUTLBEntry *te;
a1d1bb31 2106 CPUWatchpoint *wp;
c227f099 2107 target_phys_addr_t iotlb;
9fa3e853 2108
d4c430a8
PB
2109 assert(size >= TARGET_PAGE_SIZE);
2110 if (size != TARGET_PAGE_SIZE) {
2111 tlb_add_large_page(env, vaddr, size);
2112 }
92e873b9 2113 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2114 if (!p) {
2115 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2116 } else {
2117 pd = p->phys_offset;
9fa3e853
FB
2118 }
2119#if defined(DEBUG_TLB)
7fd3f494
SW
2120 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2121 " prot=%x idx=%d pd=0x%08lx\n",
2122 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2123#endif
2124
0f459d16
PB
2125 address = vaddr;
2126 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2127 /* IO memory case (romd handled later) */
2128 address |= TLB_MMIO;
2129 }
5579c7f3 2130 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2131 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2132 /* Normal RAM. */
2133 iotlb = pd & TARGET_PAGE_MASK;
2134 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2135 iotlb |= IO_MEM_NOTDIRTY;
2136 else
2137 iotlb |= IO_MEM_ROM;
2138 } else {
ccbb4d44 2139 /* IO handlers are currently passed a physical address.
0f459d16
PB
2140 It would be nice to pass an offset from the base address
2141 of that region. This would avoid having to special case RAM,
2142 and avoid full address decoding in every device.
2143 We can't use the high bits of pd for this because
2144 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2145 iotlb = (pd & ~TARGET_PAGE_MASK);
2146 if (p) {
8da3ff18
PB
2147 iotlb += p->region_offset;
2148 } else {
2149 iotlb += paddr;
2150 }
0f459d16
PB
2151 }
2152
2153 code_address = address;
2154 /* Make accesses to pages with watchpoints go via the
2155 watchpoint trap routines. */
72cf2d4f 2156 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2157 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2158 /* Avoid trapping reads of pages with a write breakpoint. */
2159 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2160 iotlb = io_mem_watch + paddr;
2161 address |= TLB_MMIO;
2162 break;
2163 }
6658ffb8 2164 }
0f459d16 2165 }
d79acba4 2166
0f459d16
PB
2167 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2168 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2169 te = &env->tlb_table[mmu_idx][index];
2170 te->addend = addend - vaddr;
2171 if (prot & PAGE_READ) {
2172 te->addr_read = address;
2173 } else {
2174 te->addr_read = -1;
2175 }
5c751e99 2176
0f459d16
PB
2177 if (prot & PAGE_EXEC) {
2178 te->addr_code = code_address;
2179 } else {
2180 te->addr_code = -1;
2181 }
2182 if (prot & PAGE_WRITE) {
2183 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2184 (pd & IO_MEM_ROMD)) {
2185 /* Write access calls the I/O callback. */
2186 te->addr_write = address | TLB_MMIO;
2187 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2188 !cpu_physical_memory_is_dirty(pd)) {
2189 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2190 } else {
0f459d16 2191 te->addr_write = address;
9fa3e853 2192 }
0f459d16
PB
2193 } else {
2194 te->addr_write = -1;
9fa3e853 2195 }
9fa3e853
FB
2196}
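/* Illustrative call sketch (not part of the original source): a target's
   MMU fault handler (tlb_fill) normally finishes its page-table walk with
   something like

       tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                    prot, mmu_idx, TARGET_PAGE_SIZE);

   Only the single page containing vaddr is mapped; passing a larger size
   merely feeds tlb_add_large_page() so that later flushes stay correct. */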
2197
0124311e
FB
2198#else
2199
ee8b7021 2200void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2201{
2202}
2203
2e12669a 2204void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2205{
2206}
2207
edf8e2af
MW
2208/*
2209 * Walks guest process memory "regions" one by one
2210 * and calls callback function 'fn' for each region.
2211 */
5cd2c5b6
RH
2212
2213struct walk_memory_regions_data
2214{
2215 walk_memory_regions_fn fn;
2216 void *priv;
2217 unsigned long start;
2218 int prot;
2219};
2220
2221static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2222 abi_ulong end, int new_prot)
5cd2c5b6
RH
2223{
2224 if (data->start != -1ul) {
2225 int rc = data->fn(data->priv, data->start, end, data->prot);
2226 if (rc != 0) {
2227 return rc;
2228 }
2229 }
2230
2231 data->start = (new_prot ? end : -1ul);
2232 data->prot = new_prot;
2233
2234 return 0;
2235}
2236
2237static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2238 abi_ulong base, int level, void **lp)
5cd2c5b6 2239{
b480d9b7 2240 abi_ulong pa;
5cd2c5b6
RH
2241 int i, rc;
2242
2243 if (*lp == NULL) {
2244 return walk_memory_regions_end(data, base, 0);
2245 }
2246
2247 if (level == 0) {
2248 PageDesc *pd = *lp;
7296abac 2249 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2250 int prot = pd[i].flags;
2251
2252 pa = base | (i << TARGET_PAGE_BITS);
2253 if (prot != data->prot) {
2254 rc = walk_memory_regions_end(data, pa, prot);
2255 if (rc != 0) {
2256 return rc;
9fa3e853 2257 }
9fa3e853 2258 }
5cd2c5b6
RH
2259 }
2260 } else {
2261 void **pp = *lp;
7296abac 2262 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2263 pa = base | ((abi_ulong)i <<
2264 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2265 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2266 if (rc != 0) {
2267 return rc;
2268 }
2269 }
2270 }
2271
2272 return 0;
2273}
2274
2275int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2276{
2277 struct walk_memory_regions_data data;
2278 unsigned long i;
2279
2280 data.fn = fn;
2281 data.priv = priv;
2282 data.start = -1ul;
2283 data.prot = 0;
2284
2285 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2286 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2287 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2288 if (rc != 0) {
2289 return rc;
9fa3e853 2290 }
33417e70 2291 }
5cd2c5b6
RH
2292
2293 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2294}
2295
b480d9b7
PB
2296static int dump_region(void *priv, abi_ulong start,
2297 abi_ulong end, unsigned long prot)
edf8e2af
MW
2298{
2299 FILE *f = (FILE *)priv;
2300
b480d9b7
PB
2301 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2302 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2303 start, end, end - start,
2304 ((prot & PAGE_READ) ? 'r' : '-'),
2305 ((prot & PAGE_WRITE) ? 'w' : '-'),
2306 ((prot & PAGE_EXEC) ? 'x' : '-'));
2307
2308 return (0);
2309}
2310
2311/* dump memory mappings */
2312void page_dump(FILE *f)
2313{
2314 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2315 "start", "end", "size", "prot");
2316 walk_memory_regions(f, dump_region);
33417e70
FB
2317}
2318
53a5960a 2319int page_get_flags(target_ulong address)
33417e70 2320{
9fa3e853
FB
2321 PageDesc *p;
2322
2323 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2324 if (!p)
9fa3e853
FB
2325 return 0;
2326 return p->flags;
2327}
2328
376a7909
RH
2329/* Modify the flags of a page and invalidate the code if necessary.
 2330 The flag PAGE_WRITE_ORG is set automatically depending
2331 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2332void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2333{
376a7909
RH
2334 target_ulong addr, len;
2335
2336 /* This function should never be called with addresses outside the
2337 guest address space. If this assert fires, it probably indicates
2338 a missing call to h2g_valid. */
b480d9b7
PB
2339#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2340 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2341#endif
2342 assert(start < end);
9fa3e853
FB
2343
2344 start = start & TARGET_PAGE_MASK;
2345 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2346
2347 if (flags & PAGE_WRITE) {
9fa3e853 2348 flags |= PAGE_WRITE_ORG;
376a7909
RH
2349 }
2350
2351 for (addr = start, len = end - start;
2352 len != 0;
2353 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2354 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2355
2356 /* If the write protection bit is set, then we invalidate
2357 the code inside. */
5fafdf24 2358 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2359 (flags & PAGE_WRITE) &&
2360 p->first_tb) {
d720b93d 2361 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2362 }
2363 p->flags = flags;
2364 }
33417e70
FB
2365}
2366
3d97b40b
TS
2367int page_check_range(target_ulong start, target_ulong len, int flags)
2368{
2369 PageDesc *p;
2370 target_ulong end;
2371 target_ulong addr;
2372
376a7909
RH
2373 /* This function should never be called with addresses outside the
2374 guest address space. If this assert fires, it probably indicates
2375 a missing call to h2g_valid. */
338e9e6c
BS
2376#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2377 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2378#endif
2379
3e0650a9
RH
2380 if (len == 0) {
2381 return 0;
2382 }
376a7909
RH
2383 if (start + len - 1 < start) {
2384 /* We've wrapped around. */
55f280c9 2385 return -1;
376a7909 2386 }
55f280c9 2387
3d97b40b
TS
 2388 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2389 start = start & TARGET_PAGE_MASK;
2390
376a7909
RH
2391 for (addr = start, len = end - start;
2392 len != 0;
2393 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2394 p = page_find(addr >> TARGET_PAGE_BITS);
2395 if( !p )
2396 return -1;
2397 if( !(p->flags & PAGE_VALID) )
2398 return -1;
2399
dae3270c 2400 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2401 return -1;
dae3270c
FB
2402 if (flags & PAGE_WRITE) {
2403 if (!(p->flags & PAGE_WRITE_ORG))
2404 return -1;
2405 /* unprotect the page if it was put read-only because it
2406 contains translated code */
2407 if (!(p->flags & PAGE_WRITE)) {
2408 if (!page_unprotect(addr, 0, NULL))
2409 return -1;
2410 }
2411 return 0;
2412 }
3d97b40b
TS
2413 }
2414 return 0;
2415}
2416
9fa3e853 2417/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2418 page. Return TRUE if the fault was successfully handled. */
53a5960a 2419int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2420{
45d679d6
AJ
2421 unsigned int prot;
2422 PageDesc *p;
53a5960a 2423 target_ulong host_start, host_end, addr;
9fa3e853 2424
c8a706fe
PB
2425 /* Technically this isn't safe inside a signal handler. However we
2426 know this only ever happens in a synchronous SEGV handler, so in
2427 practice it seems to be ok. */
2428 mmap_lock();
2429
45d679d6
AJ
2430 p = page_find(address >> TARGET_PAGE_BITS);
2431 if (!p) {
c8a706fe 2432 mmap_unlock();
9fa3e853 2433 return 0;
c8a706fe 2434 }
45d679d6 2435
9fa3e853
FB
2436 /* if the page was really writable, then we change its
2437 protection back to writable */
45d679d6
AJ
2438 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2439 host_start = address & qemu_host_page_mask;
2440 host_end = host_start + qemu_host_page_size;
2441
2442 prot = 0;
2443 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2444 p = page_find(addr >> TARGET_PAGE_BITS);
2445 p->flags |= PAGE_WRITE;
2446 prot |= p->flags;
2447
9fa3e853
FB
2448 /* and since the content will be modified, we must invalidate
2449 the corresponding translated code. */
45d679d6 2450 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2451#ifdef DEBUG_TB_CHECK
45d679d6 2452 tb_invalidate_check(addr);
9fa3e853 2453#endif
9fa3e853 2454 }
45d679d6
AJ
2455 mprotect((void *)g2h(host_start), qemu_host_page_size,
2456 prot & PAGE_BITS);
2457
2458 mmap_unlock();
2459 return 1;
9fa3e853 2460 }
c8a706fe 2461 mmap_unlock();
9fa3e853
FB
2462 return 0;
2463}
2464
6a00d601
FB
2465static inline void tlb_set_dirty(CPUState *env,
2466 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2467{
2468}
9fa3e853
FB
2469#endif /* defined(CONFIG_USER_ONLY) */
2470
e2eef170 2471#if !defined(CONFIG_USER_ONLY)
8da3ff18 2472
c04b2b78
PB
2473#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2474typedef struct subpage_t {
2475 target_phys_addr_t base;
f6405247
RH
2476 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2477 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2478} subpage_t;
2479
c227f099
AL
2480static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2481 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2482static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2483 ram_addr_t orig_memory,
2484 ram_addr_t region_offset);
db7b5426
BS
2485#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2486 need_subpage) \
2487 do { \
2488 if (addr > start_addr) \
2489 start_addr2 = 0; \
2490 else { \
2491 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2492 if (start_addr2 > 0) \
2493 need_subpage = 1; \
2494 } \
2495 \
49e9fba2 2496 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2497 end_addr2 = TARGET_PAGE_SIZE - 1; \
2498 else { \
2499 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2500 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2501 need_subpage = 1; \
2502 } \
2503 } while (0)
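/* Worked example (illustrative comment, not part of the original source):
   with 4K target pages, registering start_addr=0x2100, orig_size=0x200
   and visiting page addr=0x2000 yields start_addr2=0x100 and
   end_addr2=0x2ff, so need_subpage is set and only bytes 0x100..0x2ff of
   that page are routed to the new handler; the rest of the page keeps its
   previous mapping via the subpage machinery below. */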
2504
8f2498f9
MT
2505/* register physical memory.
2506 For RAM, 'size' must be a multiple of the target page size.
2507 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2508 io memory page. The address used when calling the IO function is
2509 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2510 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2511 before calculating this offset. This should not be a problem unless
2512 the low bits of start_addr and region_offset differ. */
0fd542fb 2513void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2514 ram_addr_t size,
2515 ram_addr_t phys_offset,
0fd542fb
MT
2516 ram_addr_t region_offset,
2517 bool log_dirty)
33417e70 2518{
c227f099 2519 target_phys_addr_t addr, end_addr;
92e873b9 2520 PhysPageDesc *p;
9d42037b 2521 CPUState *env;
c227f099 2522 ram_addr_t orig_size = size;
f6405247 2523 subpage_t *subpage;
33417e70 2524
3b8e6a2d 2525 assert(size);
f6f3fbca 2526
67c4d23c
PB
2527 if (phys_offset == IO_MEM_UNASSIGNED) {
2528 region_offset = start_addr;
2529 }
8da3ff18 2530 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2531 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2532 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2533
2534 addr = start_addr;
2535 do {
db7b5426
BS
2536 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2537 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2538 ram_addr_t orig_memory = p->phys_offset;
2539 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2540 int need_subpage = 0;
2541
2542 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2543 need_subpage);
f6405247 2544 if (need_subpage) {
db7b5426
BS
2545 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2546 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2547 &p->phys_offset, orig_memory,
2548 p->region_offset);
db7b5426
BS
2549 } else {
2550 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2551 >> IO_MEM_SHIFT];
2552 }
8da3ff18
PB
2553 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2554 region_offset);
2555 p->region_offset = 0;
db7b5426
BS
2556 } else {
2557 p->phys_offset = phys_offset;
2558 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2559 (phys_offset & IO_MEM_ROMD))
2560 phys_offset += TARGET_PAGE_SIZE;
2561 }
2562 } else {
2563 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2564 p->phys_offset = phys_offset;
8da3ff18 2565 p->region_offset = region_offset;
db7b5426 2566 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2567 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2568 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2569 } else {
c227f099 2570 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2571 int need_subpage = 0;
2572
2573 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2574 end_addr2, need_subpage);
2575
f6405247 2576 if (need_subpage) {
db7b5426 2577 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2578 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2579 addr & TARGET_PAGE_MASK);
db7b5426 2580 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2581 phys_offset, region_offset);
2582 p->region_offset = 0;
db7b5426
BS
2583 }
2584 }
2585 }
8da3ff18 2586 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2587 addr += TARGET_PAGE_SIZE;
2588 } while (addr != end_addr);
3b46e624 2589
9d42037b
FB
2590 /* since each CPU stores ram addresses in its TLB cache, we must
2591 reset the modified entries */
2592 /* XXX: slow ! */
2593 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2594 tlb_flush(env, 1);
2595 }
33417e70
FB
2596}
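/* Illustrative sketch (not part of the original source; caller names are
   assumptions): a board model maps guest-physical ranges roughly as

       ram_addr_t ram_off = qemu_ram_alloc(ram_size, mr);
       cpu_register_physical_memory_log(0x00000000, ram_size, ram_off,
                                        0, false);
       cpu_register_physical_memory_log(0x10000000, 0x1000, mmio_index,
                                        0, false);

   where mmio_index comes from cpu_register_io_memory(); its non-zero low
   bits mark the page as I/O rather than RAM.  In this QEMU version most
   callers reach this function through the MemoryRegion API instead. */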
2597
c227f099 2598void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2599{
2600 if (kvm_enabled())
2601 kvm_coalesce_mmio_region(addr, size);
2602}
2603
c227f099 2604void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2605{
2606 if (kvm_enabled())
2607 kvm_uncoalesce_mmio_region(addr, size);
2608}
2609
62a2744c
SY
2610void qemu_flush_coalesced_mmio_buffer(void)
2611{
2612 if (kvm_enabled())
2613 kvm_flush_coalesced_mmio_buffer();
2614}
2615
c902760f
MT
2616#if defined(__linux__) && !defined(TARGET_S390X)
2617
2618#include <sys/vfs.h>
2619
2620#define HUGETLBFS_MAGIC 0x958458f6
2621
2622static long gethugepagesize(const char *path)
2623{
2624 struct statfs fs;
2625 int ret;
2626
2627 do {
9742bf26 2628 ret = statfs(path, &fs);
c902760f
MT
2629 } while (ret != 0 && errno == EINTR);
2630
2631 if (ret != 0) {
9742bf26
YT
2632 perror(path);
2633 return 0;
c902760f
MT
2634 }
2635
2636 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2637 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2638
2639 return fs.f_bsize;
2640}
2641
04b16653
AW
2642static void *file_ram_alloc(RAMBlock *block,
2643 ram_addr_t memory,
2644 const char *path)
c902760f
MT
2645{
2646 char *filename;
2647 void *area;
2648 int fd;
2649#ifdef MAP_POPULATE
2650 int flags;
2651#endif
2652 unsigned long hpagesize;
2653
2654 hpagesize = gethugepagesize(path);
2655 if (!hpagesize) {
9742bf26 2656 return NULL;
c902760f
MT
2657 }
2658
2659 if (memory < hpagesize) {
2660 return NULL;
2661 }
2662
2663 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2664 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2665 return NULL;
2666 }
2667
2668 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2669 return NULL;
c902760f
MT
2670 }
2671
2672 fd = mkstemp(filename);
2673 if (fd < 0) {
9742bf26
YT
2674 perror("unable to create backing store for hugepages");
2675 free(filename);
2676 return NULL;
c902760f
MT
2677 }
2678 unlink(filename);
2679 free(filename);
2680
2681 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2682
2683 /*
2684 * ftruncate is not supported by hugetlbfs in older
2685 * hosts, so don't bother bailing out on errors.
2686 * If anything goes wrong with it under other filesystems,
2687 * mmap will fail.
2688 */
2689 if (ftruncate(fd, memory))
9742bf26 2690 perror("ftruncate");
c902760f
MT
2691
2692#ifdef MAP_POPULATE
2693 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2694 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2695 * to sidestep this quirk.
2696 */
2697 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2698 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2699#else
2700 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2701#endif
2702 if (area == MAP_FAILED) {
9742bf26
YT
2703 perror("file_ram_alloc: can't mmap RAM pages");
2704 close(fd);
2705 return (NULL);
c902760f 2706 }
04b16653 2707 block->fd = fd;
c902760f
MT
2708 return area;
2709}
2710#endif
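/* Usage note (illustrative, not part of the original source): this path is
   taken when the user passes -mem-path pointing at a hugetlbfs mount, e.g.

       mount -t hugetlbfs none /dev/hugepages
       qemu ... -mem-path /dev/hugepages

   gethugepagesize() then reports the mount's page size and guest RAM is
   backed by an unlinked temporary file on that filesystem. */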
2711
d17b5288 2712static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2713{
2714 RAMBlock *block, *next_block;
3e837b2c 2715 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2716
2717 if (QLIST_EMPTY(&ram_list.blocks))
2718 return 0;
2719
2720 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2721 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2722
2723 end = block->offset + block->length;
2724
2725 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2726 if (next_block->offset >= end) {
2727 next = MIN(next, next_block->offset);
2728 }
2729 }
2730 if (next - end >= size && next - end < mingap) {
3e837b2c 2731 offset = end;
04b16653
AW
2732 mingap = next - end;
2733 }
2734 }
3e837b2c
AW
2735
2736 if (offset == RAM_ADDR_MAX) {
2737 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2738 (uint64_t)size);
2739 abort();
2740 }
2741
04b16653
AW
2742 return offset;
2743}
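/* Worked example (illustrative comment, not part of the original source):
   with existing blocks [0x0, 0x100000) and [0x300000, 0x400000), a request
   for 0x100000 bytes sees two candidate gaps - 0x200000 bytes between the
   blocks and an unbounded gap after the last one - and returns 0x100000,
   the start of the smallest gap that still fits. */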
2744
2745static ram_addr_t last_ram_offset(void)
d17b5288
AW
2746{
2747 RAMBlock *block;
2748 ram_addr_t last = 0;
2749
2750 QLIST_FOREACH(block, &ram_list.blocks, next)
2751 last = MAX(last, block->offset + block->length);
2752
2753 return last;
2754}
2755
c5705a77 2756void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2757{
2758 RAMBlock *new_block, *block;
2759
c5705a77
AK
2760 new_block = NULL;
2761 QLIST_FOREACH(block, &ram_list.blocks, next) {
2762 if (block->offset == addr) {
2763 new_block = block;
2764 break;
2765 }
2766 }
2767 assert(new_block);
2768 assert(!new_block->idstr[0]);
84b89d78
CM
2769
2770 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2771 char *id = dev->parent_bus->info->get_dev_path(dev);
2772 if (id) {
2773 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2774 g_free(id);
84b89d78
CM
2775 }
2776 }
2777 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2778
2779 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2780 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2781 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2782 new_block->idstr);
2783 abort();
2784 }
2785 }
c5705a77
AK
2786}
2787
2788ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2789 MemoryRegion *mr)
2790{
2791 RAMBlock *new_block;
2792
2793 size = TARGET_PAGE_ALIGN(size);
2794 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2795
432d268c 2796 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2797 if (host) {
2798 new_block->host = host;
cd19cfa2 2799 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2800 } else {
2801 if (mem_path) {
c902760f 2802#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2803 new_block->host = file_ram_alloc(new_block, size, mem_path);
2804 if (!new_block->host) {
2805 new_block->host = qemu_vmalloc(size);
e78815a5 2806 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2807 }
c902760f 2808#else
6977dfe6
YT
2809 fprintf(stderr, "-mem-path option unsupported\n");
2810 exit(1);
c902760f 2811#endif
6977dfe6 2812 } else {
6b02494d 2813#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2814 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2815 a system-defined value, which is at least 256GB. Larger systems
2816 have larger values. We put the guest between the end of data
2817 segment (system break) and this value. We use 32GB as a base to
2818 have enough room for the system break to grow. */
2819 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2820 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2821 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2822 if (new_block->host == MAP_FAILED) {
2823 fprintf(stderr, "Allocating RAM failed\n");
2824 abort();
2825 }
6b02494d 2826#else
868bb33f 2827 if (xen_enabled()) {
fce537d4 2828 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2829 } else {
2830 new_block->host = qemu_vmalloc(size);
2831 }
6b02494d 2832#endif
e78815a5 2833 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2834 }
c902760f 2835 }
94a6b54f
PB
2836 new_block->length = size;
2837
f471a17e 2838 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2839
7267c094 2840 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2841 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2842 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2843 0xff, size >> TARGET_PAGE_BITS);
2844
6f0437e8
JK
2845 if (kvm_enabled())
2846 kvm_setup_guest_memory(new_block->host, size);
2847
94a6b54f
PB
2848 return new_block->offset;
2849}
e9a1ab19 2850
c5705a77 2851ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2852{
c5705a77 2853 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2854}
2855
1f2e98b6
AW
2856void qemu_ram_free_from_ptr(ram_addr_t addr)
2857{
2858 RAMBlock *block;
2859
2860 QLIST_FOREACH(block, &ram_list.blocks, next) {
2861 if (addr == block->offset) {
2862 QLIST_REMOVE(block, next);
7267c094 2863 g_free(block);
1f2e98b6
AW
2864 return;
2865 }
2866 }
2867}
2868
c227f099 2869void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2870{
04b16653
AW
2871 RAMBlock *block;
2872
2873 QLIST_FOREACH(block, &ram_list.blocks, next) {
2874 if (addr == block->offset) {
2875 QLIST_REMOVE(block, next);
cd19cfa2
HY
2876 if (block->flags & RAM_PREALLOC_MASK) {
2877 ;
2878 } else if (mem_path) {
04b16653
AW
2879#if defined (__linux__) && !defined(TARGET_S390X)
2880 if (block->fd) {
2881 munmap(block->host, block->length);
2882 close(block->fd);
2883 } else {
2884 qemu_vfree(block->host);
2885 }
fd28aa13
JK
2886#else
2887 abort();
04b16653
AW
2888#endif
2889 } else {
2890#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2891 munmap(block->host, block->length);
2892#else
868bb33f 2893 if (xen_enabled()) {
e41d7c69 2894 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2895 } else {
2896 qemu_vfree(block->host);
2897 }
04b16653
AW
2898#endif
2899 }
7267c094 2900 g_free(block);
04b16653
AW
2901 return;
2902 }
2903 }
2904
e9a1ab19
FB
2905}
2906
cd19cfa2
HY
2907#ifndef _WIN32
2908void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2909{
2910 RAMBlock *block;
2911 ram_addr_t offset;
2912 int flags;
2913 void *area, *vaddr;
2914
2915 QLIST_FOREACH(block, &ram_list.blocks, next) {
2916 offset = addr - block->offset;
2917 if (offset < block->length) {
2918 vaddr = block->host + offset;
2919 if (block->flags & RAM_PREALLOC_MASK) {
2920 ;
2921 } else {
2922 flags = MAP_FIXED;
2923 munmap(vaddr, length);
2924 if (mem_path) {
2925#if defined(__linux__) && !defined(TARGET_S390X)
2926 if (block->fd) {
2927#ifdef MAP_POPULATE
2928 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2929 MAP_PRIVATE;
2930#else
2931 flags |= MAP_PRIVATE;
2932#endif
2933 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2934 flags, block->fd, offset);
2935 } else {
2936 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2937 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2938 flags, -1, 0);
2939 }
fd28aa13
JK
2940#else
2941 abort();
cd19cfa2
HY
2942#endif
2943 } else {
2944#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2945 flags |= MAP_SHARED | MAP_ANONYMOUS;
2946 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2947 flags, -1, 0);
2948#else
2949 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2950 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2951 flags, -1, 0);
2952#endif
2953 }
2954 if (area != vaddr) {
f15fbc4b
AP
2955 fprintf(stderr, "Could not remap addr: "
2956 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2957 length, addr);
2958 exit(1);
2959 }
2960 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2961 }
2962 return;
2963 }
2964 }
2965}
2966#endif /* !_WIN32 */
2967
dc828ca1 2968/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2969 With the exception of the softmmu code in this file, this should
2970 only be used for local memory (e.g. video ram) that the device owns,
2971 and knows it isn't going to access beyond the end of the block.
2972
2973 It should not be used for general purpose DMA.
2974 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2975 */
c227f099 2976void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2977{
94a6b54f
PB
2978 RAMBlock *block;
2979
f471a17e
AW
2980 QLIST_FOREACH(block, &ram_list.blocks, next) {
2981 if (addr - block->offset < block->length) {
7d82af38
VP
 2982 /* Move this entry to the start of the list. */
2983 if (block != QLIST_FIRST(&ram_list.blocks)) {
2984 QLIST_REMOVE(block, next);
2985 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2986 }
868bb33f 2987 if (xen_enabled()) {
432d268c
JN
2988 /* We need to check if the requested address is in the RAM
2989 * because we don't want to map the entire memory in QEMU.
712c2b41 2990 * In that case just map until the end of the page.
432d268c
JN
2991 */
2992 if (block->offset == 0) {
e41d7c69 2993 return xen_map_cache(addr, 0, 0);
432d268c 2994 } else if (block->host == NULL) {
e41d7c69
JK
2995 block->host =
2996 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2997 }
2998 }
f471a17e
AW
2999 return block->host + (addr - block->offset);
3000 }
94a6b54f 3001 }
f471a17e
AW
3002
3003 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3004 abort();
3005
3006 return NULL;
dc828ca1
PB
3007}
3008
b2e0a138
MT
3009/* Return a host pointer to ram allocated with qemu_ram_alloc.
3010 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3011 */
3012void *qemu_safe_ram_ptr(ram_addr_t addr)
3013{
3014 RAMBlock *block;
3015
3016 QLIST_FOREACH(block, &ram_list.blocks, next) {
3017 if (addr - block->offset < block->length) {
868bb33f 3018 if (xen_enabled()) {
432d268c
JN
3019 /* We need to check if the requested address is in the RAM
3020 * because we don't want to map the entire memory in QEMU.
712c2b41 3021 * In that case just map until the end of the page.
432d268c
JN
3022 */
3023 if (block->offset == 0) {
e41d7c69 3024 return xen_map_cache(addr, 0, 0);
432d268c 3025 } else if (block->host == NULL) {
e41d7c69
JK
3026 block->host =
3027 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3028 }
3029 }
b2e0a138
MT
3030 return block->host + (addr - block->offset);
3031 }
3032 }
3033
3034 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3035 abort();
3036
3037 return NULL;
3038}
3039
38bee5dc
SS
3040/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3041 * but takes a size argument */
8ab934f9 3042void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3043{
8ab934f9
SS
3044 if (*size == 0) {
3045 return NULL;
3046 }
868bb33f 3047 if (xen_enabled()) {
e41d7c69 3048 return xen_map_cache(addr, *size, 1);
868bb33f 3049 } else {
38bee5dc
SS
3050 RAMBlock *block;
3051
3052 QLIST_FOREACH(block, &ram_list.blocks, next) {
3053 if (addr - block->offset < block->length) {
3054 if (addr - block->offset + *size > block->length)
3055 *size = block->length - addr + block->offset;
3056 return block->host + (addr - block->offset);
3057 }
3058 }
3059
3060 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3061 abort();
38bee5dc
SS
3062 }
3063}
3064
050a0ddf
AP
3065void qemu_put_ram_ptr(void *addr)
3066{
3067 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3068}
3069
e890261f 3070int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3071{
94a6b54f
PB
3072 RAMBlock *block;
3073 uint8_t *host = ptr;
3074
868bb33f 3075 if (xen_enabled()) {
e41d7c69 3076 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3077 return 0;
3078 }
3079
f471a17e 3080 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3081 /* This case happens when the block is not mapped. */
3082 if (block->host == NULL) {
3083 continue;
3084 }
f471a17e 3085 if (host - block->host < block->length) {
e890261f
MT
3086 *ram_addr = block->offset + (host - block->host);
3087 return 0;
f471a17e 3088 }
94a6b54f 3089 }
432d268c 3090
e890261f
MT
3091 return -1;
3092}
f471a17e 3093
e890261f
MT
3094/* Some of the softmmu routines need to translate from a host pointer
3095 (typically a TLB entry) back to a ram offset. */
3096ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3097{
3098 ram_addr_t ram_addr;
f471a17e 3099
e890261f
MT
3100 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3101 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3102 abort();
3103 }
3104 return ram_addr;
5579c7f3
PB
3105}
3106
c227f099 3107static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3108{
67d3b957 3109#ifdef DEBUG_UNASSIGNED
ab3d1727 3110 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3111#endif
5b450407 3112#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3113 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3114#endif
3115 return 0;
3116}
3117
c227f099 3118static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3119{
3120#ifdef DEBUG_UNASSIGNED
3121 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3122#endif
5b450407 3123#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3124 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3125#endif
3126 return 0;
3127}
3128
c227f099 3129static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3130{
3131#ifdef DEBUG_UNASSIGNED
3132 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3133#endif
5b450407 3134#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3135 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3136#endif
33417e70
FB
3137 return 0;
3138}
3139
c227f099 3140static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3141{
67d3b957 3142#ifdef DEBUG_UNASSIGNED
ab3d1727 3143 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3144#endif
5b450407 3145#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3146 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3147#endif
3148}
3149
c227f099 3150static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3151{
3152#ifdef DEBUG_UNASSIGNED
3153 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3154#endif
5b450407 3155#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3156 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3157#endif
3158}
3159
c227f099 3160static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3161{
3162#ifdef DEBUG_UNASSIGNED
3163 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3164#endif
5b450407 3165#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3166 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3167#endif
33417e70
FB
3168}
3169
d60efc6b 3170static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3171 unassigned_mem_readb,
e18231a3
BS
3172 unassigned_mem_readw,
3173 unassigned_mem_readl,
33417e70
FB
3174};
3175
d60efc6b 3176static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3177 unassigned_mem_writeb,
e18231a3
BS
3178 unassigned_mem_writew,
3179 unassigned_mem_writel,
33417e70
FB
3180};
3181
c227f099 3182static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3183 uint32_t val)
9fa3e853 3184{
3a7d929e 3185 int dirty_flags;
f7c11b53 3186 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3187 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3188#if !defined(CONFIG_USER_ONLY)
3a7d929e 3189 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3190 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3191#endif
3a7d929e 3192 }
5579c7f3 3193 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3194 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3195 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3196 /* we remove the notdirty callback only if the code has been
3197 flushed */
3198 if (dirty_flags == 0xff)
2e70f6ef 3199 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3200}
3201
c227f099 3202static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3203 uint32_t val)
9fa3e853 3204{
3a7d929e 3205 int dirty_flags;
f7c11b53 3206 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3207 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3208#if !defined(CONFIG_USER_ONLY)
3a7d929e 3209 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3210 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3211#endif
3a7d929e 3212 }
5579c7f3 3213 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3214 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3215 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3216 /* we remove the notdirty callback only if the code has been
3217 flushed */
3218 if (dirty_flags == 0xff)
2e70f6ef 3219 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3220}
3221
c227f099 3222static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3223 uint32_t val)
9fa3e853 3224{
3a7d929e 3225 int dirty_flags;
f7c11b53 3226 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3227 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3228#if !defined(CONFIG_USER_ONLY)
3a7d929e 3229 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3230 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3231#endif
3a7d929e 3232 }
5579c7f3 3233 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3234 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3235 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3236 /* we remove the notdirty callback only if the code has been
3237 flushed */
3238 if (dirty_flags == 0xff)
2e70f6ef 3239 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3240}
3241
d60efc6b 3242static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3243 NULL, /* never used */
3244 NULL, /* never used */
3245 NULL, /* never used */
3246};
3247
d60efc6b 3248static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3249 notdirty_mem_writeb,
3250 notdirty_mem_writew,
3251 notdirty_mem_writel,
3252};
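/* Summary comment (added for clarity, not in the original source): RAM
   pages that contain translated code are entered into the TLB with
   TLB_NOTDIRTY, so guest stores are funneled through the handlers above.
   Each write first invalidates any TBs on the page, then performs the
   store and sets the dirty flags; once all flags including
   CODE_DIRTY_FLAG are set (dirty_flags == 0xff), i.e. no translated code
   remains on the page, tlb_set_dirty() switches the entry back to a plain
   RAM mapping and the write slow path goes away. */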
3253
0f459d16 3254/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3255static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3256{
3257 CPUState *env = cpu_single_env;
06d55cc1
AL
3258 target_ulong pc, cs_base;
3259 TranslationBlock *tb;
0f459d16 3260 target_ulong vaddr;
a1d1bb31 3261 CPUWatchpoint *wp;
06d55cc1 3262 int cpu_flags;
0f459d16 3263
06d55cc1
AL
3264 if (env->watchpoint_hit) {
3265 /* We re-entered the check after replacing the TB. Now raise
 3266 * the debug interrupt so that it will trigger after the
3267 * current instruction. */
3268 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3269 return;
3270 }
2e70f6ef 3271 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3272 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3273 if ((vaddr == (wp->vaddr & len_mask) ||
3274 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3275 wp->flags |= BP_WATCHPOINT_HIT;
3276 if (!env->watchpoint_hit) {
3277 env->watchpoint_hit = wp;
3278 tb = tb_find_pc(env->mem_io_pc);
3279 if (!tb) {
3280 cpu_abort(env, "check_watchpoint: could not find TB for "
3281 "pc=%p", (void *)env->mem_io_pc);
3282 }
618ba8e6 3283 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3284 tb_phys_invalidate(tb, -1);
3285 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3286 env->exception_index = EXCP_DEBUG;
3287 } else {
3288 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3289 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3290 }
3291 cpu_resume_from_signal(env, NULL);
06d55cc1 3292 }
6e140f28
AL
3293 } else {
3294 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3295 }
3296 }
3297}
3298
6658ffb8
PB
3299/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3300 so these check for a hit then pass through to the normal out-of-line
3301 phys routines. */
c227f099 3302static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3303{
b4051334 3304 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3305 return ldub_phys(addr);
3306}
3307
c227f099 3308static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3309{
b4051334 3310 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3311 return lduw_phys(addr);
3312}
3313
c227f099 3314static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3315{
b4051334 3316 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3317 return ldl_phys(addr);
3318}
3319
c227f099 3320static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3321 uint32_t val)
3322{
b4051334 3323 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3324 stb_phys(addr, val);
3325}
3326
c227f099 3327static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3328 uint32_t val)
3329{
b4051334 3330 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3331 stw_phys(addr, val);
3332}
3333
c227f099 3334static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3335 uint32_t val)
3336{
b4051334 3337 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3338 stl_phys(addr, val);
3339}
3340
d60efc6b 3341static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3342 watch_mem_readb,
3343 watch_mem_readw,
3344 watch_mem_readl,
3345};
3346
d60efc6b 3347static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3348 watch_mem_writeb,
3349 watch_mem_writew,
3350 watch_mem_writel,
3351};
6658ffb8 3352
f6405247
RH
3353static inline uint32_t subpage_readlen (subpage_t *mmio,
3354 target_phys_addr_t addr,
3355 unsigned int len)
db7b5426 3356{
f6405247 3357 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3358#if defined(DEBUG_SUBPAGE)
3359 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3360 mmio, len, addr, idx);
3361#endif
db7b5426 3362
f6405247
RH
3363 addr += mmio->region_offset[idx];
3364 idx = mmio->sub_io_index[idx];
3365 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3366}
3367
c227f099 3368static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3369 uint32_t value, unsigned int len)
db7b5426 3370{
f6405247 3371 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3372#if defined(DEBUG_SUBPAGE)
f6405247
RH
3373 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3374 __func__, mmio, len, addr, idx, value);
db7b5426 3375#endif
f6405247
RH
3376
3377 addr += mmio->region_offset[idx];
3378 idx = mmio->sub_io_index[idx];
3379 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3380}
3381
c227f099 3382static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3383{
db7b5426
BS
3384 return subpage_readlen(opaque, addr, 0);
3385}
3386
c227f099 3387static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3388 uint32_t value)
3389{
db7b5426
BS
3390 subpage_writelen(opaque, addr, value, 0);
3391}
3392
c227f099 3393static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3394{
db7b5426
BS
3395 return subpage_readlen(opaque, addr, 1);
3396}
3397
c227f099 3398static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3399 uint32_t value)
3400{
db7b5426
BS
3401 subpage_writelen(opaque, addr, value, 1);
3402}
3403
c227f099 3404static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3405{
db7b5426
BS
3406 return subpage_readlen(opaque, addr, 2);
3407}
3408
f6405247
RH
3409static void subpage_writel (void *opaque, target_phys_addr_t addr,
3410 uint32_t value)
db7b5426 3411{
db7b5426
BS
3412 subpage_writelen(opaque, addr, value, 2);
3413}
3414
d60efc6b 3415static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3416 &subpage_readb,
3417 &subpage_readw,
3418 &subpage_readl,
3419};
3420
d60efc6b 3421static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3422 &subpage_writeb,
3423 &subpage_writew,
3424 &subpage_writel,
3425};
3426
56384e8b
AF
3427static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3428{
3429 ram_addr_t raddr = addr;
3430 void *ptr = qemu_get_ram_ptr(raddr);
3431 return ldub_p(ptr);
3432}
3433
3434static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3435 uint32_t value)
3436{
3437 ram_addr_t raddr = addr;
3438 void *ptr = qemu_get_ram_ptr(raddr);
3439 stb_p(ptr, value);
3440}
3441
3442static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3443{
3444 ram_addr_t raddr = addr;
3445 void *ptr = qemu_get_ram_ptr(raddr);
3446 return lduw_p(ptr);
3447}
3448
3449static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3450 uint32_t value)
3451{
3452 ram_addr_t raddr = addr;
3453 void *ptr = qemu_get_ram_ptr(raddr);
3454 stw_p(ptr, value);
3455}
3456
3457static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3458{
3459 ram_addr_t raddr = addr;
3460 void *ptr = qemu_get_ram_ptr(raddr);
3461 return ldl_p(ptr);
3462}
3463
3464static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3465 uint32_t value)
3466{
3467 ram_addr_t raddr = addr;
3468 void *ptr = qemu_get_ram_ptr(raddr);
3469 stl_p(ptr, value);
3470}
3471
3472static CPUReadMemoryFunc * const subpage_ram_read[] = {
3473 &subpage_ram_readb,
3474 &subpage_ram_readw,
3475 &subpage_ram_readl,
3476};
3477
3478static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3479 &subpage_ram_writeb,
3480 &subpage_ram_writew,
3481 &subpage_ram_writel,
3482};
3483
c227f099
AL
3484static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3485 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3486{
3487 int idx, eidx;
3488
3489 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3490 return -1;
3491 idx = SUBPAGE_IDX(start);
3492 eidx = SUBPAGE_IDX(end);
3493#if defined(DEBUG_SUBPAGE)
0bf9e31a 3494 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3495 mmio, start, end, idx, eidx, memory);
3496#endif
56384e8b
AF
3497 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3498 memory = IO_MEM_SUBPAGE_RAM;
3499 }
f6405247 3500 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3501 for (; idx <= eidx; idx++) {
f6405247
RH
3502 mmio->sub_io_index[idx] = memory;
3503 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3504 }
3505
3506 return 0;
3507}
3508
f6405247
RH
3509static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3510 ram_addr_t orig_memory,
3511 ram_addr_t region_offset)
db7b5426 3512{
c227f099 3513 subpage_t *mmio;
db7b5426
BS
3514 int subpage_memory;
3515
7267c094 3516 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3517
3518 mmio->base = base;
2507c12a
AG
3519 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3520 DEVICE_NATIVE_ENDIAN);
db7b5426 3521#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3522 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3523 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3524#endif
1eec614b 3525 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3526 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3527
3528 return mmio;
3529}
3530
88715657
AL
3531static int get_free_io_mem_idx(void)
3532{
3533 int i;
3534
3535 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3536 if (!io_mem_used[i]) {
3537 io_mem_used[i] = 1;
3538 return i;
3539 }
c6703b47 3540 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3541 return -1;
3542}
3543
dd310534
AG
3544/*
3545 * Most devices operate in little-endian mode, but some operate in big
3546 * endian. A device gets byte-swapped mmio if it is plugged onto a CPU
3547 * of the other endianness.
3548 *
3549 * CPU          Device          swap?
3550 *
3551 * little       little          no
3552 * little       big             yes
3553 * big          little          yes
3554 * big          big             no
3555 */
3556
3557typedef struct SwapEndianContainer {
3558 CPUReadMemoryFunc *read[3];
3559 CPUWriteMemoryFunc *write[3];
3560 void *opaque;
3561} SwapEndianContainer;
3562
3563static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3564{
3565 uint32_t val;
3566 SwapEndianContainer *c = opaque;
3567 val = c->read[0](c->opaque, addr);
3568 return val;
3569}
3570
3571static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3572{
3573 uint32_t val;
3574 SwapEndianContainer *c = opaque;
3575 val = bswap16(c->read[1](c->opaque, addr));
3576 return val;
3577}
3578
3579static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3580{
3581 uint32_t val;
3582 SwapEndianContainer *c = opaque;
3583 val = bswap32(c->read[2](c->opaque, addr));
3584 return val;
3585}
3586
3587static CPUReadMemoryFunc * const swapendian_readfn[3]={
3588 swapendian_mem_readb,
3589 swapendian_mem_readw,
3590 swapendian_mem_readl
3591};
3592
3593static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3594 uint32_t val)
3595{
3596 SwapEndianContainer *c = opaque;
3597 c->write[0](c->opaque, addr, val);
3598}
3599
3600static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3601 uint32_t val)
3602{
3603 SwapEndianContainer *c = opaque;
3604 c->write[1](c->opaque, addr, bswap16(val));
3605}
3606
3607static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3608 uint32_t val)
3609{
3610 SwapEndianContainer *c = opaque;
3611 c->write[2](c->opaque, addr, bswap32(val));
3612}
3613
3614static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3615 swapendian_mem_writeb,
3616 swapendian_mem_writew,
3617 swapendian_mem_writel
3618};
3619
3620static void swapendian_init(int io_index)
3621{
7267c094 3622 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
dd310534
AG
3623 int i;
3624
3625 /* Swap mmio for big endian targets */
3626 c->opaque = io_mem_opaque[io_index];
3627 for (i = 0; i < 3; i++) {
3628 c->read[i] = io_mem_read[io_index][i];
3629 c->write[i] = io_mem_write[io_index][i];
3630
3631 io_mem_read[io_index][i] = swapendian_readfn[i];
3632 io_mem_write[io_index][i] = swapendian_writefn[i];
3633 }
3634 io_mem_opaque[io_index] = c;
3635}
3636
3637static void swapendian_del(int io_index)
3638{
3639 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
7267c094 3640 g_free(io_mem_opaque[io_index]);
dd310534
AG
3641 }
3642}
3643
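/*
 * A minimal sketch, not part of exec.c: the endian argument given at
 * registration time decides whether the swapendian wrappers above are
 * interposed.  dev_read, dev_write and s stand in for a device's handler
 * tables and state and are purely illustrative.
 */
#if 0 /* example only */
static void example_endian_choice(void *s,
                                  CPUReadMemoryFunc * const *dev_read,
                                  CPUWriteMemoryFunc * const *dev_write)
{
    /* wrapped by swapendian_init() when the target is big endian */
    int le_io = cpu_register_io_memory(dev_read, dev_write, s,
                                       DEVICE_LITTLE_ENDIAN);
    /* never wrapped: the handlers already see target byte order */
    int native_io = cpu_register_io_memory(dev_read, dev_write, s,
                                           DEVICE_NATIVE_ENDIAN);

    cpu_unregister_io_memory(le_io);
    cpu_unregister_io_memory(native_io);
}
#endif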
33417e70
FB
3644/* mem_read and mem_write are arrays of functions containing the
3645 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3646 2). Functions can be omitted with a NULL function pointer.
3ee89922 3647 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3648 modified. If it is zero, a new io zone is allocated. The return
3649 value can be used with cpu_register_physical_memory(). (-1) is
3650 returned if error. */
1eed09cb 3651static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3652 CPUReadMemoryFunc * const *mem_read,
3653 CPUWriteMemoryFunc * const *mem_write,
dd310534 3654 void *opaque, enum device_endian endian)
33417e70 3655{
3cab721d
RH
3656 int i;
3657
33417e70 3658 if (io_index <= 0) {
88715657
AL
3659 io_index = get_free_io_mem_idx();
3660 if (io_index == -1)
3661 return io_index;
33417e70 3662 } else {
1eed09cb 3663 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3664 if (io_index >= IO_MEM_NB_ENTRIES)
3665 return -1;
3666 }
b5ff1b31 3667
3cab721d
RH
3668 for (i = 0; i < 3; ++i) {
3669 io_mem_read[io_index][i]
3670 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3671 }
3672 for (i = 0; i < 3; ++i) {
3673 io_mem_write[io_index][i]
3674 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3675 }
a4193c8a 3676 io_mem_opaque[io_index] = opaque;
f6405247 3677
dd310534
AG
3678 switch (endian) {
3679 case DEVICE_BIG_ENDIAN:
3680#ifndef TARGET_WORDS_BIGENDIAN
3681 swapendian_init(io_index);
3682#endif
3683 break;
3684 case DEVICE_LITTLE_ENDIAN:
3685#ifdef TARGET_WORDS_BIGENDIAN
3686 swapendian_init(io_index);
3687#endif
3688 break;
3689 case DEVICE_NATIVE_ENDIAN:
3690 default:
3691 break;
3692 }
3693
f6405247 3694 return (io_index << IO_MEM_SHIFT);
33417e70 3695}
61382a50 3696
d60efc6b
BS
3697int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3698 CPUWriteMemoryFunc * const *mem_write,
dd310534 3699 void *opaque, enum device_endian endian)
1eed09cb 3700{
2507c12a 3701 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3702}
3703
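/*
 * A minimal usage sketch, not part of exec.c: how a hypothetical device
 * "mydev" registers dword-only MMIO handlers and maps them into the guest
 * physical address space.  MyDevState and the 0x1000 size are invented;
 * cpu_register_physical_memory() is the registration call referred to in
 * the comment above.
 */
#if 0 /* example only */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;            /* hypothetical device state */
    return s->regs[addr >> 2];
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = val;
}

/* byte/word slots left NULL: they fall back to unassigned_mem_read/write */
static CPUReadMemoryFunc * const mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc * const mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    int iomemtype = cpu_register_io_memory(mydev_read, mydev_write, s,
                                           DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, 0x1000, iomemtype);
}
#endif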
88715657
AL
3704void cpu_unregister_io_memory(int io_table_address)
3705{
3706 int i;
3707 int io_index = io_table_address >> IO_MEM_SHIFT;
3708
dd310534
AG
3709 swapendian_del(io_index);
3710
88715657
AL
3711 for (i=0;i < 3; i++) {
3712 io_mem_read[io_index][i] = unassigned_mem_read[i];
3713 io_mem_write[io_index][i] = unassigned_mem_write[i];
3714 }
3715 io_mem_opaque[io_index] = NULL;
3716 io_mem_used[io_index] = 0;
3717}
3718
e9179ce1
AK
3719static void io_mem_init(void)
3720{
3721 int i;
3722
2507c12a
AG
3723 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3724 unassigned_mem_write, NULL,
3725 DEVICE_NATIVE_ENDIAN);
3726 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3727 unassigned_mem_write, NULL,
3728 DEVICE_NATIVE_ENDIAN);
3729 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3730 notdirty_mem_write, NULL,
3731 DEVICE_NATIVE_ENDIAN);
56384e8b
AF
3732 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
3733 subpage_ram_write, NULL,
3734 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3735 for (i=0; i<5; i++)
3736 io_mem_used[i] = 1;
3737
3738 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3739 watch_mem_write, NULL,
3740 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3741}
3742
62152b8a
AK
3743static void memory_map_init(void)
3744{
7267c094 3745 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3746 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3747 set_system_memory_map(system_memory);
309cb471 3748
7267c094 3749 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3750 memory_region_init(system_io, "io", 65536);
3751 set_system_io_map(system_io);
62152b8a
AK
3752}
3753
3754MemoryRegion *get_system_memory(void)
3755{
3756 return system_memory;
3757}
3758
309cb471
AK
3759MemoryRegion *get_system_io(void)
3760{
3761 return system_io;
3762}
3763
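/*
 * A minimal sketch, not part of exec.c: boards build the guest physical
 * address space by attaching subregions to the root region returned by
 * get_system_memory().  memory_region_add_subregion() and the fixed base
 * address 0 are assumptions shown only for illustration.
 */
#if 0 /* example only */
static void example_board_map_ram(MemoryRegion *board_ram)
{
    MemoryRegion *sysmem = get_system_memory();

    /* place the (already initialized) RAM region at guest physical 0 */
    memory_region_add_subregion(sysmem, 0, board_ram);
}
#endif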
e2eef170
PB
3764#endif /* !defined(CONFIG_USER_ONLY) */
3765
13eb76e0
FB
3766/* physical memory access (slow version, mainly for debug) */
3767#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3768int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3769 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3770{
3771 int l, flags;
3772 target_ulong page;
53a5960a 3773 void * p;
13eb76e0
FB
3774
3775 while (len > 0) {
3776 page = addr & TARGET_PAGE_MASK;
3777 l = (page + TARGET_PAGE_SIZE) - addr;
3778 if (l > len)
3779 l = len;
3780 flags = page_get_flags(page);
3781 if (!(flags & PAGE_VALID))
a68fe89c 3782 return -1;
13eb76e0
FB
3783 if (is_write) {
3784 if (!(flags & PAGE_WRITE))
a68fe89c 3785 return -1;
579a97f7 3786 /* XXX: this code should not depend on lock_user */
72fb7daa 3787 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3788 return -1;
72fb7daa
AJ
3789 memcpy(p, buf, l);
3790 unlock_user(p, addr, l);
13eb76e0
FB
3791 } else {
3792 if (!(flags & PAGE_READ))
a68fe89c 3793 return -1;
579a97f7 3794 /* XXX: this code should not depend on lock_user */
72fb7daa 3795 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3796 return -1;
72fb7daa 3797 memcpy(buf, p, l);
5b257578 3798 unlock_user(p, addr, 0);
13eb76e0
FB
3799 }
3800 len -= l;
3801 buf += l;
3802 addr += l;
3803 }
a68fe89c 3804 return 0;
13eb76e0 3805}
8df1cd07 3806
13eb76e0 3807#else
c227f099 3808void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3809 int len, int is_write)
3810{
3811 int l, io_index;
3812 uint8_t *ptr;
3813 uint32_t val;
c227f099 3814 target_phys_addr_t page;
8ca5692d 3815 ram_addr_t pd;
92e873b9 3816 PhysPageDesc *p;
3b46e624 3817
13eb76e0
FB
3818 while (len > 0) {
3819 page = addr & TARGET_PAGE_MASK;
3820 l = (page + TARGET_PAGE_SIZE) - addr;
3821 if (l > len)
3822 l = len;
92e873b9 3823 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3824 if (!p) {
3825 pd = IO_MEM_UNASSIGNED;
3826 } else {
3827 pd = p->phys_offset;
3828 }
3b46e624 3829
13eb76e0 3830 if (is_write) {
3a7d929e 3831 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3832 target_phys_addr_t addr1 = addr;
13eb76e0 3833 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3834 if (p)
6c2934db 3835 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3836 /* XXX: could force cpu_single_env to NULL to avoid
3837 potential bugs */
6c2934db 3838 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3839 /* 32 bit write access */
c27004ec 3840 val = ldl_p(buf);
6c2934db 3841 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3842 l = 4;
6c2934db 3843 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3844 /* 16 bit write access */
c27004ec 3845 val = lduw_p(buf);
6c2934db 3846 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3847 l = 2;
3848 } else {
1c213d19 3849 /* 8 bit write access */
c27004ec 3850 val = ldub_p(buf);
6c2934db 3851 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3852 l = 1;
3853 }
3854 } else {
8ca5692d 3855 ram_addr_t addr1;
b448f2f3 3856 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3857 /* RAM case */
5579c7f3 3858 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3859 memcpy(ptr, buf, l);
3a7d929e
FB
3860 if (!cpu_physical_memory_is_dirty(addr1)) {
3861 /* invalidate code */
3862 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3863 /* set dirty bit */
f7c11b53
YT
3864 cpu_physical_memory_set_dirty_flags(
3865 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3866 }
050a0ddf 3867 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3868 }
3869 } else {
5fafdf24 3870 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3871 !(pd & IO_MEM_ROMD)) {
c227f099 3872 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3873 /* I/O case */
3874 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3875 if (p)
6c2934db
AJ
3876 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3877 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3878 /* 32 bit read access */
6c2934db 3879 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3880 stl_p(buf, val);
13eb76e0 3881 l = 4;
6c2934db 3882 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3883 /* 16 bit read access */
6c2934db 3884 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3885 stw_p(buf, val);
13eb76e0
FB
3886 l = 2;
3887 } else {
1c213d19 3888 /* 8 bit read access */
6c2934db 3889 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3890 stb_p(buf, val);
13eb76e0
FB
3891 l = 1;
3892 }
3893 } else {
3894 /* RAM case */
050a0ddf
AP
3895 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3896 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3897 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3898 }
3899 }
3900 len -= l;
3901 buf += l;
3902 addr += l;
3903 }
3904}
8df1cd07 3905
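/*
 * A minimal sketch, not part of exec.c: device models normally reach the
 * slow path above through the cpu_physical_memory_read()/write() wrappers.
 * The 16-byte descriptor and its status bit are purely illustrative.
 */
#if 0 /* example only */
static void example_fetch_descriptor(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];

    cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
    /* ... act on the descriptor, then write back a completion flag ... */
    desc[0] |= 0x80;
    cpu_physical_memory_write(desc_addr, desc, 1);
}
#endif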
d0ecd2aa 3906/* used for ROM loading: can write in RAM and ROM */
c227f099 3907void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3908 const uint8_t *buf, int len)
3909{
3910 int l;
3911 uint8_t *ptr;
c227f099 3912 target_phys_addr_t page;
d0ecd2aa
FB
3913 unsigned long pd;
3914 PhysPageDesc *p;
3b46e624 3915
d0ecd2aa
FB
3916 while (len > 0) {
3917 page = addr & TARGET_PAGE_MASK;
3918 l = (page + TARGET_PAGE_SIZE) - addr;
3919 if (l > len)
3920 l = len;
3921 p = phys_page_find(page >> TARGET_PAGE_BITS);
3922 if (!p) {
3923 pd = IO_MEM_UNASSIGNED;
3924 } else {
3925 pd = p->phys_offset;
3926 }
3b46e624 3927
d0ecd2aa 3928 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3929 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3930 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3931 /* do nothing */
3932 } else {
3933 unsigned long addr1;
3934 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3935 /* ROM/RAM case */
5579c7f3 3936 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3937 memcpy(ptr, buf, l);
050a0ddf 3938 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3939 }
3940 len -= l;
3941 buf += l;
3942 addr += l;
3943 }
3944}
3945
6d16c2f8
AL
3946typedef struct {
3947 void *buffer;
c227f099
AL
3948 target_phys_addr_t addr;
3949 target_phys_addr_t len;
6d16c2f8
AL
3950} BounceBuffer;
3951
3952static BounceBuffer bounce;
3953
ba223c29
AL
3954typedef struct MapClient {
3955 void *opaque;
3956 void (*callback)(void *opaque);
72cf2d4f 3957 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3958} MapClient;
3959
72cf2d4f
BS
3960static QLIST_HEAD(map_client_list, MapClient) map_client_list
3961 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3962
3963void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3964{
7267c094 3965 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3966
3967 client->opaque = opaque;
3968 client->callback = callback;
72cf2d4f 3969 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3970 return client;
3971}
3972
3973void cpu_unregister_map_client(void *_client)
3974{
3975 MapClient *client = (MapClient *)_client;
3976
72cf2d4f 3977 QLIST_REMOVE(client, link);
7267c094 3978 g_free(client);
ba223c29
AL
3979}
3980
3981static void cpu_notify_map_clients(void)
3982{
3983 MapClient *client;
3984
72cf2d4f
BS
3985 while (!QLIST_EMPTY(&map_client_list)) {
3986 client = QLIST_FIRST(&map_client_list);
ba223c29 3987 client->callback(client->opaque);
34d5e948 3988 cpu_unregister_map_client(client);
ba223c29
AL
3989 }
3990}
3991
6d16c2f8
AL
3992/* Map a physical memory region into a host virtual address.
3993 * May map a subset of the requested range, given by and returned in *plen.
3994 * May return NULL if resources needed to perform the mapping are exhausted.
3995 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3996 * Use cpu_register_map_client() to know when retrying the map operation is
3997 * likely to succeed.
6d16c2f8 3998 */
c227f099
AL
3999void *cpu_physical_memory_map(target_phys_addr_t addr,
4000 target_phys_addr_t *plen,
6d16c2f8
AL
4001 int is_write)
4002{
c227f099 4003 target_phys_addr_t len = *plen;
38bee5dc 4004 target_phys_addr_t todo = 0;
6d16c2f8 4005 int l;
c227f099 4006 target_phys_addr_t page;
6d16c2f8
AL
4007 unsigned long pd;
4008 PhysPageDesc *p;
f15fbc4b 4009 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
4010 ram_addr_t rlen;
4011 void *ret;
6d16c2f8
AL
4012
4013 while (len > 0) {
4014 page = addr & TARGET_PAGE_MASK;
4015 l = (page + TARGET_PAGE_SIZE) - addr;
4016 if (l > len)
4017 l = len;
4018 p = phys_page_find(page >> TARGET_PAGE_BITS);
4019 if (!p) {
4020 pd = IO_MEM_UNASSIGNED;
4021 } else {
4022 pd = p->phys_offset;
4023 }
4024
4025 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4026 if (todo || bounce.buffer) {
6d16c2f8
AL
4027 break;
4028 }
4029 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4030 bounce.addr = addr;
4031 bounce.len = l;
4032 if (!is_write) {
54f7b4a3 4033 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4034 }
38bee5dc
SS
4035
4036 *plen = l;
4037 return bounce.buffer;
6d16c2f8 4038 }
8ab934f9
SS
4039 if (!todo) {
4040 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4041 }
6d16c2f8
AL
4042
4043 len -= l;
4044 addr += l;
38bee5dc 4045 todo += l;
6d16c2f8 4046 }
8ab934f9
SS
4047 rlen = todo;
4048 ret = qemu_ram_ptr_length(raddr, &rlen);
4049 *plen = rlen;
4050 return ret;
6d16c2f8
AL
4051}
4052
4053/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4054 * Will also mark the memory as dirty if is_write == 1. access_len gives
4055 * the amount of memory that was actually read or written by the caller.
4056 */
c227f099
AL
4057void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4058 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4059{
4060 if (buffer != bounce.buffer) {
4061 if (is_write) {
e890261f 4062 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4063 while (access_len) {
4064 unsigned l;
4065 l = TARGET_PAGE_SIZE;
4066 if (l > access_len)
4067 l = access_len;
4068 if (!cpu_physical_memory_is_dirty(addr1)) {
4069 /* invalidate code */
4070 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4071 /* set dirty bit */
f7c11b53
YT
4072 cpu_physical_memory_set_dirty_flags(
4073 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4074 }
4075 addr1 += l;
4076 access_len -= l;
4077 }
4078 }
868bb33f 4079 if (xen_enabled()) {
e41d7c69 4080 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4081 }
6d16c2f8
AL
4082 return;
4083 }
4084 if (is_write) {
4085 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4086 }
f8a83245 4087 qemu_vfree(bounce.buffer);
6d16c2f8 4088 bounce.buffer = NULL;
ba223c29 4089 cpu_notify_map_clients();
6d16c2f8 4090}
d0ecd2aa 4091
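/*
 * A minimal sketch, not part of exec.c: the intended calling pattern for
 * cpu_physical_memory_map()/unmap(), including the retry path through
 * cpu_register_map_client() when the single bounce buffer is busy.  The
 * DMA length handling is illustrative only.
 */
#if 0 /* example only */
static void example_dma_retry(void *opaque)
{
    /* mapping resources were released; re-issue the transfer from here */
}

static void example_dma_read(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 0 /* read */);

    if (!buf) {
        /* resources exhausted: be notified when a retry may succeed */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* ... consume plen bytes at buf (may be shorter than len) ... */
    cpu_physical_memory_unmap(buf, plen, 0 /* read */, plen);
}
#endif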
8df1cd07 4092/* warning: addr must be aligned */
1e78bcc1
AG
4093static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4094 enum device_endian endian)
8df1cd07
FB
4095{
4096 int io_index;
4097 uint8_t *ptr;
4098 uint32_t val;
4099 unsigned long pd;
4100 PhysPageDesc *p;
4101
4102 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4103 if (!p) {
4104 pd = IO_MEM_UNASSIGNED;
4105 } else {
4106 pd = p->phys_offset;
4107 }
3b46e624 4108
5fafdf24 4109 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4110 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4111 /* I/O case */
4112 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4113 if (p)
4114 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4115 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4116#if defined(TARGET_WORDS_BIGENDIAN)
4117 if (endian == DEVICE_LITTLE_ENDIAN) {
4118 val = bswap32(val);
4119 }
4120#else
4121 if (endian == DEVICE_BIG_ENDIAN) {
4122 val = bswap32(val);
4123 }
4124#endif
8df1cd07
FB
4125 } else {
4126 /* RAM case */
5579c7f3 4127 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4128 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4129 switch (endian) {
4130 case DEVICE_LITTLE_ENDIAN:
4131 val = ldl_le_p(ptr);
4132 break;
4133 case DEVICE_BIG_ENDIAN:
4134 val = ldl_be_p(ptr);
4135 break;
4136 default:
4137 val = ldl_p(ptr);
4138 break;
4139 }
8df1cd07
FB
4140 }
4141 return val;
4142}
4143
1e78bcc1
AG
4144uint32_t ldl_phys(target_phys_addr_t addr)
4145{
4146 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4147}
4148
4149uint32_t ldl_le_phys(target_phys_addr_t addr)
4150{
4151 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4152}
4153
4154uint32_t ldl_be_phys(target_phys_addr_t addr)
4155{
4156 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4157}
4158
84b7b8e7 4159/* warning: addr must be aligned */
1e78bcc1
AG
4160static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4161 enum device_endian endian)
84b7b8e7
FB
4162{
4163 int io_index;
4164 uint8_t *ptr;
4165 uint64_t val;
4166 unsigned long pd;
4167 PhysPageDesc *p;
4168
4169 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4170 if (!p) {
4171 pd = IO_MEM_UNASSIGNED;
4172 } else {
4173 pd = p->phys_offset;
4174 }
3b46e624 4175
2a4188a3
FB
4176 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4177 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4178 /* I/O case */
4179 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4180 if (p)
4181 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4182
4183 /* XXX This is broken when device endian != cpu endian.
4184 Fix and add "endian" variable check */
84b7b8e7
FB
4185#ifdef TARGET_WORDS_BIGENDIAN
4186 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4187 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4188#else
4189 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4190 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4191#endif
4192 } else {
4193 /* RAM case */
5579c7f3 4194 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4195 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4196 switch (endian) {
4197 case DEVICE_LITTLE_ENDIAN:
4198 val = ldq_le_p(ptr);
4199 break;
4200 case DEVICE_BIG_ENDIAN:
4201 val = ldq_be_p(ptr);
4202 break;
4203 default:
4204 val = ldq_p(ptr);
4205 break;
4206 }
84b7b8e7
FB
4207 }
4208 return val;
4209}
4210
1e78bcc1
AG
4211uint64_t ldq_phys(target_phys_addr_t addr)
4212{
4213 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4214}
4215
4216uint64_t ldq_le_phys(target_phys_addr_t addr)
4217{
4218 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4219}
4220
4221uint64_t ldq_be_phys(target_phys_addr_t addr)
4222{
4223 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4224}
4225
aab33094 4226/* XXX: optimize */
c227f099 4227uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4228{
4229 uint8_t val;
4230 cpu_physical_memory_read(addr, &val, 1);
4231 return val;
4232}
4233
733f0b02 4234/* warning: addr must be aligned */
1e78bcc1
AG
4235static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4236 enum device_endian endian)
aab33094 4237{
733f0b02
MT
4238 int io_index;
4239 uint8_t *ptr;
4240 uint64_t val;
4241 unsigned long pd;
4242 PhysPageDesc *p;
4243
4244 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4245 if (!p) {
4246 pd = IO_MEM_UNASSIGNED;
4247 } else {
4248 pd = p->phys_offset;
4249 }
4250
4251 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4252 !(pd & IO_MEM_ROMD)) {
4253 /* I/O case */
4254 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4255 if (p)
4256 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4257 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4258#if defined(TARGET_WORDS_BIGENDIAN)
4259 if (endian == DEVICE_LITTLE_ENDIAN) {
4260 val = bswap16(val);
4261 }
4262#else
4263 if (endian == DEVICE_BIG_ENDIAN) {
4264 val = bswap16(val);
4265 }
4266#endif
733f0b02
MT
4267 } else {
4268 /* RAM case */
4269 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4270 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4271 switch (endian) {
4272 case DEVICE_LITTLE_ENDIAN:
4273 val = lduw_le_p(ptr);
4274 break;
4275 case DEVICE_BIG_ENDIAN:
4276 val = lduw_be_p(ptr);
4277 break;
4278 default:
4279 val = lduw_p(ptr);
4280 break;
4281 }
733f0b02
MT
4282 }
4283 return val;
aab33094
FB
4284}
4285
1e78bcc1
AG
4286uint32_t lduw_phys(target_phys_addr_t addr)
4287{
4288 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4289}
4290
4291uint32_t lduw_le_phys(target_phys_addr_t addr)
4292{
4293 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4294}
4295
4296uint32_t lduw_be_phys(target_phys_addr_t addr)
4297{
4298 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4299}
4300
8df1cd07
FB
4301/* warning: addr must be aligned. The ram page is not marked as dirty
4302 and the code inside is not invalidated. It is useful if the dirty
4303 bits are used to track modified PTEs */
c227f099 4304void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4305{
4306 int io_index;
4307 uint8_t *ptr;
4308 unsigned long pd;
4309 PhysPageDesc *p;
4310
4311 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4312 if (!p) {
4313 pd = IO_MEM_UNASSIGNED;
4314 } else {
4315 pd = p->phys_offset;
4316 }
3b46e624 4317
3a7d929e 4318 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4319 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4320 if (p)
4321 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4322 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4323 } else {
74576198 4324 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4325 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4326 stl_p(ptr, val);
74576198
AL
4327
4328 if (unlikely(in_migration)) {
4329 if (!cpu_physical_memory_is_dirty(addr1)) {
4330 /* invalidate code */
4331 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4332 /* set dirty bit */
f7c11b53
YT
4333 cpu_physical_memory_set_dirty_flags(
4334 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4335 }
4336 }
8df1cd07
FB
4337 }
4338}
4339
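/*
 * A minimal sketch, not part of exec.c: a target MMU helper setting the
 * accessed/dirty bits of a guest PTE.  Using the _notdirty variant keeps
 * this bookkeeping write from marking the RAM page dirty or invalidating
 * translated code.  The PTE layout and bit values are invented.
 */
#if 0 /* example only */
static void example_update_pte_flags(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20 | 0x40;            /* hypothetical accessed + dirty bits */
    stl_phys_notdirty(pte_addr, pte);
}
#endif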
c227f099 4340void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4341{
4342 int io_index;
4343 uint8_t *ptr;
4344 unsigned long pd;
4345 PhysPageDesc *p;
4346
4347 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4348 if (!p) {
4349 pd = IO_MEM_UNASSIGNED;
4350 } else {
4351 pd = p->phys_offset;
4352 }
3b46e624 4353
bc98a7ef
JM
4354 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4355 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4356 if (p)
4357 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4358#ifdef TARGET_WORDS_BIGENDIAN
4359 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4360 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4361#else
4362 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4363 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4364#endif
4365 } else {
5579c7f3 4366 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4367 (addr & ~TARGET_PAGE_MASK);
4368 stq_p(ptr, val);
4369 }
4370}
4371
8df1cd07 4372/* warning: addr must be aligned */
1e78bcc1
AG
4373static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4374 enum device_endian endian)
8df1cd07
FB
4375{
4376 int io_index;
4377 uint8_t *ptr;
4378 unsigned long pd;
4379 PhysPageDesc *p;
4380
4381 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4382 if (!p) {
4383 pd = IO_MEM_UNASSIGNED;
4384 } else {
4385 pd = p->phys_offset;
4386 }
3b46e624 4387
3a7d929e 4388 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4389 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4390 if (p)
4391 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4392#if defined(TARGET_WORDS_BIGENDIAN)
4393 if (endian == DEVICE_LITTLE_ENDIAN) {
4394 val = bswap32(val);
4395 }
4396#else
4397 if (endian == DEVICE_BIG_ENDIAN) {
4398 val = bswap32(val);
4399 }
4400#endif
8df1cd07
FB
4401 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4402 } else {
4403 unsigned long addr1;
4404 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4405 /* RAM case */
5579c7f3 4406 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4407 switch (endian) {
4408 case DEVICE_LITTLE_ENDIAN:
4409 stl_le_p(ptr, val);
4410 break;
4411 case DEVICE_BIG_ENDIAN:
4412 stl_be_p(ptr, val);
4413 break;
4414 default:
4415 stl_p(ptr, val);
4416 break;
4417 }
3a7d929e
FB
4418 if (!cpu_physical_memory_is_dirty(addr1)) {
4419 /* invalidate code */
4420 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4421 /* set dirty bit */
f7c11b53
YT
4422 cpu_physical_memory_set_dirty_flags(addr1,
4423 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4424 }
8df1cd07
FB
4425 }
4426}
4427
1e78bcc1
AG
4428void stl_phys(target_phys_addr_t addr, uint32_t val)
4429{
4430 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4431}
4432
4433void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4434{
4435 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4436}
4437
4438void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4439{
4440 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4441}
4442
aab33094 4443/* XXX: optimize */
c227f099 4444void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4445{
4446 uint8_t v = val;
4447 cpu_physical_memory_write(addr, &v, 1);
4448}
4449
733f0b02 4450/* warning: addr must be aligned */
1e78bcc1
AG
4451static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4452 enum device_endian endian)
aab33094 4453{
733f0b02
MT
4454 int io_index;
4455 uint8_t *ptr;
4456 unsigned long pd;
4457 PhysPageDesc *p;
4458
4459 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4460 if (!p) {
4461 pd = IO_MEM_UNASSIGNED;
4462 } else {
4463 pd = p->phys_offset;
4464 }
4465
4466 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4467 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4468 if (p)
4469 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4470#if defined(TARGET_WORDS_BIGENDIAN)
4471 if (endian == DEVICE_LITTLE_ENDIAN) {
4472 val = bswap16(val);
4473 }
4474#else
4475 if (endian == DEVICE_BIG_ENDIAN) {
4476 val = bswap16(val);
4477 }
4478#endif
733f0b02
MT
4479 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4480 } else {
4481 unsigned long addr1;
4482 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4483 /* RAM case */
4484 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4485 switch (endian) {
4486 case DEVICE_LITTLE_ENDIAN:
4487 stw_le_p(ptr, val);
4488 break;
4489 case DEVICE_BIG_ENDIAN:
4490 stw_be_p(ptr, val);
4491 break;
4492 default:
4493 stw_p(ptr, val);
4494 break;
4495 }
733f0b02
MT
4496 if (!cpu_physical_memory_is_dirty(addr1)) {
4497 /* invalidate code */
4498 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4499 /* set dirty bit */
4500 cpu_physical_memory_set_dirty_flags(addr1,
4501 (0xff & ~CODE_DIRTY_FLAG));
4502 }
4503 }
aab33094
FB
4504}
4505
1e78bcc1
AG
4506void stw_phys(target_phys_addr_t addr, uint32_t val)
4507{
4508 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4509}
4510
4511void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4512{
4513 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4514}
4515
4516void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4517{
4518 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4519}
4520
aab33094 4521/* XXX: optimize */
c227f099 4522void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4523{
4524 val = tswap64(val);
71d2b725 4525 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4526}
4527
1e78bcc1
AG
4528void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4529{
4530 val = cpu_to_le64(val);
4531 cpu_physical_memory_write(addr, &val, 8);
4532}
4533
4534void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4535{
4536 val = cpu_to_be64(val);
4537 cpu_physical_memory_write(addr, &val, 8);
4538}
4539
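/*
 * A minimal sketch, not part of exec.c: the fixed-endian helpers above let
 * device code access guest structures with a defined byte order regardless
 * of the target CPU.  The little-endian ring layout is purely illustrative.
 */
#if 0 /* example only */
static void example_ring_push(target_phys_addr_t ring_base, uint32_t entry)
{
    uint32_t tail = ldl_le_phys(ring_base);           /* current tail index */

    stl_le_phys(ring_base + 8 + tail * 4, entry);     /* store the entry */
    stl_le_phys(ring_base, (tail + 1) & 0xff);        /* publish new tail */
}
#endif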
5e2972fd 4540/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4541int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4542 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4543{
4544 int l;
c227f099 4545 target_phys_addr_t phys_addr;
9b3c35e0 4546 target_ulong page;
13eb76e0
FB
4547
4548 while (len > 0) {
4549 page = addr & TARGET_PAGE_MASK;
4550 phys_addr = cpu_get_phys_page_debug(env, page);
4551 /* if no physical page mapped, return an error */
4552 if (phys_addr == -1)
4553 return -1;
4554 l = (page + TARGET_PAGE_SIZE) - addr;
4555 if (l > len)
4556 l = len;
5e2972fd 4557 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4558 if (is_write)
4559 cpu_physical_memory_write_rom(phys_addr, buf, l);
4560 else
5e2972fd 4561 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4562 len -= l;
4563 buf += l;
4564 addr += l;
4565 }
4566 return 0;
4567}
a68fe89c 4568#endif
13eb76e0 4569
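/*
 * A minimal sketch, not part of exec.c: cpu_memory_rw_debug() is the entry
 * point used by debugger-style code (e.g. the gdbstub) to access guest
 * virtual memory.  The fixed-size buffer handling is illustrative only.
 */
#if 0 /* example only */
static int example_read_guest_buf(CPUState *env, target_ulong vaddr,
                                  uint8_t *out, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, out, len, 0) < 0) {
        return -1;                 /* no physical page mapped at vaddr */
    }
    return 0;
}
#endif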
2e70f6ef
PB
4570/* in deterministic execution mode, instructions doing device I/Os
4571 must be at the end of the TB */
4572void cpu_io_recompile(CPUState *env, void *retaddr)
4573{
4574 TranslationBlock *tb;
4575 uint32_t n, cflags;
4576 target_ulong pc, cs_base;
4577 uint64_t flags;
4578
4579 tb = tb_find_pc((unsigned long)retaddr);
4580 if (!tb) {
4581 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4582 retaddr);
4583 }
4584 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4585 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4586 /* Calculate how many instructions had been executed before the fault
bf20dc07 4587 occurred. */
2e70f6ef
PB
4588 n = n - env->icount_decr.u16.low;
4589 /* Generate a new TB ending on the I/O insn. */
4590 n++;
4591 /* On MIPS and SH, delay slot instructions can only be restarted if
4592 they were already the first instruction in the TB. If this is not
bf20dc07 4593 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4594 branch. */
4595#if defined(TARGET_MIPS)
4596 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4597 env->active_tc.PC -= 4;
4598 env->icount_decr.u16.low++;
4599 env->hflags &= ~MIPS_HFLAG_BMASK;
4600 }
4601#elif defined(TARGET_SH4)
4602 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4603 && n > 1) {
4604 env->pc -= 2;
4605 env->icount_decr.u16.low++;
4606 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4607 }
4608#endif
4609 /* This should never happen. */
4610 if (n > CF_COUNT_MASK)
4611 cpu_abort(env, "TB too big during recompile");
4612
4613 cflags = n | CF_LAST_IO;
4614 pc = tb->pc;
4615 cs_base = tb->cs_base;
4616 flags = tb->flags;
4617 tb_phys_invalidate(tb, -1);
4618 /* FIXME: In theory this could raise an exception. In practice
4619 we have already translated the block once so it's probably ok. */
4620 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4621 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4622 the first in the TB) then we end up generating a whole new TB and
4623 repeating the fault, which is horribly inefficient.
4624 Better would be to execute just this insn uncached, or generate a
4625 second new TB. */
4626 cpu_resume_from_signal(env, NULL);
4627}
4628
b3755a91
PB
4629#if !defined(CONFIG_USER_ONLY)
4630
055403b2 4631void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4632{
4633 int i, target_code_size, max_target_code_size;
4634 int direct_jmp_count, direct_jmp2_count, cross_page;
4635 TranslationBlock *tb;
3b46e624 4636
e3db7226
FB
4637 target_code_size = 0;
4638 max_target_code_size = 0;
4639 cross_page = 0;
4640 direct_jmp_count = 0;
4641 direct_jmp2_count = 0;
4642 for(i = 0; i < nb_tbs; i++) {
4643 tb = &tbs[i];
4644 target_code_size += tb->size;
4645 if (tb->size > max_target_code_size)
4646 max_target_code_size = tb->size;
4647 if (tb->page_addr[1] != -1)
4648 cross_page++;
4649 if (tb->tb_next_offset[0] != 0xffff) {
4650 direct_jmp_count++;
4651 if (tb->tb_next_offset[1] != 0xffff) {
4652 direct_jmp2_count++;
4653 }
4654 }
4655 }
4656 /* XXX: avoid using doubles ? */
57fec1fe 4657 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4658 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4659 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4660 cpu_fprintf(f, "TB count %d/%d\n",
4661 nb_tbs, code_gen_max_blocks);
5fafdf24 4662 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4663 nb_tbs ? target_code_size / nb_tbs : 0,
4664 max_target_code_size);
055403b2 4665 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4666 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4667 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4668 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4669 cross_page,
e3db7226
FB
4670 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4671 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4672 direct_jmp_count,
e3db7226
FB
4673 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4674 direct_jmp2_count,
4675 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4676 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4677 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4678 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4679 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4680 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4681}
4682
61382a50 4683#define MMUSUFFIX _cmmu
3917149d 4684#undef GETPC
61382a50
FB
4685#define GETPC() NULL
4686#define env cpu_single_env
b769d8fe 4687#define SOFTMMU_CODE_ACCESS
61382a50
FB
4688
4689#define SHIFT 0
4690#include "softmmu_template.h"
4691
4692#define SHIFT 1
4693#include "softmmu_template.h"
4694
4695#define SHIFT 2
4696#include "softmmu_template.h"
4697
4698#define SHIFT 3
4699#include "softmmu_template.h"
4700
4701#undef env
4702
4703#endif