git.proxmox.com Git - mirror_qemu.git/blame - exec.c
exec: make phys_page_find() return a temporary
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
67d95c15
AK
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
67
68/* make various TB consistency checks */
5fafdf24
TS
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4
PB
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853
FB
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468
BS
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
 92 section close to the code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
f8e2af11
SW
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
d03d860b
BS
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
e2eef170 121#endif
9fa3e853 122
6a00d601
FB
123CPUState *first_cpu;
124/* current CPU in the current thread. It is only valid inside
125 cpu_exec() */
b3c4bbe5 126DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 127/* 0 = Do not count executed instructions.
bf20dc07 128 1 = Precise instruction counting.
2e70f6ef
PB
129 2 = Adaptive rate instruction counting. */
130int use_icount = 0;
6a00d601 131
54936004 132typedef struct PageDesc {
92e873b9 133 /* list of TBs intersecting this ram page */
fd6ce8f6 134 TranslationBlock *first_tb;
9fa3e853
FB
135 /* in order to optimize self modifying code, we count the number
136 of lookups we do to a given page to use a bitmap */
137 unsigned int code_write_count;
138 uint8_t *code_bitmap;
139#if defined(CONFIG_USER_ONLY)
140 unsigned long flags;
141#endif
54936004
FB
142} PageDesc;
143
41c1b1c9 144/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
145 while in user mode we want it to be based on virtual addresses. */
146#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
147#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
149#else
5cd2c5b6 150# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 151#endif
bedb69ea 152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 154#endif
54936004 155
5cd2c5b6
RH
156/* Size of the L2 (and L3, etc) page tables. */
157#define L2_BITS 10
54936004
FB
158#define L2_SIZE (1 << L2_BITS)
159
5cd2c5b6
RH
160/* The bits remaining after N lower levels of page tables. */
161#define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163#define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165
166/* Size of the L1 page table. Avoid silly small sizes. */
167#if P_L1_BITS_REM < 4
168#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
169#else
170#define P_L1_BITS P_L1_BITS_REM
171#endif
172
173#if V_L1_BITS_REM < 4
174#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
175#else
176#define V_L1_BITS V_L1_BITS_REM
177#endif
178
179#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
181
182#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
184
83fb7adf 185unsigned long qemu_real_host_page_size;
83fb7adf
FB
186unsigned long qemu_host_page_size;
187unsigned long qemu_host_page_mask;
54936004 188
5cd2c5b6
RH
189/* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191static void *l1_map[V_L1_SIZE];
54936004 192
e2eef170 193#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
194typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198} PhysPageDesc;
199
5cd2c5b6
RH
200/* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202static void *l1_phys_map[P_L1_SIZE];
6d9a1304 203
e2eef170 204static void io_mem_init(void);
62152b8a 205static void memory_map_init(void);
e2eef170 206
33417e70 207/* io memory support */
33417e70
FB
208CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
209CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 210void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 211static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
212static int io_mem_watch;
213#endif
33417e70 214
34865134 215/* log support */
1e8b27ca
JR
216#ifdef WIN32
217static const char *logfilename = "qemu.log";
218#else
d9b630fd 219static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 220#endif
34865134
FB
221FILE *logfile;
222int loglevel;
e735b91c 223static int log_append = 0;
34865134 224
e3db7226 225/* statistics */
b3755a91 226#if !defined(CONFIG_USER_ONLY)
e3db7226 227static int tlb_flush_count;
b3755a91 228#endif
e3db7226
FB
229static int tb_flush_count;
230static int tb_phys_invalidate_count;
231
7cb69cae
FB
232#ifdef _WIN32
233static void map_exec(void *addr, long size)
234{
235 DWORD old_protect;
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
238
239}
240#else
241static void map_exec(void *addr, long size)
242{
4369415f 243 unsigned long start, end, page_size;
7cb69cae 244
4369415f 245 page_size = getpagesize();
7cb69cae 246 start = (unsigned long)addr;
4369415f 247 start &= ~(page_size - 1);
7cb69cae
FB
248
249 end = (unsigned long)addr + size;
4369415f
FB
250 end += page_size - 1;
251 end &= ~(page_size - 1);
7cb69cae
FB
252
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
255}
256#endif
257
b346ff46 258static void page_init(void)
54936004 259{
83fb7adf 260 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 261 TARGET_PAGE_SIZE */
c2b48b69
AL
262#ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269#else
270 qemu_real_host_page_size = getpagesize();
271#endif
83fb7adf
FB
272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 277
2e9a5713 278#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 279 {
f01576f1
JL
280#ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
282 int i, cnt;
283
284 freep = kinfo_getvmmap(getpid(), &cnt);
285 if (freep) {
286 mmap_lock();
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
289
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
294
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
fd436907 297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
298 } else {
299#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
300 endaddr = ~0ul;
fd436907 301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
302#endif
303 }
304 }
305 }
306 free(freep);
307 mmap_unlock();
308 }
309#else
50a9569b 310 FILE *f;
50a9569b 311
0776590d 312 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 313
fd436907 314 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 315 if (f) {
5cd2c5b6
RH
316 mmap_lock();
317
50a9569b 318 do {
5cd2c5b6
RH
319 unsigned long startaddr, endaddr;
320 int n;
321
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
323
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
329 } else {
330 endaddr = ~0ul;
331 }
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
333 }
334 } while (!feof(f));
5cd2c5b6 335
50a9569b 336 fclose(f);
5cd2c5b6 337 mmap_unlock();
50a9569b 338 }
f01576f1 339#endif
50a9569b
AZ
340 }
341#endif
54936004
FB
342}
343
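/* Look up the PageDesc for guest page 'index' by walking the multi-level
   l1_map.  If 'alloc' is non-zero, missing intermediate tables and the final
   PageDesc array are allocated on the way down (with mmap() in user mode,
   since g_malloc() could recurse into a locked mutex); if 'alloc' is zero
   and any level is missing, NULL is returned. */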
41c1b1c9 344static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 345{
41c1b1c9
PB
346 PageDesc *pd;
347 void **lp;
348 int i;
349
5cd2c5b6 350#if defined(CONFIG_USER_ONLY)
7267c094 351 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
352# define ALLOC(P, SIZE) \
353 do { \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
356 } while (0)
357#else
358# define ALLOC(P, SIZE) \
7267c094 359 do { P = g_malloc0(SIZE); } while (0)
17e2377a 360#endif
434929bf 361
5cd2c5b6
RH
362 /* Level 1. Always allocated. */
363 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
364
365 /* Level 2..N-1. */
366 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
367 void **p = *lp;
368
369 if (p == NULL) {
370 if (!alloc) {
371 return NULL;
372 }
373 ALLOC(p, sizeof(void *) * L2_SIZE);
374 *lp = p;
17e2377a 375 }
5cd2c5b6
RH
376
377 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
378 }
379
380 pd = *lp;
381 if (pd == NULL) {
382 if (!alloc) {
383 return NULL;
384 }
385 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
386 *lp = pd;
54936004 387 }
5cd2c5b6
RH
388
389#undef ALLOC
5cd2c5b6
RH
390
391 return pd + (index & (L2_SIZE - 1));
54936004
FB
392}
393
41c1b1c9 394static inline PageDesc *page_find(tb_page_addr_t index)
54936004 395{
5cd2c5b6 396 return page_find_alloc(index, 0);
fd6ce8f6
FB
397}
398
6d9a1304 399#if !defined(CONFIG_USER_ONLY)
c227f099 400static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 401{
e3f4e2a4 402 PhysPageDesc *pd;
5cd2c5b6
RH
403 void **lp;
404 int i;
92e873b9 405
5cd2c5b6
RH
406 /* Level 1. Always allocated. */
407 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 408
5cd2c5b6
RH
409 /* Level 2..N-1. */
410 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
411 void **p = *lp;
412 if (p == NULL) {
413 if (!alloc) {
414 return NULL;
415 }
7267c094 416 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
417 }
418 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 419 }
5cd2c5b6 420
e3f4e2a4 421 pd = *lp;
5cd2c5b6 422 if (pd == NULL) {
e3f4e2a4 423 int i;
5ab97b7f 424 int first_index = index & ~(L2_SIZE - 1);
5cd2c5b6
RH
425
426 if (!alloc) {
108c49b8 427 return NULL;
5cd2c5b6
RH
428 }
429
7267c094 430 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 431
67c4d23c 432 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6 433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
5ab97b7f 434 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
67c4d23c 435 }
92e873b9 436 }
5cd2c5b6
RH
437
438 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
439}
440
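/* Return the PhysPageDesc for guest physical page 'index' by value (a
   temporary copy, never a pointer into the table).  Pages with no mapping
   yield a descriptor with phys_offset == IO_MEM_UNASSIGNED and a
   region_offset derived from 'index'. */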
f1f6e3b8 441static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
92e873b9 442{
f1f6e3b8
AK
443 PhysPageDesc *p = phys_page_find_alloc(index, 0);
444
445 if (p) {
446 return *p;
447 } else {
448 return (PhysPageDesc) {
449 .phys_offset = IO_MEM_UNASSIGNED,
450 .region_offset = index << TARGET_PAGE_BITS,
451 };
452 }
92e873b9
FB
453}
454
c227f099
AL
455static void tlb_protect_code(ram_addr_t ram_addr);
456static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 457 target_ulong vaddr);
c8a706fe
PB
458#define mmap_lock() do { } while(0)
459#define mmap_unlock() do { } while(0)
9fa3e853 460#endif
fd6ce8f6 461
4369415f
FB
462#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
463
464#if defined(CONFIG_USER_ONLY)
ccbb4d44 465/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
466 user mode. It will change when a dedicated libc is used */
467#define USE_STATIC_CODE_GEN_BUFFER
468#endif
469
470#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
471static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
472 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
473#endif
474
8fcd3692 475static void code_gen_alloc(unsigned long tb_size)
26a5f13b 476{
4369415f
FB
477#ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
481#else
26a5f13b
FB
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
4369415f 484#if defined(CONFIG_USER_ONLY)
4369415f
FB
485 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
486#else
ccbb4d44 487 /* XXX: needs adjustments */
94a6b54f 488 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 489#endif
26a5f13b
FB
490 }
491 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
492 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
493 /* The code gen buffer location may have constraints depending on
494 the host cpu and OS */
495#if defined(__linux__)
496 {
497 int flags;
141ac468
BS
498 void *start = NULL;
499
26a5f13b
FB
500 flags = MAP_PRIVATE | MAP_ANONYMOUS;
501#if defined(__x86_64__)
502 flags |= MAP_32BIT;
503 /* Cannot map more than that */
504 if (code_gen_buffer_size > (800 * 1024 * 1024))
505 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
506#elif defined(__sparc_v9__)
507 // Map the buffer below 2G, so we can use direct calls and branches
508 flags |= MAP_FIXED;
509 start = (void *) 0x60000000UL;
510 if (code_gen_buffer_size > (512 * 1024 * 1024))
511 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 512#elif defined(__arm__)
222f23f5 513 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
514 if (code_gen_buffer_size > 16 * 1024 * 1024)
515 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
516#elif defined(__s390x__)
517 /* Map the buffer so that we can use direct calls and branches. */
518 /* We have a +- 4GB range on the branches; leave some slop. */
519 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
520 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
521 }
522 start = (void *)0x90000000UL;
26a5f13b 523#endif
141ac468
BS
524 code_gen_buffer = mmap(start, code_gen_buffer_size,
525 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
526 flags, -1, 0);
527 if (code_gen_buffer == MAP_FAILED) {
528 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
529 exit(1);
530 }
531 }
cbb608a5 532#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
533 || defined(__DragonFly__) || defined(__OpenBSD__) \
534 || defined(__NetBSD__)
06e67a82
AL
535 {
536 int flags;
537 void *addr = NULL;
538 flags = MAP_PRIVATE | MAP_ANONYMOUS;
539#if defined(__x86_64__)
540 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
541 * 0x40000000 is free */
542 flags |= MAP_FIXED;
543 addr = (void *)0x40000000;
544 /* Cannot map more than that */
545 if (code_gen_buffer_size > (800 * 1024 * 1024))
546 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
547#elif defined(__sparc_v9__)
548 // Map the buffer below 2G, so we can use direct calls and branches
549 flags |= MAP_FIXED;
550 addr = (void *) 0x60000000UL;
551 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
552 code_gen_buffer_size = (512 * 1024 * 1024);
553 }
06e67a82
AL
554#endif
555 code_gen_buffer = mmap(addr, code_gen_buffer_size,
556 PROT_WRITE | PROT_READ | PROT_EXEC,
557 flags, -1, 0);
558 if (code_gen_buffer == MAP_FAILED) {
559 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
560 exit(1);
561 }
562 }
26a5f13b 563#else
7267c094 564 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
565 map_exec(code_gen_buffer, code_gen_buffer_size);
566#endif
4369415f 567#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 568 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
569 code_gen_buffer_max_size = code_gen_buffer_size -
570 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 571 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 572 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
573}
574
575/* Must be called before using the QEMU cpus. 'tb_size' is the size
576 (in bytes) allocated to the translation buffer. Zero means default
577 size. */
d5ab9713 578void tcg_exec_init(unsigned long tb_size)
26a5f13b 579{
26a5f13b
FB
580 cpu_gen_init();
581 code_gen_alloc(tb_size);
582 code_gen_ptr = code_gen_buffer;
4369415f 583 page_init();
9002ec79
RH
584#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
585 /* There's no guest base to take into account, so go ahead and
586 initialize the prologue now. */
587 tcg_prologue_init(&tcg_ctx);
588#endif
26a5f13b
FB
589}
590
d5ab9713
JK
591bool tcg_enabled(void)
592{
593 return code_gen_buffer != NULL;
594}
595
596void cpu_exec_init_all(void)
597{
598#if !defined(CONFIG_USER_ONLY)
599 memory_map_init();
600 io_mem_init();
601#endif
602}
603
9656f324
PB
604#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
605
e59fb374 606static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
607{
608 CPUState *env = opaque;
9656f324 609
3098dba0
AJ
610 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
611 version_id is increased. */
612 env->interrupt_request &= ~0x01;
9656f324
PB
613 tlb_flush(env, 1);
614
615 return 0;
616}
e7f4eff7
JQ
617
618static const VMStateDescription vmstate_cpu_common = {
619 .name = "cpu_common",
620 .version_id = 1,
621 .minimum_version_id = 1,
622 .minimum_version_id_old = 1,
e7f4eff7
JQ
623 .post_load = cpu_common_post_load,
624 .fields = (VMStateField []) {
625 VMSTATE_UINT32(halted, CPUState),
626 VMSTATE_UINT32(interrupt_request, CPUState),
627 VMSTATE_END_OF_LIST()
628 }
629};
9656f324
PB
630#endif
631
950f1472
GC
632CPUState *qemu_get_cpu(int cpu)
633{
634 CPUState *env = first_cpu;
635
636 while (env) {
637 if (env->cpu_index == cpu)
638 break;
639 env = env->next_cpu;
640 }
641
642 return env;
643}
644
6a00d601 645void cpu_exec_init(CPUState *env)
fd6ce8f6 646{
6a00d601
FB
647 CPUState **penv;
648 int cpu_index;
649
c2764719
PB
650#if defined(CONFIG_USER_ONLY)
651 cpu_list_lock();
652#endif
6a00d601
FB
653 env->next_cpu = NULL;
654 penv = &first_cpu;
655 cpu_index = 0;
656 while (*penv != NULL) {
1e9fa730 657 penv = &(*penv)->next_cpu;
6a00d601
FB
658 cpu_index++;
659 }
660 env->cpu_index = cpu_index;
268a362c 661 env->numa_node = 0;
72cf2d4f
BS
662 QTAILQ_INIT(&env->breakpoints);
663 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
664#ifndef CONFIG_USER_ONLY
665 env->thread_id = qemu_get_thread_id();
666#endif
6a00d601 667 *penv = env;
c2764719
PB
668#if defined(CONFIG_USER_ONLY)
669 cpu_list_unlock();
670#endif
b3c7724c 671#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
672 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
673 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
674 cpu_save, cpu_load, env);
675#endif
fd6ce8f6
FB
676}
677
d1a1eb74
TG
678/* Allocate a new translation block. Flush the translation buffer if
679 too many translation blocks or too much generated code. */
680static TranslationBlock *tb_alloc(target_ulong pc)
681{
682 TranslationBlock *tb;
683
684 if (nb_tbs >= code_gen_max_blocks ||
685 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
686 return NULL;
687 tb = &tbs[nb_tbs++];
688 tb->pc = pc;
689 tb->cflags = 0;
690 return tb;
691}
692
693void tb_free(TranslationBlock *tb)
694{
695 /* In practice this is mostly used for single-use temporary TBs.
696 Ignore the hard cases and just back up if this TB happens to
697 be the last one generated. */
698 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
699 code_gen_ptr = tb->tc_ptr;
700 nb_tbs--;
701 }
702}
703
9fa3e853
FB
704static inline void invalidate_page_bitmap(PageDesc *p)
705{
706 if (p->code_bitmap) {
7267c094 707 g_free(p->code_bitmap);
9fa3e853
FB
708 p->code_bitmap = NULL;
709 }
710 p->code_write_count = 0;
711}
712
5cd2c5b6
RH
713/* Set to NULL all the 'first_tb' fields in all PageDescs. */
714
715static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 716{
5cd2c5b6 717 int i;
fd6ce8f6 718
5cd2c5b6
RH
719 if (*lp == NULL) {
720 return;
721 }
722 if (level == 0) {
723 PageDesc *pd = *lp;
7296abac 724 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
725 pd[i].first_tb = NULL;
726 invalidate_page_bitmap(pd + i);
fd6ce8f6 727 }
5cd2c5b6
RH
728 } else {
729 void **pp = *lp;
7296abac 730 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
731 page_flush_tb_1 (level - 1, pp + i);
732 }
733 }
734}
735
736static void page_flush_tb(void)
737{
738 int i;
739 for (i = 0; i < V_L1_SIZE; i++) {
740 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
741 }
742}
743
744/* flush all the translation blocks */
d4e8164f 745/* XXX: tb_flush is currently not thread safe */
6a00d601 746void tb_flush(CPUState *env1)
fd6ce8f6 747{
6a00d601 748 CPUState *env;
0124311e 749#if defined(DEBUG_FLUSH)
ab3d1727
BS
750 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
751 (unsigned long)(code_gen_ptr - code_gen_buffer),
752 nb_tbs, nb_tbs > 0 ?
753 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 754#endif
26a5f13b 755 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
756 cpu_abort(env1, "Internal error: code buffer overflow\n");
757
fd6ce8f6 758 nb_tbs = 0;
3b46e624 759
6a00d601
FB
760 for(env = first_cpu; env != NULL; env = env->next_cpu) {
761 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
762 }
9fa3e853 763
8a8a608f 764 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 765 page_flush_tb();
9fa3e853 766
fd6ce8f6 767 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
768 /* XXX: flush processor icache at this point if cache flush is
769 expensive */
e3db7226 770 tb_flush_count++;
fd6ce8f6
FB
771}
772
773#ifdef DEBUG_TB_CHECK
774
bc98a7ef 775static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
776{
777 TranslationBlock *tb;
778 int i;
779 address &= TARGET_PAGE_MASK;
99773bd4
PB
780 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
781 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
782 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
783 address >= tb->pc + tb->size)) {
0bf9e31a
BS
784 printf("ERROR invalidate: address=" TARGET_FMT_lx
785 " PC=%08lx size=%04x\n",
99773bd4 786 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
787 }
788 }
789 }
790}
791
792/* verify that all the pages have correct rights for code */
793static void tb_page_check(void)
794{
795 TranslationBlock *tb;
796 int i, flags1, flags2;
3b46e624 797
99773bd4
PB
798 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
799 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
800 flags1 = page_get_flags(tb->pc);
801 flags2 = page_get_flags(tb->pc + tb->size - 1);
802 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
803 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 804 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
805 }
806 }
807 }
808}
809
810#endif
811
812/* invalidate one TB */
813static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
814 int next_offset)
815{
816 TranslationBlock *tb1;
817 for(;;) {
818 tb1 = *ptb;
819 if (tb1 == tb) {
820 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
821 break;
822 }
823 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
824 }
825}
826
9fa3e853
FB
827static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
828{
829 TranslationBlock *tb1;
830 unsigned int n1;
831
832 for(;;) {
833 tb1 = *ptb;
834 n1 = (long)tb1 & 3;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 if (tb1 == tb) {
837 *ptb = tb1->page_next[n1];
838 break;
839 }
840 ptb = &tb1->page_next[n1];
841 }
842}
843
d4e8164f
FB
844static inline void tb_jmp_remove(TranslationBlock *tb, int n)
845{
846 TranslationBlock *tb1, **ptb;
847 unsigned int n1;
848
849 ptb = &tb->jmp_next[n];
850 tb1 = *ptb;
851 if (tb1) {
852 /* find tb(n) in circular list */
853 for(;;) {
854 tb1 = *ptb;
855 n1 = (long)tb1 & 3;
856 tb1 = (TranslationBlock *)((long)tb1 & ~3);
857 if (n1 == n && tb1 == tb)
858 break;
859 if (n1 == 2) {
860 ptb = &tb1->jmp_first;
861 } else {
862 ptb = &tb1->jmp_next[n1];
863 }
864 }
865 /* now we can suppress tb(n) from the list */
866 *ptb = tb->jmp_next[n];
867
868 tb->jmp_next[n] = NULL;
869 }
870}
871
872/* reset the jump entry 'n' of a TB so that it is not chained to
873 another TB */
874static inline void tb_reset_jump(TranslationBlock *tb, int n)
875{
876 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
877}
878
41c1b1c9 879void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 880{
6a00d601 881 CPUState *env;
8a40a180 882 PageDesc *p;
d4e8164f 883 unsigned int h, n1;
41c1b1c9 884 tb_page_addr_t phys_pc;
8a40a180 885 TranslationBlock *tb1, *tb2;
3b46e624 886
8a40a180
FB
887 /* remove the TB from the hash list */
888 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
889 h = tb_phys_hash_func(phys_pc);
5fafdf24 890 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
891 offsetof(TranslationBlock, phys_hash_next));
892
893 /* remove the TB from the page list */
894 if (tb->page_addr[0] != page_addr) {
895 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
896 tb_page_remove(&p->first_tb, tb);
897 invalidate_page_bitmap(p);
898 }
899 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
900 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
901 tb_page_remove(&p->first_tb, tb);
902 invalidate_page_bitmap(p);
903 }
904
36bdbe54 905 tb_invalidated_flag = 1;
59817ccb 906
fd6ce8f6 907 /* remove the TB from the hash list */
8a40a180 908 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
909 for(env = first_cpu; env != NULL; env = env->next_cpu) {
910 if (env->tb_jmp_cache[h] == tb)
911 env->tb_jmp_cache[h] = NULL;
912 }
d4e8164f
FB
913
914 /* suppress this TB from the two jump lists */
915 tb_jmp_remove(tb, 0);
916 tb_jmp_remove(tb, 1);
917
918 /* suppress any remaining jumps to this TB */
919 tb1 = tb->jmp_first;
920 for(;;) {
921 n1 = (long)tb1 & 3;
922 if (n1 == 2)
923 break;
924 tb1 = (TranslationBlock *)((long)tb1 & ~3);
925 tb2 = tb1->jmp_next[n1];
926 tb_reset_jump(tb1, n1);
927 tb1->jmp_next[n1] = NULL;
928 tb1 = tb2;
929 }
930 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 931
e3db7226 932 tb_phys_invalidate_count++;
9fa3e853
FB
933}
934
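/* Set bits [start, start + len) in the bitmap 'tab'; used by
   build_page_bitmap() below with one bit per byte of a guest page. */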
935static inline void set_bits(uint8_t *tab, int start, int len)
936{
937 int end, mask, end1;
938
939 end = start + len;
940 tab += start >> 3;
941 mask = 0xff << (start & 7);
942 if ((start & ~7) == (end & ~7)) {
943 if (start < end) {
944 mask &= ~(0xff << (end & 7));
945 *tab |= mask;
946 }
947 } else {
948 *tab++ |= mask;
949 start = (start + 8) & ~7;
950 end1 = end & ~7;
951 while (start < end1) {
952 *tab++ = 0xff;
953 start += 8;
954 }
955 if (start < end) {
956 mask = ~(0xff << (end & 7));
957 *tab |= mask;
958 }
959 }
960}
961
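/* Build p->code_bitmap, marking which bytes of this guest page are covered
   by at least one TB, so tb_invalidate_phys_page_fast() can skip writes that
   do not touch translated code. */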
962static void build_page_bitmap(PageDesc *p)
963{
964 int n, tb_start, tb_end;
965 TranslationBlock *tb;
3b46e624 966
7267c094 967 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
968
969 tb = p->first_tb;
970 while (tb != NULL) {
971 n = (long)tb & 3;
972 tb = (TranslationBlock *)((long)tb & ~3);
973 /* NOTE: this is subtle as a TB may span two physical pages */
974 if (n == 0) {
975 /* NOTE: tb_end may be after the end of the page, but
976 it is not a problem */
977 tb_start = tb->pc & ~TARGET_PAGE_MASK;
978 tb_end = tb_start + tb->size;
979 if (tb_end > TARGET_PAGE_SIZE)
980 tb_end = TARGET_PAGE_SIZE;
981 } else {
982 tb_start = 0;
983 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
984 }
985 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
986 tb = tb->page_next[n];
987 }
988}
989
2e70f6ef
PB
990TranslationBlock *tb_gen_code(CPUState *env,
991 target_ulong pc, target_ulong cs_base,
992 int flags, int cflags)
d720b93d
FB
993{
994 TranslationBlock *tb;
995 uint8_t *tc_ptr;
41c1b1c9
PB
996 tb_page_addr_t phys_pc, phys_page2;
997 target_ulong virt_page2;
d720b93d
FB
998 int code_gen_size;
999
41c1b1c9 1000 phys_pc = get_page_addr_code(env, pc);
c27004ec 1001 tb = tb_alloc(pc);
d720b93d
FB
1002 if (!tb) {
1003 /* flush must be done */
1004 tb_flush(env);
1005 /* cannot fail at this point */
c27004ec 1006 tb = tb_alloc(pc);
2e70f6ef
PB
1007 /* Don't forget to invalidate previous TB info. */
1008 tb_invalidated_flag = 1;
d720b93d
FB
1009 }
1010 tc_ptr = code_gen_ptr;
1011 tb->tc_ptr = tc_ptr;
1012 tb->cs_base = cs_base;
1013 tb->flags = flags;
1014 tb->cflags = cflags;
d07bde88 1015 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1016 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1017
d720b93d 1018 /* check next page if needed */
c27004ec 1019 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1020 phys_page2 = -1;
c27004ec 1021 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1022 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1023 }
41c1b1c9 1024 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1025 return tb;
d720b93d 1026}
3b46e624 1027
9fa3e853
FB
1028/* invalidate all TBs which intersect with the target physical page
1029 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1030 the same physical page. 'is_cpu_write_access' should be true if called
1031 from a real cpu write access: the virtual CPU will exit the current
1032 TB if code is modified inside this TB. */
41c1b1c9 1033void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1034 int is_cpu_write_access)
1035{
6b917547 1036 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1037 CPUState *env = cpu_single_env;
41c1b1c9 1038 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1039 PageDesc *p;
1040 int n;
1041#ifdef TARGET_HAS_PRECISE_SMC
1042 int current_tb_not_found = is_cpu_write_access;
1043 TranslationBlock *current_tb = NULL;
1044 int current_tb_modified = 0;
1045 target_ulong current_pc = 0;
1046 target_ulong current_cs_base = 0;
1047 int current_flags = 0;
1048#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1049
1050 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1051 if (!p)
9fa3e853 1052 return;
5fafdf24 1053 if (!p->code_bitmap &&
d720b93d
FB
1054 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1055 is_cpu_write_access) {
9fa3e853
FB
1056 /* build code bitmap */
1057 build_page_bitmap(p);
1058 }
1059
1060 /* we remove all the TBs in the range [start, end[ */
1061 /* XXX: see if in some cases it could be faster to invalidate all the code */
1062 tb = p->first_tb;
1063 while (tb != NULL) {
1064 n = (long)tb & 3;
1065 tb = (TranslationBlock *)((long)tb & ~3);
1066 tb_next = tb->page_next[n];
1067 /* NOTE: this is subtle as a TB may span two physical pages */
1068 if (n == 0) {
1069 /* NOTE: tb_end may be after the end of the page, but
1070 it is not a problem */
1071 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1072 tb_end = tb_start + tb->size;
1073 } else {
1074 tb_start = tb->page_addr[1];
1075 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1076 }
1077 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1078#ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_not_found) {
1080 current_tb_not_found = 0;
1081 current_tb = NULL;
2e70f6ef 1082 if (env->mem_io_pc) {
d720b93d 1083 /* now we have a real cpu fault */
2e70f6ef 1084 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1085 }
1086 }
1087 if (current_tb == tb &&
2e70f6ef 1088 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1089 /* If we are modifying the current TB, we must stop
1090 its execution. We could be more precise by checking
1091 that the modification is after the current PC, but it
1092 would require a specialized function to partially
1093 restore the CPU state */
3b46e624 1094
d720b93d 1095 current_tb_modified = 1;
618ba8e6 1096 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1097 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1098 &current_flags);
d720b93d
FB
1099 }
1100#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1101 /* we need to do that to handle the case where a signal
1102 occurs while doing tb_phys_invalidate() */
1103 saved_tb = NULL;
1104 if (env) {
1105 saved_tb = env->current_tb;
1106 env->current_tb = NULL;
1107 }
9fa3e853 1108 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1109 if (env) {
1110 env->current_tb = saved_tb;
1111 if (env->interrupt_request && env->current_tb)
1112 cpu_interrupt(env, env->interrupt_request);
1113 }
9fa3e853
FB
1114 }
1115 tb = tb_next;
1116 }
1117#if !defined(CONFIG_USER_ONLY)
1118 /* if no code remaining, no need to continue to use slow writes */
1119 if (!p->first_tb) {
1120 invalidate_page_bitmap(p);
d720b93d 1121 if (is_cpu_write_access) {
2e70f6ef 1122 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1123 }
1124 }
1125#endif
1126#ifdef TARGET_HAS_PRECISE_SMC
1127 if (current_tb_modified) {
1128 /* we generate a block containing just the instruction
1129 modifying the memory. It will ensure that it cannot modify
1130 itself */
ea1c1802 1131 env->current_tb = NULL;
2e70f6ef 1132 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1133 cpu_resume_from_signal(env, NULL);
9fa3e853 1134 }
fd6ce8f6 1135#endif
9fa3e853 1136}
fd6ce8f6 1137
9fa3e853 1138/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1139static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1140{
1141 PageDesc *p;
1142 int offset, b;
59817ccb 1143#if 0
a4193c8a 1144 if (1) {
93fcfe39
AL
1145 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1146 cpu_single_env->mem_io_vaddr, len,
1147 cpu_single_env->eip,
1148 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1149 }
1150#endif
9fa3e853 1151 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1152 if (!p)
9fa3e853
FB
1153 return;
1154 if (p->code_bitmap) {
1155 offset = start & ~TARGET_PAGE_MASK;
1156 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1157 if (b & ((1 << len) - 1))
1158 goto do_invalidate;
1159 } else {
1160 do_invalidate:
d720b93d 1161 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1162 }
1163}
1164
9fa3e853 1165#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1166static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1167 unsigned long pc, void *puc)
9fa3e853 1168{
6b917547 1169 TranslationBlock *tb;
9fa3e853 1170 PageDesc *p;
6b917547 1171 int n;
d720b93d 1172#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1173 TranslationBlock *current_tb = NULL;
d720b93d 1174 CPUState *env = cpu_single_env;
6b917547
AL
1175 int current_tb_modified = 0;
1176 target_ulong current_pc = 0;
1177 target_ulong current_cs_base = 0;
1178 int current_flags = 0;
d720b93d 1179#endif
9fa3e853
FB
1180
1181 addr &= TARGET_PAGE_MASK;
1182 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1183 if (!p)
9fa3e853
FB
1184 return;
1185 tb = p->first_tb;
d720b93d
FB
1186#ifdef TARGET_HAS_PRECISE_SMC
1187 if (tb && pc != 0) {
1188 current_tb = tb_find_pc(pc);
1189 }
1190#endif
9fa3e853
FB
1191 while (tb != NULL) {
1192 n = (long)tb & 3;
1193 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1194#ifdef TARGET_HAS_PRECISE_SMC
1195 if (current_tb == tb &&
2e70f6ef 1196 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1197 /* If we are modifying the current TB, we must stop
1198 its execution. We could be more precise by checking
1199 that the modification is after the current PC, but it
1200 would require a specialized function to partially
1201 restore the CPU state */
3b46e624 1202
d720b93d 1203 current_tb_modified = 1;
618ba8e6 1204 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1205 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1206 &current_flags);
d720b93d
FB
1207 }
1208#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1209 tb_phys_invalidate(tb, addr);
1210 tb = tb->page_next[n];
1211 }
fd6ce8f6 1212 p->first_tb = NULL;
d720b93d
FB
1213#ifdef TARGET_HAS_PRECISE_SMC
1214 if (current_tb_modified) {
1215 /* we generate a block containing just the instruction
1216 modifying the memory. It will ensure that it cannot modify
1217 itself */
ea1c1802 1218 env->current_tb = NULL;
2e70f6ef 1219 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1220 cpu_resume_from_signal(env, puc);
1221 }
1222#endif
fd6ce8f6 1223}
9fa3e853 1224#endif
fd6ce8f6
FB
1225
1226/* add the tb in the target page and protect it if necessary */
5fafdf24 1227static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1228 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1229{
1230 PageDesc *p;
4429ab44
JQ
1231#ifndef CONFIG_USER_ONLY
1232 bool page_already_protected;
1233#endif
9fa3e853
FB
1234
1235 tb->page_addr[n] = page_addr;
5cd2c5b6 1236 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1237 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1238#ifndef CONFIG_USER_ONLY
1239 page_already_protected = p->first_tb != NULL;
1240#endif
9fa3e853
FB
1241 p->first_tb = (TranslationBlock *)((long)tb | n);
1242 invalidate_page_bitmap(p);
fd6ce8f6 1243
107db443 1244#if defined(TARGET_HAS_SMC) || 1
d720b93d 1245
9fa3e853 1246#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1247 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1248 target_ulong addr;
1249 PageDesc *p2;
9fa3e853
FB
1250 int prot;
1251
fd6ce8f6
FB
1252 /* force the host page as non writable (writes will have a
1253 page fault + mprotect overhead) */
53a5960a 1254 page_addr &= qemu_host_page_mask;
fd6ce8f6 1255 prot = 0;
53a5960a
PB
1256 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1257 addr += TARGET_PAGE_SIZE) {
1258
1259 p2 = page_find (addr >> TARGET_PAGE_BITS);
1260 if (!p2)
1261 continue;
1262 prot |= p2->flags;
1263 p2->flags &= ~PAGE_WRITE;
53a5960a 1264 }
5fafdf24 1265 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1266 (prot & PAGE_BITS) & ~PAGE_WRITE);
1267#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1268 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1269 page_addr);
fd6ce8f6 1270#endif
fd6ce8f6 1271 }
9fa3e853
FB
1272#else
1273 /* if some code is already present, then the pages are already
1274 protected. So we handle the case where only the first TB is
1275 allocated in a physical page */
4429ab44 1276 if (!page_already_protected) {
6a00d601 1277 tlb_protect_code(page_addr);
9fa3e853
FB
1278 }
1279#endif
d720b93d
FB
1280
1281#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1282}
1283
9fa3e853
FB
1284/* add a new TB and link it to the physical page tables. phys_page2 is
1285 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1286void tb_link_page(TranslationBlock *tb,
1287 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1288{
9fa3e853
FB
1289 unsigned int h;
1290 TranslationBlock **ptb;
1291
c8a706fe
PB
1292 /* Grab the mmap lock to stop another thread invalidating this TB
1293 before we are done. */
1294 mmap_lock();
9fa3e853
FB
1295 /* add in the physical hash table */
1296 h = tb_phys_hash_func(phys_pc);
1297 ptb = &tb_phys_hash[h];
1298 tb->phys_hash_next = *ptb;
1299 *ptb = tb;
fd6ce8f6
FB
1300
1301 /* add in the page list */
9fa3e853
FB
1302 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1303 if (phys_page2 != -1)
1304 tb_alloc_page(tb, 1, phys_page2);
1305 else
1306 tb->page_addr[1] = -1;
9fa3e853 1307
d4e8164f
FB
1308 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1309 tb->jmp_next[0] = NULL;
1310 tb->jmp_next[1] = NULL;
1311
1312 /* init original jump addresses */
1313 if (tb->tb_next_offset[0] != 0xffff)
1314 tb_reset_jump(tb, 0);
1315 if (tb->tb_next_offset[1] != 0xffff)
1316 tb_reset_jump(tb, 1);
8a40a180
FB
1317
1318#ifdef DEBUG_TB_CHECK
1319 tb_page_check();
1320#endif
c8a706fe 1321 mmap_unlock();
fd6ce8f6
FB
1322}
1323
9fa3e853
FB
1324/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1325 tb[1].tc_ptr. Return NULL if not found */
1326TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1327{
9fa3e853
FB
1328 int m_min, m_max, m;
1329 unsigned long v;
1330 TranslationBlock *tb;
a513fe19
FB
1331
1332 if (nb_tbs <= 0)
1333 return NULL;
1334 if (tc_ptr < (unsigned long)code_gen_buffer ||
1335 tc_ptr >= (unsigned long)code_gen_ptr)
1336 return NULL;
1337 /* binary search (cf Knuth) */
1338 m_min = 0;
1339 m_max = nb_tbs - 1;
1340 while (m_min <= m_max) {
1341 m = (m_min + m_max) >> 1;
1342 tb = &tbs[m];
1343 v = (unsigned long)tb->tc_ptr;
1344 if (v == tc_ptr)
1345 return tb;
1346 else if (tc_ptr < v) {
1347 m_max = m - 1;
1348 } else {
1349 m_min = m + 1;
1350 }
5fafdf24 1351 }
a513fe19
FB
1352 return &tbs[m_max];
1353}
7501267e 1354
ea041c0e
FB
1355static void tb_reset_jump_recursive(TranslationBlock *tb);
1356
1357static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1358{
1359 TranslationBlock *tb1, *tb_next, **ptb;
1360 unsigned int n1;
1361
1362 tb1 = tb->jmp_next[n];
1363 if (tb1 != NULL) {
1364 /* find head of list */
1365 for(;;) {
1366 n1 = (long)tb1 & 3;
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1368 if (n1 == 2)
1369 break;
1370 tb1 = tb1->jmp_next[n1];
1371 }
1372 /* we are now sure that tb jumps to tb1 */
1373 tb_next = tb1;
1374
1375 /* remove tb from the jmp_first list */
1376 ptb = &tb_next->jmp_first;
1377 for(;;) {
1378 tb1 = *ptb;
1379 n1 = (long)tb1 & 3;
1380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1381 if (n1 == n && tb1 == tb)
1382 break;
1383 ptb = &tb1->jmp_next[n1];
1384 }
1385 *ptb = tb->jmp_next[n];
1386 tb->jmp_next[n] = NULL;
3b46e624 1387
ea041c0e
FB
1388 /* suppress the jump to next tb in generated code */
1389 tb_reset_jump(tb, n);
1390
0124311e 1391 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1392 tb_reset_jump_recursive(tb_next);
1393 }
1394}
1395
1396static void tb_reset_jump_recursive(TranslationBlock *tb)
1397{
1398 tb_reset_jump_recursive2(tb, 0);
1399 tb_reset_jump_recursive2(tb, 1);
1400}
1401
1fddef4b 1402#if defined(TARGET_HAS_ICE)
94df27fd
PB
1403#if defined(CONFIG_USER_ONLY)
1404static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1405{
1406 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1407}
1408#else
d720b93d
FB
1409static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1410{
c227f099 1411 target_phys_addr_t addr;
9b3c35e0 1412 target_ulong pd;
c227f099 1413 ram_addr_t ram_addr;
f1f6e3b8 1414 PhysPageDesc p;
d720b93d 1415
c2f07f81
PB
1416 addr = cpu_get_phys_page_debug(env, pc);
1417 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 1418 pd = p.phys_offset;
c2f07f81 1419 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1420 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1421}
c27004ec 1422#endif
94df27fd 1423#endif /* TARGET_HAS_ICE */
d720b93d 1424
c527ee8f
PB
1425#if defined(CONFIG_USER_ONLY)
1426void cpu_watchpoint_remove_all(CPUState *env, int mask)
1427
1428{
1429}
1430
1431int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
1433{
1434 return -ENOSYS;
1435}
1436#else
6658ffb8 1437/* Add a watchpoint. */
a1d1bb31
AL
1438int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1439 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1440{
b4051334 1441 target_ulong len_mask = ~(len - 1);
c0ce998e 1442 CPUWatchpoint *wp;
6658ffb8 1443
b4051334
AL
1444 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1445 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1446 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1447 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1448 return -EINVAL;
1449 }
7267c094 1450 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1451
1452 wp->vaddr = addr;
b4051334 1453 wp->len_mask = len_mask;
a1d1bb31
AL
1454 wp->flags = flags;
1455
2dc9f411 1456 /* keep all GDB-injected watchpoints in front */
c0ce998e 1457 if (flags & BP_GDB)
72cf2d4f 1458 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1459 else
72cf2d4f 1460 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1461
6658ffb8 1462 tlb_flush_page(env, addr);
a1d1bb31
AL
1463
1464 if (watchpoint)
1465 *watchpoint = wp;
1466 return 0;
6658ffb8
PB
1467}
1468
a1d1bb31
AL
1469/* Remove a specific watchpoint. */
1470int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1471 int flags)
6658ffb8 1472{
b4051334 1473 target_ulong len_mask = ~(len - 1);
a1d1bb31 1474 CPUWatchpoint *wp;
6658ffb8 1475
72cf2d4f 1476 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1477 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1478 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1479 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1480 return 0;
1481 }
1482 }
a1d1bb31 1483 return -ENOENT;
6658ffb8
PB
1484}
1485
a1d1bb31
AL
1486/* Remove a specific watchpoint by reference. */
1487void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1488{
72cf2d4f 1489 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1490
a1d1bb31
AL
1491 tlb_flush_page(env, watchpoint->vaddr);
1492
7267c094 1493 g_free(watchpoint);
a1d1bb31
AL
1494}
1495
1496/* Remove all matching watchpoints. */
1497void cpu_watchpoint_remove_all(CPUState *env, int mask)
1498{
c0ce998e 1499 CPUWatchpoint *wp, *next;
a1d1bb31 1500
72cf2d4f 1501 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1502 if (wp->flags & mask)
1503 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1504 }
7d03f82f 1505}
c527ee8f 1506#endif
7d03f82f 1507
a1d1bb31
AL
1508/* Add a breakpoint. */
1509int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1510 CPUBreakpoint **breakpoint)
4c3a88a2 1511{
1fddef4b 1512#if defined(TARGET_HAS_ICE)
c0ce998e 1513 CPUBreakpoint *bp;
3b46e624 1514
7267c094 1515 bp = g_malloc(sizeof(*bp));
4c3a88a2 1516
a1d1bb31
AL
1517 bp->pc = pc;
1518 bp->flags = flags;
1519
2dc9f411 1520 /* keep all GDB-injected breakpoints in front */
c0ce998e 1521 if (flags & BP_GDB)
72cf2d4f 1522 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1523 else
72cf2d4f 1524 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1525
d720b93d 1526 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1527
1528 if (breakpoint)
1529 *breakpoint = bp;
4c3a88a2
FB
1530 return 0;
1531#else
a1d1bb31 1532 return -ENOSYS;
4c3a88a2
FB
1533#endif
1534}
1535
a1d1bb31
AL
1536/* Remove a specific breakpoint. */
1537int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1538{
7d03f82f 1539#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1540 CPUBreakpoint *bp;
1541
72cf2d4f 1542 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1543 if (bp->pc == pc && bp->flags == flags) {
1544 cpu_breakpoint_remove_by_ref(env, bp);
1545 return 0;
1546 }
7d03f82f 1547 }
a1d1bb31
AL
1548 return -ENOENT;
1549#else
1550 return -ENOSYS;
7d03f82f
EI
1551#endif
1552}
1553
a1d1bb31
AL
1554/* Remove a specific breakpoint by reference. */
1555void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1556{
1fddef4b 1557#if defined(TARGET_HAS_ICE)
72cf2d4f 1558 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1559
a1d1bb31
AL
1560 breakpoint_invalidate(env, breakpoint->pc);
1561
7267c094 1562 g_free(breakpoint);
a1d1bb31
AL
1563#endif
1564}
1565
1566/* Remove all matching breakpoints. */
1567void cpu_breakpoint_remove_all(CPUState *env, int mask)
1568{
1569#if defined(TARGET_HAS_ICE)
c0ce998e 1570 CPUBreakpoint *bp, *next;
a1d1bb31 1571
72cf2d4f 1572 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1573 if (bp->flags & mask)
1574 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1575 }
4c3a88a2
FB
1576#endif
1577}
1578
c33a346e
FB
1579/* enable or disable single step mode. EXCP_DEBUG is returned by the
1580 CPU loop after each instruction */
1581void cpu_single_step(CPUState *env, int enabled)
1582{
1fddef4b 1583#if defined(TARGET_HAS_ICE)
c33a346e
FB
1584 if (env->singlestep_enabled != enabled) {
1585 env->singlestep_enabled = enabled;
e22a25c9
AL
1586 if (kvm_enabled())
1587 kvm_update_guest_debug(env, 0);
1588 else {
ccbb4d44 1589 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1590 /* XXX: only flush what is necessary */
1591 tb_flush(env);
1592 }
c33a346e
FB
1593 }
1594#endif
1595}
1596
34865134
FB
1597/* enable or disable low levels log */
1598void cpu_set_log(int log_flags)
1599{
1600 loglevel = log_flags;
1601 if (loglevel && !logfile) {
11fcfab4 1602 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1603 if (!logfile) {
1604 perror(logfilename);
1605 _exit(1);
1606 }
9fa3e853
FB
1607#if !defined(CONFIG_SOFTMMU)
1608 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1609 {
b55266b5 1610 static char logfile_buf[4096];
9fa3e853
FB
1611 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1612 }
daf767b1
SW
1613#elif defined(_WIN32)
1614 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1615 setvbuf(logfile, NULL, _IONBF, 0);
1616#else
34865134 1617 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1618#endif
e735b91c
PB
1619 log_append = 1;
1620 }
1621 if (!loglevel && logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
34865134
FB
1624 }
1625}
1626
1627void cpu_set_log_filename(const char *filename)
1628{
1629 logfilename = strdup(filename);
e735b91c
PB
1630 if (logfile) {
1631 fclose(logfile);
1632 logfile = NULL;
1633 }
1634 cpu_set_log(loglevel);
34865134 1635}
c33a346e 1636
3098dba0 1637static void cpu_unlink_tb(CPUState *env)
ea041c0e 1638{
3098dba0
AJ
1639 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1640 problem and hope the cpu will stop of its own accord. For userspace
1641 emulation this often isn't actually as bad as it sounds. Often
1642 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1643 TranslationBlock *tb;
c227f099 1644 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1645
cab1b4bd 1646 spin_lock(&interrupt_lock);
3098dba0
AJ
1647 tb = env->current_tb;
1648 /* if the cpu is currently executing code, we must unlink it and
1649 all the potentially executing TB */
f76cfe56 1650 if (tb) {
3098dba0
AJ
1651 env->current_tb = NULL;
1652 tb_reset_jump_recursive(tb);
be214e6c 1653 }
cab1b4bd 1654 spin_unlock(&interrupt_lock);
3098dba0
AJ
1655}
1656
97ffbd8d 1657#ifndef CONFIG_USER_ONLY
3098dba0 1658/* mask must never be zero, except for A20 change call */
ec6959d0 1659static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1660{
1661 int old_mask;
be214e6c 1662
2e70f6ef 1663 old_mask = env->interrupt_request;
68a79315 1664 env->interrupt_request |= mask;
3098dba0 1665
8edac960
AL
1666 /*
1667 * If called from iothread context, wake the target cpu in
1668 * case it's halted.
1669 */
b7680cb6 1670 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1671 qemu_cpu_kick(env);
1672 return;
1673 }
8edac960 1674
2e70f6ef 1675 if (use_icount) {
266910c4 1676 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1677 if (!can_do_io(env)
be214e6c 1678 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1679 cpu_abort(env, "Raised interrupt while not in I/O function");
1680 }
2e70f6ef 1681 } else {
3098dba0 1682 cpu_unlink_tb(env);
ea041c0e
FB
1683 }
1684}
1685
ec6959d0
JK
1686CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1687
97ffbd8d
JK
1688#else /* CONFIG_USER_ONLY */
1689
1690void cpu_interrupt(CPUState *env, int mask)
1691{
1692 env->interrupt_request |= mask;
1693 cpu_unlink_tb(env);
1694}
1695#endif /* CONFIG_USER_ONLY */
1696
b54ad049
FB
1697void cpu_reset_interrupt(CPUState *env, int mask)
1698{
1699 env->interrupt_request &= ~mask;
1700}
1701
3098dba0
AJ
1702void cpu_exit(CPUState *env)
1703{
1704 env->exit_request = 1;
1705 cpu_unlink_tb(env);
1706}
1707
c7cd6a37 1708const CPULogItem cpu_log_items[] = {
5fafdf24 1709 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1710 "show generated host assembly code for each compiled TB" },
1711 { CPU_LOG_TB_IN_ASM, "in_asm",
1712 "show target assembly code for each compiled TB" },
5fafdf24 1713 { CPU_LOG_TB_OP, "op",
57fec1fe 1714 "show micro ops for each compiled TB" },
f193c797 1715 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1716 "show micro ops "
1717#ifdef TARGET_I386
1718 "before eflags optimization and "
f193c797 1719#endif
e01a1157 1720 "after liveness analysis" },
f193c797
FB
1721 { CPU_LOG_INT, "int",
1722 "show interrupts/exceptions in short format" },
1723 { CPU_LOG_EXEC, "exec",
1724 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1725 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1726 "show CPU state before block translation" },
f193c797
FB
1727#ifdef TARGET_I386
1728 { CPU_LOG_PCALL, "pcall",
1729 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1730 { CPU_LOG_RESET, "cpu_reset",
1731 "show CPU state before CPU resets" },
f193c797 1732#endif
8e3a9fd2 1733#ifdef DEBUG_IOPORT
fd872598
FB
1734 { CPU_LOG_IOPORT, "ioport",
1735 "show all i/o ports accesses" },
8e3a9fd2 1736#endif
f193c797
FB
1737 { 0, NULL, NULL },
1738};
1739
1740static int cmp1(const char *s1, int n, const char *s2)
1741{
1742 if (strlen(s2) != n)
1743 return 0;
1744 return memcmp(s1, s2, n) == 0;
1745}
3b46e624 1746
f193c797
FB
1747/* takes a comma separated list of log masks. Return 0 if error. */
1748int cpu_str_to_log_mask(const char *str)
1749{
c7cd6a37 1750 const CPULogItem *item;
f193c797
FB
1751 int mask;
1752 const char *p, *p1;
1753
1754 p = str;
1755 mask = 0;
1756 for(;;) {
1757 p1 = strchr(p, ',');
1758 if (!p1)
1759 p1 = p + strlen(p);
9742bf26
YT
1760 if(cmp1(p,p1-p,"all")) {
1761 for(item = cpu_log_items; item->mask != 0; item++) {
1762 mask |= item->mask;
1763 }
1764 } else {
1765 for(item = cpu_log_items; item->mask != 0; item++) {
1766 if (cmp1(p, p1 - p, item->name))
1767 goto found;
1768 }
1769 return 0;
f193c797 1770 }
f193c797
FB
1771 found:
1772 mask |= item->mask;
1773 if (*p1 != ',')
1774 break;
1775 p = p1 + 1;
1776 }
1777 return mask;
1778}
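/* Illustrative usage sketch (not part of the original file): how a "-d"
   style option handler might drive cpu_str_to_log_mask().  The helper name
   example_parse_log_option is hypothetical. */
static inline int example_parse_log_option(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);    /* e.g. "exec,int" or "all" */

    if (mask == 0) {
        fprintf(stderr, "unknown log category in '%s'\n", opt);
        return -1;
    }
    /* the caller would hand 'mask' to the logging setup code */
    return mask;
}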
ea041c0e 1779
7501267e
FB
1780void cpu_abort(CPUState *env, const char *fmt, ...)
1781{
1782 va_list ap;
493ae1f0 1783 va_list ap2;
7501267e
FB
1784
1785 va_start(ap, fmt);
493ae1f0 1786 va_copy(ap2, ap);
7501267e
FB
1787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1790#ifdef TARGET_I386
7fe48483
FB
1791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1792#else
1793 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1794#endif
93fcfe39
AL
1795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt, ap2);
1798 qemu_log("\n");
f9373291 1799#ifdef TARGET_I386
93fcfe39 1800 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1801#else
93fcfe39 1802 log_cpu_state(env, 0);
f9373291 1803#endif
31b1a7b4 1804 qemu_log_flush();
93fcfe39 1805 qemu_log_close();
924edcae 1806 }
493ae1f0 1807 va_end(ap2);
f9373291 1808 va_end(ap);
fd052bf6
RV
1809#if defined(CONFIG_USER_ONLY)
1810 {
1811 struct sigaction act;
1812 sigfillset(&act.sa_mask);
1813 act.sa_handler = SIG_DFL;
1814 sigaction(SIGABRT, &act, NULL);
1815 }
1816#endif
7501267e
FB
1817 abort();
1818}
1819
c5be9f08
TS
1820CPUState *cpu_copy(CPUState *env)
1821{
01ba9816 1822 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1823 CPUState *next_cpu = new_env->next_cpu;
1824 int cpu_index = new_env->cpu_index;
5a38f081
AL
1825#if defined(TARGET_HAS_ICE)
1826 CPUBreakpoint *bp;
1827 CPUWatchpoint *wp;
1828#endif
1829
c5be9f08 1830 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1831
1832 /* Preserve chaining and index. */
c5be9f08
TS
1833 new_env->next_cpu = next_cpu;
1834 new_env->cpu_index = cpu_index;
5a38f081
AL
1835
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1839 QTAILQ_INIT(&env->breakpoints);
1840 QTAILQ_INIT(&env->watchpoints);
5a38f081 1841#if defined(TARGET_HAS_ICE)
72cf2d4f 1842 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1843 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1844 }
72cf2d4f 1845 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1846 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1847 wp->flags, NULL);
1848 }
1849#endif
1850
c5be9f08
TS
1851 return new_env;
1852}
1853
0124311e
FB
1854#if !defined(CONFIG_USER_ONLY)
1855
5c751e99
EI
1856static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1857{
1858 unsigned int i;
1859
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1863 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1864 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1865
1866 i = tb_jmp_cache_hash_page(addr);
1867 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1868 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1869}
1870
08738984
IK
1871static CPUTLBEntry s_cputlb_empty_entry = {
1872 .addr_read = -1,
1873 .addr_write = -1,
1874 .addr_code = -1,
1875 .addend = -1,
1876};
1877
ee8b7021
FB
1878/* NOTE: if flush_global is true, also flush global entries (not
1879 implemented yet) */
1880void tlb_flush(CPUState *env, int flush_global)
33417e70 1881{
33417e70 1882 int i;
0124311e 1883
9fa3e853
FB
1884#if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886#endif
0124311e
FB
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
33417e70 1891 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1895 }
33417e70 1896 }
9fa3e853 1897
8a40a180 1898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1899
d4c430a8
PB
1900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
e3db7226 1902 tlb_flush_count++;
33417e70
FB
1903}
1904
274da6b2 1905static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1906{
5fafdf24 1907 if (addr == (tlb_entry->addr_read &
84b7b8e7 1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1909 addr == (tlb_entry->addr_write &
84b7b8e7 1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1911 addr == (tlb_entry->addr_code &
84b7b8e7 1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1913 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1914 }
61382a50
FB
1915}
1916
2e12669a 1917void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1918{
8a40a180 1919 int i;
cfde4bd9 1920 int mmu_idx;
0124311e 1921
9fa3e853 1922#if defined(DEBUG_TLB)
108c49b8 1923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1924#endif
d4c430a8
PB
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927#if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931#endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
0124311e
FB
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
61382a50
FB
1938
1939 addr &= TARGET_PAGE_MASK;
1940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1943
5c751e99 1944 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1945}
1946
9fa3e853
FB
1947/* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
c227f099 1949static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1950{
5fafdf24 1951 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
9fa3e853
FB
1954}
1955
9fa3e853 1956/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1957 tested for self modifying code */
c227f099 1958static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1959 target_ulong vaddr)
9fa3e853 1960{
f7c11b53 1961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1962}
1963
5fafdf24 1964static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1965 unsigned long start, unsigned long length)
1966{
1967 unsigned long addr;
84b7b8e7
FB
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1970 if ((addr - start) < length) {
0f459d16 1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1972 }
1973 }
1974}
1975
5579c7f3 1976/* Note: start and end must be within the same ram block. */
c227f099 1977void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1978 int dirty_flags)
1ccde1cb
FB
1979{
1980 CPUState *env;
4f2ac237 1981 unsigned long length, start1;
f7c11b53 1982 int i;
1ccde1cb
FB
1983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
f7c11b53 1990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1991
1ccde1cb
FB
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
b2e0a138 1994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1995 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1996 address comparisons below. */
b2e0a138 1997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1998 != (end - 1) - start) {
1999 abort();
2000 }
2001
6a00d601 2002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
6a00d601 2009 }
1ccde1cb
FB
2010}
2011
74576198
AL
2012int cpu_physical_memory_set_dirty_tracking(int enable)
2013{
f6f3fbca 2014 int ret = 0;
74576198 2015 in_migration = enable;
f6f3fbca 2016 return ret;
74576198
AL
2017}
2018
3a7d929e
FB
2019static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020{
c227f099 2021 ram_addr_t ram_addr;
5579c7f3 2022 void *p;
3a7d929e 2023
84b7b8e7 2024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
e890261f 2027 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2029 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2030 }
2031 }
2032}
2033
2034/* update the TLB according to the current state of the dirty bits */
2035void cpu_tlb_update_dirty(CPUState *env)
2036{
2037 int i;
cfde4bd9
IY
2038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
3a7d929e
FB
2043}
2044
0f459d16 2045static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2046{
0f459d16
PB
2047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2049}
2050
0f459d16
PB
2051/* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2054{
1ccde1cb 2055 int i;
cfde4bd9 2056 int mmu_idx;
1ccde1cb 2057
0f459d16 2058 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2062}
2063
d4c430a8
PB
2064/* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068{
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085}
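/* Worked example (illustrative, not part of the original file): with 2MB
   large pages, after mapping vaddr 0x10000000 the tracked region is
   addr=0x10000000, mask=0xffe00000.  Mapping a second large page at
   0x12400000 widens the mask one bit at a time until both addresses share
   the same prefix, ending with addr=0x10000000, mask=0xfc000000, i.e. any
   page flush inside [0x10000000, 0x14000000) now forces a full TLB flush. */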
2086
2087/* Add a new TLB entry. At most one entry for a given virtual address
 2088 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2089 supplied size is only used by tlb_flush_page. */
2090void tlb_set_page(CPUState *env, target_ulong vaddr,
2091 target_phys_addr_t paddr, int prot,
2092 int mmu_idx, target_ulong size)
9fa3e853 2093{
f1f6e3b8 2094 PhysPageDesc p;
4f2ac237 2095 unsigned long pd;
9fa3e853 2096 unsigned int index;
4f2ac237 2097 target_ulong address;
0f459d16 2098 target_ulong code_address;
355b1943 2099 unsigned long addend;
84b7b8e7 2100 CPUTLBEntry *te;
a1d1bb31 2101 CPUWatchpoint *wp;
c227f099 2102 target_phys_addr_t iotlb;
9fa3e853 2103
d4c430a8
PB
2104 assert(size >= TARGET_PAGE_SIZE);
2105 if (size != TARGET_PAGE_SIZE) {
2106 tlb_add_large_page(env, vaddr, size);
2107 }
92e873b9 2108 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2109 pd = p.phys_offset;
9fa3e853 2110#if defined(DEBUG_TLB)
7fd3f494
SW
2111 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2112 " prot=%x idx=%d pd=0x%08lx\n",
2113 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2114#endif
2115
0f459d16
PB
2116 address = vaddr;
2117 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2118 /* IO memory case (romd handled later) */
2119 address |= TLB_MMIO;
2120 }
5579c7f3 2121 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2122 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2123 /* Normal RAM. */
2124 iotlb = pd & TARGET_PAGE_MASK;
2125 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2126 iotlb |= IO_MEM_NOTDIRTY;
2127 else
2128 iotlb |= IO_MEM_ROM;
2129 } else {
ccbb4d44 2130 /* IO handlers are currently passed a physical address.
0f459d16
PB
2131 It would be nice to pass an offset from the base address
2132 of that region. This would avoid having to special case RAM,
2133 and avoid full address decoding in every device.
2134 We can't use the high bits of pd for this because
2135 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2136 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2137 iotlb += p.region_offset;
0f459d16
PB
2138 }
2139
2140 code_address = address;
2141 /* Make accesses to pages with watchpoints go via the
2142 watchpoint trap routines. */
72cf2d4f 2143 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2144 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2145 /* Avoid trapping reads of pages with a write breakpoint. */
2146 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2147 iotlb = io_mem_watch + paddr;
2148 address |= TLB_MMIO;
2149 break;
2150 }
6658ffb8 2151 }
0f459d16 2152 }
d79acba4 2153
0f459d16
PB
2154 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2155 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2156 te = &env->tlb_table[mmu_idx][index];
2157 te->addend = addend - vaddr;
2158 if (prot & PAGE_READ) {
2159 te->addr_read = address;
2160 } else {
2161 te->addr_read = -1;
2162 }
5c751e99 2163
0f459d16
PB
2164 if (prot & PAGE_EXEC) {
2165 te->addr_code = code_address;
2166 } else {
2167 te->addr_code = -1;
2168 }
2169 if (prot & PAGE_WRITE) {
2170 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2171 (pd & IO_MEM_ROMD)) {
2172 /* Write access calls the I/O callback. */
2173 te->addr_write = address | TLB_MMIO;
2174 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2175 !cpu_physical_memory_is_dirty(pd)) {
2176 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2177 } else {
0f459d16 2178 te->addr_write = address;
9fa3e853 2179 }
0f459d16
PB
2180 } else {
2181 te->addr_write = -1;
9fa3e853 2182 }
9fa3e853
FB
2183}
2184
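/* Illustrative sketch (hypothetical target code, not part of the original
   file): after a successful guest page-table walk, a target's tlb_fill
   handler would typically install the translation like this.  The helper
   name example_install_mapping is made up. */
static inline void example_install_mapping(CPUState *env, target_ulong vaddr,
                                           target_phys_addr_t paddr,
                                           int mmu_idx)
{
    /* map one read/write/execute page of the minimum (target page) size */
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                 TARGET_PAGE_SIZE);
}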
0124311e
FB
2185#else
2186
ee8b7021 2187void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2188{
2189}
2190
2e12669a 2191void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2192{
2193}
2194
edf8e2af
MW
2195/*
2196 * Walks guest process memory "regions" one by one
2197 * and calls callback function 'fn' for each region.
2198 */
5cd2c5b6
RH
2199
2200struct walk_memory_regions_data
2201{
2202 walk_memory_regions_fn fn;
2203 void *priv;
2204 unsigned long start;
2205 int prot;
2206};
2207
2208static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2209 abi_ulong end, int new_prot)
5cd2c5b6
RH
2210{
2211 if (data->start != -1ul) {
2212 int rc = data->fn(data->priv, data->start, end, data->prot);
2213 if (rc != 0) {
2214 return rc;
2215 }
2216 }
2217
2218 data->start = (new_prot ? end : -1ul);
2219 data->prot = new_prot;
2220
2221 return 0;
2222}
2223
2224static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2225 abi_ulong base, int level, void **lp)
5cd2c5b6 2226{
b480d9b7 2227 abi_ulong pa;
5cd2c5b6
RH
2228 int i, rc;
2229
2230 if (*lp == NULL) {
2231 return walk_memory_regions_end(data, base, 0);
2232 }
2233
2234 if (level == 0) {
2235 PageDesc *pd = *lp;
7296abac 2236 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2237 int prot = pd[i].flags;
2238
2239 pa = base | (i << TARGET_PAGE_BITS);
2240 if (prot != data->prot) {
2241 rc = walk_memory_regions_end(data, pa, prot);
2242 if (rc != 0) {
2243 return rc;
9fa3e853 2244 }
9fa3e853 2245 }
5cd2c5b6
RH
2246 }
2247 } else {
2248 void **pp = *lp;
7296abac 2249 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2250 pa = base | ((abi_ulong)i <<
2251 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2252 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2253 if (rc != 0) {
2254 return rc;
2255 }
2256 }
2257 }
2258
2259 return 0;
2260}
2261
2262int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2263{
2264 struct walk_memory_regions_data data;
2265 unsigned long i;
2266
2267 data.fn = fn;
2268 data.priv = priv;
2269 data.start = -1ul;
2270 data.prot = 0;
2271
2272 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2273 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2274 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2275 if (rc != 0) {
2276 return rc;
9fa3e853 2277 }
33417e70 2278 }
5cd2c5b6
RH
2279
2280 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2281}
2282
b480d9b7
PB
2283static int dump_region(void *priv, abi_ulong start,
2284 abi_ulong end, unsigned long prot)
edf8e2af
MW
2285{
2286 FILE *f = (FILE *)priv;
2287
b480d9b7
PB
2288 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2289 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2290 start, end, end - start,
2291 ((prot & PAGE_READ) ? 'r' : '-'),
2292 ((prot & PAGE_WRITE) ? 'w' : '-'),
2293 ((prot & PAGE_EXEC) ? 'x' : '-'));
2294
2295 return (0);
2296}
2297
2298/* dump memory mappings */
2299void page_dump(FILE *f)
2300{
2301 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2302 "start", "end", "size", "prot");
2303 walk_memory_regions(f, dump_region);
33417e70
FB
2304}
2305
53a5960a 2306int page_get_flags(target_ulong address)
33417e70 2307{
9fa3e853
FB
2308 PageDesc *p;
2309
2310 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2311 if (!p)
9fa3e853
FB
2312 return 0;
2313 return p->flags;
2314}
2315
376a7909
RH
2316/* Modify the flags of a page and invalidate the code if necessary.
2317 The flag PAGE_WRITE_ORG is positioned automatically depending
2318 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2319void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2320{
376a7909
RH
2321 target_ulong addr, len;
2322
2323 /* This function should never be called with addresses outside the
2324 guest address space. If this assert fires, it probably indicates
2325 a missing call to h2g_valid. */
b480d9b7
PB
2326#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2327 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2328#endif
2329 assert(start < end);
9fa3e853
FB
2330
2331 start = start & TARGET_PAGE_MASK;
2332 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2333
2334 if (flags & PAGE_WRITE) {
9fa3e853 2335 flags |= PAGE_WRITE_ORG;
376a7909
RH
2336 }
2337
2338 for (addr = start, len = end - start;
2339 len != 0;
2340 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2341 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2342
2343 /* If the write protection bit is set, then we invalidate
2344 the code inside. */
5fafdf24 2345 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2346 (flags & PAGE_WRITE) &&
2347 p->first_tb) {
d720b93d 2348 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2349 }
2350 p->flags = flags;
2351 }
33417e70
FB
2352}
2353
3d97b40b
TS
2354int page_check_range(target_ulong start, target_ulong len, int flags)
2355{
2356 PageDesc *p;
2357 target_ulong end;
2358 target_ulong addr;
2359
376a7909
RH
2360 /* This function should never be called with addresses outside the
2361 guest address space. If this assert fires, it probably indicates
2362 a missing call to h2g_valid. */
338e9e6c
BS
2363#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2364 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2365#endif
2366
3e0650a9
RH
2367 if (len == 0) {
2368 return 0;
2369 }
376a7909
RH
2370 if (start + len - 1 < start) {
2371 /* We've wrapped around. */
55f280c9 2372 return -1;
376a7909 2373 }
55f280c9 2374
3d97b40b
TS
 2375 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2376 start = start & TARGET_PAGE_MASK;
2377
376a7909
RH
2378 for (addr = start, len = end - start;
2379 len != 0;
2380 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2381 p = page_find(addr >> TARGET_PAGE_BITS);
2382 if( !p )
2383 return -1;
2384 if( !(p->flags & PAGE_VALID) )
2385 return -1;
2386
dae3270c 2387 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2388 return -1;
dae3270c
FB
2389 if (flags & PAGE_WRITE) {
2390 if (!(p->flags & PAGE_WRITE_ORG))
2391 return -1;
2392 /* unprotect the page if it was put read-only because it
2393 contains translated code */
2394 if (!(p->flags & PAGE_WRITE)) {
2395 if (!page_unprotect(addr, 0, NULL))
2396 return -1;
2397 }
2398 return 0;
2399 }
3d97b40b
TS
2400 }
2401 return 0;
2402}
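/* Illustrative sketch (hypothetical user-mode caller, not part of the
   original file): validate a guest buffer before touching it, the way an
   access_ok()-style helper would.  The function name is made up. */
static inline int example_guest_buffer_readable(target_ulong guest_addr,
                                                target_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_READ) == 0;
}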
2403
9fa3e853 2404/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2405 page. Return TRUE if the fault was successfully handled. */
53a5960a 2406int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2407{
45d679d6
AJ
2408 unsigned int prot;
2409 PageDesc *p;
53a5960a 2410 target_ulong host_start, host_end, addr;
9fa3e853 2411
c8a706fe
PB
2412 /* Technically this isn't safe inside a signal handler. However we
2413 know this only ever happens in a synchronous SEGV handler, so in
2414 practice it seems to be ok. */
2415 mmap_lock();
2416
45d679d6
AJ
2417 p = page_find(address >> TARGET_PAGE_BITS);
2418 if (!p) {
c8a706fe 2419 mmap_unlock();
9fa3e853 2420 return 0;
c8a706fe 2421 }
45d679d6 2422
9fa3e853
FB
2423 /* if the page was really writable, then we change its
2424 protection back to writable */
45d679d6
AJ
2425 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2426 host_start = address & qemu_host_page_mask;
2427 host_end = host_start + qemu_host_page_size;
2428
2429 prot = 0;
2430 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2431 p = page_find(addr >> TARGET_PAGE_BITS);
2432 p->flags |= PAGE_WRITE;
2433 prot |= p->flags;
2434
9fa3e853
FB
2435 /* and since the content will be modified, we must invalidate
2436 the corresponding translated code. */
45d679d6 2437 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2438#ifdef DEBUG_TB_CHECK
45d679d6 2439 tb_invalidate_check(addr);
9fa3e853 2440#endif
9fa3e853 2441 }
45d679d6
AJ
2442 mprotect((void *)g2h(host_start), qemu_host_page_size,
2443 prot & PAGE_BITS);
2444
2445 mmap_unlock();
2446 return 1;
9fa3e853 2447 }
c8a706fe 2448 mmap_unlock();
9fa3e853
FB
2449 return 0;
2450}
2451
6a00d601
FB
2452static inline void tlb_set_dirty(CPUState *env,
2453 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2454{
2455}
9fa3e853
FB
2456#endif /* defined(CONFIG_USER_ONLY) */
2457
e2eef170 2458#if !defined(CONFIG_USER_ONLY)
8da3ff18 2459
c04b2b78
PB
2460#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2461typedef struct subpage_t {
2462 target_phys_addr_t base;
f6405247
RH
2463 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2464 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2465} subpage_t;
2466
c227f099
AL
2467static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2468 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2469static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2470 ram_addr_t orig_memory,
2471 ram_addr_t region_offset);
db7b5426
BS
2472#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2473 need_subpage) \
2474 do { \
2475 if (addr > start_addr) \
2476 start_addr2 = 0; \
2477 else { \
2478 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2479 if (start_addr2 > 0) \
2480 need_subpage = 1; \
2481 } \
2482 \
49e9fba2 2483 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2484 end_addr2 = TARGET_PAGE_SIZE - 1; \
2485 else { \
2486 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2487 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2488 need_subpage = 1; \
2489 } \
2490 } while (0)
2491
8f2498f9
MT
2492/* register physical memory.
2493 For RAM, 'size' must be a multiple of the target page size.
2494 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2495 io memory page. The address used when calling the IO function is
2496 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2497 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2498 before calculating this offset. This should not be a problem unless
2499 the low bits of start_addr and region_offset differ. */
0fd542fb 2500void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2501 ram_addr_t size,
2502 ram_addr_t phys_offset,
0fd542fb
MT
2503 ram_addr_t region_offset,
2504 bool log_dirty)
33417e70 2505{
c227f099 2506 target_phys_addr_t addr, end_addr;
92e873b9 2507 PhysPageDesc *p;
9d42037b 2508 CPUState *env;
c227f099 2509 ram_addr_t orig_size = size;
f6405247 2510 subpage_t *subpage;
33417e70 2511
3b8e6a2d 2512 assert(size);
f6f3fbca 2513
67c4d23c
PB
2514 if (phys_offset == IO_MEM_UNASSIGNED) {
2515 region_offset = start_addr;
2516 }
8da3ff18 2517 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2518 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2519 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2520
2521 addr = start_addr;
2522 do {
f1f6e3b8 2523 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
db7b5426 2524 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2525 ram_addr_t orig_memory = p->phys_offset;
2526 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2527 int need_subpage = 0;
2528
2529 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2530 need_subpage);
f6405247 2531 if (need_subpage) {
db7b5426
BS
2532 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2533 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2534 &p->phys_offset, orig_memory,
2535 p->region_offset);
db7b5426
BS
2536 } else {
2537 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2538 >> IO_MEM_SHIFT];
2539 }
8da3ff18
PB
2540 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2541 region_offset);
2542 p->region_offset = 0;
db7b5426
BS
2543 } else {
2544 p->phys_offset = phys_offset;
2545 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2546 (phys_offset & IO_MEM_ROMD))
2547 phys_offset += TARGET_PAGE_SIZE;
2548 }
2549 } else {
2550 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2551 p->phys_offset = phys_offset;
8da3ff18 2552 p->region_offset = region_offset;
db7b5426 2553 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2554 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2555 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2556 } else {
c227f099 2557 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2558 int need_subpage = 0;
2559
2560 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2561 end_addr2, need_subpage);
2562
f6405247 2563 if (need_subpage) {
db7b5426 2564 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2565 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2566 addr & TARGET_PAGE_MASK);
db7b5426 2567 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2568 phys_offset, region_offset);
2569 p->region_offset = 0;
db7b5426
BS
2570 }
2571 }
2572 }
8da3ff18 2573 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2574 addr += TARGET_PAGE_SIZE;
2575 } while (addr != end_addr);
3b46e624 2576
9d42037b
FB
2577 /* since each CPU stores ram addresses in its TLB cache, we must
2578 reset the modified entries */
2579 /* XXX: slow ! */
2580 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2581 tlb_flush(env, 1);
2582 }
33417e70
FB
2583}
2584
c227f099 2585void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2586{
2587 if (kvm_enabled())
2588 kvm_coalesce_mmio_region(addr, size);
2589}
2590
c227f099 2591void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2592{
2593 if (kvm_enabled())
2594 kvm_uncoalesce_mmio_region(addr, size);
2595}
2596
62a2744c
SY
2597void qemu_flush_coalesced_mmio_buffer(void)
2598{
2599 if (kvm_enabled())
2600 kvm_flush_coalesced_mmio_buffer();
2601}
2602
c902760f
MT
2603#if defined(__linux__) && !defined(TARGET_S390X)
2604
2605#include <sys/vfs.h>
2606
2607#define HUGETLBFS_MAGIC 0x958458f6
2608
2609static long gethugepagesize(const char *path)
2610{
2611 struct statfs fs;
2612 int ret;
2613
2614 do {
9742bf26 2615 ret = statfs(path, &fs);
c902760f
MT
2616 } while (ret != 0 && errno == EINTR);
2617
2618 if (ret != 0) {
9742bf26
YT
2619 perror(path);
2620 return 0;
c902760f
MT
2621 }
2622
2623 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2624 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2625
2626 return fs.f_bsize;
2627}
2628
04b16653
AW
2629static void *file_ram_alloc(RAMBlock *block,
2630 ram_addr_t memory,
2631 const char *path)
c902760f
MT
2632{
2633 char *filename;
2634 void *area;
2635 int fd;
2636#ifdef MAP_POPULATE
2637 int flags;
2638#endif
2639 unsigned long hpagesize;
2640
2641 hpagesize = gethugepagesize(path);
2642 if (!hpagesize) {
9742bf26 2643 return NULL;
c902760f
MT
2644 }
2645
2646 if (memory < hpagesize) {
2647 return NULL;
2648 }
2649
2650 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2651 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2652 return NULL;
2653 }
2654
2655 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2656 return NULL;
c902760f
MT
2657 }
2658
2659 fd = mkstemp(filename);
2660 if (fd < 0) {
9742bf26
YT
2661 perror("unable to create backing store for hugepages");
2662 free(filename);
2663 return NULL;
c902760f
MT
2664 }
2665 unlink(filename);
2666 free(filename);
2667
2668 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2669
2670 /*
2671 * ftruncate is not supported by hugetlbfs in older
2672 * hosts, so don't bother bailing out on errors.
2673 * If anything goes wrong with it under other filesystems,
2674 * mmap will fail.
2675 */
2676 if (ftruncate(fd, memory))
9742bf26 2677 perror("ftruncate");
c902760f
MT
2678
2679#ifdef MAP_POPULATE
2680 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2681 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2682 * to sidestep this quirk.
2683 */
2684 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2685 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2686#else
2687 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2688#endif
2689 if (area == MAP_FAILED) {
9742bf26
YT
2690 perror("file_ram_alloc: can't mmap RAM pages");
2691 close(fd);
2692 return (NULL);
c902760f 2693 }
04b16653 2694 block->fd = fd;
c902760f
MT
2695 return area;
2696}
2697#endif
2698
d17b5288 2699static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2700{
2701 RAMBlock *block, *next_block;
3e837b2c 2702 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2703
2704 if (QLIST_EMPTY(&ram_list.blocks))
2705 return 0;
2706
2707 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2708 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2709
2710 end = block->offset + block->length;
2711
2712 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2713 if (next_block->offset >= end) {
2714 next = MIN(next, next_block->offset);
2715 }
2716 }
2717 if (next - end >= size && next - end < mingap) {
3e837b2c 2718 offset = end;
04b16653
AW
2719 mingap = next - end;
2720 }
2721 }
3e837b2c
AW
2722
2723 if (offset == RAM_ADDR_MAX) {
2724 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2725 (uint64_t)size);
2726 abort();
2727 }
2728
04b16653
AW
2729 return offset;
2730}
2731
2732static ram_addr_t last_ram_offset(void)
d17b5288
AW
2733{
2734 RAMBlock *block;
2735 ram_addr_t last = 0;
2736
2737 QLIST_FOREACH(block, &ram_list.blocks, next)
2738 last = MAX(last, block->offset + block->length);
2739
2740 return last;
2741}
2742
c5705a77 2743void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2744{
2745 RAMBlock *new_block, *block;
2746
c5705a77
AK
2747 new_block = NULL;
2748 QLIST_FOREACH(block, &ram_list.blocks, next) {
2749 if (block->offset == addr) {
2750 new_block = block;
2751 break;
2752 }
2753 }
2754 assert(new_block);
2755 assert(!new_block->idstr[0]);
84b89d78
CM
2756
2757 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2758 char *id = dev->parent_bus->info->get_dev_path(dev);
2759 if (id) {
2760 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2761 g_free(id);
84b89d78
CM
2762 }
2763 }
2764 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2765
2766 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2767 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2768 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2769 new_block->idstr);
2770 abort();
2771 }
2772 }
c5705a77
AK
2773}
2774
2775ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2776 MemoryRegion *mr)
2777{
2778 RAMBlock *new_block;
2779
2780 size = TARGET_PAGE_ALIGN(size);
2781 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2782
7c637366 2783 new_block->mr = mr;
432d268c 2784 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2785 if (host) {
2786 new_block->host = host;
cd19cfa2 2787 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2788 } else {
2789 if (mem_path) {
c902760f 2790#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2791 new_block->host = file_ram_alloc(new_block, size, mem_path);
2792 if (!new_block->host) {
2793 new_block->host = qemu_vmalloc(size);
e78815a5 2794 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2795 }
c902760f 2796#else
6977dfe6
YT
2797 fprintf(stderr, "-mem-path option unsupported\n");
2798 exit(1);
c902760f 2799#endif
6977dfe6 2800 } else {
6b02494d 2801#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2802 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2803 a system-defined value, which is at least 256GB. Larger systems
 2804 have larger values. We put the guest between the end of the data
 2805 segment (system break) and this value. We use 32GB as a base to
2806 have enough room for the system break to grow. */
2807 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2808 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2809 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2810 if (new_block->host == MAP_FAILED) {
2811 fprintf(stderr, "Allocating RAM failed\n");
2812 abort();
2813 }
6b02494d 2814#else
868bb33f 2815 if (xen_enabled()) {
fce537d4 2816 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2817 } else {
2818 new_block->host = qemu_vmalloc(size);
2819 }
6b02494d 2820#endif
e78815a5 2821 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2822 }
c902760f 2823 }
94a6b54f
PB
2824 new_block->length = size;
2825
f471a17e 2826 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2827
7267c094 2828 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2829 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2830 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2831 0xff, size >> TARGET_PAGE_BITS);
2832
6f0437e8
JK
2833 if (kvm_enabled())
2834 kvm_setup_guest_memory(new_block->host, size);
2835
94a6b54f
PB
2836 return new_block->offset;
2837}
e9a1ab19 2838
c5705a77 2839ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2840{
c5705a77 2841 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2842}
2843
1f2e98b6
AW
2844void qemu_ram_free_from_ptr(ram_addr_t addr)
2845{
2846 RAMBlock *block;
2847
2848 QLIST_FOREACH(block, &ram_list.blocks, next) {
2849 if (addr == block->offset) {
2850 QLIST_REMOVE(block, next);
7267c094 2851 g_free(block);
1f2e98b6
AW
2852 return;
2853 }
2854 }
2855}
2856
c227f099 2857void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2858{
04b16653
AW
2859 RAMBlock *block;
2860
2861 QLIST_FOREACH(block, &ram_list.blocks, next) {
2862 if (addr == block->offset) {
2863 QLIST_REMOVE(block, next);
cd19cfa2
HY
2864 if (block->flags & RAM_PREALLOC_MASK) {
2865 ;
2866 } else if (mem_path) {
04b16653
AW
2867#if defined (__linux__) && !defined(TARGET_S390X)
2868 if (block->fd) {
2869 munmap(block->host, block->length);
2870 close(block->fd);
2871 } else {
2872 qemu_vfree(block->host);
2873 }
fd28aa13
JK
2874#else
2875 abort();
04b16653
AW
2876#endif
2877 } else {
2878#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2879 munmap(block->host, block->length);
2880#else
868bb33f 2881 if (xen_enabled()) {
e41d7c69 2882 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2883 } else {
2884 qemu_vfree(block->host);
2885 }
04b16653
AW
2886#endif
2887 }
7267c094 2888 g_free(block);
04b16653
AW
2889 return;
2890 }
2891 }
2892
e9a1ab19
FB
2893}
2894
cd19cfa2
HY
2895#ifndef _WIN32
2896void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2897{
2898 RAMBlock *block;
2899 ram_addr_t offset;
2900 int flags;
2901 void *area, *vaddr;
2902
2903 QLIST_FOREACH(block, &ram_list.blocks, next) {
2904 offset = addr - block->offset;
2905 if (offset < block->length) {
2906 vaddr = block->host + offset;
2907 if (block->flags & RAM_PREALLOC_MASK) {
2908 ;
2909 } else {
2910 flags = MAP_FIXED;
2911 munmap(vaddr, length);
2912 if (mem_path) {
2913#if defined(__linux__) && !defined(TARGET_S390X)
2914 if (block->fd) {
2915#ifdef MAP_POPULATE
2916 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2917 MAP_PRIVATE;
2918#else
2919 flags |= MAP_PRIVATE;
2920#endif
2921 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2922 flags, block->fd, offset);
2923 } else {
2924 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2925 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2926 flags, -1, 0);
2927 }
fd28aa13
JK
2928#else
2929 abort();
cd19cfa2
HY
2930#endif
2931 } else {
2932#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2933 flags |= MAP_SHARED | MAP_ANONYMOUS;
2934 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2935 flags, -1, 0);
2936#else
2937 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2938 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2939 flags, -1, 0);
2940#endif
2941 }
2942 if (area != vaddr) {
f15fbc4b
AP
2943 fprintf(stderr, "Could not remap addr: "
2944 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2945 length, addr);
2946 exit(1);
2947 }
2948 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2949 }
2950 return;
2951 }
2952 }
2953}
2954#endif /* !_WIN32 */
2955
dc828ca1 2956/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2957 With the exception of the softmmu code in this file, this should
2958 only be used for local memory (e.g. video ram) that the device owns,
2959 and knows it isn't going to access beyond the end of the block.
2960
2961 It should not be used for general purpose DMA.
2962 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2963 */
c227f099 2964void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2965{
94a6b54f
PB
2966 RAMBlock *block;
2967
f471a17e
AW
2968 QLIST_FOREACH(block, &ram_list.blocks, next) {
2969 if (addr - block->offset < block->length) {
7d82af38
VP
2970 /* Move this entry to to start of the list. */
2971 if (block != QLIST_FIRST(&ram_list.blocks)) {
2972 QLIST_REMOVE(block, next);
2973 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2974 }
868bb33f 2975 if (xen_enabled()) {
432d268c
JN
2976 /* We need to check if the requested address is in the RAM
2977 * because we don't want to map the entire memory in QEMU.
712c2b41 2978 * In that case just map until the end of the page.
432d268c
JN
2979 */
2980 if (block->offset == 0) {
e41d7c69 2981 return xen_map_cache(addr, 0, 0);
432d268c 2982 } else if (block->host == NULL) {
e41d7c69
JK
2983 block->host =
2984 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2985 }
2986 }
f471a17e
AW
2987 return block->host + (addr - block->offset);
2988 }
94a6b54f 2989 }
f471a17e
AW
2990
2991 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2992 abort();
2993
2994 return NULL;
dc828ca1
PB
2995}
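/* Illustrative sketch (hypothetical device code, not part of the original
   file): a device that allocated its own RAM (e.g. video memory) may keep a
   host pointer to it, per the rule above.  The names and the 8MB size are
   made up. */
static void example_vram_setup(MemoryRegion *mr)
{
    ram_addr_t vram_offset = qemu_ram_alloc(8 * 1024 * 1024, mr);
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    memset(vram, 0, 8 * 1024 * 1024);   /* device-local access only */
    qemu_put_ram_ptr(vram);
}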
2996
b2e0a138
MT
2997/* Return a host pointer to ram allocated with qemu_ram_alloc.
2998 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2999 */
3000void *qemu_safe_ram_ptr(ram_addr_t addr)
3001{
3002 RAMBlock *block;
3003
3004 QLIST_FOREACH(block, &ram_list.blocks, next) {
3005 if (addr - block->offset < block->length) {
868bb33f 3006 if (xen_enabled()) {
432d268c
JN
3007 /* We need to check if the requested address is in the RAM
3008 * because we don't want to map the entire memory in QEMU.
712c2b41 3009 * In that case just map until the end of the page.
432d268c
JN
3010 */
3011 if (block->offset == 0) {
e41d7c69 3012 return xen_map_cache(addr, 0, 0);
432d268c 3013 } else if (block->host == NULL) {
e41d7c69
JK
3014 block->host =
3015 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3016 }
3017 }
b2e0a138
MT
3018 return block->host + (addr - block->offset);
3019 }
3020 }
3021
3022 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3023 abort();
3024
3025 return NULL;
3026}
3027
38bee5dc
SS
3028/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3029 * but takes a size argument */
8ab934f9 3030void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3031{
8ab934f9
SS
3032 if (*size == 0) {
3033 return NULL;
3034 }
868bb33f 3035 if (xen_enabled()) {
e41d7c69 3036 return xen_map_cache(addr, *size, 1);
868bb33f 3037 } else {
38bee5dc
SS
3038 RAMBlock *block;
3039
3040 QLIST_FOREACH(block, &ram_list.blocks, next) {
3041 if (addr - block->offset < block->length) {
3042 if (addr - block->offset + *size > block->length)
3043 *size = block->length - addr + block->offset;
3044 return block->host + (addr - block->offset);
3045 }
3046 }
3047
3048 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3049 abort();
38bee5dc
SS
3050 }
3051}
3052
050a0ddf
AP
3053void qemu_put_ram_ptr(void *addr)
3054{
3055 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3056}
3057
e890261f 3058int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3059{
94a6b54f
PB
3060 RAMBlock *block;
3061 uint8_t *host = ptr;
3062
868bb33f 3063 if (xen_enabled()) {
e41d7c69 3064 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3065 return 0;
3066 }
3067
f471a17e 3068 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3069 /* This case happens when the block is not mapped. */
3070 if (block->host == NULL) {
3071 continue;
3072 }
f471a17e 3073 if (host - block->host < block->length) {
e890261f
MT
3074 *ram_addr = block->offset + (host - block->host);
3075 return 0;
f471a17e 3076 }
94a6b54f 3077 }
432d268c 3078
e890261f
MT
3079 return -1;
3080}
f471a17e 3081
e890261f
MT
3082/* Some of the softmmu routines need to translate from a host pointer
3083 (typically a TLB entry) back to a ram offset. */
3084ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3085{
3086 ram_addr_t ram_addr;
f471a17e 3087
e890261f
MT
3088 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3089 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3090 abort();
3091 }
3092 return ram_addr;
5579c7f3
PB
3093}
3094
c227f099 3095static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3096{
67d3b957 3097#ifdef DEBUG_UNASSIGNED
ab3d1727 3098 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3099#endif
5b450407 3100#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3101 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3102#endif
3103 return 0;
3104}
3105
c227f099 3106static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3107{
3108#ifdef DEBUG_UNASSIGNED
3109 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3110#endif
5b450407 3111#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3112 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3113#endif
3114 return 0;
3115}
3116
c227f099 3117static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3118{
3119#ifdef DEBUG_UNASSIGNED
3120 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3121#endif
5b450407 3122#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3123 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3124#endif
33417e70
FB
3125 return 0;
3126}
3127
c227f099 3128static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3129{
67d3b957 3130#ifdef DEBUG_UNASSIGNED
ab3d1727 3131 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3132#endif
5b450407 3133#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3134 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3135#endif
3136}
3137
c227f099 3138static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3139{
3140#ifdef DEBUG_UNASSIGNED
3141 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3142#endif
5b450407 3143#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3144 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3145#endif
3146}
3147
c227f099 3148static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3149{
3150#ifdef DEBUG_UNASSIGNED
3151 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3152#endif
5b450407 3153#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3154 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3155#endif
33417e70
FB
3156}
3157
d60efc6b 3158static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3159 unassigned_mem_readb,
e18231a3
BS
3160 unassigned_mem_readw,
3161 unassigned_mem_readl,
33417e70
FB
3162};
3163
d60efc6b 3164static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3165 unassigned_mem_writeb,
e18231a3
BS
3166 unassigned_mem_writew,
3167 unassigned_mem_writel,
33417e70
FB
3168};
3169
c227f099 3170static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3171 uint32_t val)
9fa3e853 3172{
3a7d929e 3173 int dirty_flags;
f7c11b53 3174 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3175 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3176#if !defined(CONFIG_USER_ONLY)
3a7d929e 3177 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3178 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3179#endif
3a7d929e 3180 }
5579c7f3 3181 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3182 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3183 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3184 /* we remove the notdirty callback only if the code has been
3185 flushed */
3186 if (dirty_flags == 0xff)
2e70f6ef 3187 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3188}
3189
c227f099 3190static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3191 uint32_t val)
9fa3e853 3192{
3a7d929e 3193 int dirty_flags;
f7c11b53 3194 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3195 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3196#if !defined(CONFIG_USER_ONLY)
3a7d929e 3197 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3198 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3199#endif
3a7d929e 3200 }
5579c7f3 3201 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3202 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3203 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3204 /* we remove the notdirty callback only if the code has been
3205 flushed */
3206 if (dirty_flags == 0xff)
2e70f6ef 3207 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3208}
3209
c227f099 3210static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3211 uint32_t val)
9fa3e853 3212{
3a7d929e 3213 int dirty_flags;
f7c11b53 3214 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3215 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3216#if !defined(CONFIG_USER_ONLY)
3a7d929e 3217 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3218 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3219#endif
3a7d929e 3220 }
5579c7f3 3221 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3222 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3223 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3224 /* we remove the notdirty callback only if the code has been
3225 flushed */
3226 if (dirty_flags == 0xff)
2e70f6ef 3227 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3228}
3229
d60efc6b 3230static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3231 NULL, /* never used */
3232 NULL, /* never used */
3233 NULL, /* never used */
3234};
3235
d60efc6b 3236static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3237 notdirty_mem_writeb,
3238 notdirty_mem_writew,
3239 notdirty_mem_writel,
3240};
3241
0f459d16 3242/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3243static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3244{
3245 CPUState *env = cpu_single_env;
06d55cc1
AL
3246 target_ulong pc, cs_base;
3247 TranslationBlock *tb;
0f459d16 3248 target_ulong vaddr;
a1d1bb31 3249 CPUWatchpoint *wp;
06d55cc1 3250 int cpu_flags;
0f459d16 3251
06d55cc1
AL
3252 if (env->watchpoint_hit) {
3253 /* We re-entered the check after replacing the TB. Now raise
 3254 * the debug interrupt so that it will trigger after the
3255 * current instruction. */
3256 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3257 return;
3258 }
2e70f6ef 3259 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3260 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3261 if ((vaddr == (wp->vaddr & len_mask) ||
3262 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3263 wp->flags |= BP_WATCHPOINT_HIT;
3264 if (!env->watchpoint_hit) {
3265 env->watchpoint_hit = wp;
3266 tb = tb_find_pc(env->mem_io_pc);
3267 if (!tb) {
3268 cpu_abort(env, "check_watchpoint: could not find TB for "
3269 "pc=%p", (void *)env->mem_io_pc);
3270 }
618ba8e6 3271 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3272 tb_phys_invalidate(tb, -1);
3273 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3274 env->exception_index = EXCP_DEBUG;
3275 } else {
3276 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3277 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3278 }
3279 cpu_resume_from_signal(env, NULL);
06d55cc1 3280 }
6e140f28
AL
3281 } else {
3282 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3283 }
3284 }
3285}
3286
6658ffb8
PB
3287/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3288 so these check for a hit then pass through to the normal out-of-line
3289 phys routines. */
c227f099 3290static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3291{
b4051334 3292 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3293 return ldub_phys(addr);
3294}
3295
c227f099 3296static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3297{
b4051334 3298 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3299 return lduw_phys(addr);
3300}
3301
c227f099 3302static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3303{
b4051334 3304 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3305 return ldl_phys(addr);
3306}
3307
c227f099 3308static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3309 uint32_t val)
3310{
b4051334 3311 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3312 stb_phys(addr, val);
3313}
3314
c227f099 3315static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3316 uint32_t val)
3317{
b4051334 3318 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3319 stw_phys(addr, val);
3320}
3321
c227f099 3322static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3323 uint32_t val)
3324{
b4051334 3325 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3326 stl_phys(addr, val);
3327}
3328
d60efc6b 3329static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3330 watch_mem_readb,
3331 watch_mem_readw,
3332 watch_mem_readl,
3333};
3334
d60efc6b 3335static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3336 watch_mem_writeb,
3337 watch_mem_writew,
3338 watch_mem_writel,
3339};
6658ffb8 3340
f6405247
RH
3341static inline uint32_t subpage_readlen (subpage_t *mmio,
3342 target_phys_addr_t addr,
3343 unsigned int len)
db7b5426 3344{
f6405247 3345 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3346#if defined(DEBUG_SUBPAGE)
3347 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3348 mmio, len, addr, idx);
3349#endif
db7b5426 3350
f6405247
RH
3351 addr += mmio->region_offset[idx];
3352 idx = mmio->sub_io_index[idx];
3353 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3354}
3355
c227f099 3356static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3357 uint32_t value, unsigned int len)
db7b5426 3358{
f6405247 3359 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3360#if defined(DEBUG_SUBPAGE)
f6405247
RH
3361 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3362 __func__, mmio, len, addr, idx, value);
db7b5426 3363#endif
f6405247
RH
3364
3365 addr += mmio->region_offset[idx];
3366 idx = mmio->sub_io_index[idx];
3367 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3368}
3369
c227f099 3370static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3371{
db7b5426
BS
3372 return subpage_readlen(opaque, addr, 0);
3373}
3374
c227f099 3375static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3376 uint32_t value)
3377{
db7b5426
BS
3378 subpage_writelen(opaque, addr, value, 0);
3379}
3380
c227f099 3381static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3382{
db7b5426
BS
3383 return subpage_readlen(opaque, addr, 1);
3384}
3385
c227f099 3386static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3387 uint32_t value)
3388{
db7b5426
BS
3389 subpage_writelen(opaque, addr, value, 1);
3390}
3391
c227f099 3392static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3393{
db7b5426
BS
3394 return subpage_readlen(opaque, addr, 2);
3395}
3396
f6405247
RH
3397static void subpage_writel (void *opaque, target_phys_addr_t addr,
3398 uint32_t value)
db7b5426 3399{
db7b5426
BS
3400 subpage_writelen(opaque, addr, value, 2);
3401}
3402
d60efc6b 3403static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3404 &subpage_readb,
3405 &subpage_readw,
3406 &subpage_readl,
3407};
3408
d60efc6b 3409static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3410 &subpage_writeb,
3411 &subpage_writew,
3412 &subpage_writel,
3413};
3414
56384e8b
AF
3415static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3416{
3417 ram_addr_t raddr = addr;
3418 void *ptr = qemu_get_ram_ptr(raddr);
3419 return ldub_p(ptr);
3420}
3421
3422static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3423 uint32_t value)
3424{
3425 ram_addr_t raddr = addr;
3426 void *ptr = qemu_get_ram_ptr(raddr);
3427 stb_p(ptr, value);
3428}
3429
3430static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3431{
3432 ram_addr_t raddr = addr;
3433 void *ptr = qemu_get_ram_ptr(raddr);
3434 return lduw_p(ptr);
3435}
3436
3437static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3438 uint32_t value)
3439{
3440 ram_addr_t raddr = addr;
3441 void *ptr = qemu_get_ram_ptr(raddr);
3442 stw_p(ptr, value);
3443}
3444
3445static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3446{
3447 ram_addr_t raddr = addr;
3448 void *ptr = qemu_get_ram_ptr(raddr);
3449 return ldl_p(ptr);
3450}
3451
3452static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3453 uint32_t value)
3454{
3455 ram_addr_t raddr = addr;
3456 void *ptr = qemu_get_ram_ptr(raddr);
3457 stl_p(ptr, value);
3458}
3459
3460static CPUReadMemoryFunc * const subpage_ram_read[] = {
3461 &subpage_ram_readb,
3462 &subpage_ram_readw,
3463 &subpage_ram_readl,
3464};
3465
3466static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3467 &subpage_ram_writeb,
3468 &subpage_ram_writew,
3469 &subpage_ram_writel,
3470};
3471
c227f099
AL
3472static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3473 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3474{
3475 int idx, eidx;
3476
3477 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3478 return -1;
3479 idx = SUBPAGE_IDX(start);
3480 eidx = SUBPAGE_IDX(end);
3481#if defined(DEBUG_SUBPAGE)
0bf9e31a 3482 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3483 mmio, start, end, idx, eidx, memory);
3484#endif
56384e8b
AF
3485 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3486 memory = IO_MEM_SUBPAGE_RAM;
3487 }
f6405247 3488 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3489 for (; idx <= eidx; idx++) {
f6405247
RH
3490 mmio->sub_io_index[idx] = memory;
3491 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3492 }
3493
3494 return 0;
3495}
3496
f6405247
RH
3497static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3498 ram_addr_t orig_memory,
3499 ram_addr_t region_offset)
db7b5426 3500{
c227f099 3501 subpage_t *mmio;
db7b5426
BS
3502 int subpage_memory;
3503
7267c094 3504 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3505
3506 mmio->base = base;
be675c97 3507 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3508#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3509 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3510 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3511#endif
1eec614b 3512 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3513 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3514
3515 return mmio;
3516}
3517
88715657
AL
3518static int get_free_io_mem_idx(void)
3519{
3520 int i;
3521
3522 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3523 if (!io_mem_used[i]) {
3524 io_mem_used[i] = 1;
3525 return i;
3526 }
c6703b47 3527 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3528 return -1;
3529}
3530
33417e70
FB
 3531/* mem_read and mem_write are arrays of function pointers for byte
 3532 (index 0), word (index 1) and dword (index 2) accesses.
0b4e6e3e 3533 Functions can be omitted with a NULL function pointer.
3ee89922 3534 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
 3535 modified. If it is zero, a new io zone is allocated. The return
 3536 value can be used with cpu_register_physical_memory(); -1 is
 3537 returned on error. */
1eed09cb 3538static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3539 CPUReadMemoryFunc * const *mem_read,
3540 CPUWriteMemoryFunc * const *mem_write,
be675c97 3541 void *opaque)
33417e70 3542{
3cab721d
RH
3543 int i;
3544
33417e70 3545 if (io_index <= 0) {
88715657
AL
3546 io_index = get_free_io_mem_idx();
3547 if (io_index == -1)
3548 return io_index;
33417e70 3549 } else {
1eed09cb 3550 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3551 if (io_index >= IO_MEM_NB_ENTRIES)
3552 return -1;
3553 }
b5ff1b31 3554
3cab721d
RH
3555 for (i = 0; i < 3; ++i) {
3556 io_mem_read[io_index][i]
3557 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3558 }
3559 for (i = 0; i < 3; ++i) {
3560 io_mem_write[io_index][i]
3561 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3562 }
a4193c8a 3563 io_mem_opaque[io_index] = opaque;
f6405247
RH
3564
3565 return (io_index << IO_MEM_SHIFT);
33417e70 3566}
61382a50 3567
d60efc6b
BS
3568int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3569 CPUWriteMemoryFunc * const *mem_write,
be675c97 3570 void *opaque)
1eed09cb 3571{
be675c97 3572 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1eed09cb
AK
3573}
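
/* Example (illustrative sketch, not part of exec.c): how a device model
 * would use the table-based registration API above.  my_dev_readl,
 * my_dev_writel and my_dev_map are hypothetical names, and the exact
 * cpu_register_physical_memory() signature is assumed from the comment
 * before cpu_register_io_memory_fixed(); it differs between QEMU versions. */
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* return the dword register at offset 'addr' within the region */
    return 0;
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* update device state for the dword register at offset 'addr' */
}

/* NULL entries fall back to unassigned_mem_read/write, as documented above */
static CPUReadMemoryFunc * const my_dev_read[] = {
    NULL,          /* byte */
    NULL,          /* word */
    &my_dev_readl, /* dword */
};

static CPUWriteMemoryFunc * const my_dev_write[] = {
    NULL,
    NULL,
    &my_dev_writel,
};

static void my_dev_map(target_phys_addr_t base, void *opaque)
{
    int io_index = cpu_register_io_memory(my_dev_read, my_dev_write, opaque);
    if (io_index == -1) {
        return; /* io_mem table exhausted, see get_free_io_mem_idx() */
    }
    /* assumed three-argument form: start address, size, phys_offset */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}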
3574
88715657
AL
3575void cpu_unregister_io_memory(int io_table_address)
3576{
3577 int i;
3578 int io_index = io_table_address >> IO_MEM_SHIFT;
3579
3580 for (i=0;i < 3; i++) {
3581 io_mem_read[io_index][i] = unassigned_mem_read[i];
3582 io_mem_write[io_index][i] = unassigned_mem_write[i];
3583 }
3584 io_mem_opaque[io_index] = NULL;
3585 io_mem_used[io_index] = 0;
3586}
3587
e9179ce1
AK
3588static void io_mem_init(void)
3589{
3590 int i;
3591
2507c12a 3592 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
be675c97 3593 unassigned_mem_write, NULL);
2507c12a 3594 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
be675c97 3595 unassigned_mem_write, NULL);
2507c12a 3596 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
be675c97 3597 notdirty_mem_write, NULL);
56384e8b 3598 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
be675c97 3599 subpage_ram_write, NULL);
e9179ce1
AK
3600 for (i=0; i<5; i++)
3601 io_mem_used[i] = 1;
3602
3603 io_mem_watch = cpu_register_io_memory(watch_mem_read,
be675c97 3604 watch_mem_write, NULL);
e9179ce1
AK
3605}
3606
62152b8a
AK
3607static void memory_map_init(void)
3608{
7267c094 3609 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3610 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3611 set_system_memory_map(system_memory);
309cb471 3612
7267c094 3613 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3614 memory_region_init(system_io, "io", 65536);
3615 set_system_io_map(system_io);
62152b8a
AK
3616}
3617
3618MemoryRegion *get_system_memory(void)
3619{
3620 return system_memory;
3621}
3622
309cb471
AK
3623MemoryRegion *get_system_io(void)
3624{
3625 return system_io;
3626}
3627
e2eef170
PB
3628#endif /* !defined(CONFIG_USER_ONLY) */
3629
13eb76e0
FB
3630/* physical memory access (slow version, mainly for debug) */
3631#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3632int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3633 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3634{
3635 int l, flags;
3636 target_ulong page;
53a5960a 3637 void * p;
13eb76e0
FB
3638
3639 while (len > 0) {
3640 page = addr & TARGET_PAGE_MASK;
3641 l = (page + TARGET_PAGE_SIZE) - addr;
3642 if (l > len)
3643 l = len;
3644 flags = page_get_flags(page);
3645 if (!(flags & PAGE_VALID))
a68fe89c 3646 return -1;
13eb76e0
FB
3647 if (is_write) {
3648 if (!(flags & PAGE_WRITE))
a68fe89c 3649 return -1;
579a97f7 3650 /* XXX: this code should not depend on lock_user */
72fb7daa 3651 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3652 return -1;
72fb7daa
AJ
3653 memcpy(p, buf, l);
3654 unlock_user(p, addr, l);
13eb76e0
FB
3655 } else {
3656 if (!(flags & PAGE_READ))
a68fe89c 3657 return -1;
579a97f7 3658 /* XXX: this code should not depend on lock_user */
72fb7daa 3659 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3660 return -1;
72fb7daa 3661 memcpy(buf, p, l);
5b257578 3662 unlock_user(p, addr, 0);
13eb76e0
FB
3663 }
3664 len -= l;
3665 buf += l;
3666 addr += l;
3667 }
a68fe89c 3668 return 0;
13eb76e0 3669}
8df1cd07 3670
13eb76e0 3671#else
c227f099 3672void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3673 int len, int is_write)
3674{
3675 int l, io_index;
3676 uint8_t *ptr;
3677 uint32_t val;
c227f099 3678 target_phys_addr_t page;
8ca5692d 3679 ram_addr_t pd;
f1f6e3b8 3680 PhysPageDesc p;
3b46e624 3681
13eb76e0
FB
3682 while (len > 0) {
3683 page = addr & TARGET_PAGE_MASK;
3684 l = (page + TARGET_PAGE_SIZE) - addr;
3685 if (l > len)
3686 l = len;
92e873b9 3687 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3688 pd = p.phys_offset;
3b46e624 3689
13eb76e0 3690 if (is_write) {
3a7d929e 3691 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
f1f6e3b8 3692 target_phys_addr_t addr1;
13eb76e0 3693 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3694 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3695 /* XXX: could force cpu_single_env to NULL to avoid
3696 potential bugs */
6c2934db 3697 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3698 /* 32 bit write access */
c27004ec 3699 val = ldl_p(buf);
6c2934db 3700 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3701 l = 4;
6c2934db 3702 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3703 /* 16 bit write access */
c27004ec 3704 val = lduw_p(buf);
6c2934db 3705 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3706 l = 2;
3707 } else {
1c213d19 3708 /* 8 bit write access */
c27004ec 3709 val = ldub_p(buf);
6c2934db 3710 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3711 l = 1;
3712 }
3713 } else {
8ca5692d 3714 ram_addr_t addr1;
b448f2f3 3715 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3716 /* RAM case */
5579c7f3 3717 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3718 memcpy(ptr, buf, l);
3a7d929e
FB
3719 if (!cpu_physical_memory_is_dirty(addr1)) {
3720 /* invalidate code */
3721 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3722 /* set dirty bit */
f7c11b53
YT
3723 cpu_physical_memory_set_dirty_flags(
3724 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3725 }
050a0ddf 3726 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3727 }
3728 } else {
5fafdf24 3729 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3730 !(pd & IO_MEM_ROMD)) {
f1f6e3b8 3731 target_phys_addr_t addr1;
13eb76e0
FB
3732 /* I/O case */
3733 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3734 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3735 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3736 /* 32 bit read access */
6c2934db 3737 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3738 stl_p(buf, val);
13eb76e0 3739 l = 4;
6c2934db 3740 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3741 /* 16 bit read access */
6c2934db 3742 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3743 stw_p(buf, val);
13eb76e0
FB
3744 l = 2;
3745 } else {
1c213d19 3746 /* 8 bit read access */
6c2934db 3747 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3748 stb_p(buf, val);
13eb76e0
FB
3749 l = 1;
3750 }
3751 } else {
3752 /* RAM case */
050a0ddf
AP
3753 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3754 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3755 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3756 }
3757 }
3758 len -= l;
3759 buf += l;
3760 addr += l;
3761 }
3762}
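
/* Example (illustrative sketch, not part of exec.c): typical use of the
 * slow-path accessor above from device or loader code.  example_phys_copy
 * and guest_addr are hypothetical; cpu_physical_memory_read()/write() are
 * the thin wrappers over cpu_physical_memory_rw() used elsewhere in this
 * file (e.g. by the bounce buffer code below). */
static void example_phys_copy(target_phys_addr_t guest_addr)
{
    uint32_t scratch[4] = { 1, 2, 3, 4 };
    uint32_t readback[4];

    /* is_write = 1: copy from the host buffer into guest physical memory */
    cpu_physical_memory_rw(guest_addr, (uint8_t *)scratch, sizeof(scratch), 1);
    /* is_write = 0: copy from guest physical memory back into a host buffer */
    cpu_physical_memory_rw(guest_addr, (uint8_t *)readback, sizeof(readback), 0);
}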
8df1cd07 3763
d0ecd2aa 3764/* used for ROM loading : can write in RAM and ROM */
c227f099 3765void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3766 const uint8_t *buf, int len)
3767{
3768 int l;
3769 uint8_t *ptr;
c227f099 3770 target_phys_addr_t page;
d0ecd2aa 3771 unsigned long pd;
f1f6e3b8 3772 PhysPageDesc p;
3b46e624 3773
d0ecd2aa
FB
3774 while (len > 0) {
3775 page = addr & TARGET_PAGE_MASK;
3776 l = (page + TARGET_PAGE_SIZE) - addr;
3777 if (l > len)
3778 l = len;
3779 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3780 pd = p.phys_offset;
3b46e624 3781
d0ecd2aa 3782 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3783 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3784 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3785 /* do nothing */
3786 } else {
3787 unsigned long addr1;
3788 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3789 /* ROM/RAM case */
5579c7f3 3790 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3791 memcpy(ptr, buf, l);
050a0ddf 3792 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3793 }
3794 len -= l;
3795 buf += l;
3796 addr += l;
3797 }
3798}
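
/* Example (illustrative sketch, not part of exec.c): firmware loading goes
 * through cpu_physical_memory_write_rom() because a plain write to a page
 * registered as IO_MEM_ROM is discarded by unassigned_mem_write (see
 * io_mem_init() above).  example_load_rom and its parameters are
 * hypothetical. */
static void example_load_rom(target_phys_addr_t rom_base,
                             const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(rom_base, blob, size);
}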
3799
6d16c2f8
AL
3800typedef struct {
3801 void *buffer;
c227f099
AL
3802 target_phys_addr_t addr;
3803 target_phys_addr_t len;
6d16c2f8
AL
3804} BounceBuffer;
3805
3806static BounceBuffer bounce;
3807
ba223c29
AL
3808typedef struct MapClient {
3809 void *opaque;
3810 void (*callback)(void *opaque);
72cf2d4f 3811 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3812} MapClient;
3813
72cf2d4f
BS
3814static QLIST_HEAD(map_client_list, MapClient) map_client_list
3815 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3816
3817void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3818{
7267c094 3819 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3820
3821 client->opaque = opaque;
3822 client->callback = callback;
72cf2d4f 3823 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3824 return client;
3825}
3826
3827void cpu_unregister_map_client(void *_client)
3828{
3829 MapClient *client = (MapClient *)_client;
3830
72cf2d4f 3831 QLIST_REMOVE(client, link);
7267c094 3832 g_free(client);
ba223c29
AL
3833}
3834
3835static void cpu_notify_map_clients(void)
3836{
3837 MapClient *client;
3838
72cf2d4f
BS
3839 while (!QLIST_EMPTY(&map_client_list)) {
3840 client = QLIST_FIRST(&map_client_list);
ba223c29 3841 client->callback(client->opaque);
34d5e948 3842 cpu_unregister_map_client(client);
ba223c29
AL
3843 }
3844}
3845
6d16c2f8
AL
3846/* Map a physical memory region into a host virtual address.
3847 * May map a subset of the requested range, given by and returned in *plen.
3848 * May return NULL if resources needed to perform the mapping are exhausted.
3849 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3850 * Use cpu_register_map_client() to know when retrying the map operation is
3851 * likely to succeed.
6d16c2f8 3852 */
c227f099
AL
3853void *cpu_physical_memory_map(target_phys_addr_t addr,
3854 target_phys_addr_t *plen,
6d16c2f8
AL
3855 int is_write)
3856{
c227f099 3857 target_phys_addr_t len = *plen;
38bee5dc 3858 target_phys_addr_t todo = 0;
6d16c2f8 3859 int l;
c227f099 3860 target_phys_addr_t page;
6d16c2f8 3861 unsigned long pd;
f1f6e3b8 3862 PhysPageDesc p;
f15fbc4b 3863 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3864 ram_addr_t rlen;
3865 void *ret;
6d16c2f8
AL
3866
3867 while (len > 0) {
3868 page = addr & TARGET_PAGE_MASK;
3869 l = (page + TARGET_PAGE_SIZE) - addr;
3870 if (l > len)
3871 l = len;
3872 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3873 pd = p.phys_offset;
6d16c2f8
AL
3874
3875 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 3876 if (todo || bounce.buffer) {
6d16c2f8
AL
3877 break;
3878 }
3879 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3880 bounce.addr = addr;
3881 bounce.len = l;
3882 if (!is_write) {
54f7b4a3 3883 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3884 }
38bee5dc
SS
3885
3886 *plen = l;
3887 return bounce.buffer;
6d16c2f8 3888 }
8ab934f9
SS
3889 if (!todo) {
3890 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3891 }
6d16c2f8
AL
3892
3893 len -= l;
3894 addr += l;
38bee5dc 3895 todo += l;
6d16c2f8 3896 }
8ab934f9
SS
3897 rlen = todo;
3898 ret = qemu_ram_ptr_length(raddr, &rlen);
3899 *plen = rlen;
3900 return ret;
6d16c2f8
AL
3901}
3902
3903/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3904 * Will also mark the memory as dirty if is_write == 1. access_len gives
3905 * the amount of memory that was actually read or written by the caller.
3906 */
c227f099
AL
3907void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3908 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3909{
3910 if (buffer != bounce.buffer) {
3911 if (is_write) {
e890261f 3912 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3913 while (access_len) {
3914 unsigned l;
3915 l = TARGET_PAGE_SIZE;
3916 if (l > access_len)
3917 l = access_len;
3918 if (!cpu_physical_memory_is_dirty(addr1)) {
3919 /* invalidate code */
3920 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3921 /* set dirty bit */
f7c11b53
YT
3922 cpu_physical_memory_set_dirty_flags(
3923 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3924 }
3925 addr1 += l;
3926 access_len -= l;
3927 }
3928 }
868bb33f 3929 if (xen_enabled()) {
e41d7c69 3930 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3931 }
6d16c2f8
AL
3932 return;
3933 }
3934 if (is_write) {
3935 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3936 }
f8a83245 3937 qemu_vfree(bounce.buffer);
6d16c2f8 3938 bounce.buffer = NULL;
ba223c29 3939 cpu_notify_map_clients();
6d16c2f8 3940}
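
/* Example (illustrative sketch, not part of exec.c): the map/use/unmap
 * pattern intended for the two functions above, as used by zero-copy DMA
 * paths.  example_dma_fill and retry_cb are hypothetical; when NULL is
 * returned because the single bounce buffer is busy, the caller can use
 * cpu_register_map_client() to be told when a retry is likely to succeed. */
static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted, e.g.:
         * cpu_register_map_client(opaque, retry_cb); */
        return;
    }
    /* only 'plen' bytes are guaranteed to be mapped */
    memset(host, 0, plen);
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}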
d0ecd2aa 3941
8df1cd07 3942/* warning: addr must be aligned */
1e78bcc1
AG
3943static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3944 enum device_endian endian)
8df1cd07
FB
3945{
3946 int io_index;
3947 uint8_t *ptr;
3948 uint32_t val;
3949 unsigned long pd;
f1f6e3b8 3950 PhysPageDesc p;
8df1cd07
FB
3951
3952 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3953 pd = p.phys_offset;
3b46e624 3954
5fafdf24 3955 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3956 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3957 /* I/O case */
3958 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3959 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
8df1cd07 3960 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
3961#if defined(TARGET_WORDS_BIGENDIAN)
3962 if (endian == DEVICE_LITTLE_ENDIAN) {
3963 val = bswap32(val);
3964 }
3965#else
3966 if (endian == DEVICE_BIG_ENDIAN) {
3967 val = bswap32(val);
3968 }
3969#endif
8df1cd07
FB
3970 } else {
3971 /* RAM case */
5579c7f3 3972 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3973 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3974 switch (endian) {
3975 case DEVICE_LITTLE_ENDIAN:
3976 val = ldl_le_p(ptr);
3977 break;
3978 case DEVICE_BIG_ENDIAN:
3979 val = ldl_be_p(ptr);
3980 break;
3981 default:
3982 val = ldl_p(ptr);
3983 break;
3984 }
8df1cd07
FB
3985 }
3986 return val;
3987}
3988
1e78bcc1
AG
3989uint32_t ldl_phys(target_phys_addr_t addr)
3990{
3991 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3992}
3993
3994uint32_t ldl_le_phys(target_phys_addr_t addr)
3995{
3996 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3997}
3998
3999uint32_t ldl_be_phys(target_phys_addr_t addr)
4000{
4001 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4002}
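
/* Example (illustrative sketch, not part of exec.c): picking an accessor
 * from the family above.  ldl_phys() follows the target's native byte
 * order, while the _le/_be variants interpret the stored value as
 * little/big-endian regardless of TARGET_WORDS_BIGENDIAN and return it as
 * a host integer.  reg_addr is hypothetical. */
static uint32_t example_read_le_register(target_phys_addr_t reg_addr)
{
    /* 32-bit value laid out little-endian in guest memory or a device */
    return ldl_le_phys(reg_addr);
}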
4003
84b7b8e7 4004/* warning: addr must be aligned */
1e78bcc1
AG
4005static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4006 enum device_endian endian)
84b7b8e7
FB
4007{
4008 int io_index;
4009 uint8_t *ptr;
4010 uint64_t val;
4011 unsigned long pd;
f1f6e3b8 4012 PhysPageDesc p;
84b7b8e7
FB
4013
4014 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4015 pd = p.phys_offset;
3b46e624 4016
2a4188a3
FB
4017 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4018 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4019 /* I/O case */
4020 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4021 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4022
4023 /* XXX This is broken when device endian != cpu endian.
4024 Fix and add "endian" variable check */
84b7b8e7
FB
4025#ifdef TARGET_WORDS_BIGENDIAN
4026 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4027 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4028#else
4029 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4030 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4031#endif
4032 } else {
4033 /* RAM case */
5579c7f3 4034 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4035 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4036 switch (endian) {
4037 case DEVICE_LITTLE_ENDIAN:
4038 val = ldq_le_p(ptr);
4039 break;
4040 case DEVICE_BIG_ENDIAN:
4041 val = ldq_be_p(ptr);
4042 break;
4043 default:
4044 val = ldq_p(ptr);
4045 break;
4046 }
84b7b8e7
FB
4047 }
4048 return val;
4049}
4050
1e78bcc1
AG
4051uint64_t ldq_phys(target_phys_addr_t addr)
4052{
4053 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4054}
4055
4056uint64_t ldq_le_phys(target_phys_addr_t addr)
4057{
4058 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4059}
4060
4061uint64_t ldq_be_phys(target_phys_addr_t addr)
4062{
4063 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4064}
4065
aab33094 4066/* XXX: optimize */
c227f099 4067uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4068{
4069 uint8_t val;
4070 cpu_physical_memory_read(addr, &val, 1);
4071 return val;
4072}
4073
733f0b02 4074/* warning: addr must be aligned */
1e78bcc1
AG
4075static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4076 enum device_endian endian)
aab33094 4077{
733f0b02
MT
4078 int io_index;
4079 uint8_t *ptr;
4080 uint64_t val;
4081 unsigned long pd;
f1f6e3b8 4082 PhysPageDesc p;
733f0b02
MT
4083
4084 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4085 pd = p.phys_offset;
733f0b02
MT
4086
4087 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4088 !(pd & IO_MEM_ROMD)) {
4089 /* I/O case */
4090 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4091 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
733f0b02 4092 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4093#if defined(TARGET_WORDS_BIGENDIAN)
4094 if (endian == DEVICE_LITTLE_ENDIAN) {
4095 val = bswap16(val);
4096 }
4097#else
4098 if (endian == DEVICE_BIG_ENDIAN) {
4099 val = bswap16(val);
4100 }
4101#endif
733f0b02
MT
4102 } else {
4103 /* RAM case */
4104 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4105 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4106 switch (endian) {
4107 case DEVICE_LITTLE_ENDIAN:
4108 val = lduw_le_p(ptr);
4109 break;
4110 case DEVICE_BIG_ENDIAN:
4111 val = lduw_be_p(ptr);
4112 break;
4113 default:
4114 val = lduw_p(ptr);
4115 break;
4116 }
733f0b02
MT
4117 }
4118 return val;
aab33094
FB
4119}
4120
1e78bcc1
AG
4121uint32_t lduw_phys(target_phys_addr_t addr)
4122{
4123 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4124}
4125
4126uint32_t lduw_le_phys(target_phys_addr_t addr)
4127{
4128 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4129}
4130
4131uint32_t lduw_be_phys(target_phys_addr_t addr)
4132{
4133 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4134}
4135
8df1cd07
FB
 4136/* warning: addr must be aligned. The RAM page is not marked dirty
 4137 and the code inside it is not invalidated. This is useful when the
 4138 dirty bits are used to track modified PTEs. */
c227f099 4139void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4140{
4141 int io_index;
4142 uint8_t *ptr;
4143 unsigned long pd;
f1f6e3b8 4144 PhysPageDesc p;
8df1cd07
FB
4145
4146 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4147 pd = p.phys_offset;
3b46e624 4148
3a7d929e 4149 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4150 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4151 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
8df1cd07
FB
4152 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4153 } else {
74576198 4154 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4155 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4156 stl_p(ptr, val);
74576198
AL
4157
4158 if (unlikely(in_migration)) {
4159 if (!cpu_physical_memory_is_dirty(addr1)) {
4160 /* invalidate code */
4161 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4162 /* set dirty bit */
f7c11b53
YT
4163 cpu_physical_memory_set_dirty_flags(
4164 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4165 }
4166 }
8df1cd07
FB
4167 }
4168}
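
/* Example (illustrative sketch, not part of exec.c): target MMU helpers use
 * the _notdirty variant when setting accessed/dirty bits in a guest page
 * table entry, so the page is normally not marked dirty and translated code
 * on it is not invalidated just for bookkeeping stores (except during
 * migration, as the code above shows); ordinary guest stores use stl_phys().
 * pte_addr and PTE_ACCESSED are hypothetical. */
#define PTE_ACCESSED 0x20 /* hypothetical flag bit */

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
}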
4169
c227f099 4170void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4171{
4172 int io_index;
4173 uint8_t *ptr;
4174 unsigned long pd;
f1f6e3b8 4175 PhysPageDesc p;
bc98a7ef
JM
4176
4177 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4178 pd = p.phys_offset;
3b46e624 4179
bc98a7ef
JM
4180 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4181 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4182 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef
JM
4183#ifdef TARGET_WORDS_BIGENDIAN
4184 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4185 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4186#else
4187 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4188 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4189#endif
4190 } else {
5579c7f3 4191 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4192 (addr & ~TARGET_PAGE_MASK);
4193 stq_p(ptr, val);
4194 }
4195}
4196
8df1cd07 4197/* warning: addr must be aligned */
1e78bcc1
AG
4198static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4199 enum device_endian endian)
8df1cd07
FB
4200{
4201 int io_index;
4202 uint8_t *ptr;
4203 unsigned long pd;
f1f6e3b8 4204 PhysPageDesc p;
8df1cd07
FB
4205
4206 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4207 pd = p.phys_offset;
3b46e624 4208
3a7d929e 4209 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4210 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4211 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4212#if defined(TARGET_WORDS_BIGENDIAN)
4213 if (endian == DEVICE_LITTLE_ENDIAN) {
4214 val = bswap32(val);
4215 }
4216#else
4217 if (endian == DEVICE_BIG_ENDIAN) {
4218 val = bswap32(val);
4219 }
4220#endif
8df1cd07
FB
4221 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4222 } else {
4223 unsigned long addr1;
4224 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4225 /* RAM case */
5579c7f3 4226 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4227 switch (endian) {
4228 case DEVICE_LITTLE_ENDIAN:
4229 stl_le_p(ptr, val);
4230 break;
4231 case DEVICE_BIG_ENDIAN:
4232 stl_be_p(ptr, val);
4233 break;
4234 default:
4235 stl_p(ptr, val);
4236 break;
4237 }
3a7d929e
FB
4238 if (!cpu_physical_memory_is_dirty(addr1)) {
4239 /* invalidate code */
4240 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4241 /* set dirty bit */
f7c11b53
YT
4242 cpu_physical_memory_set_dirty_flags(addr1,
4243 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4244 }
8df1cd07
FB
4245 }
4246}
4247
1e78bcc1
AG
4248void stl_phys(target_phys_addr_t addr, uint32_t val)
4249{
4250 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4251}
4252
4253void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4254{
4255 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4256}
4257
4258void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4259{
4260 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4261}
4262
aab33094 4263/* XXX: optimize */
c227f099 4264void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4265{
4266 uint8_t v = val;
4267 cpu_physical_memory_write(addr, &v, 1);
4268}
4269
733f0b02 4270/* warning: addr must be aligned */
1e78bcc1
AG
4271static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4272 enum device_endian endian)
aab33094 4273{
733f0b02
MT
4274 int io_index;
4275 uint8_t *ptr;
4276 unsigned long pd;
f1f6e3b8 4277 PhysPageDesc p;
733f0b02
MT
4278
4279 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4280 pd = p.phys_offset;
733f0b02
MT
4281
4282 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4283 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4284 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4285#if defined(TARGET_WORDS_BIGENDIAN)
4286 if (endian == DEVICE_LITTLE_ENDIAN) {
4287 val = bswap16(val);
4288 }
4289#else
4290 if (endian == DEVICE_BIG_ENDIAN) {
4291 val = bswap16(val);
4292 }
4293#endif
733f0b02
MT
4294 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4295 } else {
4296 unsigned long addr1;
4297 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4298 /* RAM case */
4299 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4300 switch (endian) {
4301 case DEVICE_LITTLE_ENDIAN:
4302 stw_le_p(ptr, val);
4303 break;
4304 case DEVICE_BIG_ENDIAN:
4305 stw_be_p(ptr, val);
4306 break;
4307 default:
4308 stw_p(ptr, val);
4309 break;
4310 }
733f0b02
MT
4311 if (!cpu_physical_memory_is_dirty(addr1)) {
4312 /* invalidate code */
4313 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4314 /* set dirty bit */
4315 cpu_physical_memory_set_dirty_flags(addr1,
4316 (0xff & ~CODE_DIRTY_FLAG));
4317 }
4318 }
aab33094
FB
4319}
4320
1e78bcc1
AG
4321void stw_phys(target_phys_addr_t addr, uint32_t val)
4322{
4323 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4324}
4325
4326void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4327{
4328 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4329}
4330
4331void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4332{
4333 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4334}
4335
aab33094 4336/* XXX: optimize */
c227f099 4337void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4338{
4339 val = tswap64(val);
71d2b725 4340 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4341}
4342
1e78bcc1
AG
4343void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4344{
4345 val = cpu_to_le64(val);
4346 cpu_physical_memory_write(addr, &val, 8);
4347}
4348
4349void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4350{
4351 val = cpu_to_be64(val);
4352 cpu_physical_memory_write(addr, &val, 8);
4353}
4354
5e2972fd 4355/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4356int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4357 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4358{
4359 int l;
c227f099 4360 target_phys_addr_t phys_addr;
9b3c35e0 4361 target_ulong page;
13eb76e0
FB
4362
4363 while (len > 0) {
4364 page = addr & TARGET_PAGE_MASK;
4365 phys_addr = cpu_get_phys_page_debug(env, page);
4366 /* if no physical page mapped, return an error */
4367 if (phys_addr == -1)
4368 return -1;
4369 l = (page + TARGET_PAGE_SIZE) - addr;
4370 if (l > len)
4371 l = len;
5e2972fd 4372 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4373 if (is_write)
4374 cpu_physical_memory_write_rom(phys_addr, buf, l);
4375 else
5e2972fd 4376 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4377 len -= l;
4378 buf += l;
4379 addr += l;
4380 }
4381 return 0;
4382}
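
/* Example (illustrative sketch, not part of exec.c): debugger-style access
 * through guest virtual addresses, as the gdb stub does; a failed
 * translation is reported as -1.  example_peek_virt is hypothetical. */
static int example_peek_virt(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, sizeof(*out),
                               0 /* is_write */);
}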
a68fe89c 4383#endif
13eb76e0 4384
2e70f6ef
PB
4385/* in deterministic execution mode, instructions doing device I/Os
4386 must be at the end of the TB */
4387void cpu_io_recompile(CPUState *env, void *retaddr)
4388{
4389 TranslationBlock *tb;
4390 uint32_t n, cflags;
4391 target_ulong pc, cs_base;
4392 uint64_t flags;
4393
4394 tb = tb_find_pc((unsigned long)retaddr);
4395 if (!tb) {
4396 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4397 retaddr);
4398 }
4399 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4400 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4401 /* Calculate how many instructions had been executed before the fault
bf20dc07 4402 occurred. */
2e70f6ef
PB
4403 n = n - env->icount_decr.u16.low;
4404 /* Generate a new TB ending on the I/O insn. */
4405 n++;
4406 /* On MIPS and SH, delay slot instructions can only be restarted if
4407 they were already the first instruction in the TB. If this is not
bf20dc07 4408 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4409 branch. */
4410#if defined(TARGET_MIPS)
4411 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4412 env->active_tc.PC -= 4;
4413 env->icount_decr.u16.low++;
4414 env->hflags &= ~MIPS_HFLAG_BMASK;
4415 }
4416#elif defined(TARGET_SH4)
4417 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4418 && n > 1) {
4419 env->pc -= 2;
4420 env->icount_decr.u16.low++;
4421 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4422 }
4423#endif
4424 /* This should never happen. */
4425 if (n > CF_COUNT_MASK)
4426 cpu_abort(env, "TB too big during recompile");
4427
4428 cflags = n | CF_LAST_IO;
4429 pc = tb->pc;
4430 cs_base = tb->cs_base;
4431 flags = tb->flags;
4432 tb_phys_invalidate(tb, -1);
4433 /* FIXME: In theory this could raise an exception. In practice
4434 we have already translated the block once so it's probably ok. */
4435 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4436 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4437 the first in the TB) then we end up generating a whole new TB and
4438 repeating the fault, which is horribly inefficient.
4439 Better would be to execute just this insn uncached, or generate a
4440 second new TB. */
4441 cpu_resume_from_signal(env, NULL);
4442}
4443
b3755a91
PB
4444#if !defined(CONFIG_USER_ONLY)
4445
055403b2 4446void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4447{
4448 int i, target_code_size, max_target_code_size;
4449 int direct_jmp_count, direct_jmp2_count, cross_page;
4450 TranslationBlock *tb;
3b46e624 4451
e3db7226
FB
4452 target_code_size = 0;
4453 max_target_code_size = 0;
4454 cross_page = 0;
4455 direct_jmp_count = 0;
4456 direct_jmp2_count = 0;
4457 for(i = 0; i < nb_tbs; i++) {
4458 tb = &tbs[i];
4459 target_code_size += tb->size;
4460 if (tb->size > max_target_code_size)
4461 max_target_code_size = tb->size;
4462 if (tb->page_addr[1] != -1)
4463 cross_page++;
4464 if (tb->tb_next_offset[0] != 0xffff) {
4465 direct_jmp_count++;
4466 if (tb->tb_next_offset[1] != 0xffff) {
4467 direct_jmp2_count++;
4468 }
4469 }
4470 }
4471 /* XXX: avoid using doubles ? */
57fec1fe 4472 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4473 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4474 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4475 cpu_fprintf(f, "TB count %d/%d\n",
4476 nb_tbs, code_gen_max_blocks);
5fafdf24 4477 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4478 nb_tbs ? target_code_size / nb_tbs : 0,
4479 max_target_code_size);
055403b2 4480 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4481 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4482 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4483 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4484 cross_page,
e3db7226
FB
4485 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4486 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4487 direct_jmp_count,
e3db7226
FB
4488 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4489 direct_jmp2_count,
4490 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4491 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4492 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4493 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4494 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4495 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4496}
4497
61382a50 4498#define MMUSUFFIX _cmmu
3917149d 4499#undef GETPC
61382a50
FB
4500#define GETPC() NULL
4501#define env cpu_single_env
b769d8fe 4502#define SOFTMMU_CODE_ACCESS
61382a50
FB
4503
4504#define SHIFT 0
4505#include "softmmu_template.h"
4506
4507#define SHIFT 1
4508#include "softmmu_template.h"
4509
4510#define SHIFT 2
4511#include "softmmu_template.h"
4512
4513#define SHIFT 3
4514#include "softmmu_template.h"
4515
4516#undef env
4517
4518#endif